1 /******************************************************************************
2     QtAV:  Multimedia framework based on Qt and FFmpeg
3     Copyright (C) 2012-2016 Wang Bin <wbsecg1@gmail.com>
4 
5 *   This file is part of QtAV
6 
7     This library is free software; you can redistribute it and/or
8     modify it under the terms of the GNU Lesser General Public
9     License as published by the Free Software Foundation; either
10     version 2.1 of the License, or (at your option) any later version.
11 
12     This library is distributed in the hope that it will be useful,
13     but WITHOUT ANY WARRANTY; without even the implied warranty of
14     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15     Lesser General Public License for more details.
16 
17     You should have received a copy of the GNU Lesser General Public
18     License along with this library; if not, write to the Free Software
19     Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
20 ******************************************************************************/
21 
22 #include "QtAV/VideoFormat.h"
23 #include <cmath>
24 #include <QtCore/QVector>
25 #ifndef QT_NO_DEBUG_STREAM
26 #include <QtDebug>
27 #endif
28 #include "QtAV/private/AVCompat.h"
29 extern "C" {
30 #include <libavutil/imgutils.h>
31 }
32 
33 #define FF_HAS_YUV12BITS FFMPEG_MODULE_CHECK(LIBAVUTIL, 51, 73, 101)
34 #if (Q_BYTE_ORDER == Q_BIG_ENDIAN)
35 #define PIXFMT_NE(B, L) VideoFormat::Format_##B
36 #else
37 #define PIXFMT_NE(B, L) VideoFormat::Format_##L
38 #endif
39 // ffmpeg3.0 151aa2e/ libav>11 2268db2
40 #if AV_MODULE_CHECK(LIBAVUTIL, 55, 0, 0, 0, 100)
41 #define DESC_VAL(X) (X)
42 #else
43 #define DESC_VAL(X) (X##_minus1 + 1)
44 #endif
45 namespace QtAV {
46 class VideoFormatPrivate : public QSharedData
47 {
48 public:
VideoFormatPrivate(VideoFormat::PixelFormat fmt)49     VideoFormatPrivate(VideoFormat::PixelFormat fmt)
50         : pixfmt(fmt)
51         , pixfmt_ff(QTAV_PIX_FMT_C(NONE))
52         , qpixfmt(QImage::Format_Invalid)
53         , planes(0)
54         , bpp(0)
55         , bpp_pad(0)
56         , bpc(0)
57         , pixdesc(0)
58     {
59         if (fmt == VideoFormat::Format_Invalid) {
60             pixfmt_ff = QTAV_PIX_FMT_C(NONE);
61             qpixfmt = QImage::Format_Invalid;
62             return;
63         }
64         init(fmt);
65     }
VideoFormatPrivate(AVPixelFormat fmt)66     VideoFormatPrivate(AVPixelFormat fmt)
67         : pixfmt(VideoFormat::Format_Invalid)
68         , pixfmt_ff(fmt)
69         , qpixfmt(QImage::Format_Invalid)
70         , planes(0)
71         , bpp(0)
72         , bpp_pad(0)
73         , bpc(0)
74         , pixdesc(0)
75     {
76         init(fmt);
77     }
VideoFormatPrivate(QImage::Format fmt)78     VideoFormatPrivate(QImage::Format fmt)
79         : pixfmt(VideoFormat::Format_Invalid)
80         , pixfmt_ff(QTAV_PIX_FMT_C(NONE))
81         , qpixfmt(fmt)
82         , planes(0)
83         , bpp(0)
84         , bpp_pad(0)
85         , bpc(0)
86         , pixdesc(0)
87     {
88         init(fmt);
89     }
init(VideoFormat::PixelFormat fmt)90     void init(VideoFormat::PixelFormat fmt) {
91         pixfmt = fmt;
92         pixfmt_ff = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(pixfmt);
93         qpixfmt = VideoFormat::imageFormatFromPixelFormat(pixfmt);
94         init();
95     }
init(QImage::Format fmt)96     void init(QImage::Format fmt) {
97         qpixfmt = fmt;
98         pixfmt = VideoFormat::pixelFormatFromImageFormat(fmt);
99         pixfmt_ff = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(pixfmt);
100         init();
101     }
init(AVPixelFormat fffmt)102     void init(AVPixelFormat fffmt) {
103         pixfmt_ff = fffmt;
104         pixfmt = VideoFormat::pixelFormatFromFFmpeg(pixfmt_ff);
105         qpixfmt = VideoFormat::imageFormatFromPixelFormat(pixfmt);
106         init();
107     }
108 
init()109     void init() {
110         // TODO: what if other formats not supported by ffmpeg? give attributes in QtAV?
111         if (pixfmt_ff == QTAV_PIX_FMT_C(NONE)) {
112             qWarning("Invalid pixel format");
113             return;
114         }
115         planes = qMax(av_pix_fmt_count_planes(pixfmt_ff), 0);
116         bpps.reserve(planes);
117         channels.reserve(planes);
118         bpps.resize(planes);
119         channels.resize(planes);
120         pixdesc = const_cast<AVPixFmtDescriptor*>(av_pix_fmt_desc_get(pixfmt_ff));
121         if (!pixdesc)
122             return;
123         initBpp();
124     }
name() const125     QString name() const {
126         return QLatin1String(av_get_pix_fmt_name(pixfmt_ff));
127     }
flags() const128     int flags() const {
129         if (!pixdesc)
130             return 0;
131         return pixdesc->flags;
132     }
bytesPerLine(int width,int plane) const133     int bytesPerLine(int width, int plane) const {
134         return av_image_get_linesize(pixfmt_ff, width, plane);
135     }
136 
137     VideoFormat::PixelFormat pixfmt;
138     AVPixelFormat pixfmt_ff;
139     QImage::Format qpixfmt;
140     quint8 planes;
141     quint8 bpp;
142     quint8 bpp_pad;
143     quint8 bpc;
144     QVector<int> bpps;
145     QVector<int> channels;
146 
147     AVPixFmtDescriptor *pixdesc;
148 private:
149     // from libavutil/pixdesc.c
initBpp()150     void initBpp() {
151         //TODO: call later when bpp need
152         bpp = 0;
153         bpp_pad = 0;
154         //libavutil55: depth, step, offset
155         bpc = DESC_VAL(pixdesc->comp[0].depth);
156         const int log2_pixels = pixdesc->log2_chroma_w + pixdesc->log2_chroma_h;
157         int steps[4];
158         memset(steps, 0, sizeof(steps));
159         for (int c = 0; c < pixdesc->nb_components; c++) {
160             const AVComponentDescriptor *comp = &pixdesc->comp[c];
161             int s = c == 1 || c == 2 ? 0 : log2_pixels; //?
162             bpps[comp->plane] += DESC_VAL(comp->depth);
163             steps[comp->plane] = DESC_VAL(comp->step) << s;
164             channels[comp->plane] += 1;
165             bpp += DESC_VAL(comp->depth) << s;
166             if (DESC_VAL(comp->depth) != bpc)
167                 bpc = 0;
168         }
169         for (int i = 0; i < planes; ++i) {
170             bpp_pad += steps[i];
171         }
172         if (!(pixdesc->flags & AV_PIX_FMT_FLAG_BITSTREAM))
173             bpp_pad *= 8;
174         bpp >>= log2_pixels;
175         bpp_pad >>= log2_pixels;
176     }
177 };
178 
179 // TODO: use FFmpeg macros to get right endian
// Bidirectional QtAV <-> FFmpeg pixel format table used by
// pixelFormatFromFFmpeg() / pixelFormatToFFmpeg(). Lookups scan linearly and
// the FIRST matching row wins, so duplicate entries (e.g. AV YUV420P listed
// for both Format_YUV420P and Format_YV12, or UYVY422 for both Format_UYVY
// and the Format_VYUY hack) are ordered intentionally. The table is
// terminated by the { Format_Invalid, NONE } sentinel row.
static const struct {
    VideoFormat::PixelFormat fmt;
    AVPixelFormat ff; //int
} pixfmt_map[] = {
    { VideoFormat::Format_YUV420P, QTAV_PIX_FMT_C(YUV420P) },   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
    { VideoFormat::Format_YV12, QTAV_PIX_FMT_C(YUV420P) },   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
    { VideoFormat::Format_YUYV, QTAV_PIX_FMT_C(YUYV422) }, //??   ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
    { VideoFormat::Format_RGB24, QTAV_PIX_FMT_C(RGB24) },     ///< packed RGB 8:8:8, 24bpp, RGBRGB...
    { VideoFormat::Format_BGR24, QTAV_PIX_FMT_C(BGR24) },     ///< packed RGB 8:8:8, 24bpp, BGRBGR...
    { VideoFormat::Format_YUV422P, QTAV_PIX_FMT_C(YUV422P)},   ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    { VideoFormat::Format_YUV444P, QTAV_PIX_FMT_C(YUV444P) },   ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
    { VideoFormat::Format_YUV410P, QTAV_PIX_FMT_C(YUV410P) },   ///< planar YUV 4:1:0,  9bpp, (1 Cr & Cb sample per 4x4 Y samples)
    { VideoFormat::Format_YUV411P, QTAV_PIX_FMT_C(YUV411P) },   ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
    { VideoFormat::Format_Y8, QTAV_PIX_FMT_C(GRAY8) },     ///<        Y        ,  8bpp
    //QTAV_PIX_FMT_C(MONOWHITE), ///<        Y        ,  1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
    //QTAV_PIX_FMT_C(MONOBLACK), ///<        Y        ,  1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
    //QTAV_PIX_FMT_C(PAL8),      ///< 8 bit with PIX_FMT_RGB32 palette
    { VideoFormat::Format_YUV420P, QTAV_PIX_FMT_C(YUVJ420P) },  ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
    //QTAV_PIX_FMT_C(YUVJ422P),  ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
    //QTAV_PIX_FMT_C(YUVJ444P),  ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
    //QTAV_PIX_FMT_C(XVMC_MPEG2_MC),///< XVideo Motion Acceleration via common packet passing
    //QTAV_PIX_FMT_C(XVMC_MPEG2_IDCT),
    { VideoFormat::Format_UYVY, QTAV_PIX_FMT_C(UYVY422) },   ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
    //QTAV_PIX_FMT_C(UYYVYY411), ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
    //QTAV_PIX_FMT_C(BGR8),      ///< packed RGB 3:3:2,  8bpp, (msb)2B 3G 3R(lsb)
    //QTAV_PIX_FMT_C(BGR4),      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    //QTAV_PIX_FMT_C(BGR4_BYTE), ///< packed RGB 1:2:1,  8bpp, (msb)1B 2G 1R(lsb)
    //QTAV_PIX_FMT_C(RGB8),      ///< packed RGB 3:3:2,  8bpp, (msb)2R 3G 3B(lsb)
    //QTAV_PIX_FMT_C(RGB4),      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    //QTAV_PIX_FMT_C(RGB4_BYTE), ///< packed RGB 1:2:1,  8bpp, (msb)1R 2G 1B(lsb)
    { VideoFormat::Format_NV12, QTAV_PIX_FMT_C(NV12) },      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
    { VideoFormat::Format_NV21, QTAV_PIX_FMT_C(NV21) },      ///< as above, but U and V bytes are swapped
    { VideoFormat::Format_ARGB32, QTAV_PIX_FMT_C(ARGB) },      ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
    { VideoFormat::Format_RGBA32, QTAV_PIX_FMT_C(RGBA) },      ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
    { VideoFormat::Format_ABGR32, QTAV_PIX_FMT_C(ABGR) },      ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
    { VideoFormat::Format_BGRA32, QTAV_PIX_FMT_C(BGRA) },      ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
    //QTAV_PIX_FMT_C(GRAY16BE),  ///<        Y        , 16bpp, big-endian
    { VideoFormat::Format_Y16, QTAV_PIX_FMT_C(GRAY16LE) },  ///<        Y        , 16bpp, little-endian
    //QTAV_PIX_FMT_C(YUV440P),   ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
    //QTAV_PIX_FMT_C(YUVJ440P),  ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
    //QTAV_PIX_FMT_C(YUVA420P),  ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
    /*
    QTAV_PIX_FMT_C(VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    QTAV_PIX_FMT_C(VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    QTAV_PIX_FMT_C(VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    QTAV_PIX_FMT_C(VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    QTAV_PIX_FMT_C(VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    */
    { VideoFormat::Format_RGB48BE, QTAV_PIX_FMT_C(RGB48BE) },   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
    { VideoFormat::Format_RGB48LE, QTAV_PIX_FMT_C(RGB48LE) },   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
    { VideoFormat::Format_RGB565, QTAV_PIX_FMT_C(RGB565) },  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), native-endian
    { VideoFormat::Format_RGB555, QTAV_PIX_FMT_C(RGB555) },  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), native-endian, be: most significant bit to 1
    { VideoFormat::Format_BGR565, QTAV_PIX_FMT_C(BGR565) },  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), native-endian
    { VideoFormat::Format_BGR555, QTAV_PIX_FMT_C(BGR555) },  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), native-endian, be: most significant bit to 1
/*
    QTAV_PIX_FMT_C(VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
    QTAV_PIX_FMT_C(VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
    QTAV_PIX_FMT_C(VAAPI_VLD,  ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
*/
#if FF_HAS_YUV12BITS
    { VideoFormat::Format_YUV420P16LE, QTAV_PIX_FMT_C(YUV420P16LE) },  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    { VideoFormat::Format_YUV420P16BE, QTAV_PIX_FMT_C(YUV420P16BE) },  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    { VideoFormat::Format_YUV422P16LE, QTAV_PIX_FMT_C(YUV422P16LE) },  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    { VideoFormat::Format_YUV422P16BE, QTAV_PIX_FMT_C(YUV422P16BE) },  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    { VideoFormat::Format_YUV444P16LE, QTAV_PIX_FMT_C(YUV444P16LE) },  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    { VideoFormat::Format_YUV444P16BE, QTAV_PIX_FMT_C(YUV444P16BE) },  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
#endif //FF_HAS_YUV12BITS
/*
    QTAV_PIX_FMT_C(VDPAU_MPEG4,  ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    QTAV_PIX_FMT_C(DXVA2_VLD,    ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer

    QTAV_PIX_FMT_C(RGB444LE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
    QTAV_PIX_FMT_C(RGB444BE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
    QTAV_PIX_FMT_C(BGR444LE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
    QTAV_PIX_FMT_C(BGR444BE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
    QTAV_PIX_FMT_C(GRAY8A,    ///< 8bit gray, 8bit alpha
    */
    { VideoFormat::Format_BGR48BE, QTAV_PIX_FMT_C(BGR48BE) },   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
    { VideoFormat::Format_BGR48LE, QTAV_PIX_FMT_C(BGR48LE) },   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
    //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus
    //If you want to support multiple bit depths, then using QTAV_PIX_FMT_C(YUV420P16* with the bpp stored separately
    //is better
// the ffmpeg QtAV can build against( >= 0.9) supports 9,10 bits
    { VideoFormat::Format_YUV420P9BE, QTAV_PIX_FMT_C(YUV420P9BE) }, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    { VideoFormat::Format_YUV420P9LE, QTAV_PIX_FMT_C(YUV420P9LE) }, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    { VideoFormat::Format_YUV420P10BE, QTAV_PIX_FMT_C(YUV420P10BE) },///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    { VideoFormat::Format_YUV420P10LE, QTAV_PIX_FMT_C(YUV420P10LE) },///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    { VideoFormat::Format_YUV422P10BE, QTAV_PIX_FMT_C(YUV422P10BE) },///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    { VideoFormat::Format_YUV422P10LE, QTAV_PIX_FMT_C(YUV422P10LE) },///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    { VideoFormat::Format_YUV444P9BE, QTAV_PIX_FMT_C(YUV444P9BE) }, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    { VideoFormat::Format_YUV444P9LE, QTAV_PIX_FMT_C(YUV444P9LE) }, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    { VideoFormat::Format_YUV444P10BE, QTAV_PIX_FMT_C(YUV444P10BE) },///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    { VideoFormat::Format_YUV444P10LE, QTAV_PIX_FMT_C(YUV444P10LE) },///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    { VideoFormat::Format_YUV422P9BE, QTAV_PIX_FMT_C(YUV422P9BE) }, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    { VideoFormat::Format_YUV422P9LE, QTAV_PIX_FMT_C(YUV422P9LE) }, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    //QTAV_PIX_FMT_C(VDA_VLD,    ///< hardware decoding through VDA
/*
#ifdef AV_PIX_FMT_ABI_GIT_MASTER
    QTAV_PIX_FMT_C(RGBA64BE) },  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    QTAV_PIX_FMT_C(RGBA64LE) },  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    QTAV_PIX_FMT_C(BGRA64BE) },  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    QTAV_PIX_FMT_C(BGRA64LE) },  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
#endif
    QTAV_PIX_FMT_C(GBRP,      ///< planar GBR 4:4:4 24bpp
    QTAV_PIX_FMT_C(GBRP9BE,   ///< planar GBR 4:4:4 27bpp, big-endian
    QTAV_PIX_FMT_C(GBRP9LE,   ///< planar GBR 4:4:4 27bpp, little-endian
    QTAV_PIX_FMT_C(GBRP10BE,  ///< planar GBR 4:4:4 30bpp, big-endian
    QTAV_PIX_FMT_C(GBRP10LE,  ///< planar GBR 4:4:4 30bpp, little-endian
    QTAV_PIX_FMT_C(GBRP16BE,  ///< planar GBR 4:4:4 48bpp, big-endian
    QTAV_PIX_FMT_C(GBRP16LE,  ///< planar GBR 4:4:4 48bpp, little-endian
*/
    /**
     * duplicated pixel formats for compatibility with libav.
     * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55)
     * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85)
     */
/*
    QTAV_PIX_FMT_C(YUVA422P_LIBAV,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
    QTAV_PIX_FMT_C(YUVA444P_LIBAV,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)

    QTAV_PIX_FMT_C(YUVA420P9BE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
    QTAV_PIX_FMT_C(YUVA420P9LE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
    QTAV_PIX_FMT_C(YUVA422P9BE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
    QTAV_PIX_FMT_C(YUVA422P9LE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
    QTAV_PIX_FMT_C(YUVA444P9BE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
    QTAV_PIX_FMT_C(YUVA444P9LE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
    // Y: 2bytes/pix U: 1, V: 1
    QTAV_PIX_FMT_C(YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    QTAV_PIX_FMT_C(YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    QTAV_PIX_FMT_C(YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    QTAV_PIX_FMT_C(YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    QTAV_PIX_FMT_C(YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    QTAV_PIX_FMT_C(YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
    QTAV_PIX_FMT_C(YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    QTAV_PIX_FMT_C(YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    QTAV_PIX_FMT_C(YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    QTAV_PIX_FMT_C(YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    QTAV_PIX_FMT_C(YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    QTAV_PIX_FMT_C(YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
*/
    //QTAV_PIX_FMT_C(VDPAU,     ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
// doc/APIChanges: 2014-04-07 - 0a1cc04 / 8b17243 - lavu 52.75.100 / 53.11.0 - pixfmt.h
    //Add AV_PIX_FMT_YVYU422 pixel format.
#if (FFMPEG_MODULE_CHECK(LIBAVUTIL, 52, 75, 100) || LIBAV_MODULE_CHECK(LIBAVUTIL, 53, 11, 0))
    { VideoFormat::Format_YVYU, QTAV_PIX_FMT_C(YVYU422) },
#endif
// 2014-03-16 - 6b1ca17 / 1481d24 - lavu 52.67.100 / 53.6.0 before ffmpeg2.2 libav11 RGBA64_LIBAV
#if (QTAV_USE_FFMPEG(LIBAVUTIL) || LIBAV_MODULE_CHECK(LIBAVUTIL, 53, 6, 0))
    { VideoFormat::Format_RGBA64BE, QTAV_PIX_FMT_C(RGBA64BE)},  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    { VideoFormat::Format_RGBA64LE, QTAV_PIX_FMT_C(RGBA64LE)},  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    { VideoFormat::Format_BGRA64BE, QTAV_PIX_FMT_C(BGRA64BE)},  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    { VideoFormat::Format_BGRA64LE, QTAV_PIX_FMT_C(BGRA64LE)},  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
#endif
#if QTAV_USE_FFMPEG(LIBAVUTIL) //still use rgba formats but check hasAplha is required
    { VideoFormat::Format_ARGB32, QTAV_PIX_FMT_C(0RGB)},      ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
    { VideoFormat::Format_RGBA32, QTAV_PIX_FMT_C(RGB0)},      ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
    { VideoFormat::Format_ABGR32, QTAV_PIX_FMT_C(0BGR)},      ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
    { VideoFormat::Format_BGRA32, QTAV_PIX_FMT_C(BGR0)},      ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
#endif //
    //QTAV_PIX_FMT_C(YUVA444P,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
    //QTAV_PIX_FMT_C(YUVA422P,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)

#if FF_HAS_YUV12BITS
    { VideoFormat::Format_YUV420P12BE, QTAV_PIX_FMT_C(YUV420P12BE) }, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    { VideoFormat::Format_YUV420P12LE, QTAV_PIX_FMT_C(YUV420P12LE) }, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    { VideoFormat::Format_YUV420P14BE, QTAV_PIX_FMT_C(YUV420P14BE) }, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    { VideoFormat::Format_YUV420P14LE, QTAV_PIX_FMT_C(YUV420P14LE) }, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    { VideoFormat::Format_YUV422P12BE, QTAV_PIX_FMT_C(YUV422P12BE) }, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    { VideoFormat::Format_YUV422P12LE, QTAV_PIX_FMT_C(YUV422P12LE) }, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    { VideoFormat::Format_YUV422P14BE, QTAV_PIX_FMT_C(YUV422P14BE) }, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    { VideoFormat::Format_YUV422P14LE, QTAV_PIX_FMT_C(YUV422P14LE) }, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    { VideoFormat::Format_YUV444P12BE, QTAV_PIX_FMT_C(YUV444P12BE) }, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    { VideoFormat::Format_YUV444P12LE, QTAV_PIX_FMT_C(YUV444P12LE) }, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    { VideoFormat::Format_YUV444P14BE, QTAV_PIX_FMT_C(YUV444P14BE) }, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    { VideoFormat::Format_YUV444P14LE, QTAV_PIX_FMT_C(YUV444P14LE) }, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
#endif //FF_HAS_YUV12BITS
/*
    QTAV_PIX_FMT_C(GBRP12BE,    ///< planar GBR 4:4:4 36bpp, big-endian
    QTAV_PIX_FMT_C(GBRP12LE,    ///< planar GBR 4:4:4 36bpp, little-endian
    QTAV_PIX_FMT_C(GBRP14BE,    ///< planar GBR 4:4:4 42bpp, big-endian
    QTAV_PIX_FMT_C(GBRP14LE,    ///< planar GBR 4:4:4 42bpp, little-endian
*/
    // native endian formats
    // QTAV_PIX_FMT_C(RGB32) is depends on byte order, ARGB for BE, BGRA for LE
    { VideoFormat::Format_RGB32, QTAV_PIX_FMT_C(RGB32) }, //auto endian
    // AV_PIX_FMT_BGR32_1: bgra, argb
    { VideoFormat::Format_BGR32, QTAV_PIX_FMT_C(BGR32) }, //auto endian
    { VideoFormat::Format_RGB48, QTAV_PIX_FMT_C(RGB48) },   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
    { VideoFormat::Format_BGR48, QTAV_PIX_FMT_C(BGR48) },   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
#if QTAV_USE_FFMPEG(LIBAVUTIL)
    { VideoFormat::Format_RGBA64, QTAV_PIX_FMT_C(RGBA64) },
    { VideoFormat::Format_BGRA64, QTAV_PIX_FMT_C(BGRA64) },
#endif //QTAV_USE_FFMPEG(LIBAVUTIL)
    { VideoFormat::Format_VYUY, QTAV_PIX_FMT_C(UYVY422) }, // FIXME: hack for invalid ffmpeg formats

    { VideoFormat::Format_VYU, QTAV_PIX_FMT_C(RGB32) },
#ifdef AV_PIX_FMT_XYZ12
    { VideoFormat::Format_XYZ12, QTAV_PIX_FMT_C(XYZ12) },
    { VideoFormat::Format_XYZ12LE, QTAV_PIX_FMT_C(XYZ12LE) },
    { VideoFormat::Format_XYZ12BE, QTAV_PIX_FMT_C(XYZ12BE) },
#endif
    { VideoFormat::Format_Invalid, QTAV_PIX_FMT_C(NONE) },
};
383 
pixelFormatFromFFmpeg(int ff)384 VideoFormat::PixelFormat VideoFormat::pixelFormatFromFFmpeg(int ff)
385 {
386     for (unsigned int i = 0; i < sizeof(pixfmt_map)/sizeof(pixfmt_map[0]); ++i) {
387         if (pixfmt_map[i].ff == ff)
388             return pixfmt_map[i].fmt;
389     }
390     return VideoFormat::Format_Invalid;
391 }
392 
pixelFormatToFFmpeg(VideoFormat::PixelFormat fmt)393 int VideoFormat::pixelFormatToFFmpeg(VideoFormat::PixelFormat fmt)
394 {
395     for (unsigned int i = 0; i < sizeof(pixfmt_map)/sizeof(pixfmt_map[0]); ++i) {
396         if (pixfmt_map[i].fmt == fmt)
397             return pixfmt_map[i].ff;
398     }
399     return QTAV_PIX_FMT_C(NONE);
400 }
401 
pixelFormatsFFmpeg()402 QVector<int> VideoFormat::pixelFormatsFFmpeg()
403 {
404     static QVector<int> sFmts;
405     if (sFmts.isEmpty()) {
406         const AVPixFmtDescriptor *desc = NULL;
407         while ((desc = av_pix_fmt_desc_next(desc))) {
408             if ((desc->flags & AV_PIX_FMT_FLAG_HWACCEL) == AV_PIX_FMT_FLAG_HWACCEL)
409                 continue;
410             sFmts.append(av_pix_fmt_desc_get_id(desc));
411         }
412     }
413     return sFmts;
414 }
415 
416 /*!
417     Returns a video pixel format equivalent to an image \a format.  If there is no equivalent
418     format VideoFormat::InvalidType is returned instead.
419 
420     \note In general \l QImage does not handle YUV formats.
421 
422 */
// QtAV <-> QImage pixel format table. Lookups scan linearly and stop at the
// Format_Invalid sentinel; the FIRST matching row wins, so order matters.
// Rows whose QImage::Format is negated (e.g. -QImage::Format_RGB16) tag
// BGR-swapped variants that QImage cannot represent directly: such negative
// values never come from a real QImage, they only mark the table entry.
static const struct {
    VideoFormat::PixelFormat fmt;
    QImage::Format qfmt;
} qpixfmt_map[] = {
    // QImage::Format_ARGB32: 0xAARRGGBB, VideoFormat::Format_BGRA32: layout is BBGGRRAA
    { PIXFMT_NE(ARGB32, BGRA32), QImage::Format_ARGB32 },
    { VideoFormat::Format_RGB32, QImage::Format_ARGB32 },
    { VideoFormat::Format_RGB32, QImage::Format_RGB32 },
#if QT_VERSION >= QT_VERSION_CHECK(5, 2, 0)
    { VideoFormat::Format_RGBA32, QImage::Format_RGBA8888 }, //be 0xRRGGBBAA, le 0xAABBGGRR
#endif
    { VideoFormat::Format_RGB565, QImage::Format_RGB16 },
    { VideoFormat::Format_BGR565, (QImage::Format)-QImage::Format_RGB16 },
    { VideoFormat::Format_RGB555, QImage::Format_RGB555 },
    { VideoFormat::Format_BGR555, (QImage::Format)-QImage::Format_RGB555 },
    { VideoFormat::Format_RGB24, QImage::Format_RGB888 },
    { VideoFormat::Format_BGR24, (QImage::Format)-QImage::Format_RGB888 },
    { VideoFormat::Format_Invalid, QImage::Format_Invalid }
};
442 
pixelFormatFromImageFormat(QImage::Format format)443 VideoFormat::PixelFormat VideoFormat::pixelFormatFromImageFormat(QImage::Format format)
444 {
445     for (int i = 0; qpixfmt_map[i].fmt != Format_Invalid; ++i) {
446         if (qpixfmt_map[i].qfmt == format)
447             return qpixfmt_map[i].fmt;
448     }
449     return Format_Invalid;
450 }
451 
imageFormatFromPixelFormat(PixelFormat format)452 QImage::Format VideoFormat::imageFormatFromPixelFormat(PixelFormat format)
453 {
454     for (int i = 0; qpixfmt_map[i].fmt != Format_Invalid; ++i) {
455         if (qpixfmt_map[i].fmt == format)
456             return qpixfmt_map[i].qfmt;
457     }
458     return QImage::Format_Invalid;
459 }
460 
461 
VideoFormat(PixelFormat format)462 VideoFormat::VideoFormat(PixelFormat format)
463     :d(new VideoFormatPrivate(format))
464 {
465 }
466 
VideoFormat(int formatFF)467 VideoFormat::VideoFormat(int formatFF)
468     :d(new VideoFormatPrivate((AVPixelFormat)formatFF))
469 {
470 }
471 
VideoFormat(QImage::Format fmt)472 VideoFormat::VideoFormat(QImage::Format fmt)
473     :d(new VideoFormatPrivate(fmt))
474 {
475 }
476 
VideoFormat(const QString & name)477 VideoFormat::VideoFormat(const QString &name)
478     :d(new VideoFormatPrivate(av_get_pix_fmt(name.toUtf8().constData())))
479 {
480 }
481 
VideoFormat(const VideoFormat & other)482 VideoFormat::VideoFormat(const VideoFormat &other)
483     :d(other.d)
484 {
485 }
486 
~VideoFormat()487 VideoFormat::~VideoFormat()
488 {
489 
490 }
491 
492 
/*!
    Assigns \a other to this VideoFormat implementation.
*/
VideoFormat& VideoFormat::operator=(const VideoFormat &other)
{
    // Implicitly shared: just adopt the other's private data.
    d = other.d;
    return *this;
}

/*!
    Resets this format to the QtAV pixel format \a fmt, recomputing all
    derived properties.
*/
VideoFormat& VideoFormat::operator =(VideoFormat::PixelFormat fmt)
{
    d = new VideoFormatPrivate(fmt);
    return *this;
}

/*!
    Resets this format to the equivalent of the QImage format \a qpixfmt.
*/
VideoFormat& VideoFormat::operator =(QImage::Format qpixfmt)
{
    d = new VideoFormatPrivate(qpixfmt);
    return *this;
}

/*!
    Resets this format from a raw FFmpeg pixel format value \a fffmt.
*/
VideoFormat& VideoFormat::operator =(int fffmt)
{
    d = new VideoFormatPrivate((AVPixelFormat)fffmt);
    return *this;
}
519 
// Two formats are equal when they share the same FFmpeg pixel format,
// which is the canonical representation kept in the private data.
bool VideoFormat::operator==(const VideoFormat &other) const
{
    return d->pixfmt_ff == other.d->pixfmt_ff;
}

// Compare against a QtAV pixel format enum value.
bool VideoFormat::operator==(VideoFormat::PixelFormat fmt) const
{
    return d->pixfmt == fmt;
}

// Compare against a QImage format (the cached QImage equivalent of this format).
bool VideoFormat::operator==(QImage::Format qpixfmt) const
{
    return d->qpixfmt == qpixfmt;
}

// Compare against a raw FFmpeg pixel format value (AVPixelFormat as int).
bool VideoFormat::operator==(int fffmt) const
{
    return d->pixfmt_ff == fffmt;
}
539 
// Negations of the corresponding operator== overloads above; the
// VideoFormat overload delegates, the others compare directly.
bool VideoFormat::operator!=(const VideoFormat& other) const
{
    return !(*this == other);
}

bool VideoFormat::operator!=(VideoFormat::PixelFormat fmt) const
{
    return d->pixfmt != fmt;
}

bool VideoFormat::operator!=(QImage::Format qpixfmt) const
{
    return d->qpixfmt != qpixfmt;
}

bool VideoFormat::operator!=(int fffmt) const
{
    return d->pixfmt_ff != fffmt;
}
559 
/*!
    Returns true if either the FFmpeg or the QtAV side of the format is set,
    i.e. the format describes a real pixel layout.
*/
bool VideoFormat::isValid() const
{
    return d->pixfmt_ff != QTAV_PIX_FMT_C(NONE) || d->pixfmt != Format_Invalid;
}
564 
// QtAV pixel format enum value of this format.
VideoFormat::PixelFormat VideoFormat::pixelFormat() const
{
    return d->pixfmt;
}

// FFmpeg AVPixelFormat value, exposed as int to keep FFmpeg out of the API.
int VideoFormat::pixelFormatFFmpeg() const
{
    return d->pixfmt_ff;
}

// Closest QImage::Format equivalent (QImage::Format_Invalid if none).
QImage::Format VideoFormat::imageFormat() const
{
    return d->qpixfmt;
}

// Human-readable format name, as computed by the private data
// (presumably the FFmpeg pixel format name — see VideoFormatPrivate::name()).
QString VideoFormat::name() const
{
    return d->name();
}
584 
/*!
    Sets the QtAV pixel format and re-initializes all derived properties
    (FFmpeg format, plane count, bpp, ...) via the private data's init().
*/
void VideoFormat::setPixelFormat(PixelFormat format)
{
    d->pixfmt = format;
    d->init(format);
}

/*!
    Sets the format from a raw FFmpeg pixel format value and re-initializes
    all derived properties.
*/
void VideoFormat::setPixelFormatFFmpeg(int format)
{
    d->pixfmt_ff = (AVPixelFormat)format;
    d->init((AVPixelFormat)format);
}
596 
/*!
    Returns the number of color components (e.g. 3 for yuv420p, 4 for rgba),
    or 0 for an invalid format with no FFmpeg pixel descriptor.
*/
int VideoFormat::channels() const
{
    if (!d->pixdesc)
        return 0;
    return d->pixdesc->nb_components;
}
603 
channels(int plane) const604 int VideoFormat::channels(int plane) const
605 {
606     if (plane > d->channels.size())
607         return 0;
608     return d->channels[plane];
609 }
610 
// Number of data planes (1 for packed formats, up to 4 for planar ones).
int VideoFormat::planeCount() const
{
    return d->planes;
}

// Effective bits per pixel, summed over all components (excludes padding).
int VideoFormat::bitsPerPixel() const
{
    return d->bpp;
}

// Bits per pixel including padding bits (e.g. 32 for 0RGB).
int VideoFormat::bitsPerPixelPadded() const
{
    return d->bpp_pad;
}
625 
bitsPerPixel(int plane) const626 int VideoFormat::bitsPerPixel(int plane) const
627 {
628     //must be a valid index position in the vector
629     if (plane >= d->bpps.size())
630         return 0;
631     return d->bpps[plane];
632 }
633 
// Bytes per pixel, rounded up: ceil(bits / 8) via (bits + 7) >> 3.
int VideoFormat::bytesPerPixel() const
{
    return (bitsPerPixel() + 7) >> 3;
}

// Bytes per pixel of one plane, rounded up; 0 for an out-of-range plane
// (bitsPerPixel(plane) returns 0 in that case).
int VideoFormat::bytesPerPixel(int plane) const
{
    return (bitsPerPixel(plane) + 7) >> 3;
}
643 
// Bits per color component (e.g. 8 for yuv420p, 10 for yuv420p10);
// as computed by the private data during init().
int VideoFormat::bitsPerComponent() const
{
    return d->bpc;
}

// Minimum line stride in bytes for the given frame \a width and \a plane;
// delegates to the private data (which wraps the FFmpeg image utilities).
int VideoFormat::bytesPerLine(int width, int plane) const
{
    return d->bytesPerLine(width, plane);
}
653 
chromaWidth(int lumaWidth) const654 int VideoFormat::chromaWidth(int lumaWidth) const
655 {
656     return -((-lumaWidth) >> d->pixdesc->log2_chroma_w);
657 }
658 
chromaHeight(int lumaHeight) const659 int VideoFormat::chromaHeight(int lumaHeight) const
660 {
661     return -((-lumaHeight) >> d->pixdesc->log2_chroma_h);
662 }
663 
/*!
    Returns the width of the given \a plane for a frame of width \a lumaWidth.
    Plane 0 (and negative values) is treated as the luma/packed plane; all
    other planes are assumed chroma-subsampled.
*/
int VideoFormat::width(int lumaWidth, int plane) const
{
    if (plane <= 0)
        return lumaWidth;
    return chromaWidth(lumaWidth);
}

/*!
    Returns the height of the given \a plane for a frame of height
    \a lumaHeight; same plane convention as width().
*/
int VideoFormat::height(int lumaHeight, int plane) const
{
    if (plane <= 0)
        return lumaHeight;
    return chromaHeight(lumaHeight);
}
677 
normalizedWidth(int plane) const678 qreal VideoFormat::normalizedWidth(int plane) const
679 {
680     if (plane <= 0)
681         return 1.0;
682     return 1.0/std::pow(2.0, qreal(d->pixdesc->log2_chroma_w));
683 }
684 
normalizedHeight(int plane) const685 qreal VideoFormat::normalizedHeight(int plane) const
686 {
687     if (plane <= 0)
688         return 1.0;
689     return 1.0/std::pow(2.0, qreal(d->pixdesc->log2_chroma_h));
690 }
691 
// test AV_PIX_FMT_FLAG_XXX
// Each predicate below checks one flag bit of the FFmpeg pixel format
// descriptor (d->flags() — presumably AVPixFmtDescriptor::flags; verify
// against VideoFormatPrivate).

// True if the format stores multi-byte components in big-endian order.
bool VideoFormat::isBigEndian() const
{
    return (d->flags() & AV_PIX_FMT_FLAG_BE) == AV_PIX_FMT_FLAG_BE;
}

// True if the format carries a palette in data[1] (e.g. pal8).
bool VideoFormat::hasPalette() const
{
    return (d->flags() & AV_PIX_FMT_FLAG_PAL) == AV_PIX_FMT_FLAG_PAL;
}

// True for pseudo-paletted formats (e.g. gray8 with a dummy palette).
// NOTE(review): AV_PIX_FMT_FLAG_PSEUDOPAL is deprecated in newer FFmpeg;
// presumably AVCompat papers over this — confirm.
bool VideoFormat::isPseudoPaletted() const
{
    return (d->flags() & AV_PIX_FMT_FLAG_PSEUDOPAL) == AV_PIX_FMT_FLAG_PSEUDOPAL;
}

// True if all pixel data is packed in a bitstream (no byte alignment).
bool VideoFormat::isBitStream() const
{
    return (d->flags() & AV_PIX_FMT_FLAG_BITSTREAM) == AV_PIX_FMT_FLAG_BITSTREAM;
}

// True for hardware-accelerated formats (data holds an API-specific handle,
// not pixel bytes).
bool VideoFormat::isHWAccelerated() const
{
    return (d->flags() & AV_PIX_FMT_FLAG_HWACCEL) == AV_PIX_FMT_FLAG_HWACCEL;
}

// True if the pixel components are stored in separate planes.
bool VideoFormat::isPlanar() const
{
    return (d->flags() & AV_PIX_FMT_FLAG_PLANAR) == AV_PIX_FMT_FLAG_PLANAR;
}

// True for RGB-family formats. Format_VYU is excluded explicitly: it is a
// packed YUV format even though its descriptor carries the RGB flag here.
bool VideoFormat::isRGB() const
{
    return (d->flags() & AV_PIX_FMT_FLAG_RGB) == AV_PIX_FMT_FLAG_RGB && d->pixfmt != Format_VYU;
}

// True for the XYZ12 color space variants (no FFmpeg flag exists for XYZ,
// so the pixel format enum is compared directly).
bool VideoFormat::isXYZ() const
{
    return d->pixfmt == Format_XYZ12 || d->pixfmt == Format_XYZ12LE || d->pixfmt == Format_XYZ12BE;
}

// True if the format has an alpha component.
bool VideoFormat::hasAlpha() const
{
    return (d->flags() & AV_PIX_FMT_FLAG_ALPHA) == AV_PIX_FMT_FLAG_ALPHA;
}
737 
isPlanar(PixelFormat pixfmt)738 bool VideoFormat::isPlanar(PixelFormat pixfmt)
739 {
740     return pixfmt == Format_YUV420P || pixfmt == Format_NV12 || pixfmt == Format_NV21 || pixfmt == Format_YV12
741             || pixfmt == Format_YUV410P || pixfmt == Format_YUV411P || pixfmt == Format_YUV422P
742             || pixfmt == Format_YUV444P || pixfmt == Format_AYUV444
743         || pixfmt == Format_IMC1 || pixfmt == Format_IMC2 || pixfmt == Format_IMC3 || pixfmt == Format_IMC4
744             ;
745 }
746 
isRGB(PixelFormat pixfmt)747 bool VideoFormat::isRGB(PixelFormat pixfmt)
748 {
749     return pixfmt == Format_RGB32 || pixfmt == Format_ARGB32
750         || pixfmt == Format_RGB24 || pixfmt == Format_BGRA32
751         || pixfmt == Format_ABGR32 || pixfmt == Format_RGBA32
752         || pixfmt == Format_BGR565 || pixfmt == Format_RGB555 || pixfmt == Format_RGB565
753         || pixfmt == Format_BGR24 || pixfmt == Format_BGR32 || pixfmt == Format_BGR555
754         || pixfmt == Format_RGB48 || pixfmt == Format_RGB48LE || pixfmt == Format_RGB48BE
755         || pixfmt == Format_BGR48 || pixfmt == Format_BGR48LE || pixfmt == Format_BGR48BE
756         || pixfmt == Format_RGBA64 || pixfmt == Format_RGBA64LE || pixfmt == Format_RGBA64BE
757         || pixfmt == Format_BGRA64 || pixfmt == Format_BGRA64LE || pixfmt == Format_BGRA64BE
758             ;
759 }
760 
hasAlpha(PixelFormat pixfmt)761 bool VideoFormat::hasAlpha(PixelFormat pixfmt)
762 {
763     return pixfmt == Format_ARGB32 || pixfmt == Format_BGRA32
764         || pixfmt == Format_AYUV444// || pixfmt == Format_RGB555 || pixfmt == Format_BGR555
765             ;
766 }
767 
768 
769 #ifndef QT_NO_DEBUG_STREAM
// Streams a summary of \a fmt (enum value, name, alpha, channel/plane
// counts, per-plane bpp) to a QDebug stream.
QDebug operator<<(QDebug dbg, const VideoFormat &fmt)
{
    dbg.nospace() << "QtAV::VideoFormat(pixelFormat: " << (int)fmt.pixelFormat() << " " << fmt.name() << " alpha: " << fmt.hasAlpha();
    dbg.nospace() << ", channels: " << fmt.channels();
    dbg.nospace() << ", planes: " << fmt.planeCount();
    dbg.nospace() << ", bpc: " << fmt.bitsPerComponent();
    dbg.nospace() << ", bpp: " << fmt.bitsPerPixel() << "/" << fmt.bitsPerPixelPadded() << " ";
    // One "-N" entry per plane with that plane's bits per pixel.
    for (int i = 0; i < fmt.planeCount(); ++i) {
        dbg.nospace() << "-" << fmt.bitsPerPixel(i);
    }
    dbg.nospace() << ")";
    return dbg.space();
}

// Streams the enum value and the FFmpeg name of \a pixFmt to a QDebug stream.
QDebug operator<<(QDebug dbg, VideoFormat::PixelFormat pixFmt)
{
    dbg.nospace() << (int)pixFmt << " " << av_get_pix_fmt_name((AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(pixFmt));
    return dbg.space();
}
789 #endif
790 
namespace {
    // Registers the VideoFormat types with Qt's meta-type system at static
    // initialization time (via the file-local instance below), so they can
    // be used in QVariant and queued signal/slot connections.
    class VideoFormatPrivateRegisterMetaTypes
    {
    public:
        VideoFormatPrivateRegisterMetaTypes()
        {
            qRegisterMetaType<QtAV::VideoFormat>();
            qRegisterMetaType<QtAV::VideoFormat::PixelFormat>();
        }
    } _registerMetaTypes;
}
802 
803 } //namespace QtAV
804