1 /* GStreamer
2 * Copyright (C) 1999 Erik Walthinsen <omega@cse.ogi.edu>
3 * Copyright (C) 2006 Tim-Philipp Müller <tim centricular net>
4 * Copyright (C) 2010 Sebastian Dröge <sebastian.droege@collabora.co.uk>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
15 *
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
19 * Boston, MA 02110-1301, USA.
20 */
21 /**
22 * SECTION:element-videobox
23 * @see_also: #GstVideoCrop
24 *
 * This plugin crops or enlarges the image. It takes 4 values as input, a
 * top, bottom, left and right offset. Positive values will crop that many
 * pixels from the respective border of the image, negative values will add
 * that many pixels. When pixels are added, you can specify their color.
 * Some predefined colors are usable with an enum property.
30 *
 * The plugin is alpha channel aware and will try to negotiate with a format
 * that supports alpha channels first. When the alpha channel is active, two
 * other properties, alpha and border_alpha, can be used to set the alpha
 * values of the inner picture and the border respectively. An alpha value of
 * 0.0 means total transparency, 1.0 is opaque.
36 *
37 * The videobox plugin has many uses such as doing a mosaic of pictures,
38 * letterboxing video, cutting out pieces of video, picture in picture, etc..
39 *
40 * Setting autocrop to true changes the behavior of the plugin so that
41 * caps determine crop properties rather than the other way around: given
42 * input and output dimensions, the crop values are selected so that the
43 * smaller frame is effectively centered in the larger frame. This
44 * involves either cropping or padding.
45 *
 * If you use autocrop there is little point in setting the other
 * properties manually because they will be overridden if the caps change,
 * but nothing stops you from doing so.
49 *
50 * Sample pipeline:
51 * |[
52 * gst-launch-1.0 videotestsrc ! videobox autocrop=true ! \
53 * "video/x-raw, width=600, height=400" ! videoconvert ! ximagesink
54 * ]|
55 */
56
57 #ifdef HAVE_CONFIG_H
58 #include "config.h"
59 #endif
60
61 #include "gstvideobox.h"
62 #include "gstvideoboxorc.h"
63
64 #include <math.h>
65 #include <string.h>
66
GST_DEBUG_CATEGORY_STATIC (videobox_debug);
#define GST_CAT_DEFAULT videobox_debug

/* From videotestsrc.c */
/* Fill-color lookup tables, indexed by GstVideoBoxFill. The entry order
 * matches the RGB tables below: black, green, blue, red, yellow, white. */

/* YCbCr fill colors for SDTV colorimetry */
static const guint8 yuv_sdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
    { 16, 145, 41, 81, 210, 235 };
static const guint8 yuv_sdtv_colors_U[VIDEO_BOX_FILL_LAST] =
    { 128, 54, 240, 90, 16, 128 };
static const guint8 yuv_sdtv_colors_V[VIDEO_BOX_FILL_LAST] =
    { 128, 34, 110, 240, 146, 128 };

/* YCbCr fill colors for HDTV colorimetry */
static const guint8 yuv_hdtv_colors_Y[VIDEO_BOX_FILL_LAST] =
    { 16, 173, 32, 63, 219, 235 };
static const guint8 yuv_hdtv_colors_U[VIDEO_BOX_FILL_LAST] =
    { 128, 42, 240, 102, 16, 128 };
static const guint8 yuv_hdtv_colors_V[VIDEO_BOX_FILL_LAST] =
    { 128, 26, 118, 240, 138, 128 };

/* RGB fill colors: black, green, blue, red, yellow, white */
static const guint8 rgb_colors_R[VIDEO_BOX_FILL_LAST] =
    { 0, 0, 0, 255, 255, 255 };
static const guint8 rgb_colors_G[VIDEO_BOX_FILL_LAST] =
    { 0, 255, 0, 0, 255, 255 };
static const guint8 rgb_colors_B[VIDEO_BOX_FILL_LAST] =
    { 0, 0, 255, 0, 0, 255 };

/* Generated by -bad/ext/cog/generate_tables */
/* 3x4 color conversion matrices in 8-bit fixed point (coefficients scaled
 * by 256, see APPLY_MATRIX below). Each row is [c1, c2, c3, offset].
 * "sdtv"/"hdtv" presumably correspond to BT.601/BT.709 — matches the
 * color tables above, but verify against generate_tables if it matters. */
static const int cog_ycbcr_to_rgb_matrix_8bit_hdtv[] = {
  298, 0, 459, -63514,
  298, -55, -136, 19681,
  298, 541, 0, -73988,
};

static const int cog_ycbcr_to_rgb_matrix_8bit_sdtv[] = {
  298, 0, 409, -57068,
  298, -100, -208, 34707,
  298, 516, 0, -70870,
};

static const gint cog_rgb_to_ycbcr_matrix_8bit_hdtv[] = {
  47, 157, 16, 4096,
  -26, -87, 112, 32768,
  112, -102, -10, 32768,
};

static const gint cog_rgb_to_ycbcr_matrix_8bit_sdtv[] = {
  66, 129, 25, 4096,
  -38, -74, 112, 32768,
  112, -94, -18, 32768,
};

/* SDTV YCbCr -> HDTV YCbCr (and inverse below), used when the source and
 * destination frames use different colorimetry */
static const gint cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit[] = {
  256, -30, -53, 10600,
  0, 261, 29, -4367,
  0, 19, 262, -3289,
};

static const gint cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit[] = {
  256, 25, 49, -9536,
  0, 253, -28, 3958,
  0, -19, 252, 2918,
};

/* No-op matrix used when no colorimetry conversion is needed */
static const gint cog_identity_matrix_8bit[] = {
  256, 0, 0, 0,
  0, 256, 0, 0,
  0, 0, 256, 0,
};

/* Apply row @o (0, 1 or 2) of the 3x4 fixed-point matrix @m to the
 * component triple (v1,v2,v3). The >> 8 undoes the 256x coefficient
 * scaling. The result is NOT clamped to [0, 255] — callers must CLAMP
 * before storing into 8-bit components. */
#define APPLY_MATRIX(m,o,v1,v2,v3) ((m[o*4] * v1 + m[o*4+1] * v2 + m[o*4+2] * v3 + m[o*4+3]) >> 8)
136
137 static void
fill_ayuv(GstVideoBoxFill fill_type,guint b_alpha,GstVideoFrame * frame,gboolean sdtv)138 fill_ayuv (GstVideoBoxFill fill_type, guint b_alpha,
139 GstVideoFrame * frame, gboolean sdtv)
140 {
141 guint32 empty_pixel;
142 guint8 *dest;
143 gint width, height;
144 gint stride;
145
146 width = GST_VIDEO_FRAME_WIDTH (frame);
147 height = GST_VIDEO_FRAME_HEIGHT (frame);
148
149 b_alpha = CLAMP (b_alpha, 0, 255);
150
151 if (sdtv)
152 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
153 (yuv_sdtv_colors_Y[fill_type] << 16) |
154 (yuv_sdtv_colors_U[fill_type] << 8) | yuv_sdtv_colors_V[fill_type]);
155 else
156 empty_pixel = GUINT32_FROM_BE ((b_alpha << 24) |
157 (yuv_hdtv_colors_Y[fill_type] << 16) |
158 (yuv_hdtv_colors_U[fill_type] << 8) | yuv_hdtv_colors_V[fill_type]);
159
160 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
161 stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
162
163 if (G_LIKELY (stride == 4 * width))
164 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
165 else if (height) {
166 for (; height; --height) {
167 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width);
168 dest += stride;
169 }
170 }
171 }
172
173 static void
copy_ayuv_ayuv(guint i_alpha,GstVideoFrame * dest_frame,gboolean dest_sdtv,gint dest_x,gint dest_y,GstVideoFrame * src_frame,gboolean src_sdtv,gint src_x,gint src_y,gint w,gint h)174 copy_ayuv_ayuv (guint i_alpha, GstVideoFrame * dest_frame,
175 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
176 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
177 {
178 gint i, j;
179 gint src_stride;
180 gint dest_stride;
181 guint8 *dest, *src;
182
183 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
184 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
185
186 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
187 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
188
189 dest = dest + dest_y * dest_stride + dest_x * 4;
190 src = src + src_y * src_stride + src_x * 4;
191
192 w *= 4;
193
194 if (dest_sdtv != src_sdtv) {
195 gint matrix[12];
196 gint y, u, v;
197
198 memcpy (matrix,
199 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
200 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
201
202 for (i = 0; i < h; i++) {
203 for (j = 0; j < w; j += 4) {
204 /* ORC FIXME */
205 dest[j] = (src[j] * i_alpha) >> 8;
206 y = src[j + 1];
207 u = src[j + 2];
208 v = src[j + 3];
209 dest[j + 1] = APPLY_MATRIX (matrix, 0, y, u, v);
210 dest[j + 2] = APPLY_MATRIX (matrix, 1, y, u, v);
211 dest[j + 3] = APPLY_MATRIX (matrix, 2, y, u, v);
212 }
213 dest += dest_stride;
214 src += src_stride;
215 }
216 } else {
217 for (i = 0; i < h; i++) {
218 for (j = 0; j < w; j += 4) {
219 /* ORC FIXME */
220 dest[j] = (src[j] * i_alpha) >> 8;
221 dest[j + 1] = src[j + 1];
222 dest[j + 2] = src[j + 2];
223 dest[j + 3] = src[j + 3];
224 }
225 dest += dest_stride;
226 src += src_stride;
227 }
228 }
229 }
230
231 static void
copy_ayuv_i420(guint i_alpha,GstVideoFrame * dest_frame,gboolean dest_sdtv,gint dest_x,gint dest_y,GstVideoFrame * src_frame,gboolean src_sdtv,gint src_x,gint src_y,gint w,gint h)232 copy_ayuv_i420 (guint i_alpha, GstVideoFrame * dest_frame,
233 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
234 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
235 {
236 gint i, j;
237 guint8 *destY, *destY2, *destU, *destV;
238 gint dest_strideY, dest_strideU, dest_strideV;
239 const guint8 *src2;
240 gint src_stride;
241 gint y_idx, uv_idx;
242 gint y1, y2, y3, y4;
243 gint u1, u2, u3, u4;
244 gint v1, v2, v3, v4;
245 gint matrix[12];
246 guint8 *src;
247 gint dest_height, src_height, dest_width;
248
249 dest_height = GST_VIDEO_FRAME_HEIGHT (dest_frame);
250 dest_width = GST_VIDEO_FRAME_WIDTH (dest_frame);
251 src_height = GST_VIDEO_FRAME_HEIGHT (src_frame);
252
253 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest_frame, 0);
254 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest_frame, 1);
255 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest_frame, 2);
256
257 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
258
259 destY = GST_VIDEO_FRAME_COMP_DATA (dest_frame, 0);
260 destU = GST_VIDEO_FRAME_COMP_DATA (dest_frame, 1);
261 destV = GST_VIDEO_FRAME_COMP_DATA (dest_frame, 2);
262
263 destY = destY + dest_y * dest_strideY + dest_x;
264 destY2 = (dest_y < dest_height) ? destY + dest_strideY : destY;
265 destU = destU + (dest_y / 2) * dest_strideU + dest_x / 2;
266 destV = destV + (dest_y / 2) * dest_strideV + dest_x / 2;
267
268 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
269 src = src + src_y * src_stride + src_x * 4;
270 src2 = (src_y < src_height) ? src + src_stride : src;
271
272 h = dest_y + h;
273 w = dest_x + w;
274
275 if (src_sdtv != dest_sdtv)
276 memcpy (matrix,
277 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
278 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
279 else
280 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
281
282 /* 1. Handle the first destination scanline specially if it
283 * doesn't start at the macro pixel boundary, i.e. blend
284 * with the background! */
285 if (dest_y % 2 == 1) {
286 /* 1.1. Handle the first destination pixel if it doesn't
287 * start at the macro pixel boundary, i.e. blend with
288 * the background! */
289 if (dest_x % 2 == 1) {
290 y1 = src[4 * 0 + 1];
291 u1 = src[4 * 0 + 2];
292 v1 = src[4 * 0 + 3];
293
294 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
295 destU[0] =
296 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
297 255);
298 destV[0] =
299 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
300 255);
301
302 j = dest_x + 1;
303 y_idx = uv_idx = 1;
304 } else {
305 j = dest_x;
306 y_idx = uv_idx = 0;
307 }
308
309 /* 1.2. Copy all macro pixels from the source to the destination
310 * but blend with the background because we're only filling
311 * the lower part of the macro pixels. */
312 for (; j < w - 1; j += 2) {
313 y1 = src[4 * y_idx + 1];
314 y2 = src[4 * y_idx + 4 + 1];
315
316 u1 = src[4 * y_idx + 2];
317 u2 = src[4 * y_idx + 4 + 2];
318
319 v1 = src[4 * y_idx + 3];
320 v2 = src[4 * y_idx + 4 + 3];
321
322 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
323 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
324 destU[uv_idx] = CLAMP (
325 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
326 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
327 destV[uv_idx] = CLAMP (
328 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
329 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
330
331 y_idx += 2;
332 uv_idx++;
333 }
334
335 /* 1.3. Now copy the last pixel if one exists and blend it
336 * with the background because we only fill part of
337 * the macro pixel. In case this is the last pixel of
338 * the destination we will a larger part. */
339 if (j == w - 1 && j == dest_width - 1) {
340 y1 = src[4 * y_idx + 1];
341 u1 = src[4 * y_idx + 2];
342 v1 = src[4 * y_idx + 3];
343
344 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
345 destU[uv_idx] = CLAMP (
346 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
347 destV[uv_idx] = CLAMP (
348 (destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
349 } else if (j == w - 1) {
350 y1 = src[4 * y_idx + 1];
351 u1 = src[4 * y_idx + 2];
352 v1 = src[4 * y_idx + 3];
353
354 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
355 destU[uv_idx] = CLAMP (
356 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
357 255);
358 destV[uv_idx] =
359 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4,
360 0, 255);
361 }
362
363 destY += dest_strideY;
364 destY2 += dest_strideY;
365 destU += dest_strideU;
366 destV += dest_strideV;
367 src += src_stride;
368 src2 += src_stride;
369 i = dest_y + 1;
370 } else {
371 i = dest_y;
372 }
373
374 /* 2. Copy all macro pixel scanlines, the destination scanline
375 * now starts at macro pixel boundary. */
376 for (; i < h - 1; i += 2) {
377 /* 2.1. Handle the first destination pixel if it doesn't
378 * start at the macro pixel boundary, i.e. blend with
379 * the background! */
380 if (dest_x % 2 == 1) {
381 y1 = src[4 * 0 + 1];
382 y2 = src2[4 * 0 + 1];
383 u1 = src[4 * 0 + 2];
384 u2 = src2[4 * 0 + 2];
385 v1 = src[4 * 0 + 3];
386 v2 = src2[4 * 0 + 3];
387
388 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
389 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
390 destU[0] = CLAMP (
391 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
392 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
393 destV[0] = CLAMP (
394 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
395 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
396 j = dest_x + 1;
397 y_idx = uv_idx = 1;
398 } else {
399 j = dest_x;
400 y_idx = uv_idx = 0;
401 }
402
403 /* 2.2. Copy all macro pixels from the source to the destination.
404 * All pixels now start at macro pixel boundary, i.e. no
405 * blending with the background is necessary. */
406 for (; j < w - 1; j += 2) {
407 y1 = src[4 * y_idx + 1];
408 y2 = src[4 * y_idx + 4 + 1];
409 y3 = src2[4 * y_idx + 1];
410 y4 = src2[4 * y_idx + 4 + 1];
411
412 u1 = src[4 * y_idx + 2];
413 u2 = src[4 * y_idx + 4 + 2];
414 u3 = src2[4 * y_idx + 2];
415 u4 = src2[4 * y_idx + 4 + 2];
416
417 v1 = src[4 * y_idx + 3];
418 v2 = src[4 * y_idx + 4 + 3];
419 v3 = src2[4 * y_idx + 3];
420 v4 = src2[4 * y_idx + 4 + 3];
421
422 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
423 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
424 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
425 destY2[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
426
427 destU[uv_idx] = CLAMP (
428 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
429 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
430 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
431 destV[uv_idx] = CLAMP (
432 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
433 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
434 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
435
436 y_idx += 2;
437 uv_idx++;
438 }
439
440 /* 2.3. Now copy the last pixel if one exists and blend it
441 * with the background because we only fill part of
442 * the macro pixel. In case this is the last pixel of
443 * the destination we will a larger part. */
444 if (j == w - 1 && j == dest_width - 1) {
445 y1 = src[4 * y_idx + 1];
446 y2 = src2[4 * y_idx + 1];
447
448 u1 = src[4 * y_idx + 2];
449 u2 = src2[4 * y_idx + 2];
450
451 v1 = src[4 * y_idx + 3];
452 v2 = src2[4 * y_idx + 3];
453
454 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
455 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
456 destU[uv_idx] = CLAMP (
457 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
458 u2, v2)) / 2, 0, 255);
459 destV[uv_idx] = CLAMP (
460 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
461 u2, v2)) / 2, 0, 255);
462 } else if (j == w - 1) {
463 y1 = src[4 * y_idx + 1];
464 y2 = src2[4 * y_idx + 1];
465
466 u1 = src[4 * y_idx + 2];
467 u2 = src2[4 * y_idx + 2];
468
469 v1 = src[4 * y_idx + 3];
470 v2 = src2[4 * y_idx + 3];
471
472 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
473 destY2[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
474 destU[uv_idx] = CLAMP (
475 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
476 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
477 destV[uv_idx] = CLAMP (
478 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
479 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
480 }
481
482 destY += 2 * dest_strideY;
483 destY2 += 2 * dest_strideY;
484 destU += dest_strideU;
485 destV += dest_strideV;
486 src += 2 * src_stride;
487 src2 += 2 * src_stride;
488 }
489
490 /* 3. Handle the last scanline if one exists. This again
491 * doesn't start at macro pixel boundary but should
492 * only fill the upper part of the macro pixels. */
493 if (i == h - 1 && i == dest_height - 1) {
494 /* 3.1. Handle the first destination pixel if it doesn't
495 * start at the macro pixel boundary, i.e. blend with
496 * the background! */
497 if (dest_x % 2 == 1) {
498 y1 = src[4 * 0 + 1];
499 u1 = src[4 * 0 + 2];
500 v1 = src[4 * 0 + 3];
501
502 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
503 destU[0] =
504 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
505 destV[0] =
506 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
507
508 j = dest_x + 1;
509 y_idx = uv_idx = 1;
510 } else {
511 j = dest_x;
512 y_idx = uv_idx = 0;
513 }
514
515 /* 3.2. Copy all macro pixels from the source to the destination
516 * but blend with the background because we're only filling
517 * the upper part of the macro pixels. */
518 for (; j < w - 1; j += 2) {
519 y1 = src[4 * y_idx + 1];
520 y2 = src[4 * y_idx + 4 + 1];
521
522 u1 = src[4 * y_idx + 2];
523 u2 = src[4 * y_idx + 4 + 2];
524
525 v1 = src[4 * y_idx + 3];
526 v2 = src[4 * y_idx + 4 + 3];
527
528 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
529 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
530
531 destU[uv_idx] = CLAMP (
532 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
533 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
534 destV[uv_idx] = CLAMP (
535 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
536 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
537
538 y_idx += 2;
539 uv_idx++;
540 }
541
542 /* 3.3. Now copy the last pixel if one exists and blend it
543 * with the background because we only fill part of
544 * the macro pixel. In case this is the last pixel of
545 * the destination we will a larger part. */
546 if (j == w - 1 && j == dest_width - 1) {
547 y1 = src[4 * y_idx + 1];
548 u1 = src[4 * y_idx + 2];
549 v1 = src[4 * y_idx + 3];
550
551 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
552 destU[uv_idx] = CLAMP (
553 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
554 destV[uv_idx] = CLAMP (
555 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
556 } else if (j == w - 1) {
557 y1 = src[4 * y_idx + 1];
558 u1 = src[4 * y_idx + 2];
559 v1 = src[4 * y_idx + 3];
560
561 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
562 destU[uv_idx] = CLAMP (
563 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
564 255);
565 destV[uv_idx] =
566 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
567 0, 255);
568 }
569 } else if (i == h - 1) {
570 /* 3.1. Handle the first destination pixel if it doesn't
571 * start at the macro pixel boundary, i.e. blend with
572 * the background! */
573 if (dest_x % 2 == 1) {
574 y1 = src[4 * 0 + 1];
575 u1 = src[4 * 0 + 2];
576 v1 = src[4 * 0 + 3];
577
578 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
579 destU[0] =
580 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
581 255);
582 destV[0] =
583 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
584 255);
585
586 j = dest_x + 1;
587 y_idx = uv_idx = 1;
588 } else {
589 j = dest_x;
590 y_idx = uv_idx = 0;
591 }
592
593 /* 3.2. Copy all macro pixels from the source to the destination
594 * but blend with the background because we're only filling
595 * the upper part of the macro pixels. */
596 for (; j < w - 1; j += 2) {
597 y1 = src[4 * y_idx + 1];
598 y2 = src[4 * y_idx + 4 + 1];
599
600 u1 = src[4 * y_idx + 2];
601 u2 = src[4 * y_idx + 4 + 2];
602
603 v1 = src[4 * y_idx + 3];
604 v2 = src[4 * y_idx + 4 + 3];
605
606 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
607 destY[y_idx + 1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
608
609 destU[uv_idx] = CLAMP (
610 (2 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
611 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
612 destV[uv_idx] = CLAMP (
613 (2 * destV[uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
614 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
615
616 y_idx += 2;
617 uv_idx++;
618 }
619
620 /* 3.3. Now copy the last pixel if one exists and blend it
621 * with the background because we only fill part of
622 * the macro pixel. In case this is the last pixel of
623 * the destination we will a larger part. */
624 if (j == w - 1 && j == dest_width - 1) {
625 y1 = src[4 * y_idx + 1];
626 u1 = src[4 * y_idx + 2];
627 v1 = src[4 * y_idx + 3];
628
629 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
630 destU[uv_idx] = CLAMP (
631 (destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
632 destV[uv_idx] = CLAMP (
633 (destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
634 } else if (j == w - 1) {
635 y1 = src[4 * y_idx + 1];
636 u1 = src[4 * y_idx + 2];
637 v1 = src[4 * y_idx + 3];
638
639 destY[y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
640 destU[uv_idx] = CLAMP (
641 (3 * destU[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
642 255);
643 destV[uv_idx] =
644 CLAMP ((3 * destV[uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
645 0, 255);
646 }
647 }
648 }
649
650 static void
fill_planar_yuv(GstVideoBoxFill fill_type,guint b_alpha,GstVideoFrame * frame,gboolean sdtv)651 fill_planar_yuv (GstVideoBoxFill fill_type, guint b_alpha,
652 GstVideoFrame * frame, gboolean sdtv)
653 {
654 guint8 empty_pixel[3];
655 guint8 *destY, *destU, *destV;
656 gint strideY, strideU, strideV;
657 gint heightY, heightU, heightV;
658 gint widthY, widthU, widthV;
659
660 if (sdtv) {
661 empty_pixel[0] = yuv_sdtv_colors_Y[fill_type];
662 empty_pixel[1] = yuv_sdtv_colors_U[fill_type];
663 empty_pixel[2] = yuv_sdtv_colors_V[fill_type];
664 } else {
665 empty_pixel[0] = yuv_hdtv_colors_Y[fill_type];
666 empty_pixel[1] = yuv_hdtv_colors_U[fill_type];
667 empty_pixel[2] = yuv_hdtv_colors_V[fill_type];
668 }
669
670 strideY = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
671 strideU = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);
672 strideV = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2);
673
674 destY = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
675 destU = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
676 destV = GST_VIDEO_FRAME_COMP_DATA (frame, 2);
677
678 widthY = GST_VIDEO_FRAME_COMP_WIDTH (frame, 0);
679 widthU = GST_VIDEO_FRAME_COMP_WIDTH (frame, 1);
680 widthV = GST_VIDEO_FRAME_COMP_WIDTH (frame, 2);
681
682 heightY = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 0);
683 heightU = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 1);
684 heightV = GST_VIDEO_FRAME_COMP_HEIGHT (frame, 2);
685
686 if (strideY == widthY) {
687 memset (destY, empty_pixel[0], strideY * heightY);
688 } else if (heightY) {
689 for (; heightY; --heightY) {
690 memset (destY, empty_pixel[0], widthY);
691 destY += strideY;
692 }
693 }
694 if (strideU == widthU) {
695 memset (destU, empty_pixel[1], strideU * heightU);
696 } else if (heightU) {
697 for (; heightU; --heightU) {
698 memset (destU, empty_pixel[1], widthU);
699 destU += strideU;
700 }
701 }
702 if (strideV == widthV) {
703 memset (destV, empty_pixel[2], strideV * heightV);
704 } else if (heightV) {
705 for (; heightV; --heightV) {
706 memset (destV, empty_pixel[2], widthV);
707 destV += strideV;
708 }
709 }
710 }
711
712 static void
copy_y444_y444(guint i_alpha,GstVideoFrame * dest,gboolean dest_sdtv,gint dest_x,gint dest_y,GstVideoFrame * src,gboolean src_sdtv,gint src_x,gint src_y,gint w,gint h)713 copy_y444_y444 (guint i_alpha, GstVideoFrame * dest,
714 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
715 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
716 {
717 gint i, j;
718 guint8 *destY, *destU, *destV;
719 const guint8 *srcY, *srcU, *srcV;
720 gint dest_strideY, dest_strideU, dest_strideV;
721 gint src_strideY, src_strideU, src_strideV;
722
723 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
724 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
725 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
726
727 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
728 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
729 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
730
731 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
732 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
733 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
734
735 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
736 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
737 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
738
739 destY = destY + dest_y * dest_strideY + dest_x;
740 destU = destU + dest_y * dest_strideU + dest_x;
741 destV = destV + dest_y * dest_strideV + dest_x;
742
743 srcY = srcY + src_y * src_strideY + src_x;
744 srcU = srcU + src_y * src_strideU + src_x;
745 srcV = srcV + src_y * src_strideV + src_x;
746
747 if (src_sdtv != dest_sdtv) {
748 gint matrix[12];
749 gint y, u, v;
750
751 memcpy (matrix,
752 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
753 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
754
755 for (i = 0; i < h; i++) {
756 for (j = 0; j < w; j++) {
757 y = APPLY_MATRIX (matrix, 0, srcY[j], srcU[j], srcV[j]);
758 u = APPLY_MATRIX (matrix, 1, srcY[j], srcU[j], srcV[j]);
759 v = APPLY_MATRIX (matrix, 2, srcY[j], srcU[j], srcV[j]);
760
761 destY[j] = y;
762 destU[j] = u;
763 destV[j] = v;
764 }
765 destY += dest_strideY;
766 destU += dest_strideU;
767 destV += dest_strideV;
768
769 srcY += src_strideY;
770 srcU += src_strideU;
771 srcV += src_strideV;
772 }
773 } else {
774 for (i = 0; i < h; i++) {
775 memcpy (destY, srcY, w);
776 memcpy (destU, srcU, w);
777 memcpy (destV, srcV, w);
778
779 destY += dest_strideY;
780 destU += dest_strideU;
781 destV += dest_strideV;
782
783 srcY += src_strideY;
784 srcU += src_strideU;
785 srcV += src_strideV;
786 }
787 }
788 }
789
790 static void
copy_y42b_y42b(guint i_alpha,GstVideoFrame * dest,gboolean dest_sdtv,gint dest_x,gint dest_y,GstVideoFrame * src,gboolean src_sdtv,gint src_x,gint src_y,gint w,gint h)791 copy_y42b_y42b (guint i_alpha, GstVideoFrame * dest,
792 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
793 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
794 {
795 gint i, j;
796 guint8 *destY, *destU, *destV;
797 const guint8 *srcY, *srcU, *srcV;
798 gint dest_strideY, dest_strideU, dest_strideV;
799 gint src_strideY, src_strideU, src_strideV;
800 gint src_y_idx, src_uv_idx;
801 gint dest_y_idx, dest_uv_idx;
802 gint matrix[12];
803 gint y1, y2;
804 gint u1, u2;
805 gint v1, v2;
806 gint dest_width;
807
808 dest_width = GST_VIDEO_FRAME_WIDTH (dest);
809
810 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
811 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
812 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
813
814 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
815 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
816 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
817
818 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
819 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
820 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
821
822 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
823 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
824 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
825
826 destY = destY + dest_y * dest_strideY + dest_x;
827 destU = destU + dest_y * dest_strideU + dest_x / 2;
828 destV = destV + dest_y * dest_strideV + dest_x / 2;
829
830 srcY = srcY + src_y * src_strideY + src_x;
831 srcU = srcU + src_y * src_strideU + src_x / 2;
832 srcV = srcV + src_y * src_strideV + src_x / 2;
833
834 h = dest_y + h;
835 w = dest_x + w;
836
837 if (src_sdtv != dest_sdtv)
838 memcpy (matrix,
839 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
840 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
841 else
842 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
843
844 /* 1. Copy all macro pixel scanlines, the destination scanline
845 * now starts at macro pixel boundary. */
846 for (i = dest_y; i < h; i++) {
847 /* 1.1. Handle the first destination pixel if it doesn't
848 * start at the macro pixel boundary, i.e. blend with
849 * the background! */
850 if (dest_x % 2 == 1) {
851 y1 = srcY[0];
852 u1 = srcU[0];
853 v1 = srcV[0];
854
855 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
856 destU[0] = CLAMP (
857 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
858 destV[0] = CLAMP (
859 (destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
860 j = dest_x + 1;
861 src_y_idx = dest_y_idx = dest_uv_idx = 1;
862 src_uv_idx = (src_x % 2) + 1;
863 } else {
864 j = dest_x;
865 src_y_idx = dest_y_idx = dest_uv_idx = 0;
866 src_uv_idx = (src_x % 2);
867 }
868
869 /* 1.2. Copy all macro pixels from the source to the destination.
870 * All pixels now start at macro pixel boundary, i.e. no
871 * blending with the background is necessary. */
872 for (; j < w - 1; j += 2) {
873 y1 = srcY[src_y_idx];
874 y2 = srcY[src_y_idx + 1];
875
876 u1 = srcU[src_uv_idx / 2];
877 v1 = srcV[src_uv_idx / 2];
878 src_uv_idx++;
879 u2 = srcU[src_uv_idx / 2];
880 v2 = srcV[src_uv_idx / 2];
881 src_uv_idx++;
882
883 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
884 destY[dest_y_idx + 1] =
885 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
886
887 destU[dest_uv_idx] = CLAMP (
888 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
889 u2, v2)) / 2, 0, 255);
890 destV[dest_uv_idx] = CLAMP (
891 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
892 u2, v2)) / 2, 0, 255);
893
894 dest_y_idx += 2;
895 src_y_idx += 2;
896 dest_uv_idx++;
897 }
898
899 /* 1.3. Now copy the last pixel if one exists and blend it
900 * with the background because we only fill part of
901 * the macro pixel. In case this is the last pixel of
902 * the destination we will a larger part. */
903 if (j == w - 1 && j == dest_width - 1) {
904 y1 = srcY[src_y_idx];
905 u1 = srcU[src_uv_idx / 2];
906 v1 = srcV[src_uv_idx / 2];
907
908 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
909 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
910 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
911 } else if (j == w - 1) {
912 y1 = srcY[src_y_idx];
913 u1 = srcU[src_uv_idx / 2];
914 v1 = srcV[src_uv_idx / 2];
915
916 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
917 destU[dest_uv_idx] = CLAMP (
918 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
919 v1)) / 2, 0, 255);
920 destV[dest_uv_idx] = CLAMP (
921 (destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
922 v1)) / 2, 0, 255);
923 }
924
925 destY += dest_strideY;
926 destU += dest_strideU;
927 destV += dest_strideV;
928 srcY += src_strideY;
929
930 srcU += src_strideU;
931 srcV += src_strideV;
932 }
933 }
934
935 static void
copy_y41b_y41b(guint i_alpha,GstVideoFrame * dest,gboolean dest_sdtv,gint dest_x,gint dest_y,GstVideoFrame * src,gboolean src_sdtv,gint src_x,gint src_y,gint w,gint h)936 copy_y41b_y41b (guint i_alpha, GstVideoFrame * dest,
937 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
938 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
939 {
940 gint i, j;
941 guint8 *destY, *destU, *destV;
942 const guint8 *srcY, *srcU, *srcV;
943 gint dest_strideY, dest_strideU, dest_strideV;
944 gint src_strideY, src_strideU, src_strideV;
945 gint src_y_idx, src_uv_idx;
946 gint dest_y_idx, dest_uv_idx;
947 gint matrix[12];
948 gint y1, y2, y3, y4;
949 gint u1, u2, u3, u4;
950 gint v1, v2, v3, v4;
951 gint dest_width;
952
953 dest_width = GST_VIDEO_FRAME_WIDTH (dest);
954
955 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
956 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
957 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
958
959 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
960 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
961 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
962
963 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
964 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
965 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
966
967 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
968 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
969 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
970
971 destY = destY + dest_y * dest_strideY + dest_x;
972 destU = destU + dest_y * dest_strideU + dest_x / 4;
973 destV = destV + dest_y * dest_strideV + dest_x / 4;
974
975 srcY = srcY + src_y * src_strideY + src_x;
976 srcU = srcU + src_y * src_strideU + src_x / 4;
977 srcV = srcV + src_y * src_strideV + src_x / 4;
978
979 h = dest_y + h;
980 w = dest_x + w;
981
982 if (src_sdtv != dest_sdtv)
983 memcpy (matrix,
984 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
985 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
986 else
987 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
988
989 /* 1. Copy all macro pixel scanlines, the destination scanline
990 * now starts at macro pixel boundary. */
991 for (i = dest_y; i < h; i++) {
992 /* 1.1. Handle the first destination pixel if it doesn't
993 * start at the macro pixel boundary, i.e. blend with
994 * the background! */
995 if (dest_x % 4 == 1) {
996 y1 = srcY[0];
997 y2 = srcY[1];
998 y3 = srcY[2];
999 u1 = srcU[0];
1000 v1 = srcV[0];
1001
1002 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1003 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1004 destY[2] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1005
1006 destU[0] = CLAMP (
1007 (destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1008 v1) + APPLY_MATRIX (matrix, 1, y2, u1,
1009 v1) + APPLY_MATRIX (matrix, 1, y3, u1, v1)) / 4, 0, 255);
1010 destV[0] =
1011 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1012 v1) + APPLY_MATRIX (matrix, 2, y2, u1,
1013 v1) + APPLY_MATRIX (matrix, 2, y3, u1, v1)) / 4, 0, 255);
1014
1015 j = dest_x + 3;
1016 src_y_idx = dest_y_idx = 3;
1017 dest_uv_idx = 1;
1018 src_uv_idx = (src_x % 4) + 3;
1019 } else if (dest_x % 4 == 2) {
1020 y1 = srcY[0];
1021 y2 = srcY[1];
1022 u1 = srcU[0];
1023 v1 = srcV[0];
1024
1025 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1026 destY[1] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1027
1028 destU[0] = CLAMP (
1029 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1030 v1) + APPLY_MATRIX (matrix, 1, y2, u1, v1)) / 4, 0, 255);
1031 destV[0] =
1032 CLAMP ((2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1033 v1) + APPLY_MATRIX (matrix, 2, y2, u1, v1)) / 4, 0, 255);
1034
1035 j = dest_x + 2;
1036 src_y_idx = dest_y_idx = 2;
1037 dest_uv_idx = 1;
1038 src_uv_idx = (src_x % 4) + 2;
1039 } else if (dest_x % 4 == 3) {
1040 y1 = srcY[0];
1041 u1 = srcU[0];
1042 v1 = srcV[0];
1043
1044 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1045
1046 destU[0] = CLAMP (
1047 (3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0, 255);
1048 destV[0] = CLAMP (
1049 (3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0, 255);
1050
1051 j = dest_x + 1;
1052 src_y_idx = dest_y_idx = 1;
1053 dest_uv_idx = 1;
1054 src_uv_idx = (src_x % 4) + 1;
1055 } else {
1056 j = dest_x;
1057 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1058 src_uv_idx = (src_x % 4);
1059 }
1060
1061 /* 1.2. Copy all macro pixels from the source to the destination.
1062 * All pixels now start at macro pixel boundary, i.e. no
1063 * blending with the background is necessary. */
1064 for (; j < w - 3; j += 4) {
1065 y1 = srcY[src_y_idx];
1066 y2 = srcY[src_y_idx + 1];
1067 y3 = srcY[src_y_idx + 2];
1068 y4 = srcY[src_y_idx + 3];
1069
1070 u1 = srcU[src_uv_idx / 4];
1071 v1 = srcV[src_uv_idx / 4];
1072 src_uv_idx++;
1073 u2 = srcU[src_uv_idx / 4];
1074 v2 = srcV[src_uv_idx / 4];
1075 src_uv_idx++;
1076 u3 = srcU[src_uv_idx / 4];
1077 v3 = srcV[src_uv_idx / 4];
1078 src_uv_idx++;
1079 u4 = srcU[src_uv_idx / 4];
1080 v4 = srcV[src_uv_idx / 4];
1081 src_uv_idx++;
1082
1083 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1084 destY[dest_y_idx + 1] =
1085 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1086 destY[dest_y_idx + 2] =
1087 CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1088 destY[dest_y_idx + 3] =
1089 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
1090
1091 destU[dest_uv_idx] = CLAMP (
1092 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1093 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1094 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1095 destV[dest_uv_idx] =
1096 CLAMP ((APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix,
1097 2, y2, u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1098 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1099
1100 dest_y_idx += 4;
1101 src_y_idx += 4;
1102 dest_uv_idx++;
1103 }
1104
1105 /* 1.3. Now copy the last pixel if one exists and blend it
1106 * with the background because we only fill part of
1107 * the macro pixel. In case this is the last pixel of
1108 * the destination we will a larger part. */
1109 if (j == w - 1 && j == dest_width - 1) {
1110 y1 = srcY[src_y_idx];
1111 u1 = srcU[src_uv_idx / 4];
1112 v1 = srcV[src_uv_idx / 4];
1113
1114 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1115 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1116 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1117 } else if (j == w - 1) {
1118 y1 = srcY[src_y_idx];
1119 u1 = srcU[src_uv_idx / 4];
1120 v1 = srcV[src_uv_idx / 4];
1121
1122 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1123 destU[dest_uv_idx] = CLAMP (
1124 (destU[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1125 v1)) / 4, 0, 255);
1126 destV[dest_uv_idx] = CLAMP (
1127 (destV[dest_uv_idx] + 3 * APPLY_MATRIX (matrix, 1, y1, u1,
1128 v1)) / 4, 0, 255);
1129 } else if (j == w - 2 && j == dest_width - 2) {
1130 y1 = srcY[src_y_idx];
1131 y2 = srcY[src_y_idx + 1];
1132 u1 = srcU[src_uv_idx / 4];
1133 v1 = srcV[src_uv_idx / 4];
1134
1135 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1136 destY[dest_y_idx + 1] =
1137 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1138 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1139 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1140 } else if (j == w - 2) {
1141 y1 = srcY[src_y_idx];
1142 y2 = srcY[src_y_idx + 1];
1143 u1 = srcU[src_uv_idx / 4];
1144 v1 = srcV[src_uv_idx / 4];
1145
1146 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1147 destY[dest_y_idx + 1] =
1148 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1149 destU[dest_uv_idx] =
1150 CLAMP ((destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1151 v1)) / 2, 0, 255);
1152 destV[dest_uv_idx] =
1153 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1154 v1)) / 2, 0, 255);
1155 } else if (j == w - 3 && j == dest_width - 3) {
1156 y1 = srcY[src_y_idx];
1157 y2 = srcY[src_y_idx + 1];
1158 y3 = srcY[src_y_idx + 2];
1159 u1 = srcU[src_uv_idx / 4];
1160 v1 = srcV[src_uv_idx / 4];
1161
1162 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1163 destY[dest_y_idx + 1] =
1164 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1165 destY[dest_y_idx + 2] =
1166 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1167 destU[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1168 destV[dest_uv_idx] = CLAMP (APPLY_MATRIX (matrix, 1, y1, u1, v1), 0, 255);
1169 } else if (j == w - 3) {
1170 y1 = srcY[src_y_idx];
1171 y2 = srcY[src_y_idx + 1];
1172 y3 = srcY[src_y_idx + 2];
1173 u1 = srcU[src_uv_idx / 4];
1174 v1 = srcV[src_uv_idx / 4];
1175
1176 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1177 destY[dest_y_idx + 1] =
1178 CLAMP (APPLY_MATRIX (matrix, 0, y2, u1, v1), 0, 255);
1179 destY[dest_y_idx + 2] =
1180 CLAMP (APPLY_MATRIX (matrix, 0, y3, u1, v1), 0, 255);
1181 destU[dest_uv_idx] =
1182 CLAMP ((3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1183 v1)) / 4, 0, 255);
1184 destV[dest_uv_idx] =
1185 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1186 v1)) / 4, 0, 255);
1187 }
1188
1189 destY += dest_strideY;
1190 destU += dest_strideU;
1191 destV += dest_strideV;
1192 srcY += src_strideY;
1193 srcU += src_strideU;
1194 srcV += src_strideV;
1195 }
1196 }
1197
1198 static void
copy_i420_i420(guint i_alpha,GstVideoFrame * dest,gboolean dest_sdtv,gint dest_x,gint dest_y,GstVideoFrame * src,gboolean src_sdtv,gint src_x,gint src_y,gint w,gint h)1199 copy_i420_i420 (guint i_alpha, GstVideoFrame * dest,
1200 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src,
1201 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1202 {
1203 gint i, j;
1204 guint8 *destY, *destU, *destV;
1205 const guint8 *srcY, *srcU, *srcV;
1206 guint8 *destY2;
1207 const guint8 *srcY2, *srcU2, *srcV2;
1208 gint dest_strideY, dest_strideU, dest_strideV;
1209 gint src_strideY, src_strideU, src_strideV;
1210 gint src_y_idx, src_uv_idx;
1211 gint dest_y_idx, dest_uv_idx;
1212 gint matrix[12];
1213 gint y1, y2, y3, y4;
1214 gint u1, u2, u3, u4;
1215 gint v1, v2, v3, v4;
1216 gint dest_width, dest_height;
1217
1218 dest_width = GST_VIDEO_FRAME_WIDTH (dest);
1219 dest_height = GST_VIDEO_FRAME_HEIGHT (dest);
1220
1221 dest_strideY = GST_VIDEO_FRAME_COMP_STRIDE (dest, 0);
1222 dest_strideU = GST_VIDEO_FRAME_COMP_STRIDE (dest, 1);
1223 dest_strideV = GST_VIDEO_FRAME_COMP_STRIDE (dest, 2);
1224
1225 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src, 0);
1226 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src, 1);
1227 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src, 2);
1228
1229 destY = GST_VIDEO_FRAME_COMP_DATA (dest, 0);
1230 destU = GST_VIDEO_FRAME_COMP_DATA (dest, 1);
1231 destV = GST_VIDEO_FRAME_COMP_DATA (dest, 2);
1232
1233 srcY = GST_VIDEO_FRAME_COMP_DATA (src, 0);
1234 srcU = GST_VIDEO_FRAME_COMP_DATA (src, 1);
1235 srcV = GST_VIDEO_FRAME_COMP_DATA (src, 2);
1236
1237 destY = destY + dest_y * dest_strideY + dest_x;
1238 destU = destU + (dest_y / 2) * dest_strideU + dest_x / 2;
1239 destV = destV + (dest_y / 2) * dest_strideV + dest_x / 2;
1240
1241 srcY = srcY + src_y * src_strideY + src_x;
1242 srcU = srcU + (src_y / 2) * src_strideU + src_x / 2;
1243 srcV = srcV + (src_y / 2) * src_strideV + src_x / 2;
1244
1245 destY2 = destY + dest_strideY;
1246 srcY2 = srcY + src_strideY;
1247
1248 h = dest_y + h;
1249 w = dest_x + w;
1250
1251 if (src_sdtv != dest_sdtv)
1252 memcpy (matrix,
1253 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1254 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1255 else
1256 memcpy (matrix, cog_identity_matrix_8bit, 12 * sizeof (gint));
1257
1258 /* 1. Handle the first destination scanline specially if it
1259 * doesn't start at the macro pixel boundary, i.e. blend
1260 * with the background! */
1261 if (dest_y % 2 == 1) {
1262 /* 1.1. Handle the first destination pixel if it doesn't
1263 * start at the macro pixel boundary, i.e. blend with
1264 * the background! */
1265 if (dest_x % 2 == 1) {
1266 y1 = srcY[0];
1267 u1 = srcU[0];
1268 v1 = srcV[0];
1269
1270 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1271 destU[0] =
1272 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1273 255);
1274 destV[0] =
1275 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1276 255);
1277
1278 j = dest_x + 1;
1279 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1280 src_uv_idx = (src_x % 2) + 1;
1281 } else {
1282 j = dest_x;
1283 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1284 src_uv_idx = (src_x % 2);
1285 }
1286
1287 /* 1.2. Copy all macro pixels from the source to the destination
1288 * but blend with the background because we're only filling
1289 * the lower part of the macro pixels. */
1290 for (; j < w - 1; j += 2) {
1291 y1 = srcY[src_y_idx];
1292 y2 = srcY[src_y_idx + 1];
1293
1294 u1 = srcU[src_uv_idx / 2];
1295 v1 = srcV[src_uv_idx / 2];
1296 src_uv_idx++;
1297 u2 = srcU[src_uv_idx / 2];
1298 v2 = srcV[src_uv_idx / 2];
1299 src_uv_idx++;
1300
1301 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1302 destY[dest_y_idx + 1] =
1303 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1304 destU[dest_uv_idx] =
1305 CLAMP ((2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1306 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1307 destV[dest_uv_idx] =
1308 CLAMP ((2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1309 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1310
1311 dest_y_idx += 2;
1312 src_y_idx += 2;
1313 dest_uv_idx++;
1314 }
1315
1316 /* 1.3. Now copy the last pixel if one exists and blend it
1317 * with the background because we only fill part of
1318 * the macro pixel. In case this is the last pixel of
1319 * the destination we will a larger part. */
1320 if (j == w - 1 && j == dest_width - 1) {
1321 y1 = srcY[src_y_idx];
1322 u1 = srcU[src_uv_idx / 2];
1323 v1 = srcV[src_uv_idx / 2];
1324
1325 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1326 destU[dest_uv_idx] = CLAMP (
1327 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
1328 255);
1329 destV[dest_uv_idx] =
1330 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1331 v1)) / 2, 0, 255);
1332 } else if (j == w - 1) {
1333 y1 = srcY[src_y_idx];
1334 u1 = srcU[src_uv_idx / 2];
1335 v1 = srcV[src_uv_idx / 2];
1336
1337 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1338 destU[dest_uv_idx] = CLAMP (
1339 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
1340 0, 255);
1341 destV[dest_uv_idx] =
1342 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1343 v1)) / 4, 0, 255);
1344 }
1345
1346 destY += dest_strideY;
1347 destY2 += dest_strideY;
1348 destU += dest_strideU;
1349 destV += dest_strideV;
1350 srcY += src_strideY;
1351 srcY2 += src_strideY;
1352 src_y++;
1353 if (src_y % 2 == 0) {
1354 srcU += src_strideU;
1355 srcV += src_strideV;
1356 }
1357 i = dest_y + 1;
1358 } else {
1359 i = dest_y;
1360 }
1361
1362 /* 2. Copy all macro pixel scanlines, the destination scanline
1363 * now starts at macro pixel boundary. */
1364 for (; i < h - 1; i += 2) {
1365 /* 2.1. Handle the first destination pixel if it doesn't
1366 * start at the macro pixel boundary, i.e. blend with
1367 * the background! */
1368
1369 srcU2 = srcU;
1370 srcV2 = srcV;
1371 if (src_y % 2 == 1) {
1372 srcU2 += src_strideU;
1373 srcV2 += src_strideV;
1374 }
1375
1376 if (dest_x % 2 == 1) {
1377 y1 = srcY[0];
1378 y2 = srcY2[0];
1379 u1 = srcU[0];
1380 v1 = srcV[0];
1381 u2 = srcU2[0];
1382 v2 = srcV2[0];
1383
1384 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1385 destY2[0] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1386 destU[0] = CLAMP (
1387 (2 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1,
1388 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1389 destV[0] = CLAMP (
1390 (2 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1,
1391 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1392 j = dest_x + 1;
1393 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1394 src_uv_idx = (src_x % 2) + 1;
1395 } else {
1396 j = dest_x;
1397 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1398 src_uv_idx = (src_x % 2);
1399 }
1400
1401 /* 2.2. Copy all macro pixels from the source to the destination.
1402 * All pixels now start at macro pixel boundary, i.e. no
1403 * blending with the background is necessary. */
1404 for (; j < w - 1; j += 2) {
1405 y1 = srcY[src_y_idx];
1406 y2 = srcY[src_y_idx + 1];
1407 y3 = srcY2[src_y_idx];
1408 y4 = srcY2[src_y_idx + 1];
1409
1410 u1 = srcU[src_uv_idx / 2];
1411 u3 = srcU2[src_uv_idx / 2];
1412 v1 = srcV[src_uv_idx / 2];
1413 v3 = srcV2[src_uv_idx / 2];
1414 src_uv_idx++;
1415 u2 = srcU[src_uv_idx / 2];
1416 u4 = srcU2[src_uv_idx / 2];
1417 v2 = srcV[src_uv_idx / 2];
1418 v4 = srcV2[src_uv_idx / 2];
1419 src_uv_idx++;
1420
1421 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1422 destY[dest_y_idx + 1] =
1423 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1424 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y3, u3, v3), 0, 255);
1425 destY2[dest_y_idx + 1] =
1426 CLAMP (APPLY_MATRIX (matrix, 0, y4, u4, v4), 0, 255);
1427
1428 destU[dest_uv_idx] = CLAMP (
1429 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 1, y2,
1430 u2, v2) + APPLY_MATRIX (matrix, 1, y3, u3,
1431 v3) + APPLY_MATRIX (matrix, 1, y4, u4, v4)) / 4, 0, 255);
1432 destV[dest_uv_idx] = CLAMP (
1433 (APPLY_MATRIX (matrix, 2, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1434 u2, v2) + APPLY_MATRIX (matrix, 2, y3, u3,
1435 v3) + APPLY_MATRIX (matrix, 2, y4, u4, v4)) / 4, 0, 255);
1436
1437 dest_y_idx += 2;
1438 src_y_idx += 2;
1439 dest_uv_idx++;
1440 }
1441
1442 /* 2.3. Now copy the last pixel if one exists and blend it
1443 * with the background because we only fill part of
1444 * the macro pixel. In case this is the last pixel of
1445 * the destination we will a larger part. */
1446 if (j == w - 1 && j == dest_width - 1) {
1447 y1 = srcY[src_y_idx];
1448 y2 = srcY2[src_y_idx];
1449
1450 u1 = srcU[src_uv_idx / 2];
1451 u2 = srcU2[src_uv_idx / 2];
1452
1453 v1 = srcV[src_uv_idx / 2];
1454 v2 = srcV2[src_uv_idx / 2];
1455
1456 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1457 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1458 destU[dest_uv_idx] = CLAMP (
1459 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1460 u2, v2)) / 2, 0, 255);
1461 destV[dest_uv_idx] = CLAMP (
1462 (APPLY_MATRIX (matrix, 1, y1, u1, v1) + APPLY_MATRIX (matrix, 2, y2,
1463 u2, v2)) / 2, 0, 255);
1464 } else if (j == w - 1) {
1465 y1 = srcY[src_y_idx];
1466 y2 = srcY2[src_y_idx];
1467
1468 u1 = srcU[src_uv_idx / 2];
1469 u2 = srcU2[src_uv_idx / 2];
1470
1471 v1 = srcV[src_uv_idx / 2];
1472 v2 = srcV2[src_uv_idx / 2];
1473
1474 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1475 destY2[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1476 destU[dest_uv_idx] = CLAMP (
1477 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1478 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1479 destV[dest_uv_idx] = CLAMP (
1480 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1481 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1482 }
1483
1484 destY += 2 * dest_strideY;
1485 destY2 += 2 * dest_strideY;
1486 destU += dest_strideU;
1487 destV += dest_strideV;
1488 srcY += 2 * src_strideY;
1489 srcY2 += 2 * src_strideY;
1490
1491 src_y += 2;
1492 srcU += src_strideU;
1493 srcV += src_strideV;
1494 }
1495
1496 /* 3. Handle the last scanline if one exists. This again
1497 * doesn't start at macro pixel boundary but should
1498 * only fill the upper part of the macro pixels. */
1499 if (i == h - 1 && i == dest_height - 1) {
1500 /* 3.1. Handle the first destination pixel if it doesn't
1501 * start at the macro pixel boundary, i.e. blend with
1502 * the background! */
1503 if (dest_x % 2 == 1) {
1504 y1 = srcY[0];
1505 u1 = srcU[0];
1506 v1 = srcV[0];
1507
1508 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1509 destU[0] =
1510 CLAMP ((destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0, 255);
1511 destV[0] =
1512 CLAMP ((destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 2, 0, 255);
1513
1514 j = dest_x + 1;
1515 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1516 src_uv_idx = (src_x % 2) + 1;
1517 } else {
1518 j = dest_x;
1519 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1520 src_uv_idx = (src_x % 2);
1521 }
1522
1523 /* 3.2. Copy all macro pixels from the source to the destination
1524 * but blend with the background because we're only filling
1525 * the upper part of the macro pixels. */
1526 for (; j < w - 1; j += 2) {
1527 y1 = srcY[src_y_idx];
1528 y2 = srcY[src_y_idx + 1];
1529
1530 u1 = srcU[src_uv_idx / 2];
1531 v1 = srcV[src_uv_idx / 2];
1532 src_uv_idx++;
1533 u2 = srcU[src_uv_idx / 2];
1534 v2 = srcV[src_uv_idx / 2];
1535 src_uv_idx++;
1536
1537 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1538 destY[dest_y_idx + 1] =
1539 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1540
1541 destU[dest_uv_idx] = CLAMP (
1542 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1543 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1544 destV[dest_uv_idx] = CLAMP (
1545 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1546 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1547
1548 dest_y_idx += 2;
1549 src_y_idx += 2;
1550 dest_uv_idx++;
1551 }
1552
1553 /* 3.3. Now copy the last pixel if one exists and blend it
1554 * with the background because we only fill part of
1555 * the macro pixel. In case this is the last pixel of
1556 * the destination we will a larger part. */
1557 if (j == w - 1 && j == dest_width - 1) {
1558 y1 = srcY[src_y_idx];
1559 u1 = srcU[src_uv_idx / 2];
1560 v1 = srcV[src_uv_idx / 2];
1561
1562 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1563 destU[dest_uv_idx] = CLAMP (
1564 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
1565 255);
1566 destV[dest_uv_idx] =
1567 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1568 v1)) / 2, 0, 255);
1569 } else if (j == w - 1) {
1570 y1 = srcY[src_y_idx];
1571 u1 = srcU[src_uv_idx / 2];
1572 v1 = srcV[src_uv_idx / 2];
1573
1574 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1575 destU[dest_uv_idx] = CLAMP (
1576 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
1577 0, 255);
1578 destV[dest_uv_idx] =
1579 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1580 v1)) / 4, 0, 255);
1581 }
1582 } else if (i == h - 1) {
1583 /* 3.1. Handle the first destination pixel if it doesn't
1584 * start at the macro pixel boundary, i.e. blend with
1585 * the background! */
1586 if (dest_x % 2 == 1) {
1587 y1 = srcY[0];
1588 u1 = srcU[0];
1589 v1 = srcV[0];
1590
1591 destY[0] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1592 destU[0] =
1593 CLAMP ((3 * destU[0] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4, 0,
1594 255);
1595 destV[0] =
1596 CLAMP ((3 * destV[0] + APPLY_MATRIX (matrix, 2, y1, u1, v1)) / 4, 0,
1597 255);
1598
1599 j = dest_x + 1;
1600 src_y_idx = dest_y_idx = dest_uv_idx = 1;
1601 src_uv_idx = (src_x % 2) + 1;
1602 } else {
1603 j = dest_x;
1604 src_y_idx = dest_y_idx = dest_uv_idx = 0;
1605 src_uv_idx = (src_x % 2);
1606 }
1607
1608 /* 3.2. Copy all macro pixels from the source to the destination
1609 * but blend with the background because we're only filling
1610 * the upper part of the macro pixels. */
1611 for (; j < w - 1; j += 2) {
1612 y1 = srcY[src_y_idx];
1613 y2 = srcY[src_y_idx + 1];
1614
1615 u1 = srcU[src_uv_idx / 2];
1616 v1 = srcV[src_uv_idx / 2];
1617 src_uv_idx++;
1618 u2 = srcU[src_uv_idx / 2];
1619 v2 = srcV[src_uv_idx / 2];
1620 src_uv_idx++;
1621
1622 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1623 destY[dest_y_idx + 1] =
1624 CLAMP (APPLY_MATRIX (matrix, 0, y2, u2, v2), 0, 255);
1625
1626 destU[dest_uv_idx] = CLAMP (
1627 (2 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1628 v1) + APPLY_MATRIX (matrix, 1, y2, u2, v2)) / 4, 0, 255);
1629 destV[dest_uv_idx] = CLAMP (
1630 (2 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 2, y1, u1,
1631 v1) + APPLY_MATRIX (matrix, 2, y2, u2, v2)) / 4, 0, 255);
1632
1633 dest_y_idx += 2;
1634 src_y_idx += 2;
1635 dest_uv_idx++;
1636 }
1637
1638 /* 3.3. Now copy the last pixel if one exists and blend it
1639 * with the background because we only fill part of
1640 * the macro pixel. In case this is the last pixel of
1641 * the destination we will a larger part. */
1642 if (j == w - 1 && j == dest_width - 1) {
1643 y1 = srcY[src_y_idx];
1644 u1 = srcU[src_uv_idx / 2];
1645 v1 = srcV[src_uv_idx / 2];
1646
1647 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1648 destU[dest_uv_idx] = CLAMP (
1649 (destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 2, 0,
1650 255);
1651 destV[dest_uv_idx] =
1652 CLAMP ((destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1653 v1)) / 2, 0, 255);
1654 } else if (j == w - 1) {
1655 y1 = srcY[src_y_idx];
1656 u1 = srcU[src_uv_idx / 2];
1657 v1 = srcV[src_uv_idx / 2];
1658
1659 destY[dest_y_idx] = CLAMP (APPLY_MATRIX (matrix, 0, y1, u1, v1), 0, 255);
1660 destU[dest_uv_idx] = CLAMP (
1661 (3 * destU[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1, v1)) / 4,
1662 0, 255);
1663 destV[dest_uv_idx] =
1664 CLAMP ((3 * destV[dest_uv_idx] + APPLY_MATRIX (matrix, 1, y1, u1,
1665 v1)) / 4, 0, 255);
1666 }
1667 }
1668 }
1669
1670 static void
copy_i420_ayuv(guint i_alpha,GstVideoFrame * dest_frame,gboolean dest_sdtv,gint dest_x,gint dest_y,GstVideoFrame * src_frame,gboolean src_sdtv,gint src_x,gint src_y,gint w,gint h)1671 copy_i420_ayuv (guint i_alpha, GstVideoFrame * dest_frame,
1672 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
1673 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
1674 {
1675 const guint8 *srcY, *srcU, *srcV;
1676 gint src_strideY, src_strideU, src_strideV;
1677 gint dest_stride;
1678 guint8 *dest;
1679
1680 src_strideY = GST_VIDEO_FRAME_COMP_STRIDE (src_frame, 0);
1681 src_strideU = GST_VIDEO_FRAME_COMP_STRIDE (src_frame, 1);
1682 src_strideV = GST_VIDEO_FRAME_COMP_STRIDE (src_frame, 2);
1683
1684 srcY = GST_VIDEO_FRAME_COMP_DATA (src_frame, 0);
1685 srcU = GST_VIDEO_FRAME_COMP_DATA (src_frame, 1);
1686 srcV = GST_VIDEO_FRAME_COMP_DATA (src_frame, 2);
1687
1688 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
1689
1690 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
1691 dest = dest + dest_y * dest_stride + dest_x * 4;
1692
1693 srcY = srcY + src_y * src_strideY + src_x;
1694 srcU = srcU + (src_y / 2) * src_strideU + src_x / 2;
1695 srcV = srcV + (src_y / 2) * src_strideV + src_x / 2;
1696
1697 i_alpha = CLAMP (i_alpha, 0, 255);
1698
1699 if (src_sdtv != dest_sdtv) {
1700 gint i, j, uv_idx;
1701 gint y, u, v;
1702 gint y1, u1, v1;
1703 gint matrix[12];
1704
1705 memcpy (matrix,
1706 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
1707 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
1708
1709 for (i = 0; i < h; i++) {
1710 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1711 y = srcY[j];
1712 u = srcU[uv_idx / 2];
1713 v = srcV[uv_idx / 2];
1714
1715 y1 = APPLY_MATRIX (matrix, 0, y, u, v);
1716 u1 = APPLY_MATRIX (matrix, 1, y, u, v);
1717 v1 = APPLY_MATRIX (matrix, 2, y, u, v);
1718
1719 dest[4 * j + 0] = i_alpha;
1720 dest[4 * j + 1] = y1;
1721 dest[4 * j + 2] = u1;
1722 dest[4 * j + 3] = v1;
1723 }
1724 dest += dest_stride;
1725
1726 src_y++;
1727 srcY += src_strideY;
1728 if (src_y % 2 == 0) {
1729 srcU += src_strideU;
1730 srcV += src_strideV;
1731 }
1732 }
1733 } else {
1734 gint i, j, uv_idx;
1735 gint y, u, v;
1736
1737 for (i = 0; i < h; i++) {
1738 for (j = 0, uv_idx = src_x % 2; j < w; j++, uv_idx++) {
1739 y = srcY[j];
1740 u = srcU[uv_idx / 2];
1741 v = srcV[uv_idx / 2];
1742
1743 dest[4 * j + 0] = i_alpha;
1744 dest[4 * j + 1] = y;
1745 dest[4 * j + 2] = u;
1746 dest[4 * j + 3] = v;
1747 }
1748 dest += dest_stride;
1749
1750 src_y++;
1751 srcY += src_strideY;
1752 if (src_y % 2 == 0) {
1753 srcU += src_strideU;
1754 srcV += src_strideV;
1755 }
1756 }
1757 }
1758 }
1759
1760 static void
fill_rgb32(GstVideoBoxFill fill_type,guint b_alpha,GstVideoFrame * frame,gboolean sdtv)1761 fill_rgb32 (GstVideoBoxFill fill_type, guint b_alpha,
1762 GstVideoFrame * frame, gboolean sdtv)
1763 {
1764 guint32 empty_pixel;
1765 gint p[4];
1766 guint8 *dest;
1767 guint stride;
1768 gint width, height;
1769
1770 width = GST_VIDEO_FRAME_WIDTH (frame);
1771 height = GST_VIDEO_FRAME_HEIGHT (frame);
1772
1773 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
1774 stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
1775
1776 p[0] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 3);
1777 p[1] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 0);
1778 p[2] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 1);
1779 p[3] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 2);
1780
1781 b_alpha = CLAMP (b_alpha, 0, 255);
1782
1783 if (GST_VIDEO_FRAME_N_COMPONENTS (frame) == 4) {
1784 empty_pixel = GUINT32_FROM_LE ((b_alpha << (p[0] * 8)) |
1785 (rgb_colors_R[fill_type] << (p[1] * 8)) |
1786 (rgb_colors_G[fill_type] << (p[2] * 8)) |
1787 (rgb_colors_B[fill_type] << (p[3] * 8)));
1788 } else {
1789 empty_pixel = GUINT32_FROM_LE (
1790 (rgb_colors_R[fill_type] << (p[1] * 8)) |
1791 (rgb_colors_G[fill_type] << (p[2] * 8)) |
1792 (rgb_colors_B[fill_type] << (p[3] * 8)));
1793 }
1794
1795 if (stride == width * 4) {
1796 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width * height);
1797 } else if (height) {
1798 for (; height; --height) {
1799 video_box_orc_splat_u32 ((guint32 *) dest, empty_pixel, width);
1800 dest += stride;
1801 }
1802 }
1803 }
1804
1805 static void
fill_rgb24(GstVideoBoxFill fill_type,guint b_alpha,GstVideoFrame * frame,gboolean sdtv)1806 fill_rgb24 (GstVideoBoxFill fill_type, guint b_alpha,
1807 GstVideoFrame * frame, gboolean sdtv)
1808 {
1809 gint dest_stride;
1810 gint p[4];
1811 gint i, j;
1812 guint8 *dest;
1813 gint width, height;
1814
1815 width = GST_VIDEO_FRAME_WIDTH (frame);
1816 height = GST_VIDEO_FRAME_HEIGHT (frame);
1817
1818 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
1819 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
1820
1821 p[1] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 0);
1822 p[2] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 1);
1823 p[3] = GST_VIDEO_FRAME_COMP_OFFSET (frame, 2);
1824
1825 for (i = 0; i < height; i++) {
1826 for (j = 0; j < width; j++) {
1827 dest[3 * j + p[1]] = rgb_colors_R[fill_type];
1828 dest[3 * j + p[2]] = rgb_colors_G[fill_type];
1829 dest[3 * j + p[3]] = rgb_colors_B[fill_type];
1830 }
1831 dest += dest_stride;
1832 }
1833 }
1834
/* Copies a w x h rectangle from an RGB(A) source frame into an RGB(A)
 * destination frame, handling all combinations of 24bpp (packed, no alpha)
 * and 32bpp (with or without alpha channel) layouts.
 *
 * i_alpha scales the source alpha when both frames carry alpha
 * (assumes 0..256 fixed-point scale, applied as (a * i_alpha) >> 8 —
 * TODO confirm against callers), or becomes the constant output alpha
 * (clamped to 0..255) when only the destination has alpha.
 * dest_sdtv/src_sdtv are unused: RGB needs no colorimetry conversion. */
static void
copy_rgb32 (guint i_alpha, GstVideoFrame * dest_frame,
    gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
    gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
{
  gint i, j;
  gint src_stride, dest_stride;
  gboolean in_alpha, out_alpha;
  gint in_bpp, out_bpp;
  gint p_out[4];                /* byte offsets of A,R,G,B in a dest pixel */
  gint p_in[4];                 /* byte offsets of A,R,G,B in a src pixel */
  gboolean packed_out;
  gboolean packed_in;
  guint8 *src, *dest;

  src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
  dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
  in_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (src_frame, 0);
  out_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (dest_frame, 0);
  /* "packed" here means 24bpp (3 bytes/pixel, no padding byte) */
  packed_in = (in_bpp < 4);
  packed_out = (out_bpp < 4);

  out_alpha = GST_VIDEO_INFO_HAS_ALPHA (&dest_frame->info);
  p_out[0] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 3);
  p_out[1] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 0);
  p_out[2] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 1);
  p_out[3] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 2);

  in_alpha = GST_VIDEO_INFO_HAS_ALPHA (&src_frame->info);
  p_in[0] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 3);
  p_in[1] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 0);
  p_in[2] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 1);
  p_in[3] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 2);

  /* position both pointers at the top-left corner of the rectangle */
  dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
  dest = dest + dest_y * dest_stride + dest_x * out_bpp;
  src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
  src = src + src_y * src_stride + src_x * in_bpp;

  if (in_alpha && out_alpha) {
    /* both 32bpp with alpha: scale source alpha, copy R/G/B.
     * w is converted to a byte count so j can index both buffers. */
    w *= 4;
    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j += 4) {
        dest[j + p_out[0]] = (src[j + p_in[0]] * i_alpha) >> 8;
        dest[j + p_out[1]] = src[j + p_in[1]];
        dest[j + p_out[2]] = src[j + p_in[2]];
        dest[j + p_out[3]] = src[j + p_in[3]];
      }
      dest += dest_stride;
      src += src_stride;
    }
  } else if (out_alpha && !packed_in) {
    /* 32bpp source without alpha -> alpha dest: write constant alpha */
    w *= 4;
    i_alpha = CLAMP (i_alpha, 0, 255);

    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j += 4) {
        dest[j + p_out[0]] = i_alpha;
        dest[j + p_out[1]] = src[j + p_in[1]];
        dest[j + p_out[2]] = src[j + p_in[2]];
        dest[j + p_out[3]] = src[j + p_in[3]];
      }
      dest += dest_stride;
      src += src_stride;
    }
  } else if (out_alpha && packed_in) {
    /* 24bpp source -> alpha dest: per-pixel indexing, different strides */
    i_alpha = CLAMP (i_alpha, 0, 255);

    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j++) {
        dest[4 * j + p_out[0]] = i_alpha;
        dest[4 * j + p_out[1]] = src[in_bpp * j + p_in[1]];
        dest[4 * j + p_out[2]] = src[in_bpp * j + p_in[2]];
        dest[4 * j + p_out[3]] = src[in_bpp * j + p_in[3]];
      }
      dest += dest_stride;
      src += src_stride;
    }
  } else if (!packed_out && !packed_in) {
    /* both 32bpp, no alpha on output: copy the three color bytes only */
    w *= 4;
    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j += 4) {
        dest[j + p_out[1]] = src[j + p_in[1]];
        dest[j + p_out[2]] = src[j + p_in[2]];
        dest[j + p_out[3]] = src[j + p_in[3]];
      }
      dest += dest_stride;
      src += src_stride;
    }
  } else {
    /* generic fallback: at least one side is 24bpp, index per pixel */
    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j++) {
        dest[out_bpp * j + p_out[1]] = src[in_bpp * j + p_in[1]];
        dest[out_bpp * j + p_out[2]] = src[in_bpp * j + p_in[2]];
        dest[out_bpp * j + p_out[3]] = src[in_bpp * j + p_in[3]];
      }
      dest += dest_stride;
      src += src_stride;
    }
  }
}
1936
/* Copies a w x h rectangle from an RGB(A) source frame into an AYUV
 * destination frame, converting colors with the RGB->YCbCr matrix that
 * matches the destination colorimetry (SDTV or HDTV).
 *
 * i_alpha scales the source alpha when present (assumes 0..256
 * fixed-point scale — TODO confirm against callers), otherwise it is
 * clamped to 0..255 and used as the constant output alpha. */
static void
copy_rgb32_ayuv (guint i_alpha, GstVideoFrame * dest_frame,
    gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
    gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
{
  gint i, j;
  gint src_stride, dest_stride;
  gboolean in_alpha;
  gint in_bpp;
  gint p_in[4];                 /* byte offsets of A,R,G,B in a src pixel */
  gboolean packed_in;           /* TRUE for 24bpp (3 bytes/pixel) sources */
  gint matrix[12];
  gint a;
  gint y, u, v;
  gint r, g, b;
  guint8 *dest, *src;

  dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
  src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
  in_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (src_frame, 0);
  packed_in = (in_bpp < 4);

  in_alpha = GST_VIDEO_INFO_HAS_ALPHA (&src_frame->info);
  p_in[0] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 3);
  p_in[1] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 0);
  p_in[2] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 1);
  p_in[3] = GST_VIDEO_FRAME_COMP_OFFSET (src_frame, 2);

  /* pick the RGB->YCbCr matrix for the destination's colorimetry */
  memcpy (matrix,
      (dest_sdtv) ? cog_rgb_to_ycbcr_matrix_8bit_sdtv :
      cog_rgb_to_ycbcr_matrix_8bit_hdtv, 12 * sizeof (gint));

  /* position both pointers at the top-left corner of the rectangle;
   * AYUV is always 4 bytes per pixel */
  dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
  dest = dest + dest_y * dest_stride + dest_x * 4;
  src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
  src = src + src_y * src_stride + src_x * in_bpp;

  if (in_alpha) {
    /* 32bpp source with alpha: scale alpha, convert RGB to YUV.
     * w becomes a byte count so j can index both 4-byte layouts. */
    w *= 4;
    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j += 4) {
        a = (src[j + p_in[0]] * i_alpha) >> 8;
        r = src[j + p_in[1]];
        g = src[j + p_in[2]];
        b = src[j + p_in[3]];

        y = APPLY_MATRIX (matrix, 0, r, g, b);
        u = APPLY_MATRIX (matrix, 1, r, g, b);
        v = APPLY_MATRIX (matrix, 2, r, g, b);

        dest[j + 0] = a;
        dest[j + 1] = CLAMP (y, 0, 255);
        dest[j + 2] = CLAMP (u, 0, 255);
        dest[j + 3] = CLAMP (v, 0, 255);
      }
      dest += dest_stride;
      src += src_stride;
    }
  } else if (!packed_in) {
    /* 32bpp source without alpha: constant output alpha */
    w *= 4;
    i_alpha = CLAMP (i_alpha, 0, 255);

    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j += 4) {
        a = i_alpha;
        r = src[j + p_in[1]];
        g = src[j + p_in[2]];
        b = src[j + p_in[3]];

        y = APPLY_MATRIX (matrix, 0, r, g, b);
        u = APPLY_MATRIX (matrix, 1, r, g, b);
        v = APPLY_MATRIX (matrix, 2, r, g, b);

        dest[j + 0] = a;
        dest[j + 1] = CLAMP (y, 0, 255);
        dest[j + 2] = CLAMP (u, 0, 255);
        dest[j + 3] = CLAMP (v, 0, 255);
      }
      dest += dest_stride;
      src += src_stride;
    }
  } else {
    /* 24bpp source: per-pixel indexing with different pixel strides */
    i_alpha = CLAMP (i_alpha, 0, 255);

    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j++) {
        a = i_alpha;
        r = src[in_bpp * j + p_in[1]];
        g = src[in_bpp * j + p_in[2]];
        b = src[in_bpp * j + p_in[3]];

        y = APPLY_MATRIX (matrix, 0, r, g, b);
        u = APPLY_MATRIX (matrix, 1, r, g, b);
        v = APPLY_MATRIX (matrix, 2, r, g, b);

        dest[4 * j + 0] = a;
        dest[4 * j + 1] = CLAMP (y, 0, 255);
        dest[4 * j + 2] = CLAMP (u, 0, 255);
        dest[4 * j + 3] = CLAMP (v, 0, 255);
      }
      dest += dest_stride;
      src += src_stride;
    }
  }
}
2042
/* Copies a w x h rectangle from an AYUV source frame into an RGB(A)
 * destination frame, converting colors with the YCbCr->RGB matrix that
 * matches the source colorimetry (SDTV or HDTV).
 *
 * i_alpha scales the source alpha when the destination has an alpha
 * channel (assumes 0..256 fixed-point scale — TODO confirm against
 * callers); otherwise alpha is discarded. */
static void
copy_ayuv_rgb32 (guint i_alpha, GstVideoFrame * dest_frame,
    gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
    gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
{
  gint i, j;
  gint src_stride, dest_stride;
  gboolean out_alpha;
  gint out_bpp;
  gint p_out[4];                /* byte offsets of A,R,G,B in a dest pixel */
  gboolean packed_out;          /* TRUE for 24bpp (3 bytes/pixel) outputs */
  gint matrix[12];
  gint a;
  gint y, u, v;
  gint r, g, b;
  guint8 *src, *dest;

  src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
  dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
  out_bpp = GST_VIDEO_FRAME_COMP_PSTRIDE (dest_frame, 0);
  packed_out = (out_bpp < 4);

  out_alpha = GST_VIDEO_INFO_HAS_ALPHA (&dest_frame->info);
  p_out[0] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 3);
  p_out[1] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 0);
  p_out[2] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 1);
  p_out[3] = GST_VIDEO_FRAME_COMP_OFFSET (dest_frame, 2);

  /* pick the YCbCr->RGB matrix for the source's colorimetry */
  memcpy (matrix,
      (src_sdtv) ? cog_ycbcr_to_rgb_matrix_8bit_sdtv :
      cog_ycbcr_to_rgb_matrix_8bit_hdtv, 12 * sizeof (gint));

  /* position both pointers at the top-left corner of the rectangle;
   * AYUV is always 4 bytes per pixel */
  dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
  dest = dest + dest_y * dest_stride + dest_x * out_bpp;
  src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
  src = src + src_y * src_stride + src_x * 4;

  if (out_alpha) {
    /* 32bpp destination with alpha: scale source alpha, convert YUV.
     * w becomes a byte count so j can index both 4-byte layouts. */
    w *= 4;
    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j += 4) {
        a = (src[j + 0] * i_alpha) >> 8;
        y = src[j + 1];
        u = src[j + 2];
        v = src[j + 3];

        r = APPLY_MATRIX (matrix, 0, y, u, v);
        g = APPLY_MATRIX (matrix, 1, y, u, v);
        b = APPLY_MATRIX (matrix, 2, y, u, v);

        dest[j + p_out[0]] = a;
        dest[j + p_out[1]] = CLAMP (r, 0, 255);
        dest[j + p_out[2]] = CLAMP (g, 0, 255);
        dest[j + p_out[3]] = CLAMP (b, 0, 255);
      }
      dest += dest_stride;
      src += src_stride;
    }
  } else if (!packed_out) {
    /* 32bpp destination without alpha: drop the alpha byte */
    w *= 4;
    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j += 4) {
        y = src[j + 1];
        u = src[j + 2];
        v = src[j + 3];

        r = APPLY_MATRIX (matrix, 0, y, u, v);
        g = APPLY_MATRIX (matrix, 1, y, u, v);
        b = APPLY_MATRIX (matrix, 2, y, u, v);

        dest[j + p_out[1]] = CLAMP (r, 0, 255);
        dest[j + p_out[2]] = CLAMP (g, 0, 255);
        dest[j + p_out[3]] = CLAMP (b, 0, 255);
      }
      dest += dest_stride;
      src += src_stride;
    }
  } else {
    /* 24bpp destination: per-pixel indexing with different strides */
    for (i = 0; i < h; i++) {
      for (j = 0; j < w; j++) {
        y = src[4 * j + 1];
        u = src[4 * j + 2];
        v = src[4 * j + 3];

        r = APPLY_MATRIX (matrix, 0, y, u, v);
        g = APPLY_MATRIX (matrix, 1, y, u, v);
        b = APPLY_MATRIX (matrix, 2, y, u, v);

        dest[out_bpp * j + p_out[1]] = CLAMP (r, 0, 255);
        dest[out_bpp * j + p_out[2]] = CLAMP (g, 0, 255);
        dest[out_bpp * j + p_out[3]] = CLAMP (b, 0, 255);
      }
      dest += dest_stride;
      src += src_stride;
    }
  }
}
2140
2141 static void
fill_gray(GstVideoBoxFill fill_type,guint b_alpha,GstVideoFrame * frame,gboolean sdtv)2142 fill_gray (GstVideoBoxFill fill_type, guint b_alpha,
2143 GstVideoFrame * frame, gboolean sdtv)
2144 {
2145 gint i, j;
2146 gint dest_stride;
2147 guint8 *dest;
2148 gint width, height;
2149 GstVideoFormat format;
2150
2151 format = GST_VIDEO_FRAME_FORMAT (frame);
2152
2153 width = GST_VIDEO_FRAME_WIDTH (frame);
2154 height = GST_VIDEO_FRAME_HEIGHT (frame);
2155
2156 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
2157 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
2158
2159 if (format == GST_VIDEO_FORMAT_GRAY8) {
2160 guint8 val = yuv_sdtv_colors_Y[fill_type];
2161
2162 for (i = 0; i < height; i++) {
2163 memset (dest, val, width);
2164 dest += dest_stride;
2165 }
2166 } else {
2167 guint16 val = yuv_sdtv_colors_Y[fill_type] << 8;
2168
2169 if (format == GST_VIDEO_FORMAT_GRAY16_BE) {
2170 for (i = 0; i < height; i++) {
2171 for (j = 0; j < width; j++) {
2172 GST_WRITE_UINT16_BE (dest + 2 * j, val);
2173 }
2174 dest += dest_stride;
2175 }
2176 } else {
2177 for (i = 0; i < height; i++) {
2178 for (j = 0; j < width; j++) {
2179 GST_WRITE_UINT16_LE (dest + 2 * j, val);
2180 }
2181 dest += dest_stride;
2182 }
2183 }
2184 }
2185 }
2186
2187 static void
copy_packed_simple(guint i_alpha,GstVideoFrame * dest_frame,gboolean dest_sdtv,gint dest_x,gint dest_y,GstVideoFrame * src_frame,gboolean src_sdtv,gint src_x,gint src_y,gint w,gint h)2188 copy_packed_simple (guint i_alpha, GstVideoFrame * dest_frame,
2189 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
2190 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
2191 {
2192 gint i;
2193 gint src_stride, dest_stride;
2194 gint pixel_stride, row_size;
2195 guint8 *src, *dest;
2196
2197 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
2198 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
2199 pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (dest_frame, 0);
2200 row_size = w * pixel_stride;
2201
2202 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
2203 dest = dest + dest_y * dest_stride + dest_x * pixel_stride;
2204 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
2205 src = src + src_y * src_stride + src_x * pixel_stride;
2206
2207 for (i = 0; i < h; i++) {
2208 memcpy (dest, src, row_size);
2209 dest += dest_stride;
2210 src += src_stride;
2211 }
2212 }
2213
2214 static void
fill_yuy2(GstVideoBoxFill fill_type,guint b_alpha,GstVideoFrame * frame,gboolean sdtv)2215 fill_yuy2 (GstVideoBoxFill fill_type, guint b_alpha,
2216 GstVideoFrame * frame, gboolean sdtv)
2217 {
2218 guint8 y, u, v;
2219 gint i, j;
2220 gint stride;
2221 gint width, height;
2222 guint8 *dest;
2223 GstVideoFormat format;
2224
2225 format = GST_VIDEO_FRAME_FORMAT (frame);
2226
2227 width = GST_VIDEO_FRAME_WIDTH (frame);
2228 height = GST_VIDEO_FRAME_HEIGHT (frame);
2229
2230 dest = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
2231 stride = GST_VIDEO_FRAME_PLANE_STRIDE (frame, 0);
2232
2233 y = (sdtv) ? yuv_sdtv_colors_Y[fill_type] : yuv_hdtv_colors_Y[fill_type];
2234 u = (sdtv) ? yuv_sdtv_colors_U[fill_type] : yuv_hdtv_colors_U[fill_type];
2235 v = (sdtv) ? yuv_sdtv_colors_V[fill_type] : yuv_hdtv_colors_V[fill_type];
2236
2237 width = width + (width % 2);
2238
2239 if (format == GST_VIDEO_FORMAT_YUY2) {
2240 for (i = 0; i < height; i++) {
2241 for (j = 0; j < width; j += 2) {
2242 dest[j * 2 + 0] = y;
2243 dest[j * 2 + 1] = u;
2244 dest[j * 2 + 2] = y;
2245 dest[j * 2 + 3] = v;
2246 }
2247
2248 dest += stride;
2249 }
2250 } else if (format == GST_VIDEO_FORMAT_YVYU) {
2251 for (i = 0; i < height; i++) {
2252 for (j = 0; j < width; j += 2) {
2253 dest[j * 2 + 0] = y;
2254 dest[j * 2 + 1] = v;
2255 dest[j * 2 + 2] = y;
2256 dest[j * 2 + 3] = u;
2257 }
2258
2259 dest += stride;
2260 }
2261 } else {
2262 for (i = 0; i < height; i++) {
2263 for (j = 0; j < width; j += 2) {
2264 dest[j * 2 + 0] = u;
2265 dest[j * 2 + 1] = y;
2266 dest[j * 2 + 2] = v;
2267 dest[j * 2 + 3] = y;
2268 }
2269
2270 dest += stride;
2271 }
2272 }
2273 }
2274
2275 static void
copy_yuy2_yuy2(guint i_alpha,GstVideoFrame * dest_frame,gboolean dest_sdtv,gint dest_x,gint dest_y,GstVideoFrame * src_frame,gboolean src_sdtv,gint src_x,gint src_y,gint w,gint h)2276 copy_yuy2_yuy2 (guint i_alpha, GstVideoFrame * dest_frame,
2277 gboolean dest_sdtv, gint dest_x, gint dest_y, GstVideoFrame * src_frame,
2278 gboolean src_sdtv, gint src_x, gint src_y, gint w, gint h)
2279 {
2280 gint i, j;
2281 gint src_stride, dest_stride;
2282 guint8 *src, *dest;
2283 GstVideoFormat src_format;
2284
2285 src_format = GST_VIDEO_FRAME_FORMAT (src_frame);
2286
2287 src_stride = GST_VIDEO_FRAME_PLANE_STRIDE (src_frame, 0);
2288 dest_stride = GST_VIDEO_FRAME_PLANE_STRIDE (dest_frame, 0);
2289
2290 dest_x = (dest_x & ~1);
2291 src_x = (src_x & ~1);
2292
2293 w = w + (w % 2);
2294
2295 dest = GST_VIDEO_FRAME_PLANE_DATA (dest_frame, 0);
2296 dest = dest + dest_y * dest_stride + dest_x * 2;
2297 src = GST_VIDEO_FRAME_PLANE_DATA (src_frame, 0);
2298 src = src + src_y * src_stride + src_x * 2;
2299
2300 if (src_sdtv != dest_sdtv) {
2301 gint y1, u1, v1;
2302 gint y2, u2, v2;
2303 gint matrix[12];
2304
2305 memcpy (matrix,
2306 dest_sdtv ? cog_ycbcr_hdtv_to_ycbcr_sdtv_matrix_8bit :
2307 cog_ycbcr_sdtv_to_ycbcr_hdtv_matrix_8bit, 12 * sizeof (gint));
2308
2309 if (src_format == GST_VIDEO_FORMAT_YUY2) {
2310 for (i = 0; i < h; i++) {
2311 for (j = 0; j < w; j += 2) {
2312 y1 = src[j * 2 + 0];
2313 y2 = src[j * 2 + 2];
2314 u1 = u2 = src[j * 2 + 1];
2315 v1 = v2 = src[j * 2 + 3];
2316
2317 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2318 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
2319 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2320 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2321 }
2322 dest += dest_stride;
2323 src += src_stride;
2324 }
2325 } else if (src_format == GST_VIDEO_FORMAT_YVYU) {
2326 for (i = 0; i < h; i++) {
2327 for (j = 0; j < w; j += 2) {
2328 y1 = src[j * 2 + 0];
2329 y2 = src[j * 2 + 2];
2330 v1 = v2 = src[j * 2 + 1];
2331 u1 = u2 = src[j * 2 + 3];
2332
2333 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2334 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 2, y1, u1, v1);
2335 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2336 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 1, y2, u2, v2);
2337 }
2338 dest += dest_stride;
2339 src += src_stride;
2340 }
2341 } else {
2342 for (i = 0; i < h; i++) {
2343 for (j = 0; j < w; j += 2) {
2344 u1 = u2 = src[j * 2 + 0];
2345 v1 = v2 = src[j * 2 + 2];
2346 y1 = src[j * 2 + 1];
2347 y2 = src[j * 2 + 3];
2348
2349 dest[j * 2 + 1] = APPLY_MATRIX (matrix, 0, y1, u1, v1);
2350 dest[j * 2 + 0] = APPLY_MATRIX (matrix, 1, y1, u1, v1);
2351 dest[j * 2 + 3] = APPLY_MATRIX (matrix, 0, y1, u2, v2);
2352 dest[j * 2 + 2] = APPLY_MATRIX (matrix, 2, y2, u2, v2);
2353 }
2354 dest += dest_stride;
2355 src += src_stride;
2356 }
2357 }
2358 } else {
2359 for (i = 0; i < h; i++) {
2360 memcpy (dest, src, w * 2);
2361 dest += dest_stride;
2362 src += src_stride;
2363 }
2364 }
2365 }
2366
/* Property defaults: no cropping/bordering, black fill, fully opaque
 * picture and border. */
#define DEFAULT_LEFT 0
#define DEFAULT_RIGHT 0
#define DEFAULT_TOP 0
#define DEFAULT_BOTTOM 0
#define DEFAULT_FILL_TYPE VIDEO_BOX_FILL_BLACK
#define DEFAULT_ALPHA 1.0
#define DEFAULT_BORDER_ALPHA 1.0

/* GObject property IDs; PROP_0 is the reserved GObject placeholder */
enum
{
  PROP_0,
  PROP_LEFT,
  PROP_RIGHT,
  PROP_TOP,
  PROP_BOTTOM,
  PROP_FILL_TYPE,
  PROP_ALPHA,
  PROP_BORDER_ALPHA,
  PROP_AUTOCROP
      /* FILL ME */
};
2388
/* Source and sink pads advertise the same set of raw video formats;
 * the element converts between a subset of them (see the conversion
 * table further down in transform_caps). */
static GstStaticPadTemplate gst_video_box_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("{ AYUV, "
            "ARGB, BGRA, ABGR, RGBA, xRGB, BGRx, xBGR, RGBx, RGB, BGR, "
            "Y444, Y42B, YUY2, YVYU, UYVY, I420, YV12, Y41B, "
            "GRAY8, GRAY16_BE, GRAY16_LE } "))
    );

static GstStaticPadTemplate gst_video_box_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("{ AYUV, "
            "ARGB, BGRA, ABGR, RGBA, xRGB, BGRx, xBGR, RGBx, RGB, BGR, "
            "Y444, Y42B, YUY2, YVYU, UYVY, I420, YV12, Y41B, "
            "GRAY8, GRAY16_BE, GRAY16_LE } "))
    );

#define gst_video_box_parent_class parent_class
G_DEFINE_TYPE (GstVideoBox, gst_video_box, GST_TYPE_VIDEO_FILTER);

/* forward declarations for the vfuncs wired up in class_init */
static void gst_video_box_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec);
static void gst_video_box_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec);

static gboolean gst_video_box_recalc_transform (GstVideoBox * video_box);
static GstCaps *gst_video_box_transform_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * from, GstCaps * filter);
static void gst_video_box_before_transform (GstBaseTransform * trans,
    GstBuffer * in);
static gboolean gst_video_box_src_event (GstBaseTransform * trans,
    GstEvent * event);

static gboolean gst_video_box_set_info (GstVideoFilter * vfilter, GstCaps * in,
    GstVideoInfo * in_info, GstCaps * out, GstVideoInfo * out_info);
static GstFlowReturn gst_video_box_transform_frame (GstVideoFilter * vfilter,
    GstVideoFrame * in_frame, GstVideoFrame * out_frame);
2429
2430 #define GST_TYPE_VIDEO_BOX_FILL (gst_video_box_fill_get_type())
2431 static GType
gst_video_box_fill_get_type(void)2432 gst_video_box_fill_get_type (void)
2433 {
2434 static GType video_box_fill_type = 0;
2435 static const GEnumValue video_box_fill[] = {
2436 {VIDEO_BOX_FILL_BLACK, "Black", "black"},
2437 {VIDEO_BOX_FILL_GREEN, "Green", "green"},
2438 {VIDEO_BOX_FILL_BLUE, "Blue", "blue"},
2439 {VIDEO_BOX_FILL_RED, "Red", "red"},
2440 {VIDEO_BOX_FILL_YELLOW, "Yellow", "yellow"},
2441 {VIDEO_BOX_FILL_WHITE, "White", "white"},
2442 {0, NULL, NULL},
2443 };
2444
2445 if (!video_box_fill_type) {
2446 video_box_fill_type =
2447 g_enum_register_static ("GstVideoBoxFill", video_box_fill);
2448 }
2449 return video_box_fill_type;
2450 }
2451
/* GObject finalize: releases the property mutex initialised in
 * gst_video_box_init, then chains up to the parent class. */
static void
gst_video_box_finalize (GObject * object)
{
  GstVideoBox *video_box = GST_VIDEO_BOX (object);

  g_mutex_clear (&video_box->mutex);

  G_OBJECT_CLASS (parent_class)->finalize (object);
}
2461
/* Class initialisation: installs the GObject properties, wires up the
 * GObject / GstBaseTransform / GstVideoFilter vfuncs and registers the
 * element metadata and pad templates. */
static void
gst_video_box_class_init (GstVideoBoxClass * klass)
{
  GObjectClass *gobject_class = (GObjectClass *) klass;
  GstElementClass *element_class = (GstElementClass *) (klass);
  GstBaseTransformClass *trans_class = (GstBaseTransformClass *) klass;
  GstVideoFilterClass *vfilter_class = (GstVideoFilterClass *) klass;

  gobject_class->set_property = gst_video_box_set_property;
  gobject_class->get_property = gst_video_box_get_property;
  gobject_class->finalize = gst_video_box_finalize;

  /* box properties: positive values crop, negative values add a border
   * filled with the "fill" color; all controllable at runtime */
  g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_FILL_TYPE,
      g_param_spec_enum ("fill", "Fill", "How to fill the borders",
          GST_TYPE_VIDEO_BOX_FILL, DEFAULT_FILL_TYPE,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
  g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_LEFT,
      g_param_spec_int ("left", "Left",
          "Pixels to box at left (<0 = add a border)", G_MININT, G_MAXINT,
          DEFAULT_LEFT,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
  g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_RIGHT,
      g_param_spec_int ("right", "Right",
          "Pixels to box at right (<0 = add a border)", G_MININT, G_MAXINT,
          DEFAULT_RIGHT,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
  g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_TOP,
      g_param_spec_int ("top", "Top",
          "Pixels to box at top (<0 = add a border)", G_MININT, G_MAXINT,
          DEFAULT_TOP,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
  g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BOTTOM,
      g_param_spec_int ("bottom", "Bottom",
          "Pixels to box at bottom (<0 = add a border)", G_MININT, G_MAXINT,
          DEFAULT_BOTTOM,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
  g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_ALPHA,
      g_param_spec_double ("alpha", "Alpha", "Alpha value picture", 0.0, 1.0,
          DEFAULT_ALPHA,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
  g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_BORDER_ALPHA,
      g_param_spec_double ("border-alpha", "Border Alpha",
          "Alpha value of the border", 0.0, 1.0, DEFAULT_BORDER_ALPHA,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | GST_PARAM_CONTROLLABLE));
  /**
   * GstVideoBox:autocrop:
   *
   * If set to %TRUE videobox will automatically crop/pad the input
   * video to be centered in the output.
   */
  g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_AUTOCROP,
      g_param_spec_boolean ("autocrop", "Auto crop",
          "Auto crop", FALSE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /* base transform vfuncs */
  trans_class->before_transform =
      GST_DEBUG_FUNCPTR (gst_video_box_before_transform);
  trans_class->transform_caps =
      GST_DEBUG_FUNCPTR (gst_video_box_transform_caps);
  trans_class->src_event = GST_DEBUG_FUNCPTR (gst_video_box_src_event);

  /* video filter vfuncs */
  vfilter_class->set_info = GST_DEBUG_FUNCPTR (gst_video_box_set_info);
  vfilter_class->transform_frame =
      GST_DEBUG_FUNCPTR (gst_video_box_transform_frame);

  gst_element_class_set_static_metadata (element_class, "Video box filter",
      "Filter/Effect/Video",
      "Resizes a video by adding borders or cropping",
      "Wim Taymans <wim@fluendo.com>");

  gst_element_class_add_static_pad_template (element_class,
      &gst_video_box_sink_template);
  gst_element_class_add_static_pad_template (element_class,
      &gst_video_box_src_template);
}
2536
2537 static void
gst_video_box_init(GstVideoBox * video_box)2538 gst_video_box_init (GstVideoBox * video_box)
2539 {
2540 video_box->box_right = DEFAULT_RIGHT;
2541 video_box->box_left = DEFAULT_LEFT;
2542 video_box->box_top = DEFAULT_TOP;
2543 video_box->box_bottom = DEFAULT_BOTTOM;
2544 video_box->crop_right = 0;
2545 video_box->crop_left = 0;
2546 video_box->crop_top = 0;
2547 video_box->crop_bottom = 0;
2548 video_box->fill_type = DEFAULT_FILL_TYPE;
2549 video_box->alpha = DEFAULT_ALPHA;
2550 video_box->border_alpha = DEFAULT_BORDER_ALPHA;
2551 video_box->autocrop = FALSE;
2552
2553 g_mutex_init (&video_box->mutex);
2554 }
2555
2556 static void
gst_video_box_set_property(GObject * object,guint prop_id,const GValue * value,GParamSpec * pspec)2557 gst_video_box_set_property (GObject * object, guint prop_id,
2558 const GValue * value, GParamSpec * pspec)
2559 {
2560 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2561
2562 g_mutex_lock (&video_box->mutex);
2563 switch (prop_id) {
2564 case PROP_LEFT:
2565 video_box->box_left = g_value_get_int (value);
2566 if (video_box->box_left < 0) {
2567 video_box->border_left = -video_box->box_left;
2568 video_box->crop_left = 0;
2569 } else {
2570 video_box->border_left = 0;
2571 video_box->crop_left = video_box->box_left;
2572 }
2573 break;
2574 case PROP_RIGHT:
2575 video_box->box_right = g_value_get_int (value);
2576 if (video_box->box_right < 0) {
2577 video_box->border_right = -video_box->box_right;
2578 video_box->crop_right = 0;
2579 } else {
2580 video_box->border_right = 0;
2581 video_box->crop_right = video_box->box_right;
2582 }
2583 break;
2584 case PROP_TOP:
2585 video_box->box_top = g_value_get_int (value);
2586 if (video_box->box_top < 0) {
2587 video_box->border_top = -video_box->box_top;
2588 video_box->crop_top = 0;
2589 } else {
2590 video_box->border_top = 0;
2591 video_box->crop_top = video_box->box_top;
2592 }
2593 break;
2594 case PROP_BOTTOM:
2595 video_box->box_bottom = g_value_get_int (value);
2596 if (video_box->box_bottom < 0) {
2597 video_box->border_bottom = -video_box->box_bottom;
2598 video_box->crop_bottom = 0;
2599 } else {
2600 video_box->border_bottom = 0;
2601 video_box->crop_bottom = video_box->box_bottom;
2602 }
2603 break;
2604 case PROP_FILL_TYPE:
2605 video_box->fill_type = g_value_get_enum (value);
2606 break;
2607 case PROP_ALPHA:
2608 video_box->alpha = g_value_get_double (value);
2609 break;
2610 case PROP_BORDER_ALPHA:
2611 video_box->border_alpha = g_value_get_double (value);
2612 break;
2613 case PROP_AUTOCROP:
2614 video_box->autocrop = g_value_get_boolean (value);
2615 break;
2616 default:
2617 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2618 break;
2619 }
2620 gst_video_box_recalc_transform (video_box);
2621
2622 GST_DEBUG_OBJECT (video_box, "Calling reconfigure");
2623 gst_base_transform_reconfigure_src (GST_BASE_TRANSFORM_CAST (video_box));
2624
2625 g_mutex_unlock (&video_box->mutex);
2626 }
2627
/* Recomputes all box/border/crop values so that the input is centered
 * in the output: the total width/height difference is split between the
 * left/right (top/bottom) edges. Caller is expected to hold the mutex
 * and have in_width/out_width etc. set — NOTE(review): not enforced
 * here, confirm at call sites. */
static void
gst_video_box_autocrop (GstVideoBox * video_box)
{
  gint crop_w = video_box->in_width - video_box->out_width;
  gint crop_h = video_box->in_height - video_box->out_height;

  /* left edge gets the rounded-down half of the difference */
  video_box->box_left = crop_w / 2;
  if (video_box->box_left < 0) {
    video_box->border_left = -video_box->box_left;
    video_box->crop_left = 0;
  } else {
    video_box->border_left = 0;
    video_box->crop_left = video_box->box_left;
  }

  /* Round down/up for odd width differences */
  if (crop_w < 0)
    crop_w -= 1;
  else
    crop_w += 1;

  /* right edge gets the remainder (crop_w was biased away from zero
   * above so the two halves sum to the original difference) */
  video_box->box_right = crop_w / 2;
  if (video_box->box_right < 0) {
    video_box->border_right = -video_box->box_right;
    video_box->crop_right = 0;
  } else {
    video_box->border_right = 0;
    video_box->crop_right = video_box->box_right;
  }

  /* same scheme vertically: top gets the rounded-down half */
  video_box->box_top = crop_h / 2;
  if (video_box->box_top < 0) {
    video_box->border_top = -video_box->box_top;
    video_box->crop_top = 0;
  } else {
    video_box->border_top = 0;
    video_box->crop_top = video_box->box_top;
  }

  /* Round down/up for odd height differences */
  if (crop_h < 0)
    crop_h -= 1;
  else
    crop_h += 1;
  video_box->box_bottom = crop_h / 2;

  if (video_box->box_bottom < 0) {
    video_box->border_bottom = -video_box->box_bottom;
    video_box->crop_bottom = 0;
  } else {
    video_box->border_bottom = 0;
    video_box->crop_bottom = video_box->box_bottom;
  }
}
2682
2683 static void
gst_video_box_get_property(GObject * object,guint prop_id,GValue * value,GParamSpec * pspec)2684 gst_video_box_get_property (GObject * object, guint prop_id, GValue * value,
2685 GParamSpec * pspec)
2686 {
2687 GstVideoBox *video_box = GST_VIDEO_BOX (object);
2688
2689 switch (prop_id) {
2690 case PROP_LEFT:
2691 g_value_set_int (value, video_box->box_left);
2692 break;
2693 case PROP_RIGHT:
2694 g_value_set_int (value, video_box->box_right);
2695 break;
2696 case PROP_TOP:
2697 g_value_set_int (value, video_box->box_top);
2698 break;
2699 case PROP_BOTTOM:
2700 g_value_set_int (value, video_box->box_bottom);
2701 break;
2702 case PROP_FILL_TYPE:
2703 g_value_set_enum (value, video_box->fill_type);
2704 break;
2705 case PROP_ALPHA:
2706 g_value_set_double (value, video_box->alpha);
2707 break;
2708 case PROP_BORDER_ALPHA:
2709 g_value_set_double (value, video_box->border_alpha);
2710 break;
2711 case PROP_AUTOCROP:
2712 g_value_set_boolean (value, video_box->autocrop);
2713 break;
2714 default:
2715 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2716 break;
2717 }
2718 }
2719
2720 static inline gint
gst_video_box_transform_dimension(gint val,gint delta)2721 gst_video_box_transform_dimension (gint val, gint delta)
2722 {
2723 gint64 new_val = (gint64) val + (gint64) delta;
2724
2725 new_val = CLAMP (new_val, 1, G_MAXINT);
2726
2727 return (gint) new_val;
2728 }
2729
/* Applies @delta to an integer-typed caps field value: a plain int, an
 * int range, or a list of such values (handled recursively). The result
 * is written into @dest_val.
 *
 * Returns TRUE when @dest_val was set; FALSE when the transformed value
 * is empty or the source type is unsupported (in that case @dest_val is
 * left unset). */
static gboolean
gst_video_box_transform_dimension_value (const GValue * src_val,
    gint delta, GValue * dest_val)
{
  gboolean ret = TRUE;

  g_value_init (dest_val, G_VALUE_TYPE (src_val));

  if (G_VALUE_HOLDS_INT (src_val)) {
    gint ival = g_value_get_int (src_val);

    ival = gst_video_box_transform_dimension (ival, delta);
    g_value_set_int (dest_val, ival);
  } else if (GST_VALUE_HOLDS_INT_RANGE (src_val)) {
    gint min = gst_value_get_int_range_min (src_val);
    gint max = gst_value_get_int_range_max (src_val);

    min = gst_video_box_transform_dimension (min, delta);
    max = gst_video_box_transform_dimension (max, delta);
    /* clamping may collapse the range; report failure then */
    if (min >= max) {
      ret = FALSE;
      g_value_unset (dest_val);
    } else {
      gst_value_set_int_range (dest_val, min, max);
    }
  } else if (GST_VALUE_HOLDS_LIST (src_val)) {
    gint i;

    /* transform each entry, silently dropping entries that fail */
    for (i = 0; i < gst_value_list_get_size (src_val); ++i) {
      const GValue *list_val;
      GValue newval = { 0, };

      list_val = gst_value_list_get_value (src_val, i);
      if (gst_video_box_transform_dimension_value (list_val, delta, &newval))
        gst_value_list_append_value (dest_val, &newval);
      g_value_unset (&newval);
    }

    /* an empty result list is as good as no value at all */
    if (gst_value_list_get_size (dest_val) == 0) {
      g_value_unset (dest_val);
      ret = FALSE;
    }
  } else {
    /* not an int-like value: cannot be transformed */
    g_value_unset (dest_val);
    ret = FALSE;
  }

  return ret;
}
2779
/* Transform caps in the given direction: adjust width/height by the
 * configured box borders (or drop them entirely in autocrop mode) and
 * widen the "format" field to every format we can convert from/to.
 * Returns empty caps on a dimension-transform failure. */
static GstCaps *
gst_video_box_transform_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * from, GstCaps * filter)
{
  GstVideoBox *video_box = GST_VIDEO_BOX (trans);
  GstCaps *to, *ret;
  GstCaps *templ;
  GstStructure *structure;
  GstPad *other;
  gint i, j;

  to = gst_caps_new_empty ();
  /* Transform each structure of the input caps independently */
  for (i = 0; i < gst_caps_get_size (from); i++) {
    const GValue *fval, *lval;
    GValue list = { 0, };
    GValue val = { 0, };
    gboolean seen_yuv = FALSE, seen_rgb = FALSE;
    const gchar *str;

    structure = gst_structure_copy (gst_caps_get_structure (from, i));

    /* Transform width/height */
    if (video_box->autocrop) {
      /* In autocrop mode the borders are derived from the negotiated
       * sizes, so any width/height can be accepted on either side. */
      gst_structure_remove_field (structure, "width");
      gst_structure_remove_field (structure, "height");
    } else {
      gint dw = 0, dh = 0;
      const GValue *v;
      GValue w_val = { 0, };
      GValue h_val = { 0, };

      /* calculate width and height: positive box values crop (output is
       * smaller than input), so going downstream (SINK direction) the
       * dimensions shrink by the borders, upstream they grow. */
      if (direction == GST_PAD_SINK) {
        dw -= video_box->box_left;
        dw -= video_box->box_right;
      } else {
        dw += video_box->box_left;
        dw += video_box->box_right;
      }

      if (direction == GST_PAD_SINK) {
        dh -= video_box->box_top;
        dh -= video_box->box_bottom;
      } else {
        dh += video_box->box_top;
        dh += video_box->box_bottom;
      }

      /* On failure the helper has already unset the destination GValue,
       * so only `structure` and `to` need cleanup in bail. */
      v = gst_structure_get_value (structure, "width");
      if (!gst_video_box_transform_dimension_value (v, dw, &w_val)) {
        GST_WARNING_OBJECT (video_box,
            "could not tranform width value with dw=%d" ", caps structure=%"
            GST_PTR_FORMAT, dw, structure);
        goto bail;
      }
      gst_structure_set_value (structure, "width", &w_val);

      v = gst_structure_get_value (structure, "height");
      if (!gst_video_box_transform_dimension_value (v, dh, &h_val)) {
        g_value_unset (&w_val);
        GST_WARNING_OBJECT (video_box,
            "could not tranform height value with dh=%d" ", caps structure=%"
            GST_PTR_FORMAT, dh, structure);
        goto bail;
      }
      gst_structure_set_value (structure, "height", &h_val);
      g_value_unset (&w_val);
      g_value_unset (&h_val);
    }

    /* Supported conversions:
     * I420->AYUV
     * I420->YV12
     * YV12->AYUV
     * YV12->I420
     * AYUV->I420
     * AYUV->YV12
     * AYUV->xRGB (24bpp, 32bpp, incl. alpha)
     * xRGB->xRGB (24bpp, 32bpp, from/to all variants, incl. alpha)
     * xRGB->AYUV (24bpp, 32bpp, incl. alpha)
     *
     * Passthrough only for everything else.
     */
    /* Scan the format field (list or single string) to see which color
     * families appear; AYUV bridges both YUV and RGB conversion paths. */
    fval = gst_structure_get_value (structure, "format");
    if (fval && GST_VALUE_HOLDS_LIST (fval)) {
      for (j = 0; j < gst_value_list_get_size (fval); j++) {
        lval = gst_value_list_get_value (fval, j);
        if ((str = g_value_get_string (lval))) {
          if (strcmp (str, "AYUV") == 0) {
            seen_yuv = TRUE;
            seen_rgb = TRUE;
            break;
          } else if (strstr (str, "RGB") || strstr (str, "BGR")) {
            seen_rgb = TRUE;
          } else if (strcmp (str, "I420") == 0 || strcmp (str, "YV12") == 0) {
            seen_yuv = TRUE;
          }
        }
      }
    } else if (fval && G_VALUE_HOLDS_STRING (fval)) {
      if ((str = g_value_get_string (fval))) {
        if (strcmp (str, "AYUV") == 0) {
          seen_yuv = TRUE;
          seen_rgb = TRUE;
        } else if (strstr (str, "RGB") || strstr (str, "BGR")) {
          seen_rgb = TRUE;
        } else if (strcmp (str, "I420") == 0 || strcmp (str, "YV12") == 0) {
          seen_yuv = TRUE;
        }
      }
    }

    if (seen_yuv || seen_rgb) {
      /* Build the list of convertible formats and merge it with the
       * original format value so nothing the peer offered is lost. */
      g_value_init (&list, GST_TYPE_LIST);

      g_value_init (&val, G_TYPE_STRING);
      g_value_set_string (&val, "AYUV");
      gst_value_list_append_value (&list, &val);
      g_value_unset (&val);

      if (seen_yuv) {
        g_value_init (&val, G_TYPE_STRING);
        g_value_set_string (&val, "I420");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "YV12");
        gst_value_list_append_value (&list, &val);
        g_value_unset (&val);
      }
      if (seen_rgb) {
        g_value_init (&val, G_TYPE_STRING);
        g_value_set_string (&val, "RGBx");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "BGRx");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "xRGB");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "xBGR");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "RGBA");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "BGRA");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "ARGB");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "ABGR");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "RGB");
        gst_value_list_append_value (&list, &val);
        g_value_reset (&val);
        g_value_set_string (&val, "BGR");
        gst_value_list_append_value (&list, &val);
        g_value_unset (&val);
      }
      /* `val` is unset (zeroed) here, which is what gst_value_list_merge()
       * expects for its destination */
      gst_value_list_merge (&val, fval, &list);
      gst_structure_set_value (structure, "format", &val);
      g_value_unset (&val);
      g_value_unset (&list);
    }

    /* Colorimetry/chroma-site may change across the conversion, so do not
     * constrain the peer with them */
    gst_structure_remove_field (structure, "colorimetry");
    gst_structure_remove_field (structure, "chroma-site");

    gst_caps_append_structure (to, structure);
  }

  /* filter against set allowed caps on the pad */
  other = (direction == GST_PAD_SINK) ? trans->srcpad : trans->sinkpad;
  templ = gst_pad_get_pad_template_caps (other);
  ret = gst_caps_intersect (to, templ);
  gst_caps_unref (to);
  gst_caps_unref (templ);

  GST_DEBUG_OBJECT (video_box, "direction %d, transformed %" GST_PTR_FORMAT
      " to %" GST_PTR_FORMAT, direction, from, ret);

  if (ret && filter) {
    GstCaps *intersection;

    GST_DEBUG_OBJECT (video_box, "Using filter caps %" GST_PTR_FORMAT, filter);
    intersection =
        gst_caps_intersect_full (filter, ret, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (ret);
    ret = intersection;
    GST_DEBUG_OBJECT (video_box, "Intersection %" GST_PTR_FORMAT, ret);
  }

  return ret;

  /* ERRORS */
bail:
  {
    /* `structure` was not appended yet, so free it separately; structures
     * already appended are owned by (and freed with) `to` */
    gst_structure_free (structure);
    gst_caps_unref (to);
    to = gst_caps_new_empty ();
    return to;
  }
}
2986
2987 static gboolean
gst_video_box_recalc_transform(GstVideoBox * video_box)2988 gst_video_box_recalc_transform (GstVideoBox * video_box)
2989 {
2990 gboolean res = TRUE;
2991
2992 /* if we have the same format in and out and we don't need to perform any
2993 * cropping at all, we can just operate in passthrough mode */
2994 if (video_box->in_format == video_box->out_format &&
2995 video_box->box_left == 0 && video_box->box_right == 0 &&
2996 video_box->box_top == 0 && video_box->box_bottom == 0 &&
2997 video_box->in_sdtv == video_box->out_sdtv) {
2998
2999 GST_LOG_OBJECT (video_box, "we are using passthrough");
3000 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
3001 TRUE);
3002 } else {
3003 GST_LOG_OBJECT (video_box, "we are not using passthrough");
3004 gst_base_transform_set_passthrough (GST_BASE_TRANSFORM_CAST (video_box),
3005 FALSE);
3006 }
3007 return res;
3008 }
3009
3010 static gboolean
gst_video_box_select_processing_functions(GstVideoBox * video_box)3011 gst_video_box_select_processing_functions (GstVideoBox * video_box)
3012 {
3013 switch (video_box->out_format) {
3014 case GST_VIDEO_FORMAT_AYUV:
3015 video_box->fill = fill_ayuv;
3016 switch (video_box->in_format) {
3017 case GST_VIDEO_FORMAT_AYUV:
3018 video_box->copy = copy_ayuv_ayuv;
3019 break;
3020 case GST_VIDEO_FORMAT_I420:
3021 case GST_VIDEO_FORMAT_YV12:
3022 video_box->copy = copy_i420_ayuv;
3023 break;
3024 case GST_VIDEO_FORMAT_ARGB:
3025 case GST_VIDEO_FORMAT_ABGR:
3026 case GST_VIDEO_FORMAT_RGBA:
3027 case GST_VIDEO_FORMAT_BGRA:
3028 case GST_VIDEO_FORMAT_xRGB:
3029 case GST_VIDEO_FORMAT_xBGR:
3030 case GST_VIDEO_FORMAT_RGBx:
3031 case GST_VIDEO_FORMAT_BGRx:
3032 case GST_VIDEO_FORMAT_RGB:
3033 case GST_VIDEO_FORMAT_BGR:
3034 video_box->copy = copy_rgb32_ayuv;
3035 break;
3036 default:
3037 break;
3038 }
3039 break;
3040 case GST_VIDEO_FORMAT_I420:
3041 case GST_VIDEO_FORMAT_YV12:
3042 video_box->fill = fill_planar_yuv;
3043 switch (video_box->in_format) {
3044 case GST_VIDEO_FORMAT_AYUV:
3045 video_box->copy = copy_ayuv_i420;
3046 break;
3047 case GST_VIDEO_FORMAT_I420:
3048 case GST_VIDEO_FORMAT_YV12:
3049 video_box->copy = copy_i420_i420;
3050 break;
3051 default:
3052 break;
3053 }
3054 break;
3055 case GST_VIDEO_FORMAT_ARGB:
3056 case GST_VIDEO_FORMAT_ABGR:
3057 case GST_VIDEO_FORMAT_RGBA:
3058 case GST_VIDEO_FORMAT_BGRA:
3059 case GST_VIDEO_FORMAT_xRGB:
3060 case GST_VIDEO_FORMAT_xBGR:
3061 case GST_VIDEO_FORMAT_RGBx:
3062 case GST_VIDEO_FORMAT_BGRx:
3063 case GST_VIDEO_FORMAT_RGB:
3064 case GST_VIDEO_FORMAT_BGR:
3065 video_box->fill = (video_box->out_format == GST_VIDEO_FORMAT_BGR
3066 || video_box->out_format ==
3067 GST_VIDEO_FORMAT_RGB) ? fill_rgb24 : fill_rgb32;
3068 switch (video_box->in_format) {
3069 case GST_VIDEO_FORMAT_ARGB:
3070 case GST_VIDEO_FORMAT_ABGR:
3071 case GST_VIDEO_FORMAT_RGBA:
3072 case GST_VIDEO_FORMAT_BGRA:
3073 case GST_VIDEO_FORMAT_xRGB:
3074 case GST_VIDEO_FORMAT_xBGR:
3075 case GST_VIDEO_FORMAT_RGBx:
3076 case GST_VIDEO_FORMAT_BGRx:
3077 case GST_VIDEO_FORMAT_RGB:
3078 case GST_VIDEO_FORMAT_BGR:
3079 video_box->copy = copy_rgb32;
3080 break;
3081 case GST_VIDEO_FORMAT_AYUV:
3082 video_box->copy = copy_ayuv_rgb32;
3083 default:
3084 break;
3085 }
3086 break;
3087 case GST_VIDEO_FORMAT_GRAY8:
3088 case GST_VIDEO_FORMAT_GRAY16_BE:
3089 case GST_VIDEO_FORMAT_GRAY16_LE:
3090 video_box->fill = fill_gray;
3091 switch (video_box->in_format) {
3092 case GST_VIDEO_FORMAT_GRAY8:
3093 case GST_VIDEO_FORMAT_GRAY16_BE:
3094 case GST_VIDEO_FORMAT_GRAY16_LE:
3095 video_box->copy = copy_packed_simple;
3096 break;
3097 default:
3098 break;
3099 }
3100 break;
3101 case GST_VIDEO_FORMAT_YUY2:
3102 case GST_VIDEO_FORMAT_YVYU:
3103 case GST_VIDEO_FORMAT_UYVY:
3104 video_box->fill = fill_yuy2;
3105 switch (video_box->in_format) {
3106 case GST_VIDEO_FORMAT_YUY2:
3107 case GST_VIDEO_FORMAT_YVYU:
3108 case GST_VIDEO_FORMAT_UYVY:
3109 video_box->copy = copy_yuy2_yuy2;
3110 break;
3111 default:
3112 break;
3113 }
3114 break;
3115 case GST_VIDEO_FORMAT_Y444:
3116 case GST_VIDEO_FORMAT_Y42B:
3117 case GST_VIDEO_FORMAT_Y41B:
3118 video_box->fill = fill_planar_yuv;
3119 switch (video_box->in_format) {
3120 case GST_VIDEO_FORMAT_Y444:
3121 video_box->copy = copy_y444_y444;
3122 break;
3123 case GST_VIDEO_FORMAT_Y42B:
3124 video_box->copy = copy_y42b_y42b;
3125 break;
3126 case GST_VIDEO_FORMAT_Y41B:
3127 video_box->copy = copy_y41b_y41b;
3128 break;
3129 default:
3130 break;
3131 }
3132 break;
3133 default:
3134 break;
3135 }
3136
3137 return video_box->fill != NULL && video_box->copy != NULL;
3138 }
3139
3140 static gboolean
gst_video_box_set_info(GstVideoFilter * vfilter,GstCaps * in,GstVideoInfo * in_info,GstCaps * out,GstVideoInfo * out_info)3141 gst_video_box_set_info (GstVideoFilter * vfilter, GstCaps * in,
3142 GstVideoInfo * in_info, GstCaps * out, GstVideoInfo * out_info)
3143 {
3144 GstVideoBox *video_box = GST_VIDEO_BOX (vfilter);
3145 gboolean ret;
3146
3147 g_mutex_lock (&video_box->mutex);
3148
3149 video_box->in_format = GST_VIDEO_INFO_FORMAT (in_info);
3150 video_box->in_width = GST_VIDEO_INFO_WIDTH (in_info);
3151 video_box->in_height = GST_VIDEO_INFO_HEIGHT (in_info);
3152
3153 video_box->out_format = GST_VIDEO_INFO_FORMAT (out_info);
3154 video_box->out_width = GST_VIDEO_INFO_WIDTH (out_info);
3155 video_box->out_height = GST_VIDEO_INFO_HEIGHT (out_info);
3156
3157 video_box->in_sdtv =
3158 in_info->colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT601;
3159 video_box->out_sdtv =
3160 out_info->colorimetry.matrix == GST_VIDEO_COLOR_MATRIX_BT601;
3161
3162 GST_DEBUG_OBJECT (video_box, "Input w: %d h: %d", video_box->in_width,
3163 video_box->in_height);
3164 GST_DEBUG_OBJECT (video_box, "Output w: %d h: %d", video_box->out_width,
3165 video_box->out_height);
3166
3167 if (video_box->autocrop)
3168 gst_video_box_autocrop (video_box);
3169
3170 /* recalc the transformation strategy */
3171 ret = gst_video_box_recalc_transform (video_box);
3172
3173 if (ret)
3174 ret = gst_video_box_select_processing_functions (video_box);
3175 g_mutex_unlock (&video_box->mutex);
3176
3177 return ret;
3178 }
3179
3180 static gboolean
gst_video_box_src_event(GstBaseTransform * trans,GstEvent * event)3181 gst_video_box_src_event (GstBaseTransform * trans, GstEvent * event)
3182 {
3183 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3184 GstStructure *new_structure;
3185 const GstStructure *structure;
3186 const gchar *event_name;
3187 gdouble pointer_x;
3188 gdouble pointer_y;
3189
3190 GST_OBJECT_LOCK (video_box);
3191 if (GST_EVENT_TYPE (event) == GST_EVENT_NAVIGATION &&
3192 (video_box->box_left != 0 || video_box->box_top != 0)) {
3193 structure = gst_event_get_structure (event);
3194 event_name = gst_structure_get_string (structure, "event");
3195
3196 if (event_name &&
3197 (strcmp (event_name, "mouse-move") == 0 ||
3198 strcmp (event_name, "mouse-button-press") == 0 ||
3199 strcmp (event_name, "mouse-button-release") == 0)) {
3200 if (gst_structure_get_double (structure, "pointer_x", &pointer_x) &&
3201 gst_structure_get_double (structure, "pointer_y", &pointer_y)) {
3202 gdouble new_pointer_x, new_pointer_y;
3203 GstEvent *new_event;
3204
3205 new_pointer_x = pointer_x + video_box->box_left;
3206 new_pointer_y = pointer_y + video_box->box_top;
3207
3208 new_structure = gst_structure_copy (structure);
3209 gst_structure_set (new_structure,
3210 "pointer_x", G_TYPE_DOUBLE, (gdouble) (new_pointer_x),
3211 "pointer_y", G_TYPE_DOUBLE, (gdouble) (new_pointer_y), NULL);
3212
3213 new_event = gst_event_new_navigation (new_structure);
3214 gst_event_unref (event);
3215 event = new_event;
3216 } else {
3217 GST_WARNING_OBJECT (video_box, "Failed to read navigation event");
3218 }
3219 }
3220 }
3221 GST_OBJECT_UNLOCK (video_box);
3222
3223 return GST_BASE_TRANSFORM_CLASS (parent_class)->src_event (trans, event);
3224 }
3225
3226 static void
gst_video_box_process(GstVideoBox * video_box,GstVideoFrame * in,GstVideoFrame * out)3227 gst_video_box_process (GstVideoBox * video_box, GstVideoFrame * in,
3228 GstVideoFrame * out)
3229 {
3230 guint b_alpha = CLAMP (video_box->border_alpha * 256, 0, 255);
3231 guint i_alpha = CLAMP (video_box->alpha * 256, 0, 255);
3232 GstVideoBoxFill fill_type = video_box->fill_type;
3233 gint br, bl, bt, bb, crop_w, crop_h;
3234
3235 crop_h = 0;
3236 crop_w = 0;
3237
3238 br = video_box->box_right;
3239 bl = video_box->box_left;
3240 bt = video_box->box_top;
3241 bb = video_box->box_bottom;
3242
3243 if (br >= 0 && bl >= 0) {
3244 crop_w = video_box->in_width - (br + bl);
3245 } else if (br >= 0 && bl < 0) {
3246 crop_w = video_box->in_width - (br);
3247 } else if (br < 0 && bl >= 0) {
3248 crop_w = video_box->in_width - (bl);
3249 } else if (br < 0 && bl < 0) {
3250 crop_w = video_box->in_width;
3251 }
3252
3253 if (bb >= 0 && bt >= 0) {
3254 crop_h = video_box->in_height - (bb + bt);
3255 } else if (bb >= 0 && bt < 0) {
3256 crop_h = video_box->in_height - (bb);
3257 } else if (bb < 0 && bt >= 0) {
3258 crop_h = video_box->in_height - (bt);
3259 } else if (bb < 0 && bt < 0) {
3260 crop_h = video_box->in_height;
3261 }
3262
3263 GST_DEBUG_OBJECT (video_box, "Borders are: L:%d, R:%d, T:%d, B:%d", bl, br,
3264 bt, bb);
3265 GST_DEBUG_OBJECT (video_box, "Alpha value is: %u (frame) %u (border)",
3266 i_alpha, b_alpha);
3267
3268 if (crop_h < 0 || crop_w < 0) {
3269 video_box->fill (fill_type, b_alpha, out, video_box->out_sdtv);
3270 } else if (bb == 0 && bt == 0 && br == 0 && bl == 0) {
3271 video_box->copy (i_alpha, out, video_box->out_sdtv, 0, 0, in,
3272 video_box->in_sdtv, 0, 0, crop_w, crop_h);
3273 } else {
3274 gint src_x = 0, src_y = 0;
3275 gint dest_x = 0, dest_y = 0;
3276
3277 /* Fill everything if a border should be added somewhere */
3278 if (bt < 0 || bb < 0 || br < 0 || bl < 0)
3279 video_box->fill (fill_type, b_alpha, out, video_box->out_sdtv);
3280
3281 /* Top border */
3282 if (bt < 0) {
3283 dest_y += -bt;
3284 } else {
3285 src_y += bt;
3286 }
3287
3288 /* Left border */
3289 if (bl < 0) {
3290 dest_x += -bl;
3291 } else {
3292 src_x += bl;
3293 }
3294
3295 /* Frame */
3296 video_box->copy (i_alpha, out, video_box->out_sdtv, dest_x, dest_y,
3297 in, video_box->in_sdtv, src_x, src_y, crop_w, crop_h);
3298 }
3299
3300 GST_LOG_OBJECT (video_box, "image created");
3301 }
3302
3303 static void
gst_video_box_before_transform(GstBaseTransform * trans,GstBuffer * in)3304 gst_video_box_before_transform (GstBaseTransform * trans, GstBuffer * in)
3305 {
3306 GstVideoBox *video_box = GST_VIDEO_BOX (trans);
3307 GstClockTime timestamp, stream_time;
3308
3309 timestamp = GST_BUFFER_TIMESTAMP (in);
3310 stream_time =
3311 gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME, timestamp);
3312
3313 GST_DEBUG_OBJECT (video_box, "sync to %" GST_TIME_FORMAT,
3314 GST_TIME_ARGS (timestamp));
3315
3316 if (GST_CLOCK_TIME_IS_VALID (stream_time))
3317 gst_object_sync_values (GST_OBJECT (video_box), stream_time);
3318 }
3319
3320 static GstFlowReturn
gst_video_box_transform_frame(GstVideoFilter * vfilter,GstVideoFrame * in_frame,GstVideoFrame * out_frame)3321 gst_video_box_transform_frame (GstVideoFilter * vfilter,
3322 GstVideoFrame * in_frame, GstVideoFrame * out_frame)
3323 {
3324 GstVideoBox *video_box = GST_VIDEO_BOX (vfilter);
3325
3326 g_mutex_lock (&video_box->mutex);
3327 gst_video_box_process (video_box, in_frame, out_frame);
3328 g_mutex_unlock (&video_box->mutex);
3329 return GST_FLOW_OK;
3330 }
3331
3332 /* FIXME: 0.11 merge with videocrop plugin */
3333 static gboolean
plugin_init(GstPlugin * plugin)3334 plugin_init (GstPlugin * plugin)
3335 {
3336 GST_DEBUG_CATEGORY_INIT (videobox_debug, "videobox", 0,
3337 "Resizes a video by adding borders or cropping");
3338
3339 return gst_element_register (plugin, "videobox", GST_RANK_NONE,
3340 GST_TYPE_VIDEO_BOX);
3341 }
3342
/* Exports the plugin descriptor GStreamer loads at scan time; the name,
 * description, version and license metadata come from the build system
 * defines (VERSION, GST_LICENSE, ...). */
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    videobox,
    "resizes a video by adding borders or cropping",
    plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)