1 /*------------------------------------------------------------------------
2 * Copyright 2007-2010 (c) Jeff Brown <spadix@users.sourceforge.net>
3 *
4 * This file is part of the ZBar Bar Code Reader.
5 *
6 * The ZBar Bar Code Reader is free software; you can redistribute it
7 * and/or modify it under the terms of the GNU Lesser Public License as
8 * published by the Free Software Foundation; either version 2.1 of
9 * the License, or (at your option) any later version.
10 *
11 * The ZBar Bar Code Reader is distributed in the hope that it will be
12 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU Lesser Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser Public License
17 * along with the ZBar Bar Code Reader; if not, write to the Free
18 * Software Foundation, Inc., 51 Franklin St, Fifth Floor,
19 * Boston, MA 02110-1301 USA
20 *
21 * http://sourceforge.net/projects/zbar
22 *------------------------------------------------------------------------*/
23
24 #include "image.h"
25 #include "video.h"
26 #include "window.h"
27
/* pack bit size and location offset of a component into one byte
 * (3 high bits encode 8-size, i.e. the right-shift needed to reduce an
 *  8-bit sample to `size` bits; 5 low bits encode the bit offset)
 */
#define RGB_BITS(off, size) ((((8 - (size)) & 0x7) << 5) | ((off) & 0x1f))

/* signature of a format conversion routine:
 * fills in dst image data/datalen from src (formats given by the defs)
 */
typedef void (conversion_handler_t)(zbar_image_t*,
                                    const zbar_format_def_t*,
                                    const zbar_image_t*,
                                    const zbar_format_def_t*);

/* one entry of the group-to-group conversion matrix */
typedef struct conversion_def_s {
    int cost;                   /* conversion "badness" */
    conversion_handler_t *func; /* function that accomplishes it */
} conversion_def_t;
41
42
/* NULL terminated list of known formats, in order of preference
 * (NB Cr=V Cb=U)
 */
const uint32_t _zbar_formats[] = {

    /* planar YUV formats */
    fourcc('4','2','2','P'), /* FIXME also YV16? */
    fourcc('I','4','2','0'),
    fourcc('Y','U','1','2'), /* FIXME also IYUV? */
    fourcc('Y','V','1','2'),
    fourcc('4','1','1','P'),

    /* planar Y + packed UV plane */
    fourcc('N','V','1','2'),
    fourcc('N','V','2','1'),

    /* packed YUV formats */
    fourcc('Y','U','Y','V'),
    fourcc('U','Y','V','Y'),
    fourcc('Y','U','Y','2'), /* FIXME add YVYU */
    fourcc('Y','U','V','4'), /* FIXME where is this from? */

    /* packed rgb formats */
    fourcc('R','G','B','3'),
    fourcc( 3 , 0 , 0 , 0 ),
    fourcc('B','G','R','3'),
    fourcc('R','G','B','4'),
    fourcc('B','G','R','4'),

    fourcc('R','G','B','P'),
    fourcc('R','G','B','O'),
    fourcc('R','G','B','R'),
    fourcc('R','G','B','Q'),

    fourcc('Y','U','V','9'),
    fourcc('Y','V','U','9'),

    /* basic grayscale format */
    fourcc('G','R','E','Y'),
    fourcc('Y','8','0','0'),
    fourcc('Y','8',' ',' '),
    fourcc('Y','8', 0 , 0 ),

    /* low quality RGB formats */
    fourcc('R','G','B','1'),
    fourcc('R','4','4','4'),
    fourcc('B','A','8','1'),

    /* unsupported packed YUV formats */
    fourcc('Y','4','1','P'),
    fourcc('Y','4','4','4'),
    fourcc('Y','U','V','O'),
    fourcc('H','M','1','2'),

    /* unsupported packed RGB format */
    fourcc('H','I','2','4'),

    /* unsupported compressed formats */
    fourcc('J','P','E','G'),
    fourcc('M','J','P','G'),
    fourcc('M','P','E','G'),

    /* terminator */
    0
};

/* number of entries above, including the 0 terminator */
const int _zbar_num_formats = sizeof(_zbar_formats) / sizeof(uint32_t);
110
/* format definitions
 * NB this table is stored in *binary heap order* (node i has children
 * 2i+1 and 2i+2) so _zbar_format_lookup() can binary search it by
 * walking the implicit tree; verify_format_sort() checks the invariant.
 * Do NOT reorder entries without re-deriving the heap layout.
 */
static const zbar_format_def_t format_defs[] = {

    { fourcc('R','G','B','4'), ZBAR_FMT_RGB_PACKED,
      { { 4, RGB_BITS(8, 8), RGB_BITS(16, 8), RGB_BITS(24, 8) } } },
    { fourcc('B','G','R','1'), ZBAR_FMT_RGB_PACKED,
      { { 1, RGB_BITS(0, 3), RGB_BITS(3, 3), RGB_BITS(6, 2) } } },
    { fourcc('4','2','2','P'), ZBAR_FMT_YUV_PLANAR, { { 1, 0, 0 /*UV*/ } } },
    { fourcc('Y','8','0','0'), ZBAR_FMT_GRAY, },
    { fourcc('Y','U','Y','2'), ZBAR_FMT_YUV_PACKED,
      { { 1, 0, 0, /*YUYV*/ } } },
    { fourcc('J','P','E','G'), ZBAR_FMT_JPEG, },
    { fourcc('Y','V','Y','U'), ZBAR_FMT_YUV_PACKED,
      { { 1, 0, 1, /*YVYU*/ } } },
    { fourcc('Y','8', 0 , 0 ), ZBAR_FMT_GRAY, },
    { fourcc('N','V','2','1'), ZBAR_FMT_YUV_NV, { { 1, 1, 1 /*VU*/ } } },
    { fourcc('N','V','1','2'), ZBAR_FMT_YUV_NV, { { 1, 1, 0 /*UV*/ } } },
    { fourcc('B','G','R','3'), ZBAR_FMT_RGB_PACKED,
      { { 3, RGB_BITS(16, 8), RGB_BITS(8, 8), RGB_BITS(0, 8) } } },
    { fourcc('Y','V','U','9'), ZBAR_FMT_YUV_PLANAR, { { 2, 2, 1 /*VU*/ } } },
    { fourcc('R','G','B','O'), ZBAR_FMT_RGB_PACKED,
      { { 2, RGB_BITS(10, 5), RGB_BITS(5, 5), RGB_BITS(0, 5) } } },
    { fourcc('R','G','B','Q'), ZBAR_FMT_RGB_PACKED,
      { { 2, RGB_BITS(2, 5), RGB_BITS(13, 5), RGB_BITS(8, 5) } } },
    { fourcc('G','R','E','Y'), ZBAR_FMT_GRAY, },
    { fourcc( 3 , 0 , 0 , 0 ), ZBAR_FMT_RGB_PACKED,
      { { 4, RGB_BITS(16, 8), RGB_BITS(8, 8), RGB_BITS(0, 8) } } },
    { fourcc('Y','8',' ',' '), ZBAR_FMT_GRAY, },
    { fourcc('I','4','2','0'), ZBAR_FMT_YUV_PLANAR, { { 1, 1, 0 /*UV*/ } } },
    { fourcc('R','G','B','1'), ZBAR_FMT_RGB_PACKED,
      { { 1, RGB_BITS(5, 3), RGB_BITS(2, 3), RGB_BITS(0, 2) } } },
    { fourcc('Y','U','1','2'), ZBAR_FMT_YUV_PLANAR, { { 1, 1, 0 /*UV*/ } } },
    { fourcc('Y','V','1','2'), ZBAR_FMT_YUV_PLANAR, { { 1, 1, 1 /*VU*/ } } },
    { fourcc('R','G','B','3'), ZBAR_FMT_RGB_PACKED,
      { { 3, RGB_BITS(0, 8), RGB_BITS(8, 8), RGB_BITS(16, 8) } } },
    { fourcc('R','4','4','4'), ZBAR_FMT_RGB_PACKED,
      { { 2, RGB_BITS(8, 4), RGB_BITS(4, 4), RGB_BITS(0, 4) } } },
    { fourcc('B','G','R','4'), ZBAR_FMT_RGB_PACKED,
      { { 4, RGB_BITS(16, 8), RGB_BITS(8, 8), RGB_BITS(0, 8) } } },
    { fourcc('Y','U','V','9'), ZBAR_FMT_YUV_PLANAR, { { 2, 2, 0 /*UV*/ } } },
    { fourcc('M','J','P','G'), ZBAR_FMT_JPEG, },
    { fourcc('4','1','1','P'), ZBAR_FMT_YUV_PLANAR, { { 2, 0, 0 /*UV*/ } } },
    { fourcc('R','G','B','P'), ZBAR_FMT_RGB_PACKED,
      { { 2, RGB_BITS(11, 5), RGB_BITS(5, 6), RGB_BITS(0, 5) } } },
    { fourcc('R','G','B','R'), ZBAR_FMT_RGB_PACKED,
      { { 2, RGB_BITS(3, 5), RGB_BITS(13, 6), RGB_BITS(8, 5) } } },
    { fourcc('Y','U','Y','V'), ZBAR_FMT_YUV_PACKED,
      { { 1, 0, 0, /*YUYV*/ } } },
    { fourcc('U','Y','V','Y'), ZBAR_FMT_YUV_PACKED,
      { { 1, 0, 2, /*UYVY*/ } } },
};

/* number of entries in the format definition table */
static const int num_format_defs =
    sizeof(format_defs) / sizeof(zbar_format_def_t);
165
166 #ifdef DEBUG_CONVERT
/* qsort comparator: ascending order over uint32_t values.
 * NB plain subtraction (a - b) truncated to int yields the wrong sign
 * when the values differ by more than INT_MAX -- fourcc codes use all
 * 32 bits -- so compare explicitly instead.
 */
static int intsort (const void *a,
                    const void *b)
{
    uint32_t va = *(const uint32_t*)a;
    uint32_t vb = *(const uint32_t*)b;
    return((va > vb) - (va < vb));
}
172 #endif
173
174 /* verify that format list is in required sort order */
verify_format_sort(void)175 static inline int verify_format_sort (void)
176 {
177 int i;
178 for(i = 0; i < num_format_defs; i++) {
179 int j = i * 2 + 1;
180 if((j < num_format_defs &&
181 format_defs[i].format < format_defs[j].format) ||
182 (j + 1 < num_format_defs &&
183 format_defs[j + 1].format < format_defs[i].format))
184 break;
185 }
186 if(i == num_format_defs)
187 return(0);
188
189 /* spew correct order for fix */
190 fprintf(stderr, "ERROR: image format list is not sorted!?\n");
191
192 #ifdef DEBUG_CONVERT
193 assert(num_format_defs);
194 uint32_t sorted[num_format_defs];
195 uint32_t ordered[num_format_defs];
196 for(i = 0; i < num_format_defs; i++)
197 sorted[i] = format_defs[i].format;
198 qsort(sorted, num_format_defs, sizeof(uint32_t), intsort);
199 for(i = 0; i < num_format_defs; i = i << 1 | 1);
200 i = (i - 1) / 2;
201 ordered[i] = sorted[0];
202 int j, k;
203 for(j = 1; j < num_format_defs; j++) {
204 k = i * 2 + 2;
205 if(k < num_format_defs) {
206 i = k;
207 for(k = k * 2 + 1; k < num_format_defs; k = k * 2 + 1)
208 i = k;
209 }
210 else {
211 for(k = (i - 1) / 2; i != k * 2 + 1; k = (i - 1) / 2) {
212 assert(i);
213 i = k;
214 }
215 i = k;
216 }
217 ordered[i] = sorted[j];
218 }
219 fprintf(stderr, "correct sort order is:");
220 for(i = 0; i < num_format_defs; i++)
221 fprintf(stderr, " %4.4s", (char*)&ordered[i]);
222 fprintf(stderr, "\n");
223 #endif
224 return(-1);
225 }
226
/* truncate image dimensions down to the nearest multiple of the
 * format's chroma subsampling block size (xsub2/ysub2 are log2 factors)
 */
static inline void uv_round (zbar_image_t *img,
                             const zbar_format_def_t *fmt)
{
    img->width >>= fmt->p.yuv.xsub2;
    img->width <<= fmt->p.yuv.xsub2;
    img->height >>= fmt->p.yuv.ysub2;
    img->height <<= fmt->p.yuv.ysub2;
}
235
/* round image dimensions up to the nearest multiple of the format's
 * chroma subsampling block size; no-op for grayscale formats
 */
static inline void uv_roundup (zbar_image_t *img,
                               const zbar_format_def_t *fmt)
{
    unsigned xmask, ymask;
    if(fmt->group == ZBAR_FMT_GRAY)
        return;
    xmask = (1 << fmt->p.yuv.xsub2) - 1;
    if(img->width & xmask)
        img->width = (img->width + xmask) & ~xmask;
    ymask = (1 << fmt->p.yuv.ysub2) - 1;
    if(img->height & ymask)
        img->height = (img->height + ymask) & ~ymask;
}
249
uvp_size(const zbar_image_t * img,const zbar_format_def_t * fmt)250 static inline unsigned long uvp_size (const zbar_image_t *img,
251 const zbar_format_def_t *fmt)
252 {
253 if(fmt->group == ZBAR_FMT_GRAY)
254 return(0);
255 return((img->width >> fmt->p.yuv.xsub2) *
256 (img->height >> fmt->p.yuv.ysub2));
257 }
258
/* read one packed RGB pixel of bpp bytes (1..4) from srcp.
 * multi-byte cases use memcpy rather than pointer casts to avoid
 * unaligned access and strict-aliasing undefined behavior; the 2- and
 * 4-byte reads remain host-endian, matching the previous casts
 */
static inline uint32_t convert_read_rgb (const uint8_t *srcp,
                                         int bpp)
{
    uint32_t p;
    if(bpp == 3) {
        /* 24-bit pixels are assembled little-endian byte by byte */
        p = *srcp;
        p |= *(srcp + 1) << 8;
        p |= *(srcp + 2) << 16;
    }
    else if(bpp == 4)
        memcpy(&p, srcp, sizeof(uint32_t));
    else if(bpp == 2) {
        uint16_t p16;
        memcpy(&p16, srcp, sizeof(uint16_t));
        p = p16;
    }
    else
        p = *srcp;
    return(p);
}
276
/* write one packed RGB pixel of bpp bytes (1..4) to dstp.
 * multi-byte cases use memcpy rather than pointer casts to avoid
 * unaligned access and strict-aliasing undefined behavior; the 2- and
 * 4-byte writes remain host-endian, matching the previous casts
 */
static inline void convert_write_rgb (uint8_t *dstp,
                                      uint32_t p,
                                      int bpp)
{
    if(bpp == 3) {
        /* 24-bit pixels are emitted little-endian byte by byte */
        *dstp = p & 0xff;
        *(dstp + 1) = (p >> 8) & 0xff;
        *(dstp + 2) = (p >> 16) & 0xff;
    }
    else if(bpp == 4)
        memcpy(dstp, &p, sizeof(uint32_t));
    else if(bpp == 2) {
        uint16_t p16 = p;
        memcpy(dstp, &p16, sizeof(uint16_t));
    }
    else
        *dstp = p;
}
293
294 /* cleanup linked image by unrefing */
cleanup_ref(zbar_image_t * img)295 static void cleanup_ref (zbar_image_t *img)
296 {
297 if(img->next)
298 _zbar_image_refcnt(img->next, -1);
299 }
300
301 /* resize y plane, drop extra columns/rows from the right/bottom,
302 * or duplicate last column/row to pad missing data
303 */
convert_y_resize(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt,size_t n)304 static inline void convert_y_resize (zbar_image_t *dst,
305 const zbar_format_def_t *dstfmt,
306 const zbar_image_t *src,
307 const zbar_format_def_t *srcfmt,
308 size_t n)
309 {
310 uint8_t *psrc, *pdst;
311 unsigned width, height, xpad, y;
312
313 if(dst->width == src->width && dst->height == src->height) {
314 memcpy((void*)dst->data, src->data, n);
315 return;
316 }
317 psrc = (void*)src->data;
318 pdst = (void*)dst->data;
319 width = (dst->width > src->width) ? src->width : dst->width;
320 xpad = (dst->width > src->width) ? dst->width - src->width : 0;
321 height = (dst->height > src->height) ? src->height : dst->height;
322 for(y = 0; y < height; y++) {
323 memcpy(pdst, psrc, width);
324 pdst += width;
325 psrc += src->width;
326 if(xpad) {
327 memset(pdst, *(psrc - 1), xpad);
328 pdst += xpad;
329 }
330 }
331 psrc -= src->width;
332 for(; y < dst->height; y++) {
333 memcpy(pdst, psrc, width);
334 pdst += width;
335 if(xpad) {
336 memset(pdst, *(psrc - 1), xpad);
337 pdst += xpad;
338 }
339 }
340 }
341
342 /* make new image w/reference to the same image data */
convert_copy(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)343 static void convert_copy (zbar_image_t *dst,
344 const zbar_format_def_t *dstfmt,
345 const zbar_image_t *src,
346 const zbar_format_def_t *srcfmt)
347 {
348 if(src->width == dst->width &&
349 src->height == dst->height) {
350 zbar_image_t *s = (zbar_image_t*)src;
351 dst->data = src->data;
352 dst->datalen = src->datalen;
353 dst->cleanup = cleanup_ref;
354 dst->next = s;
355 _zbar_image_refcnt(s, 1);
356 }
357 else
358 /* NB only for GRAY/YUV_PLANAR formats */
359 convert_y_resize(dst, dstfmt, src, srcfmt, dst->width * dst->height);
360 }
361
362 /* append neutral UV plane to grayscale image */
convert_uvp_append(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)363 static void convert_uvp_append (zbar_image_t *dst,
364 const zbar_format_def_t *dstfmt,
365 const zbar_image_t *src,
366 const zbar_format_def_t *srcfmt)
367 {
368 unsigned long n;
369 uv_roundup(dst, dstfmt);
370 dst->datalen = uvp_size(dst, dstfmt) * 2;
371 n = dst->width * dst->height;
372 dst->datalen += n;
373 assert(src->datalen >= src->width * src->height);
374 zprintf(24, "dst=%dx%d (%lx) %lx src=%dx%d %lx\n",
375 dst->width, dst->height, n, dst->datalen,
376 src->width, src->height, src->datalen);
377 dst->data = malloc(dst->datalen);
378 if(!dst->data) return;
379 convert_y_resize(dst, dstfmt, src, srcfmt, n);
380 memset((uint8_t*)dst->data + n, 0x80, dst->datalen - n);
381 }
382
383 /* interleave YUV planes into packed YUV */
convert_yuv_pack(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)384 static void convert_yuv_pack (zbar_image_t *dst,
385 const zbar_format_def_t *dstfmt,
386 const zbar_image_t *src,
387 const zbar_format_def_t *srcfmt)
388 {
389 unsigned long srcm, srcn;
390 uint8_t flags, *srcy, *dstp;
391 const uint8_t *srcu, *srcv;
392 unsigned srcl, xmask, ymask, x, y;
393 uint8_t y0 = 0, y1 = 0, u = 0x80, v = 0x80;
394
395 uv_roundup(dst, dstfmt);
396 dst->datalen = dst->width * dst->height + uvp_size(dst, dstfmt) * 2;
397 dst->data = malloc(dst->datalen);
398 if(!dst->data) return;
399 dstp = (void*)dst->data;
400
401 srcm = uvp_size(src, srcfmt);
402 srcn = src->width * src->height;
403 assert(src->datalen >= srcn + 2 * srcn);
404 flags = dstfmt->p.yuv.packorder ^ srcfmt->p.yuv.packorder;
405 srcy = (void*)src->data;
406 if(flags & 1) {
407 srcv = (uint8_t*)src->data + srcn;
408 srcu = srcv + srcm;
409 } else {
410 srcu = (uint8_t*)src->data + srcn;
411 srcv = srcu + srcm;
412 }
413 flags = dstfmt->p.yuv.packorder & 2;
414
415 srcl = src->width >> srcfmt->p.yuv.xsub2;
416 xmask = (1 << srcfmt->p.yuv.xsub2) - 1;
417 ymask = (1 << srcfmt->p.yuv.ysub2) - 1;
418 for(y = 0; y < dst->height; y++) {
419 if(y >= src->height) {
420 srcy -= src->width;
421 srcu -= srcl; srcv -= srcl;
422 }
423 else if(y & ymask) {
424 srcu -= srcl; srcv -= srcl;
425 }
426 for(x = 0; x < dst->width; x += 2) {
427 if(x < src->width) {
428 y0 = *(srcy++); y1 = *(srcy++);
429 if(!(x & xmask)) {
430 u = *(srcu++); v = *(srcv++);
431 }
432 }
433 if(flags) {
434 *(dstp++) = u; *(dstp++) = y0;
435 *(dstp++) = v; *(dstp++) = y1;
436 } else {
437 *(dstp++) = y0; *(dstp++) = u;
438 *(dstp++) = y1; *(dstp++) = v;
439 }
440 }
441 for(; x < src->width; x += 2) {
442 srcy += 2;
443 if(!(x & xmask)) {
444 srcu++; srcv++;
445 }
446 }
447 }
448 }
449
450 /* split packed YUV samples and join into YUV planes
451 * FIXME currently ignores color and grayscales the image
452 */
convert_yuv_unpack(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)453 static void convert_yuv_unpack (zbar_image_t *dst,
454 const zbar_format_def_t *dstfmt,
455 const zbar_image_t *src,
456 const zbar_format_def_t *srcfmt)
457 {
458 unsigned long dstn, dstm2;
459 uint8_t *dsty, flags;
460 const uint8_t *srcp;
461 unsigned srcl, x, y;
462 uint8_t y0 = 0, y1 = 0;
463
464 uv_roundup(dst, dstfmt);
465 dstn = dst->width * dst->height;
466 dstm2 = uvp_size(dst, dstfmt) * 2;
467 dst->datalen = dstn + dstm2;
468 dst->data = malloc(dst->datalen);
469 if(!dst->data) return;
470 if(dstm2)
471 memset((uint8_t*)dst->data + dstn, 0x80, dstm2);
472 dsty = (uint8_t*)dst->data;
473
474 flags = srcfmt->p.yuv.packorder ^ dstfmt->p.yuv.packorder;
475 flags &= 2;
476 srcp = src->data;
477 if(flags)
478 srcp++;
479
480 srcl = src->width + (src->width >> srcfmt->p.yuv.xsub2);
481 for(y = 0; y < dst->height; y++) {
482 if(y >= src->height)
483 srcp -= srcl;
484 for(x = 0; x < dst->width; x += 2) {
485 if(x < src->width) {
486 y0 = *(srcp++); srcp++;
487 y1 = *(srcp++); srcp++;
488 }
489 *(dsty++) = y0;
490 *(dsty++) = y1;
491 }
492 if(x < src->width)
493 srcp += (src->width - x) * 2;
494 }
495 }
496
497 /* resample and resize UV plane(s)
498 * FIXME currently ignores color and grayscales the image
499 */
convert_uvp_resample(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)500 static void convert_uvp_resample (zbar_image_t *dst,
501 const zbar_format_def_t *dstfmt,
502 const zbar_image_t *src,
503 const zbar_format_def_t *srcfmt)
504 {
505 unsigned long dstn, dstm2;
506 uv_roundup(dst, dstfmt);
507 dstn = dst->width * dst->height;
508 dstm2 = uvp_size(dst, dstfmt) * 2;
509 dst->datalen = dstn + dstm2;
510 dst->data = malloc(dst->datalen);
511 if(!dst->data) return;
512 convert_y_resize(dst, dstfmt, src, srcfmt, dstn);
513 if(dstm2)
514 memset((uint8_t*)dst->data + dstn, 0x80, dstm2);
515 }
516
517 /* rearrange interleaved UV componets */
convert_uv_resample(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)518 static void convert_uv_resample (zbar_image_t *dst,
519 const zbar_format_def_t *dstfmt,
520 const zbar_image_t *src,
521 const zbar_format_def_t *srcfmt)
522 {
523 unsigned long dstn;
524 uint8_t *dstp, flags;
525 const uint8_t *srcp;
526 unsigned srcl, x, y;
527 uint8_t y0 = 0, y1 = 0, u = 0x80, v = 0x80;
528
529 uv_roundup(dst, dstfmt);
530 dstn = dst->width * dst->height;
531 dst->datalen = dstn + uvp_size(dst, dstfmt) * 2;
532 dst->data = malloc(dst->datalen);
533 if(!dst->data) return;
534 dstp = (void*)dst->data;
535
536 flags = (srcfmt->p.yuv.packorder ^ dstfmt->p.yuv.packorder) & 1;
537 srcp = src->data;
538
539 srcl = src->width + (src->width >> srcfmt->p.yuv.xsub2);
540 for(y = 0; y < dst->height; y++) {
541 if(y >= src->height)
542 srcp -= srcl;
543 for(x = 0; x < dst->width; x += 2) {
544 if(x < src->width) {
545 if(!(srcfmt->p.yuv.packorder & 2)) {
546 y0 = *(srcp++); u = *(srcp++);
547 y1 = *(srcp++); v = *(srcp++);
548 }
549 else {
550 u = *(srcp++); y0 = *(srcp++);
551 v = *(srcp++); y1 = *(srcp++);
552 }
553 if(flags) {
554 uint8_t tmp = u; u = v; v = tmp;
555 }
556 }
557 if(!(dstfmt->p.yuv.packorder & 2)) {
558 *(dstp++) = y0; *(dstp++) = u;
559 *(dstp++) = y1; *(dstp++) = v;
560 }
561 else {
562 *(dstp++) = u; *(dstp++) = y0;
563 *(dstp++) = v; *(dstp++) = y1;
564 }
565 }
566 if(x < src->width)
567 srcp += (src->width - x) * 2;
568 }
569 }
570
571 /* YUV planes to packed RGB
572 * FIXME currently ignores color and grayscales the image
573 */
convert_yuvp_to_rgb(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)574 static void convert_yuvp_to_rgb (zbar_image_t *dst,
575 const zbar_format_def_t *dstfmt,
576 const zbar_image_t *src,
577 const zbar_format_def_t *srcfmt)
578 {
579 uint8_t *dstp, *srcy;
580 int drbits, drbit0, dgbits, dgbit0, dbbits, dbbit0;
581 unsigned long srcm, srcn;
582 unsigned x, y;
583 uint32_t p = 0;
584
585 dst->datalen = dst->width * dst->height * dstfmt->p.rgb.bpp;
586 dst->data = malloc(dst->datalen);
587 if(!dst->data) return;
588 dstp = (void*)dst->data;
589
590 drbits = RGB_SIZE(dstfmt->p.rgb.red);
591 drbit0 = RGB_OFFSET(dstfmt->p.rgb.red);
592 dgbits = RGB_SIZE(dstfmt->p.rgb.green);
593 dgbit0 = RGB_OFFSET(dstfmt->p.rgb.green);
594 dbbits = RGB_SIZE(dstfmt->p.rgb.blue);
595 dbbit0 = RGB_OFFSET(dstfmt->p.rgb.blue);
596
597 srcm = uvp_size(src, srcfmt);
598 srcn = src->width * src->height;
599 assert(src->datalen >= srcn + 2 * srcm);
600 srcy = (void*)src->data;
601
602 for(y = 0; y < dst->height; y++) {
603 if(y >= src->height)
604 srcy -= src->width;
605 for(x = 0; x < dst->width; x++) {
606 if(x < src->width) {
607 /* FIXME color space? */
608 unsigned y0 = *(srcy++);
609 p = (((y0 >> drbits) << drbit0) |
610 ((y0 >> dgbits) << dgbit0) |
611 ((y0 >> dbbits) << dbbit0));
612 }
613 convert_write_rgb(dstp, p, dstfmt->p.rgb.bpp);
614 dstp += dstfmt->p.rgb.bpp;
615 }
616 if(x < src->width)
617 srcy += (src->width - x);
618 }
619 }
620
621 /* packed RGB to YUV planes
622 * FIXME currently ignores color and grayscales the image
623 */
convert_rgb_to_yuvp(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)624 static void convert_rgb_to_yuvp (zbar_image_t *dst,
625 const zbar_format_def_t *dstfmt,
626 const zbar_image_t *src,
627 const zbar_format_def_t *srcfmt)
628 {
629 unsigned long dstn, dstm2;
630 uint8_t *dsty;
631 const uint8_t *srcp;
632 int rbits, rbit0, gbits, gbit0, bbits, bbit0;
633 unsigned srcl, x, y;
634 uint16_t y0 = 0;
635
636 uv_roundup(dst, dstfmt);
637 dstn = dst->width * dst->height;
638 dstm2 = uvp_size(dst, dstfmt) * 2;
639 dst->datalen = dstn + dstm2;
640 dst->data = malloc(dst->datalen);
641 if(!dst->data) return;
642 if(dstm2)
643 memset((uint8_t*)dst->data + dstn, 0x80, dstm2);
644 dsty = (void*)dst->data;
645
646 assert(src->datalen >= (src->width * src->height * srcfmt->p.rgb.bpp));
647 srcp = src->data;
648
649 rbits = RGB_SIZE(srcfmt->p.rgb.red);
650 rbit0 = RGB_OFFSET(srcfmt->p.rgb.red);
651 gbits = RGB_SIZE(srcfmt->p.rgb.green);
652 gbit0 = RGB_OFFSET(srcfmt->p.rgb.green);
653 bbits = RGB_SIZE(srcfmt->p.rgb.blue);
654 bbit0 = RGB_OFFSET(srcfmt->p.rgb.blue);
655
656 srcl = src->width * srcfmt->p.rgb.bpp;
657 for(y = 0; y < dst->height; y++) {
658 if(y >= src->height)
659 srcp -= srcl;
660 for(x = 0; x < dst->width; x++) {
661 if(x < src->width) {
662 uint8_t r, g, b;
663 uint32_t p = convert_read_rgb(srcp, srcfmt->p.rgb.bpp);
664 srcp += srcfmt->p.rgb.bpp;
665
666 /* FIXME endianness? */
667 r = ((p >> rbit0) << rbits) & 0xff;
668 g = ((p >> gbit0) << gbits) & 0xff;
669 b = ((p >> bbit0) << bbits) & 0xff;
670
671 /* FIXME color space? */
672 y0 = ((77 * r + 150 * g + 29 * b) + 0x80) >> 8;
673 }
674 *(dsty++) = y0;
675 }
676 if(x < src->width)
677 srcp += (src->width - x) * srcfmt->p.rgb.bpp;
678 }
679 }
680
681 /* packed YUV to packed RGB */
convert_yuv_to_rgb(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)682 static void convert_yuv_to_rgb (zbar_image_t *dst,
683 const zbar_format_def_t *dstfmt,
684 const zbar_image_t *src,
685 const zbar_format_def_t *srcfmt)
686 {
687 uint8_t *dstp;
688 unsigned long dstn = dst->width * dst->height;
689 int drbits, drbit0, dgbits, dgbit0, dbbits, dbbit0;
690 const uint8_t *srcp;
691 unsigned srcl, x, y;
692 uint32_t p = 0;
693
694 dst->datalen = dstn * dstfmt->p.rgb.bpp;
695 dst->data = malloc(dst->datalen);
696 if(!dst->data) return;
697 dstp = (void*)dst->data;
698
699 drbits = RGB_SIZE(dstfmt->p.rgb.red);
700 drbit0 = RGB_OFFSET(dstfmt->p.rgb.red);
701 dgbits = RGB_SIZE(dstfmt->p.rgb.green);
702 dgbit0 = RGB_OFFSET(dstfmt->p.rgb.green);
703 dbbits = RGB_SIZE(dstfmt->p.rgb.blue);
704 dbbit0 = RGB_OFFSET(dstfmt->p.rgb.blue);
705
706 assert(src->datalen >= (src->width * src->height +
707 uvp_size(src, srcfmt) * 2));
708 srcp = src->data;
709 if(srcfmt->p.yuv.packorder & 2)
710 srcp++;
711
712 assert(srcfmt->p.yuv.xsub2 == 1);
713 srcl = src->width + (src->width >> 1);
714 for(y = 0; y < dst->height; y++) {
715 if(y >= src->height)
716 srcp -= srcl;
717 for(x = 0; x < dst->width; x++) {
718 if(x < src->width) {
719 uint8_t y0 = *(srcp++);
720 srcp++;
721
722 if(y0 <= 16)
723 y0 = 0;
724 else if(y0 >= 235)
725 y0 = 255;
726 else
727 y0 = (uint16_t)(y0 - 16) * 255 / 219;
728
729 p = (((y0 >> drbits) << drbit0) |
730 ((y0 >> dgbits) << dgbit0) |
731 ((y0 >> dbbits) << dbbit0));
732 }
733 convert_write_rgb(dstp, p, dstfmt->p.rgb.bpp);
734 dstp += dstfmt->p.rgb.bpp;
735 }
736 if(x < src->width)
737 srcp += (src->width - x) * 2;
738 }
739 }
740
741 /* packed RGB to packed YUV
742 * FIXME currently ignores color and grayscales the image
743 */
convert_rgb_to_yuv(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)744 static void convert_rgb_to_yuv (zbar_image_t *dst,
745 const zbar_format_def_t *dstfmt,
746 const zbar_image_t *src,
747 const zbar_format_def_t *srcfmt)
748 {
749 uint8_t *dstp, flags;
750 const uint8_t *srcp;
751 int rbits, rbit0, gbits, gbit0, bbits, bbit0;
752 unsigned srcl, x, y;
753 uint16_t y0 = 0;
754
755 uv_roundup(dst, dstfmt);
756 dst->datalen = dst->width * dst->height + uvp_size(dst, dstfmt) * 2;
757 dst->data = malloc(dst->datalen);
758 if(!dst->data) return;
759 dstp = (void*)dst->data;
760 flags = dstfmt->p.yuv.packorder & 2;
761
762 assert(src->datalen >= (src->width * src->height * srcfmt->p.rgb.bpp));
763 srcp = src->data;
764
765 rbits = RGB_SIZE(srcfmt->p.rgb.red);
766 rbit0 = RGB_OFFSET(srcfmt->p.rgb.red);
767 gbits = RGB_SIZE(srcfmt->p.rgb.green);
768 gbit0 = RGB_OFFSET(srcfmt->p.rgb.green);
769 bbits = RGB_SIZE(srcfmt->p.rgb.blue);
770 bbit0 = RGB_OFFSET(srcfmt->p.rgb.blue);
771
772 srcl = src->width * srcfmt->p.rgb.bpp;
773 for(y = 0; y < dst->height; y++) {
774 if(y >= src->height)
775 srcp -= srcl;
776 for(x = 0; x < dst->width; x++) {
777 if(x < src->width) {
778 uint8_t r, g, b;
779 uint32_t p = convert_read_rgb(srcp, srcfmt->p.rgb.bpp);
780 srcp += srcfmt->p.rgb.bpp;
781
782 /* FIXME endianness? */
783 r = ((p >> rbit0) << rbits) & 0xff;
784 g = ((p >> gbit0) << gbits) & 0xff;
785 b = ((p >> bbit0) << bbits) & 0xff;
786
787 /* FIXME color space? */
788 y0 = ((77 * r + 150 * g + 29 * b) + 0x80) >> 8;
789 }
790 if(flags) {
791 *(dstp++) = 0x80; *(dstp++) = y0;
792 }
793 else {
794 *(dstp++) = y0; *(dstp++) = 0x80;
795 }
796 }
797 if(x < src->width)
798 srcp += (src->width - x) * srcfmt->p.rgb.bpp;
799 }
800 }
801
802 /* resample and resize packed RGB components */
convert_rgb_resample(zbar_image_t * dst,const zbar_format_def_t * dstfmt,const zbar_image_t * src,const zbar_format_def_t * srcfmt)803 static void convert_rgb_resample (zbar_image_t *dst,
804 const zbar_format_def_t *dstfmt,
805 const zbar_image_t *src,
806 const zbar_format_def_t *srcfmt)
807 {
808 unsigned long dstn = dst->width * dst->height;
809 uint8_t *dstp;
810 int drbits, drbit0, dgbits, dgbit0, dbbits, dbbit0;
811 int srbits, srbit0, sgbits, sgbit0, sbbits, sbbit0;
812 const uint8_t *srcp;
813 unsigned srcl, x, y;
814 uint32_t p = 0;
815
816 dst->datalen = dstn * dstfmt->p.rgb.bpp;
817 dst->data = malloc(dst->datalen);
818 if(!dst->data) return;
819 dstp = (void*)dst->data;
820
821 drbits = RGB_SIZE(dstfmt->p.rgb.red);
822 drbit0 = RGB_OFFSET(dstfmt->p.rgb.red);
823 dgbits = RGB_SIZE(dstfmt->p.rgb.green);
824 dgbit0 = RGB_OFFSET(dstfmt->p.rgb.green);
825 dbbits = RGB_SIZE(dstfmt->p.rgb.blue);
826 dbbit0 = RGB_OFFSET(dstfmt->p.rgb.blue);
827
828 assert(src->datalen >= (src->width * src->height * srcfmt->p.rgb.bpp));
829 srcp = src->data;
830
831 srbits = RGB_SIZE(srcfmt->p.rgb.red);
832 srbit0 = RGB_OFFSET(srcfmt->p.rgb.red);
833 sgbits = RGB_SIZE(srcfmt->p.rgb.green);
834 sgbit0 = RGB_OFFSET(srcfmt->p.rgb.green);
835 sbbits = RGB_SIZE(srcfmt->p.rgb.blue);
836 sbbit0 = RGB_OFFSET(srcfmt->p.rgb.blue);
837
838 srcl = src->width * srcfmt->p.rgb.bpp;
839 for(y = 0; y < dst->height; y++) {
840 if(y >= src->height)
841 y -= srcl;
842 for(x = 0; x < dst->width; x++) {
843 if(x < src->width) {
844 uint8_t r, g, b;
845 p = convert_read_rgb(srcp, srcfmt->p.rgb.bpp);
846 srcp += srcfmt->p.rgb.bpp;
847
848 /* FIXME endianness? */
849 r = (p >> srbit0) << srbits;
850 g = (p >> sgbit0) << sgbits;
851 b = (p >> sbbit0) << sbbits;
852
853 p = (((r >> drbits) << drbit0) |
854 ((g >> dgbits) << dgbit0) |
855 ((b >> dbbits) << dbbit0));
856 }
857 convert_write_rgb(dstp, p, dstfmt->p.rgb.bpp);
858 dstp += dstfmt->p.rgb.bpp;
859 }
860 if(x < src->width)
861 srcp += (src->width - x) * srcfmt->p.rgb.bpp;
862 }
863 }
864
#ifdef HAVE_LIBJPEG
/* decode JPEG data into a grayscale (Y800) image; defined in jpeg.c */
void _zbar_convert_jpeg_to_y(zbar_image_t *dst,
                             const zbar_format_def_t *dstfmt,
                             const zbar_image_t *src,
                             const zbar_format_def_t *srcfmt);

/* convert JPEG to an arbitrary format via a grayscale intermediate;
 * defined below (forward declaration for the conversion matrix)
 */
static void convert_jpeg(zbar_image_t *dst,
                         const zbar_format_def_t *dstfmt,
                         const zbar_image_t *src,
                         const zbar_format_def_t *srcfmt);
#endif
876
/* group conversion matrix
 * conversions[src_group][dst_group] = { cost, handler };
 * cost -1 / NULL handler marks an unsupported conversion
 */
static conversion_def_t conversions[][ZBAR_FMT_NUM] = {
    { /* *from* GRAY */
        {   0, convert_copy },         /* to GRAY */
        {   8, convert_uvp_append },   /* to YUV_PLANAR */
        {  24, convert_yuv_pack },     /* to YUV_PACKED */
        {  32, convert_yuvp_to_rgb },  /* to RGB_PACKED */
        {   8, convert_uvp_append },   /* to YUV_NV */
        {  -1, NULL },                 /* to JPEG */
    },
    { /* from YUV_PLANAR */
        {   1, convert_copy },         /* to GRAY */
        {  48, convert_uvp_resample }, /* to YUV_PLANAR */
        {  64, convert_yuv_pack },     /* to YUV_PACKED */
        { 128, convert_yuvp_to_rgb },  /* to RGB_PACKED */
        {  40, convert_uvp_append },   /* to YUV_NV */
        {  -1, NULL },                 /* to JPEG */
    },
    { /* from YUV_PACKED */
        {  24, convert_yuv_unpack },   /* to GRAY */
        {  52, convert_yuv_unpack },   /* to YUV_PLANAR */
        {  20, convert_uv_resample },  /* to YUV_PACKED */
        { 144, convert_yuv_to_rgb },   /* to RGB_PACKED */
        {  18, convert_yuv_unpack },   /* to YUV_NV */
        {  -1, NULL },                 /* to JPEG */
    },
    { /* from RGB_PACKED */
        { 112, convert_rgb_to_yuvp },  /* to GRAY */
        { 160, convert_rgb_to_yuvp },  /* to YUV_PLANAR */
        { 144, convert_rgb_to_yuv },   /* to YUV_PACKED */
        { 120, convert_rgb_resample }, /* to RGB_PACKED */
        { 152, convert_rgb_to_yuvp },  /* to YUV_NV */
        {  -1, NULL },                 /* to JPEG */
    },
    { /* from YUV_NV (FIXME treated as GRAY) */
        {   1, convert_copy },         /* to GRAY */
        {   8, convert_uvp_append },   /* to YUV_PLANAR */
        {  24, convert_yuv_pack },     /* to YUV_PACKED */
        {  32, convert_yuvp_to_rgb },  /* to RGB_PACKED */
        {   8, convert_uvp_append },   /* to YUV_NV */
        {  -1, NULL },                 /* to JPEG */
    },
#ifdef HAVE_LIBJPEG
    { /* from JPEG */
        {  96, _zbar_convert_jpeg_to_y }, /* to GRAY */
        { 104, convert_jpeg },         /* to YUV_PLANAR */
        { 116, convert_jpeg },         /* to YUV_PACKED */
        { 256, convert_jpeg },         /* to RGB_PACKED */
        { 104, convert_jpeg },         /* to YUV_NV */
        {  -1, NULL },                 /* to JPEG */
    },
#else
    { /* from JPEG */
        {  -1, NULL },                 /* to GRAY */
        {  -1, NULL },                 /* to YUV_PLANAR */
        {  -1, NULL },                 /* to YUV_PACKED */
        {  -1, NULL },                 /* to RGB_PACKED */
        {  -1, NULL },                 /* to YUV_NV */
        {  -1, NULL },                 /* to JPEG */
    },
#endif
};
939
_zbar_format_lookup(uint32_t fmt)940 const zbar_format_def_t *_zbar_format_lookup (uint32_t fmt)
941 {
942 const zbar_format_def_t *def = NULL;
943 int i = 0;
944 while(i < num_format_defs) {
945 def = &format_defs[i];
946 if(fmt == def->format)
947 return(def);
948 i = i * 2 + 1;
949 if(fmt > def->format)
950 i++;
951 }
952 return(NULL);
953 }
954
955 #ifdef HAVE_LIBJPEG
956 /* convert JPEG data via an intermediate format supported by libjpeg */
/* convert JPEG data via an intermediate format supported by libjpeg.
 * decompresses src into a temporary Y800 (grayscale) image, then
 * dispatches through the conversions table to reach dstfmt.
 */
static void convert_jpeg (zbar_image_t *dst,
                          const zbar_format_def_t *dstfmt,
                          const zbar_image_t *src,
                          const zbar_format_def_t *srcfmt)
{
    /* define intermediate image in a format supported by libjpeg
     * (currently only grayscale)
     */
    zbar_image_t *tmp;
    if(!src->src) {
        /* no video source: allocate a throwaway intermediate image,
         * sized to the requested destination */
        tmp = zbar_image_create();
        tmp->format = fourcc('Y','8','0','0');
        _zbar_image_copy_size(tmp, dst);
    }
    else {
        /* video source: reuse its cached decode image
         * (NOTE(review): presumably sized by the video layer; dst is
         * resized to match it below) */
        tmp = src->src->jpeg_img;
        assert(tmp);
        _zbar_image_copy_size(dst, tmp);
    }

    const zbar_format_def_t *tmpfmt = _zbar_format_lookup(tmp->format);
    assert(tmpfmt);

    /* convert to intermediate format */
    _zbar_convert_jpeg_to_y(tmp, tmpfmt, src, srcfmt);

    /* now convert to dst */
    _zbar_image_copy_size(dst, tmp);

    /* second hop: intermediate grayscale -> requested destination format */
    conversion_handler_t *func =
        conversions[tmpfmt->group][dstfmt->group].func;

    func(dst, dstfmt, tmp, tmpfmt);

    /* only destroy the intermediate if we allocated it here */
    if(!src->src)
        zbar_image_destroy(tmp);
}
994 #endif
995
zbar_image_convert_resize(const zbar_image_t * src,unsigned long fmt,unsigned width,unsigned height)996 zbar_image_t *zbar_image_convert_resize (const zbar_image_t *src,
997 unsigned long fmt,
998 unsigned width,
999 unsigned height)
1000 {
1001 const zbar_format_def_t *srcfmt, *dstfmt;
1002 conversion_handler_t *func;
1003 zbar_image_t *dst = zbar_image_create();
1004 dst->format = fmt;
1005 dst->width = width;
1006 dst->height = height;
1007 zbar_image_set_crop(dst, src->crop_x, src->crop_y,
1008 src->crop_w, src->crop_h);
1009 if(src->format == fmt &&
1010 src->width == width &&
1011 src->height == height) {
1012 convert_copy(dst, NULL, src, NULL);
1013 return(dst);
1014 }
1015
1016 srcfmt = _zbar_format_lookup(src->format);
1017 dstfmt = _zbar_format_lookup(dst->format);
1018 if(!srcfmt || !dstfmt)
1019 /* FIXME free dst */
1020 return(NULL);
1021
1022 if(srcfmt->group == dstfmt->group &&
1023 srcfmt->p.cmp == dstfmt->p.cmp &&
1024 src->width == width &&
1025 src->height == height) {
1026 convert_copy(dst, NULL, src, NULL);
1027 return(dst);
1028 }
1029
1030 func = conversions[srcfmt->group][dstfmt->group].func;
1031
1032 dst->cleanup = zbar_image_free_data;
1033 func(dst, dstfmt, src, srcfmt);
1034 if(!dst->data) {
1035 /* conversion failed */
1036 zbar_image_destroy(dst);
1037 return(NULL);
1038 }
1039 return(dst);
1040 }
1041
zbar_image_convert(const zbar_image_t * src,unsigned long fmt)1042 zbar_image_t *zbar_image_convert (const zbar_image_t *src,
1043 unsigned long fmt)
1044 {
1045 return(zbar_image_convert_resize(src, fmt, src->width, src->height));
1046 }
1047
/* scan the zero-terminated format list fmts for an exact match of fmt.
 * returns 1 if present, 0 otherwise.
 */
static inline int has_format (uint32_t fmt,
                              const uint32_t *fmts)
{
    while(*fmts) {
        if(*fmts++ == fmt)
            return(1);
    }
    return(0);
}
1056
1057 /* select least cost conversion from src format to available dsts */
_zbar_best_format(uint32_t src,uint32_t * dst,const uint32_t * dsts)1058 int _zbar_best_format (uint32_t src,
1059 uint32_t *dst,
1060 const uint32_t *dsts)
1061 {
1062 const zbar_format_def_t *srcfmt;
1063 unsigned min_cost = -1;
1064
1065 if(dst)
1066 *dst = 0;
1067 if(!dsts)
1068 return(-1);
1069 if(has_format(src, dsts)) {
1070 zprintf(8, "shared format: %4.4s\n", (char*)&src);
1071 if(dst)
1072 *dst = src;
1073 return(0);
1074 }
1075 srcfmt = _zbar_format_lookup(src);
1076 if(!srcfmt)
1077 return(-1);
1078
1079 zprintf(8, "from %.4s(%08" PRIx32 ") to", (char*)&src, src);
1080 for(; *dsts; dsts++) {
1081 const zbar_format_def_t *dstfmt = _zbar_format_lookup(*dsts);
1082 int cost;
1083 if(!dstfmt)
1084 continue;
1085 if(srcfmt->group == dstfmt->group &&
1086 srcfmt->p.cmp == dstfmt->p.cmp)
1087 cost = 0;
1088 else
1089 cost = conversions[srcfmt->group][dstfmt->group].cost;
1090
1091 if(_zbar_verbosity >= 8)
1092 fprintf(stderr, " %.4s(%08" PRIx32 ")=%d",
1093 (char*)dsts, *dsts, cost);
1094 if(cost >= 0 && min_cost > cost) {
1095 min_cost = cost;
1096 if(dst)
1097 *dst = *dsts;
1098 }
1099 }
1100 if(_zbar_verbosity >= 8)
1101 fprintf(stderr, "\n");
1102 return(min_cost);
1103 }
1104
zbar_negotiate_format(zbar_video_t * vdo,zbar_window_t * win)1105 int zbar_negotiate_format (zbar_video_t *vdo,
1106 zbar_window_t *win)
1107 {
1108 static const uint32_t y800[2] = { fourcc('Y','8','0','0'), 0 };
1109 errinfo_t *errdst;
1110 const uint32_t *srcs, *dsts;
1111 unsigned min_cost = -1;
1112 uint32_t min_fmt = 0;
1113 const uint32_t *fmt;
1114
1115 if(!vdo && !win)
1116 return(0);
1117
1118 if(win)
1119 (void)window_lock(win);
1120
1121 errdst = (vdo) ? &vdo->err : &win->err;
1122 if(verify_format_sort()) {
1123 if(win)
1124 (void)window_unlock(win);
1125 return(err_capture(errdst, SEV_FATAL, ZBAR_ERR_INTERNAL, __func__,
1126 "image format list is not sorted!?"));
1127 }
1128
1129 if((vdo && !vdo->formats) || (win && !win->formats)) {
1130 if(win)
1131 (void)window_unlock(win);
1132 return(err_capture(errdst, SEV_ERROR, ZBAR_ERR_UNSUPPORTED, __func__,
1133 "no input or output formats available"));
1134 }
1135
1136 srcs = (vdo) ? vdo->formats : y800;
1137 dsts = (win) ? win->formats : y800;
1138
1139 for(fmt = _zbar_formats; *fmt; fmt++) {
1140 /* only consider formats supported by video device */
1141 uint32_t win_fmt = 0;
1142 int cost;
1143 if(!has_format(*fmt, srcs))
1144 continue;
1145 cost = _zbar_best_format(*fmt, &win_fmt, dsts);
1146 if(cost < 0) {
1147 zprintf(4, "%.4s(%08" PRIx32 ") -> ? (unsupported)\n",
1148 (char*)fmt, *fmt);
1149 continue;
1150 }
1151 zprintf(4, "%.4s(%08" PRIx32 ") -> %.4s(%08" PRIx32 ") (%d)\n",
1152 (char*)fmt, *fmt, (char*)&win_fmt, win_fmt, cost);
1153 if(min_cost > cost) {
1154 min_cost = cost;
1155 min_fmt = *fmt;
1156 if(!cost)
1157 break;
1158 }
1159 }
1160 if(!min_fmt && vdo->emu_formats) {
1161 /* As vdo->formats aren't compatible, just free them */
1162 free(vdo->formats);
1163 vdo->formats = vdo->emu_formats;
1164 vdo->emu_formats = NULL;
1165
1166 srcs = (vdo) ? vdo->formats : y800;
1167 dsts = (win) ? win->formats : y800;
1168
1169 /*
1170 * Use the same cost algorithm to select emulated formats.
1171 * This might select a sub-optimal conversion, but, in practice,
1172 * it will select a conversion to YUV at libv4l, and a YUY->Y8
1173 * in zbar, with it is OK. Yet, it is better to not select the
1174 * most performatic conversion than to not support the webcam.
1175 */
1176 for(fmt = _zbar_formats; *fmt; fmt++) {
1177 /* only consider formats supported by video device */
1178 uint32_t win_fmt = 0;
1179 int cost;
1180 if(!has_format(*fmt, srcs))
1181 continue;
1182 cost = _zbar_best_format(*fmt, &win_fmt, dsts);
1183 if(cost < 0) {
1184 zprintf(4, "%.4s(%08" PRIx32 ") -> ? (unsupported)\n",
1185 (char*)fmt, *fmt);
1186 continue;
1187 }
1188 zprintf(4, "%.4s(%08" PRIx32 ") -> %.4s(%08" PRIx32 ") (%d)\n",
1189 (char*)fmt, *fmt, (char*)&win_fmt, win_fmt, cost);
1190 if(min_cost > cost) {
1191 min_cost = cost;
1192 min_fmt = *fmt;
1193 if(!cost)
1194 break;
1195 }
1196 }
1197 }
1198
1199 if(win)
1200 (void)window_unlock(win);
1201
1202 if(!min_fmt)
1203 return(err_capture(errdst, SEV_ERROR, ZBAR_ERR_UNSUPPORTED, __func__,
1204 "no supported image formats available"));
1205 if(!vdo)
1206 return(0);
1207
1208 zprintf(2, "setting best format %.4s(%08" PRIx32 ") (%d)\n",
1209 (char*)&min_fmt, min_fmt, min_cost);
1210 return(zbar_video_init(vdo, min_fmt));
1211 }
1212