1 /*
2 * This files includes a straightforward (to be) optimized JPEG encoder for
3 * the YUV422 format, based on mjpeg code from ffmpeg.
4 *
5 * For an excellent introduction to the JPEG format, see:
6 * http://www.ece.purdue.edu/~bouman/grad-labs/lab8/pdf/lab.pdf
7 *
8 * Copyright (C) 2005 Rik Snel <rsnel@cube.dyndns.org>
9 * - based on vd_lavc.c by A'rpi (C) 2002-2003
10 * - parts from ffmpeg Copyright (c) 2000-2003 Fabrice Bellard
11 *
12 * This file is part of MPlayer.
13 *
14 * MPlayer is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * MPlayer is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
26 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 */
28
29 /**
30 * \file
31 *
32 * \brief Does mjpeg encoding as required by the zrmjpeg filter as well
33 * as by the zr video driver.
34 */
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <inttypes.h>
40
41 #include "config.h"
42 #include "libavutil/avstring.h"
43 #include "av_helpers.h"
44 #include "mp_msg.h"
45 #include "img_format.h"
46 #include "mp_image.h"
47 #include "vf.h"
48
/* We need this #define because we need ../libavcodec/common.h to #define
 * be2me_32, otherwise the linker will complain that it doesn't exist */
#define HAVE_AV_CONFIG_H
#include "libavcodec/avcodec.h"
#include "libavcodec/mjpegenc.h"

/* undo the libavutil macro overrides so the plain libc versions of
 * malloc/free and the real av_strcasecmp function can be used here */
#undef malloc
#undef free
#undef av_strcasecmp

/* some convenient #define's, is this portable enough? */
/// Printout with vf_zrmjpeg: prefix at VERBOSE level
#define VERBOSE(...) mp_msg(MSGT_DECVIDEO, MSGL_V, "vf_zrmjpeg: " __VA_ARGS__)
/// Printout with vf_zrmjpeg: prefix at ERROR level
#define ERROR(...) mp_msg(MSGT_DECVIDEO, MSGL_ERR, "vf_zrmjpeg: " __VA_ARGS__)
/// Printout with vf_zrmjpeg: prefix at WARNING level
#define WARNING(...) mp_msg(MSGT_DECVIDEO, MSGL_WARN, \
		"vf_zrmjpeg: " __VA_ARGS__)

/// The get_pixels() routine to use. The real routine comes from dsputil;
/// it is selected in jpeg_enc_init() (j->s->dsp.get_pixels) and copies one
/// 8x8 block of 8-bit pixels into the int16_t layout the DCT expects.
static void (*get_pixels)(int16_t *restrict block, const uint8_t *pixels, int line_size);
70
71 /* Begin excessive code duplication ************************************/
72 /* Code coming from mpegvideo.c and mjpeg.c in ../libavcodec ***********/
73
/// copy of the table in mpegvideo.c
/// Scale factors for the AAN fast DCT; folded into the quantizer matrix
/// for the ff_fdct_ifast path in convert_matrix() below.
static const unsigned short aanscales[64] = {
	/**< precomputed values scaled up by 14 bits */
	16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
	22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
	21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
	19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
	16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
	12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
	8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446,
	4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247
};
86
87 /// Precompute DCT quantizing matrix
88 /**
89 * This routine will precompute the combined DCT matrix with qscale
90 * and DCT renorm needed by the MPEG encoder here. It is basically the
91 * same as the routine with the same name in mpegvideo.c, except for
92 * some coefficient changes. The matrix will be computed in two variations,
93 * depending on the DCT version used. The second used by the MMX version of DCT.
94 *
95 * \param s MpegEncContext pointer
96 * \param qmat[OUT] pointer to where the matrix is stored
97 * \param qmat16[OUT] pointer to where matrix for MMX is stored.
98 * This matrix is not permutated
99 * and second 64 entries are bias
100 * \param quant_matrix[IN] the quantizion matrix to use
101 * \param bias bias for the quantizer
102 * \param qmin minimum qscale value to set up for
103 * \param qmax maximum qscale value to set up for
104 *
105 * Only rows between qmin and qmax will be populated in the matrix.
106 * In this MJPEG encoder, only the value 8 for qscale is used.
107 */
static void convert_matrix(MpegEncContext *s, int (*qmat)[64],
		uint16_t (*qmat16)[2][64], const uint16_t *quant_matrix,
		int bias, int qmin, int qmax) {
	int qscale;

	for(qscale = qmin; qscale <= qmax; qscale++) {
		int i;
		/* the combined matrix depends on which fdct implementation
		 * dsputil selected, since each produces differently scaled
		 * coefficients */
		if (s->dsp.fdct == ff_jpeg_fdct_islow_8) {
			/* accurate integer DCT: no AAN renorm factor in the
			 * divisor */
			for (i = 0; i < 64; i++) {
				const int j = s->dsp.idct_permutation[i];
				/* 16 <= qscale * quant_matrix[i] <= 7905
				 * 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026
				 * (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i])
				 * >= (1<<36)/249205026
				 * 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
				qmat[qscale][i] = (int)((UINT64_C(1) <<
					(QMAT_SHIFT-3))/
					(qscale*quant_matrix[j]));
			}
		} else if (s->dsp.fdct == ff_fdct_ifast) {
			/* AAN fast DCT: fold the aanscales renormalization
			 * into the quantizer divisor */
			for (i = 0; i < 64; i++) {
				const int j = s->dsp.idct_permutation[i];
				/* 16 <= qscale * quant_matrix[i] <= 7905
				 * 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026
				 * (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i])
				 * >= (1<<36)/249205026
				 * 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
				qmat[qscale][i] = (int)((UINT64_C(1) <<
					(QMAT_SHIFT + 11))/(aanscales[i]
						*qscale * quant_matrix[j]));
			}
		} else {
			/* generic / MMX quantizer: also fill the 16-bit
			 * matrix (reciprocals plus bias) used by the MMX
			 * dct_quantize */
			for (i = 0; i < 64; i++) {
				const int j = s->dsp.idct_permutation[i];
				/* We can safely assume that 16 <= quant_matrix[i] <= 255
				 * So 16 <= qscale * quant_matrix[i] <= 7905
				 * so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
				 * so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67 */
				qmat[qscale][i] = (int)((UINT64_C(1) <<
					QMAT_SHIFT_MMX) / (qscale
						*quant_matrix[j]));
				qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX)
					/(qscale * quant_matrix[j]);

				/* keep the reciprocal inside the range the
				 * MMX code can handle */
				if (qmat16[qscale][0][i] == 0 ||
						qmat16[qscale][0][i] == 128*256)
					qmat16[qscale][0][i]=128*256-1;
				qmat16[qscale][1][i]=ROUNDED_DIV(bias
						<<(16-QUANT_BIAS_SHIFT),
						qmat16[qscale][0][i]);
			}
		}
	}
}
162
163 /// Emit the DC value into a MJPEG code sream
164 /**
165 * This routine is only intended to be used from encode_block
166 *
167 * \param s pointer to MpegEncContext structure
168 * \param val the DC value to emit
169 * \param huff_size pointer to huffman code size array
170 * \param huff_code pointer to the code array corresponding to \a huff_size
171 *
172 * This routine is a clone of mjpeg_encode_dc
173 */
static inline void encode_dc(MpegEncContext *s, int val,
		uint8_t *huff_size, uint16_t *huff_code) {
	int mantissa, bits;

	/* a zero difference is encoded as the size-0 huffman symbol alone */
	if (val == 0) {
		put_bits(&s->pb, huff_size[0], huff_code[0]);
		return;
	}

	/* JPEG encodes negative values in one's-complement style: the
	 * mantissa of -v is (v's mantissa) - 1, truncated to `bits` bits */
	mantissa = val;
	if (val < 0) {
		val = -val;
		mantissa--;
	}

	/* number of bits needed for the magnitude */
	bits = av_log2_16bit(val) + 1;

	/* emit the size symbol, then the raw mantissa bits */
	put_bits(&s->pb, huff_size[bits], huff_code[bits]);
	put_bits(&s->pb, bits, mantissa & ((1 << bits) - 1));
}
191
192 /// Huffman encode and emit one DCT block into the MJPEG code stream
193 /**
194 * \param s pointer to MpegEncContext structure
195 * \param block pointer to the DCT block to emit
196 * \param n
197 *
198 * This routine is a duplicate of encode_block in mjpeg.c
199 */
static void encode_block(MpegEncContext *s, int16_t *block, int n) {
	int mant, nbits, code, i, j;
	int component, dc, run, last_index, val;
	MJpegContext *m = s->mjpeg_ctx;
	uint8_t *huff_size_ac;
	uint16_t *huff_code_ac;

	/* DC coef */
	/* blocks 0..3 are luma (component 0), block 4 is U (1), 5 is V (2) */
	component = (n <= 3 ? 0 : n - 4 + 1);
	dc = block[0]; /* overflow is impossible */
	/* DC is coded as the difference to the previous block's DC of the
	 * same component */
	val = dc - s->last_dc[component];
	if (n < 4) {
		encode_dc(s, val, m->huff_size_dc_luminance,
				m->huff_code_dc_luminance);
		huff_size_ac = m->huff_size_ac_luminance;
		huff_code_ac = m->huff_code_ac_luminance;
	} else {
		encode_dc(s, val, m->huff_size_dc_chrominance,
				m->huff_code_dc_chrominance);
		huff_size_ac = m->huff_size_ac_chrominance;
		huff_code_ac = m->huff_code_ac_chrominance;
	}
	s->last_dc[component] = dc;

	/* AC coefs */

	run = 0;
	last_index = s->block_last_index[n];
	for (i = 1; i <= last_index; i++) {
		/* walk the coefficients in zigzag (scantable) order */
		j = s->intra_scantable.permutated[i];
		val = block[j];
		if (val == 0) run++;
		else {
			/* runs of 16+ zeros need ZRL (0xf0) symbols first */
			while (run >= 16) {
				put_bits(&s->pb, huff_size_ac[0xf0],
						huff_code_ac[0xf0]);
				run -= 16;
			}
			/* one's-complement style mantissa for negatives,
			 * same scheme as encode_dc() */
			mant = val;
			if (val < 0) {
				val = -val;
				mant--;
			}

			nbits= av_log2_16bit(val) + 1;
			/* AC symbol packs run length (high nibble) and
			 * magnitude size (low nibble) */
			code = (run << 4) | nbits;

			put_bits(&s->pb, huff_size_ac[code],
					huff_code_ac[code]);
			put_bits(&s->pb, nbits, mant & ((1 << nbits) - 1));
			run = 0;
		}
	}

	/* output EOB only if not already 64 values */
	if (last_index < 63 || run != 0)
		put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]);
}
258
259 /// clip overflowing DCT coefficients
260 /**
261 * If the computed DCT coefficients in a block overflow, this routine
262 * will go through them and clip them to be in the valid range.
263 *
264 * \param s pointer to MpegEncContext
265 * \param block pointer to DCT block to process
266 * \param last_index index of the last non-zero coefficient in block
267 *
268 * The max and min level, which are clipped to, are stored in
269 * s->min_qcoeff and s->max_qcoeff respectively.
270 */
static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
		int last_index) {
	/* clamp every coefficient up to last_index (scantable order) into
	 * [s->min_qcoeff, s->max_qcoeff] */
	const int hi = s->max_qcoeff;
	const int lo = s->min_qcoeff;
	int idx;

	for (idx = 0; idx <= last_index; idx++) {
		const int pos = s->intra_scantable.permutated[idx];
		int coeff = block[pos];

		if (coeff < lo)
			coeff = lo;
		else if (coeff > hi)
			coeff = hi;
		block[pos] = coeff;
	}
}
286
287 /* End excessive code duplication **************************************/
288
/// State of the MJPEG encoder, created by jpeg_enc_init()
typedef struct {
	struct MpegEncContext *s;	///< underlying ffmpeg encoder context
	int cheap_upsample;	///< 1: YUV420 input, duplicate chroma rows
	int bw;			///< 1: monochrome, skip U/V blocks
	int y_rs;		///< rowstride of the Y plane
	int u_rs;		///< rowstride of the U plane
	int v_rs;		///< rowstride of the V plane
} jpeg_enc_t;
297
298 // Huffman encode and emit one MCU of MJPEG code
299 /**
300 * \param j pointer to jpeg_enc_t structure
301 *
302 * This function huffman encodes one MCU, and emits the
303 * resulting bitstream into the MJPEG code that is currently worked on.
304 *
305 * this function is a reproduction of the one in mjpeg, it includes two
306 * changes, it allows for black&white encoding (it skips the U and V
307 * macroblocks and it outputs the huffman code for 'no change' (dc) and
308 * 'all zero' (ac)) and it takes 4 macroblocks (422) instead of 6 (420)
309 */
static av_always_inline void zr_mjpeg_encode_mb(jpeg_enc_t *j) {

	MJpegContext *m = j->s->mjpeg_ctx;

	/* the two luma blocks of the 16x8 MCU */
	encode_block(j->s, j->s->block[0], 0);
	encode_block(j->s, j->s->block[1], 1);
	if (j->bw) {
		/* monochrome: emit the huffman symbols for "no DC change"
		 * and "all-zero AC" instead of real chroma data */
		/* U */
		put_bits(&j->s->pb, m->huff_size_dc_chrominance[0],
				m->huff_code_dc_chrominance[0]);
		put_bits(&j->s->pb, m->huff_size_ac_chrominance[0],
				m->huff_code_ac_chrominance[0]);
		/* V */
		put_bits(&j->s->pb, m->huff_size_dc_chrominance[0],
				m->huff_code_dc_chrominance[0]);
		put_bits(&j->s->pb, m->huff_size_ac_chrominance[0],
				m->huff_code_ac_chrominance[0]);
	} else {
		/* we trick encode_block here so that it uses
		 * chrominance huffman tables instead of luminance ones
		 * (see the effect of second argument of encode_block) */
		encode_block(j->s, j->s->block[2], 4);
		encode_block(j->s, j->s->block[3], 5);
	}
}
335
336 /// Fill one DCT MCU from planar storage
337 /**
338 * This routine will convert one MCU from YUYV planar storage into 4
339 * DCT macro blocks, converting from 8-bit format in the planar
340 * storage to 16-bit format used in the DCT.
341 *
342 * \param j pointer to jpeg_enc structure, and also storage for DCT macro blocks
343 * \param x pixel x-coordinate for the first pixel
344 * \param y pixel y-coordinate for the first pixel
345 * \param y_data pointer to the Y plane
346 * \param u_data pointer to the U plane
347 * \param v_data pointer to the V plane
348 */
fill_block(jpeg_enc_t * j,int x,int y,unsigned char * y_data,unsigned char * u_data,unsigned char * v_data)349 static av_always_inline void fill_block(jpeg_enc_t *j, int x, int y,
350 unsigned char *y_data, unsigned char *u_data,
351 unsigned char *v_data)
352 {
353 int i, k;
354 short int *dest;
355 unsigned char *source;
356
357 // The first Y, Y0
358 get_pixels(j->s->block[0], y*8*j->y_rs + 16*x + y_data, j->y_rs);
359 // The second Y, Y1
360 get_pixels(j->s->block[1], y*8*j->y_rs + 16*x + 8 + y_data, j->y_rs);
361
362 if (!j->bw && j->cheap_upsample) {
363 source = y * 4 * j->u_rs + 8*x + u_data;
364 dest = j->s->block[2];
365 for (i = 0; i < 4; i++) {
366 for (k = 0; k < 8; k++) {
367 dest[k] = source[k]; // First row
368 dest[k+8] = source[k]; // Duplicate to next row
369
370 }
371 dest += 16;
372 source += j->u_rs;
373 }
374 source = y * 4 * j->v_rs + 8*x + v_data;
375 dest = j->s->block[3];
376 for (i = 0; i < 4; i++) {
377 for (k = 0; k < 8; k++) {
378 dest[k] = source[k];
379 dest[k+8] = source[k];
380 }
381 dest += 16;
382 source += j->u_rs;
383 }
384 } else if (!j->bw && !j->cheap_upsample) {
385 // U
386 get_pixels(j->s->block[2], y*8*j->u_rs + 8*x + u_data, j->u_rs);
387 // V
388 get_pixels(j->s->block[3], y*8*j->v_rs + 8*x + v_data, j->v_rs);
389 }
390 }
391
392 /**
393 * \brief initialize mjpeg encoder
394 *
395 * This routine is to set up the parameters and initialize the mjpeg encoder.
396 * It does all the initializations needed of lower level routines.
397 * The formats accepted by this encoder is YUV422P and YUV420
398 *
399 * \param w width in pixels of the image to encode, must be a multiple of 16
400 * \param h height in pixels of the image to encode, must be a multiple of 8
401 * \param y_rsize size of each plane row Y component
402 * \param y_rsize size of each plane row U component
403 * \param v_rsize size of each plane row V component
404 * \param cu "cheap upsample". Set to 0 for YUV422 format, 1 for YUV420 format
405 * when set to 1, the encoder will assume that there is only half th
406 * number of rows of chroma information, and every chroma row is
407 * duplicated.
408 * \param q quality parameter for the mjpeg encode. Between 1 and 20 where 1
409 * is best quality and 20 is the worst quality.
410 * \param b monochrome flag. When set to 1, the mjpeg output is monochrome.
411 * In that case, the colour information is omitted, and actually the
412 * colour planes are not touched.
413 *
414 * \returns an appropriately set up jpeg_enc_t structure
415 *
416 * The actual plane buffer addreses are passed by jpeg_enc_frame().
417 *
418 * The encoder doesn't know anything about interlacing, the halve height
419 * needs to be passed and the double rowstride. Which field gets encoded
420 * is decided by what buffers are passed to mjpeg_encode_frame()
421 */
static jpeg_enc_t *jpeg_enc_init(int w, int h, int y_rsize,
		int u_rsize, int v_rsize,
		int cu, int q, int b) {
	jpeg_enc_t *j;
	int i = 0;
	VERBOSE("JPEG encoder init: %dx%d %d %d %d cu=%d q=%d bw=%d\n",
			w, h, y_rsize, u_rsize, v_rsize, cu, q, b);

	j = av_mallocz(sizeof(jpeg_enc_t));
	if (j == NULL) return NULL;

	j->s = av_mallocz(sizeof(MpegEncContext));
	if (j->s == NULL) {
		av_free(j);
		return NULL;
	}

	/* info on how to access the pixels */
	j->y_rs = y_rsize;
	j->u_rs = u_rsize;
	j->v_rs = v_rsize;

	j->s->width = w;	// image width and height
	j->s->height = h;
	j->s->qscale = q;	// Encoding quality

	j->s->out_format = FMT_MJPEG;
	j->s->intra_only = 1;	// Generate only intra pictures for jpeg
	j->s->encoding = 1;	// Set mode to encode
	j->s->pict_type = AV_PICTURE_TYPE_I;
	j->s->y_dc_scale = 8;
	j->s->c_dc_scale = 8;

	/*
	 * This sets up the MCU (Minimal Code Unit) number
	 * of appearances of the various component
	 * for the SOF0 table in the generated MJPEG.
	 * The values are not used for anything else.
	 * The current setup is simply YUV422, with two horizontal Y components
	 * for every UV component.
	 */
	//FIXME j->s->mjpeg_write_tables = 1;	// setup to write tables
	j->s->mjpeg_vsample[0] = 1;	// 1 appearance of Y vertically
	j->s->mjpeg_vsample[1] = 1;	// 1 appearance of U vertically
	j->s->mjpeg_vsample[2] = 1;	// 1 appearance of V vertically
	j->s->mjpeg_hsample[0] = 2;	// 2 appearances of Y horizontally
	j->s->mjpeg_hsample[1] = 1;	// 1 appearance of U horizontally
	j->s->mjpeg_hsample[2] = 1;	// 1 appearance of V horizontally

	j->cheap_upsample = cu;
	j->bw = b;

	init_avcodec();

	// Build mjpeg huffman code tables, setting up j->s->mjpeg_ctx
	if (ff_mjpeg_encode_init(j->s) < 0) {
		av_free(j->s);
		av_free(j);
		return NULL;
	}

	/* alloc bogus avctx to keep MPV_common_init from segfaulting */
	j->s->avctx = avcodec_alloc_context();
	if (j->s->avctx == NULL) {
		av_free(j->s);
		av_free(j);
		return NULL;
	}

	// Set some a minimum amount of default values that are needed
	// Indicates that we should generated normal MJPEG
	j->s->avctx->codec_id = AV_CODEC_ID_MJPEG;
	// Which DCT method to use. AUTO will select the fastest one
	j->s->avctx->dct_algo = FF_DCT_AUTO;
	j->s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
	// indicate we 'decode' to jpeg 4:2:2
	j->s->avctx->pix_fmt = AV_PIX_FMT_YUVJ422P;

	j->s->avctx->thread_count = 1;

	/* make MPV_common_init allocate important buffers, like s->block
	 * Also initializes dsputil */
	if (ff_MPV_common_init(j->s) < 0) {
		av_free(j->s);
		av_free(j);
		return NULL;
	}

	/* correct the value for sc->mb_height. MPV_common_init put other
	 * values there */
	j->s->mb_height = j->s->height/8;
	j->s->mb_intra = 1;

	// Init q matrix
	// first entry is the DC coefficient, kept unscaled
	j->s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
	for (i = 1; i < 64; i++)
		j->s->intra_matrix[i] = av_clip_uint8(
			(ff_mpeg1_default_intra_matrix[i]*j->s->qscale) >> 3);

	// precompute matrix
	convert_matrix(j->s, j->s->q_intra_matrix, j->s->q_intra_matrix16,
			j->s->intra_matrix, j->s->intra_quant_bias, 8, 8);

	/* Pick up the selection of the optimal get_pixels() routine
	 * to use, which was done in MPV_common_init() */
	get_pixels = j->s->dsp.get_pixels;

	return j;
}
531
532 /**
533 * \brief mjpeg encode an image
534 *
535 * This routine will take a 3-plane YUV422 image and encoded it with MJPEG
536 * base line format, as suitable as input for the Zoran hardare MJPEG chips.
537 *
538 * It requires that the \a j parameter points the structure set up by the
539 * jpeg_enc_init() routine.
540 *
541 * \param j pointer to jpeg_enc_t structure as created by jpeg_enc_init()
542 * \param y_data pointer to Y component plane, packed one byte/pixel
543 * \param u_data pointer to U component plane, packed one byte per every
544 * other pixel
545 * \param v_data pointer to V component plane, packed one byte per every
546 * other pixel
547 * \param bufr pointer to the buffer where the mjpeg encoded code is stored
548 *
549 * \returns the number of bytes stored into \a bufr
550 *
551 * If \a j->s->mjpeg_write_tables is set, it will also emit the mjpeg tables,
552 * otherwise it will just emit the data. The \a j->s->mjpeg_write_tables
553 * variable will be reset to 0 by the routine.
554 */
static int jpeg_enc_frame(jpeg_enc_t *j, uint8_t *y_data,
		uint8_t *u_data, uint8_t *v_data, uint8_t *bufr) {
	int mb_x, mb_y, overflow;
	/* initialize the buffer */

	init_put_bits(&j->s->pb, bufr, 1024*256);

	// Emit the mjpeg header blocks
	ff_mjpeg_encode_picture_header(j->s);

	j->s->header_bits = put_bits_count(&j->s->pb);

	/* reset DC prediction to mid-level for all three components */
	j->s->last_dc[0] = 128;
	j->s->last_dc[1] = 128;
	j->s->last_dc[2] = 128;

	for (mb_y = 0; mb_y < j->s->mb_height; mb_y++) {
		for (mb_x = 0; mb_x < j->s->mb_width; mb_x++) {
			/*
			 * Fill one DCT block (8x8 pixels) from
			 * 2 Y macroblocks and one U and one V
			 */
			fill_block(j, mb_x, mb_y, y_data, u_data, v_data);
			emms_c(); /* is this really needed? */

			j->s->block_last_index[0] =
				j->s->dct_quantize(j->s, j->s->block[0],
						0, 8, &overflow);
			if (overflow) clip_coeffs(j->s, j->s->block[0],
					j->s->block_last_index[0]);
			j->s->block_last_index[1] =
				j->s->dct_quantize(j->s, j->s->block[1],
						1, 8, &overflow);
			if (overflow) clip_coeffs(j->s, j->s->block[1],
					j->s->block_last_index[1]);

			if (!j->bw) {
				/* chroma data lives in block[2]/block[3] but
				 * is quantized as components 4/5 so that the
				 * chrominance tables are used (see
				 * zr_mjpeg_encode_mb); the last_index
				 * bookkeeping must use the same slots */
				j->s->block_last_index[4] =
					j->s->dct_quantize(j->s, j->s->block[2],
							4, 8, &overflow);
				/* FIX: clip with the index just stored in
				 * slot 4; this previously read the
				 * uninitialized slot 2 */
				if (overflow) clip_coeffs(j->s, j->s->block[2],
						j->s->block_last_index[4]);
				j->s->block_last_index[5] =
					j->s->dct_quantize(j->s, j->s->block[3],
							5, 8, &overflow);
				/* FIX: same for slot 5 (was slot 3) */
				if (overflow) clip_coeffs(j->s, j->s->block[3],
						j->s->block_last_index[5]);
			}
			zr_mjpeg_encode_mb(j);
		}
	}
	emms_c();
	ff_mjpeg_encode_picture_trailer(j->s);
	flush_put_bits(&j->s->pb);

	//FIXME
	//if (j->s->mjpeg_write_tables == 1)
	//	j->s->mjpeg_write_tables = 0;

	/* number of bytes written to bufr */
	return put_bits_ptr(&(j->s->pb)) - j->s->pb.buf;
}
616
617 /// the real uninit routine
618 /**
619 * This is the real routine that does the uninit of the ZRMJPEG filter
620 *
621 * \param j pointer to jpeg_enc structure
622 */
static void jpeg_enc_uninit(jpeg_enc_t *j) {
	/* frees the huffman tables allocated by ff_mjpeg_encode_init() */
	ff_mjpeg_encode_close(j->s);
	/* NOTE(review): j->s->avctx and the buffers allocated by
	 * ff_MPV_common_init() do not appear to be released here —
	 * possible leak, confirm against ff_MPV_common_end() */
	av_free(j->s);
	av_free(j);
}
628
629 /// Private structure for ZRMJPEG filter
/// Private structure for ZRMJPEG filter
struct vf_priv_s {
	jpeg_enc_t *j;			///< the encoder, created in config()
	unsigned char buf[256*1024];	///< output buffer for encoded frame(s)
	/* option flags: monochrome ("bw"), force decimation ("fd"),
	 * horizontal and vertical decimation factors (1, 2 or 4) */
	int bw, fd, hdec, vdec;
	int fields;		///< fields per frame: 1, or 2 when interlaced
	int y_stride;		///< luma rowstride of the input image
	int c_stride;		///< chroma rowstride of the input image
	int quality;		///< JPEG quality: 1 (best) .. 20 (worst)
	int maxwidth;		///< maximum output width of the zoran card
	int maxheight;		///< maximum output height of the zoran card
};
641
642 /// vf CONFIGURE entry point for the ZRMJPEG filter
643 /**
644 * \param vf video filter instance pointer
645 * \param width image source width in pixels
646 * \param height image source height in pixels
647 * \param d_width width of requested window, just a hint
648 * \param d_height height of requested window, just a hint
649 * \param flags vf filter flags
650 * \param outfmt
651 *
652 * \returns returns 0 on error
653 *
654 * This routine will make the necessary hardware-related decisions for
655 * the ZRMJPEG filter, do the initialization of the MJPEG encoder, and
656 * then select one of the ZRJMJPEGIT or ZRMJPEGNI filters and then
657 * arrange to dispatch to the config() entry pointer for the one
658 * selected.
659 */
config(struct vf_instance * vf,int width,int height,int d_width,int d_height,unsigned int flags,unsigned int outfmt)660 static int config(struct vf_instance *vf, int width, int height, int d_width,
661 int d_height, unsigned int flags, unsigned int outfmt){
662 struct vf_priv_s *priv = vf->priv;
663 float aspect_decision;
664 int stretchx, stretchy, err = 0, maxstretchx = 4;
665 priv->fields = 1;
666
667 VERBOSE("config() called\n");
668
669 if (priv->j) {
670 VERBOSE("re-configuring, resetting JPEG encoder\n");
671 jpeg_enc_uninit(priv->j);
672 priv->j = NULL;
673 }
674
675 aspect_decision = ((float)d_width/(float)d_height)/
676 ((float)width/(float)height);
677
678 if (aspect_decision > 1.8 && aspect_decision < 2.2) {
679 VERBOSE("should correct aspect by stretching x times 2, %d %d\n", 2*width, priv->maxwidth);
680 if (2*width <= priv->maxwidth) {
681 d_width = 2*width;
682 d_height = height;
683 maxstretchx = 2;
684 } else {
685 WARNING("unable to correct aspect by stretching, because resulting X will be too large, aspect correction by decimating y not yet implemented\n");
686 d_width = width;
687 d_height = height;
688 }
689 /* prestretch movie */
690 } else {
691 /* uncorrecting output for now */
692 d_width = width;
693 d_height = height;
694 }
695 /* make the scaling decision
696 * we are capable of stretching the image in the horizontal
697 * direction by factors 1, 2 and 4
698 * we can stretch the image in the vertical direction by a
699 * factor of 1 and 2 AND we must decide about interlacing */
700 if (d_width > priv->maxwidth/2 || height > priv->maxheight/2
701 || maxstretchx == 1) {
702 stretchx = 1;
703 stretchy = 1;
704 priv->fields = 2;
705 if (priv->vdec == 2) {
706 priv->fields = 1;
707 } else if (priv->vdec == 4) {
708 priv->fields = 1;
709 stretchy = 2;
710 }
711 if (priv->hdec > maxstretchx) {
712 if (priv->fd) {
713 WARNING("horizontal decimation too high, "
714 "changing to %d (use fd to keep"
715 " hdec=%d)\n",
716 maxstretchx, priv->hdec);
717 priv->hdec = maxstretchx;
718 }
719 }
720 stretchx = priv->hdec;
721 } else if (d_width > priv->maxwidth/4 ||
722 height > priv->maxheight/4 ||
723 maxstretchx == 2) {
724 stretchx = 2;
725 stretchy = 1;
726 priv->fields = 1;
727 if (priv->vdec == 2) {
728 stretchy = 2;
729 } else if (priv->vdec == 4) {
730 if (!priv->fd) {
731 WARNING("vertical decimation too high, "
732 "changing to 2 (use fd to keep "
733 "vdec=4)\n");
734 priv->vdec = 2;
735 }
736 stretchy = 2;
737 }
738 if (priv->hdec == 2) {
739 stretchx = 4;
740 } else if (priv->hdec == 4) {
741 if (priv->fd) {
742 WARNING("horizontal decimation too high, "
743 "changing to 2 (use fd to keep "
744 "hdec=4)\n");
745 priv->hdec = 2;
746 }
747 stretchx = 4;
748 }
749 } else {
750 /* output image is maximally stretched */
751 stretchx = 4;
752 stretchy = 2;
753 priv->fields = 1;
754 if (priv->vdec != 1 && !priv->fd) {
755 WARNING("vertical decimation too high, changing to 1 "
756 "(use fd to keep vdec=%d)\n",
757 priv->vdec);
758 priv->vdec = 1;
759 }
760 if (priv->hdec != 1 && !priv->fd) {
761 WARNING("horizontal decimation too high, changing to 1 (use fd to keep hdec=%d)\n", priv->hdec);
762 priv->hdec = 1;
763 }
764 }
765
766 VERBOSE("generated JPEG's %dx%s%d%s, stretched to %dx%d\n",
767 width/priv->hdec, (priv->fields == 2) ? "(" : "",
768 height/(priv->vdec*priv->fields),
769 (priv->fields == 2) ? "x2)" : "",
770 (width/priv->hdec)*stretchx,
771 (height/(priv->vdec*priv->fields))*
772 stretchy*priv->fields);
773
774
775 if ((width/priv->hdec)*stretchx > priv->maxwidth ||
776 (height/(priv->vdec*priv->fields))*
777 stretchy*priv->fields > priv->maxheight) {
778 ERROR("output dimensions too large (%dx%d), max (%dx%d) "
779 "insert crop to fix\n",
780 (width/priv->hdec)*stretchx,
781 (height/(priv->vdec*priv->fields))*
782 stretchy*priv->fields,
783 priv->maxwidth, priv->maxheight);
784 err = 1;
785 }
786
787 if (width%(16*priv->hdec) != 0) {
788 ERROR("width must be a multiple of 16*hdec (%d), use expand\n",
789 priv->hdec*16);
790 err = 1;
791 }
792
793 if (height%(8*priv->fields*priv->vdec) != 0) {
794 ERROR("height must be a multiple of 8*fields*vdec (%d),"
795 " use expand\n", priv->vdec*priv->fields*8);
796 err = 1;
797 }
798
799 if (err) return 0;
800
801 priv->y_stride = width;
802 priv->c_stride = width/2;
803 priv->j = jpeg_enc_init(width, height/priv->fields,
804 priv->fields*priv->y_stride,
805 priv->fields*priv->c_stride,
806 priv->fields*priv->c_stride,
807 1, priv->quality, priv->bw);
808
809 if (!priv->j) return 0;
810 return vf_next_config(vf, width, height, d_width, d_height, flags,
811 (priv->fields == 2) ? IMGFMT_ZRMJPEGIT : IMGFMT_ZRMJPEGNI);
812 }
813
814 /// put_image entrypoint for the ZRMJPEG vf filter
815 /***
816 * \param vf pointer to vf_instance
817 * \param mpi pointer to mp_image_t structure
818 * \param pts
819 */
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts, double endpts){
	struct vf_priv_s *priv = vf->priv;
	int size = 0;
	int i;
	mp_image_t* dmpi;
	/* encode each field (1 or 2) back-to-back into priv->buf; the
	 * per-field plane offsets and the doubled strides passed to
	 * jpeg_enc_init() select the even or odd lines */
	for (i = 0; i < priv->fields; i++)
		size += jpeg_enc_frame(priv->j,
				mpi->planes[0] + i*priv->y_stride,
				mpi->planes[1] + i*priv->c_stride,
				mpi->planes[2] + i*priv->c_stride,
				priv->buf + size);

	dmpi = vf_get_image(vf->next, IMGFMT_ZRMJPEGNI,
			MP_IMGTYPE_EXPORT, 0, mpi->w, mpi->h);
	dmpi->planes[0] = (uint8_t*)priv->buf;
	/* HACK: planes[1] carries the byte count of the encoded stream,
	 * not a pixel pointer — consumers of IMGFMT_ZRMJPEG* rely on this */
	dmpi->planes[1] = (uint8_t*)size;
	return vf_next_put_image(vf, dmpi, pts, endpts);
}
838
839 /// query_format entrypoint for the ZRMJPEG vf filter
840 /***
841 * \param vf pointer to vf_instance
842 * \param fmt image format to query for
843 *
844 * \returns 0 if image format in fmt is not supported
845 *
846 * Given the image format specified by \a fmt, this routine is called
847 * to ask if the format is supported or not.
848 */
query_format(struct vf_instance * vf,unsigned int fmt)849 static int query_format(struct vf_instance *vf, unsigned int fmt){
850 VERBOSE("query_format() called\n");
851
852 switch (fmt) {
853 case IMGFMT_YV12:
854 case IMGFMT_YUY2:
855 /* strictly speaking the output format of
856 * this filter will be known after config(),
857 * but everything that supports IMGFMT_ZRMJPEGNI
858 * should also support all other IMGFMT_ZRMJPEG* */
859 return vf_next_query_format(vf, IMGFMT_ZRMJPEGNI);
860 }
861
862 return 0;
863 }
864
865 /// vf UNINIT entry point for the ZRMJPEG filter
866 /**
867 * \param vf pointer to the vf instance structure
868 */
static void uninit(vf_instance_t *vf) {
	struct vf_priv_s *priv = vf->priv;
	VERBOSE("uninit() called\n");
	/* the encoder only exists if config() ran successfully */
	if (priv->j) jpeg_enc_uninit(priv->j);
	free(priv);
}
875
876 /// vf OPEN entry point for the ZRMJPEG filter
877 /**
878 * \param vf pointer to the vf instance structure
879 * \param args the argument list string for the -vf zrmjpeg command
880 *
881 * \returns 0 for error, 1 for success
882 *
883 * This routine will do some basic initialization of local structures etc.,
884 * and then parse the command line arguments specific for the ZRMJPEG filter.
885 */
static int vf_open(vf_instance_t *vf, char *args){
	struct vf_priv_s *priv;
	VERBOSE("vf_open() called: args=\"%s\"\n", args);

	vf->config = config;
	vf->put_image = put_image;
	vf->query_format = query_format;
	vf->uninit = uninit;

	/* FIX: calloc takes (nmemb, size); the arguments were swapped
	 * (harmless in practice, but wrong by contract) */
	priv = vf->priv = calloc(1, sizeof(*priv));
	if (!vf->priv) {
		ERROR("out of memory error\n");
		return 0;
	}

	/* maximum displayable size by zoran card, these defaults
	 * are for my own zoran card in PAL mode, these can be changed
	 * by filter options. But... in an ideal world these values would
	 * be queried from the vo device itself... */
	priv->maxwidth = 768;
	priv->maxheight = 576;

	priv->quality = 2;
	priv->hdec = 1;
	priv->vdec = 1;

	init_avcodec();

	if (args) {
		char *arg, *tmp, *ptr, junk;
		int last = 0, input;

		/* save arguments, to be able to safely modify them */
		arg = strdup(args);
		if (!arg) {
			ERROR("out of memory, this is bad\n");
			/* FIX: don't leak priv on this error path */
			free(vf->priv);
			vf->priv = NULL;
			return 0;
		}

		/* split the argument string on ':' and process each option */
		tmp = ptr = arg;
		do {
			while (*tmp != ':' && *tmp) tmp++;
			if (*tmp == ':') *tmp++ = '\0';
			else last = 1;
			VERBOSE("processing filter option \"%s\"\n", ptr);
			/* These options deal with the maximum output
			 * resolution of the zoran card. These should
			 * be queried from the vo device, but it is currently
			 * too difficult, so the user should tell the filter */
			if (!strncmp("maxheight=", ptr, 10)) {
				if (sscanf(ptr+10, "%d%c", &input, &junk) != 1)
					ERROR(
	"error parsing parameter to \"maxheight=\", \"%s\", ignoring\n"
							, ptr + 10);
				else {
					priv->maxheight = input;
					VERBOSE("setting maxheight to %d\n",
							priv->maxheight);
				}
			} else if (!strncmp("quality=", ptr, 8)) {
				if (sscanf(ptr+8, "%d%c", &input, &junk) != 1)
					ERROR(
	"error parsing parameter to \"quality=\", \"%s\", ignoring\n"
							, ptr + 8);
				else if (input < 1 || input > 20)
					ERROR(
	"parameter to \"quality=\" out of range (1..20), %d\n", input);
				else {
					priv->quality = input;
					VERBOSE("setting JPEG quality to %d\n",
							priv->quality);
				}
			} else if (!strncmp("maxwidth=", ptr, 9)) {
				if (sscanf(ptr+9, "%d%c", &input, &junk) != 1)
					ERROR(
	"error parsing parameter to \"maxwidth=\", \"%s\", ignoring\n"
							, ptr + 9);
				else {
					priv->maxwidth = input;
					VERBOSE("setting maxwidth to %d\n",
							priv->maxwidth);
				}
			} else if (!strncmp("hdec=", ptr, 5)) {
				/* FIX: the parameter starts at ptr+5 (was
				 * printed from ptr+9, past its end) */
				if (sscanf(ptr+5, "%d%c", &input, &junk) != 1)
					ERROR(
	"error parsing parameter to \"hdec=\", \"%s\", ignoring\n"
							, ptr + 5);
				else if (input != 1 && input != 2 && input != 4)
					ERROR(
	"illegal parameter to \"hdec=\", %d, should be 1, 2 or 4",
							input);
				else {
					priv->hdec = input;
					/* FIX: report hdec, not maxwidth */
					VERBOSE(
	"setting horizontal decimation to %d\n", priv->hdec);
				}
			} else if (!strncmp("vdec=", ptr, 5)) {
				/* FIX: same off-by-four and wrong variable
				 * as the hdec= branch */
				if (sscanf(ptr+5, "%d%c", &input, &junk) != 1)
					ERROR(
	"error parsing parameter to \"vdec=\", \"%s\", ignoring\n"
							, ptr + 5);
				else if (input != 1 && input != 2 && input != 4)
					ERROR(
	"illegal parameter to \"vdec=\", %d, should be 1, 2 or 4",
							input);
				else {
					priv->vdec = input;
					VERBOSE(
	"setting vertical decimation to %d\n", priv->vdec);
				}
			} else if (!av_strcasecmp("dc10+-PAL", ptr) ||
					!av_strcasecmp("dc10-PAL", ptr)) {
				priv->maxwidth = 768;
				priv->maxheight = 576;
				VERBOSE("setting DC10(+) PAL profile\n");
			} else if (!av_strcasecmp("fd", ptr)) {
				priv->fd = 1;
				VERBOSE("forcing decimation\n");
			} else if (!av_strcasecmp("nofd", ptr)) {
				priv->fd = 0;
				VERBOSE("decimate only if beautiful\n");
			} else if (!av_strcasecmp("bw", ptr)) {
				priv->bw = 1;
				VERBOSE("setting black and white encoding\n");
			} else if (!av_strcasecmp("color", ptr)) {
				priv->bw = 0;
				VERBOSE("setting color encoding\n");
			} else if (!av_strcasecmp("dc10+-NTSC", ptr) ||
					!av_strcasecmp("dc10-NTSC", ptr)) {
				priv->maxwidth = 640;
				priv->maxheight = 480;
				VERBOSE("setting DC10(+) NTSC profile\n");
			} else if (!av_strcasecmp("buz-PAL", ptr) ||
					!av_strcasecmp("lml33-PAL", ptr)) {
				priv->maxwidth = 720;
				priv->maxheight = 576;
				VERBOSE("setting buz/lml33 PAL profile\n");
			} else if (!av_strcasecmp("buz-NTSC", ptr) ||
					!av_strcasecmp("lml33-NTSC", ptr)) {
				priv->maxwidth = 720;
				priv->maxheight = 480;
				VERBOSE("setting buz/lml33 NTSC profile\n");
			} else {
				WARNING("ignoring unknown filter option "
						"\"%s\", or missing argument\n",
						ptr);
			}
			ptr = tmp;
		} while (!last);

		free(arg);
	}


	return 1;
}
1042
/// Registration record for the ZRMJPEG filter, picked up by the vf layer
const vf_info_t vf_info_zrmjpeg = {
	"realtime zoran MJPEG encoding",	// description
	"zrmjpeg",				// filter name
	"Rik Snel",				// author
	"",					// extra comment
	vf_open,				// open entry point
	NULL					// no option parser
};
1051