1 /*
2 * Copyright (c) 2003 The FFmpeg Project
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /*
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
31 *
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
37 *
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41 */
42
43 #include <inttypes.h>
44
45 #include "libavutil/attributes.h"
46 #include "libavutil/crc.h"
47
48 #include "internal.h"
49 #include "avcodec.h"
50 #include "mpegutils.h"
51 #include "h264dec.h"
52 #include "h264data.h"
53 #include "golomb.h"
54 #include "hpeldsp.h"
55 #include "mathops.h"
56 #include "rectangle.h"
57 #include "tpeldsp.h"
58
59 #if CONFIG_ZLIB
60 #include <zlib.h>
61 #endif
62
63 #include "svq1.h"
64
65 /**
66 * @file
67 * svq3 decoder.
68 */
69
/* Per-picture state: the decoded frame plus the per-block motion vectors
 * and per-macroblock types kept for temporal prediction of later frames. */
typedef struct SVQ3Frame {
    AVFrame *f;                      ///< the decoded picture

    int16_t (*motion_val_buf[2])[2]; ///< allocated MV storage, one buffer per prediction direction
    int16_t (*motion_val[2])[2];     ///< working MV pointers (offset into motion_val_buf)

    uint32_t *mb_type_buf, *mb_type; ///< macroblock type storage; mb_type points into mb_type_buf
} SVQ3Frame;
78
/* Full decoder context. SVQ3 is closely related to an early H.264 draft,
 * so it reuses several H.264 DSP/prediction building blocks below. */
typedef struct SVQ3Context {
    AVCodecContext *avctx;

    H264DSPContext h264dsp;   ///< H.264 DSP routines (chroma DC dequant, ...)
    H264PredContext hpc;      ///< intra prediction functions (pred4x4/8x8/16x16)
    HpelDSPContext hdsp;      ///< half-pel motion compensation
    TpelDSPContext tdsp;      ///< third-pel motion compensation
    VideoDSPContext vdsp;     ///< edge emulation, prefetch

    SVQ3Frame *cur_pic;       ///< picture being decoded
    SVQ3Frame *next_pic;      ///< future reference (B-frame prediction)
    SVQ3Frame *last_pic;      ///< past reference
    GetBitContext gb;         ///< reader over the whole frame payload
    GetBitContext gb_slice;   ///< reader over the current slice data
    uint8_t *slice_buf;       ///< buffer holding (de-watermarked) slice bits
    int slice_size;
    int halfpel_flag;         ///< stream allows half-pel MVs
    int thirdpel_flag;        ///< stream allows third-pel MVs
    int has_watermark;        ///< encrypted watermark present (needs zlib)
    uint32_t watermark_key;
    uint8_t *buf;             ///< reassembled input buffer
    int buf_size;
    int adaptive_quant;       ///< per-MB qscale updates enabled
    int next_p_frame_damaged;
    int h_edge_pos;           ///< frame width in pixels (MC clipping bound)
    int v_edge_pos;           ///< frame height in pixels (MC clipping bound)
    int last_frame_output;
    int slice_num;
    int qscale;
    int cbp;                  ///< coded block pattern of the current MB
    int frame_num;
    int frame_num_offset;
    int prev_frame_num_offset;
    int prev_frame_num;

    enum AVPictureType pict_type;   ///< type of the current frame
    enum AVPictureType slice_type;
    int low_delay;

    int mb_x, mb_y;           ///< current macroblock position
    int mb_xy;                ///< mb_x + mb_y * mb_stride
    int mb_width, mb_height;
    int mb_stride, mb_num;
    int b_stride;             ///< stride of the 4x4-block motion grid

    uint32_t *mb2br_xy;       ///< MB index -> intra4x4_pred_mode index mapping

    int chroma_pred_mode;
    int intra16x16_pred_mode;

    /* neighbour caches, laid out on the usual 8-wide scan8 grid */
    int8_t intra4x4_pred_mode_cache[5 * 8];
    int8_t (*intra4x4_pred_mode);

    /* availability bitmasks for intra prediction (H.264-style) */
    unsigned int top_samples_available;
    unsigned int topright_samples_available;
    unsigned int left_samples_available;

    uint8_t *edge_emu_buffer; ///< scratch for out-of-frame motion compensation

    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
    DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];       ///< coefficient blocks of the current MB
    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2]; ///< separately coded luma DC (intra 16x16)
    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
    uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
    int block_offset[2 * (16 * 3)]; ///< pixel offset of each 4x4 block inside the MB
    SVQ3Frame frames[3];            ///< backing storage for cur/next/last_pic
} SVQ3Context;
147
/* Motion-compensation precision modes for inter macroblocks;
 * PREDICT_MODE derives MVs from next_pic instead of reading them. */
#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4
152
153 /* dual scan (from some older H.264 draft)
154 * o-->o-->o o
155 * | /|
156 * o o o / o
157 * | / | |/ |
158 * o o o o
159 * /
160 * o-->o-->o-->o
161 */
/* SVQ3 dual-scan coefficient order (diagrammed above); used by
 * svq3_decode_block() for type-2 (low-qp intra 4x4) blocks. */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
168
/* Zigzag scan over the 16 luma DC positions of a 16x16 macroblock;
 * entries are offsets into the s->mb coefficient array (16 per 4x4 block,
 * 64 per block row).  Used by svq3_decode_block() for type-0 blocks. */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
175
/* Maps a luma-prediction VLC code (0..24) to a pair of rank indices
 * that select entries from svq3_pred_1 (see svq3_decode_mb). */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
187
/* Context-dependent intra 4x4 prediction modes, indexed as
 * [top_mode + 1][left_mode + 1][rank]; -1 entries are invalid
 * combinations (rejected as "weird prediction" in svq3_decode_mb). */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
202
/* (run, level) pairs for small VLC codes (< 16), indexed by
 * [intra][vlc]; larger codes are decoded arithmetically in
 * svq3_decode_block(). */
static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
212
/* Dequantization multipliers indexed by qscale (0..31); applied with a
 * 20-bit right shift after the inverse transform. */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};
219
svq3_luma_dc_dequant_idct_c(int16_t * output,int16_t * input,int qp)220 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
221 {
222 const unsigned qmul = svq3_dequant_coeff[qp];
223 #define stride 16
224 int i;
225 int temp[16];
226 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
227
228 for (i = 0; i < 4; i++) {
229 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
230 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
231 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
232 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
233
234 temp[4 * i + 0] = z0 + z3;
235 temp[4 * i + 1] = z1 + z2;
236 temp[4 * i + 2] = z1 - z2;
237 temp[4 * i + 3] = z0 - z3;
238 }
239
240 for (i = 0; i < 4; i++) {
241 const int offset = x_offset[i];
242 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
243 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
244 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
245 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
246
247 output[stride * 0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
248 output[stride * 2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
249 output[stride * 8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
250 output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
251 }
252 }
253 #undef stride
254
svq3_add_idct_c(uint8_t * dst,int16_t * block,int stride,int qp,int dc)255 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
256 int stride, int qp, int dc)
257 {
258 const int qmul = svq3_dequant_coeff[qp];
259 int i;
260
261 if (dc) {
262 dc = 13 * 13 * (dc == 1 ? 1538U* block[0]
263 : qmul * (block[0] >> 3) / 2);
264 block[0] = 0;
265 }
266
267 for (i = 0; i < 4; i++) {
268 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
269 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
270 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
271 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
272
273 block[0 + 4 * i] = z0 + z3;
274 block[1 + 4 * i] = z1 + z2;
275 block[2 + 4 * i] = z1 - z2;
276 block[3 + 4 * i] = z0 - z3;
277 }
278
279 for (i = 0; i < 4; i++) {
280 const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
281 const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
282 const unsigned z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
283 const unsigned z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
284 const int rr = (dc + 0x80000u);
285
286 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
287 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
288 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
289 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
290 }
291
292 memset(block, 0, 16 * sizeof(int16_t));
293 }
294
/**
 * Decode run/level coefficient data for one block from the slice bitstream.
 *
 * @param block destination coefficient array
 * @param index first coefficient position (0, or 1 when the DC is
 *              handled separately)
 * @param type  selects scan pattern and code tables:
 *              0 = luma DC zigzag, 1 = standard zigzag,
 *              2 = SVQ3 dual scan, 3 = chroma DC
 * @return 0 on success, -1 on invalid data (overlong run or bad VLC)
 */
static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] = {
        luma_dc_zigzag_scan, ff_zigzag_scan, svq3_scan, ff_h264_chroma_dc_scan
    };

    int run, level, sign, limit;
    unsigned vlc;
    const int intra           = 3 * type >> 2; /* 0 for types 0/1, 1 for type 2, 2 for type 3 */
    const uint8_t *const scan = scan_patterns[type];

    /* type 2 (dual scan) decodes in groups (limit 8, then +8 per group);
     * all other types make a single pass and break below */
    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        /* a zero code terminates the coefficient list for this group */
        for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
            if ((int32_t)vlc < 0)
                return -1;

            /* LSB carries the sign; remaining bits select run/level */
            sign = (vlc & 1) ? 0 : -1;
            vlc  = vlc + 1 >> 1;

            if (type == 3) {
                /* chroma DC: dedicated short code mapping */
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = vlc & 0x3;
                    level = (vlc + 9 >> 2) - run;
                }
            } else {
                if (vlc < 16U) {
                    /* short codes come from the precomputed tables */
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    run   = vlc & 0x7;
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run   = vlc & 0xF;
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            /* a run past the group boundary means corrupt data */
            if ((index += run) >= limit)
                return -1;

            /* apply sign: (level ^ 0) - 0 == level, (level ^ -1) - -1 == -level */
            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}
353
354 static av_always_inline int
svq3_fetch_diagonal_mv(const SVQ3Context * s,const int16_t ** C,int i,int list,int part_width)355 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
356 int i, int list, int part_width)
357 {
358 const int topright_ref = s->ref_cache[list][i - 8 + part_width];
359
360 if (topright_ref != PART_NOT_AVAILABLE) {
361 *C = s->mv_cache[list][i - 8 + part_width];
362 return topright_ref;
363 } else {
364 *C = s->mv_cache[list][i - 8 - 1];
365 return s->ref_cache[list][i - 8 - 1];
366 }
367 }
368
369 /**
370 * Get the predicted MV.
371 * @param n the block index
372 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
373 * @param mx the x component of the predicted motion vector
374 * @param my the y component of the predicted motion vector
375 */
svq3_pred_motion(const SVQ3Context * s,int n,int part_width,int list,int ref,int * const mx,int * const my)376 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
377 int part_width, int list,
378 int ref, int *const mx, int *const my)
379 {
380 const int index8 = scan8[n];
381 const int top_ref = s->ref_cache[list][index8 - 8];
382 const int left_ref = s->ref_cache[list][index8 - 1];
383 const int16_t *const A = s->mv_cache[list][index8 - 1];
384 const int16_t *const B = s->mv_cache[list][index8 - 8];
385 const int16_t *C;
386 int diagonal_ref, match_count;
387
388 /* mv_cache
389 * B . . A T T T T
390 * U . . L . . , .
391 * U . . L . . . .
392 * U . . L . . , .
393 * . . . L . . . .
394 */
395
396 diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
397 match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
398 if (match_count > 1) { //most common
399 *mx = mid_pred(A[0], B[0], C[0]);
400 *my = mid_pred(A[1], B[1], C[1]);
401 } else if (match_count == 1) {
402 if (left_ref == ref) {
403 *mx = A[0];
404 *my = A[1];
405 } else if (top_ref == ref) {
406 *mx = B[0];
407 *my = B[1];
408 } else {
409 *mx = C[0];
410 *my = C[1];
411 }
412 } else {
413 if (top_ref == PART_NOT_AVAILABLE &&
414 diagonal_ref == PART_NOT_AVAILABLE &&
415 left_ref != PART_NOT_AVAILABLE) {
416 *mx = A[0];
417 *my = A[1];
418 } else {
419 *mx = mid_pred(A[0], B[0], C[0]);
420 *my = mid_pred(A[1], B[1], C[1]);
421 }
422 }
423 }
424
/**
 * Motion-compensate one partition (luma, plus chroma unless GRAY).
 *
 * @param x,y          top-left corner of the partition, luma pixels
 * @param width,height partition dimensions, luma pixels
 * @param mx,my        motion vector relative to (x, y), in full pixels
 * @param dxy          sub-pel position index into the put/avg tables
 * @param thirdpel     nonzero: use third-pel DSP, else half-pel DSP
 * @param dir          0: reference is last_pic, else next_pic
 * @param avg          nonzero: average into destination instead of put
 */
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
    int linesize = s->cur_pic->f->linesize[0];
    int uvlinesize = s->cur_pic->f->linesize[1];

    mx += x;
    my += y;

    /* reference area (partially) outside the frame: clamp the position
     * and route the read through the edge emulation buffer */
    if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        emu = 1;
        mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
        my = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = s->cur_pic->f->data[0] + x + y * linesize;
    src  = pic->f->data[0] + mx + my * linesize;

    if (emu) {
        /* +1 in each dimension: sub-pel interpolation reads one extra
         * row/column of reference samples */
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 linesize, linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
                                                 width, height);
    else
        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
                                                       height);

    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        /* halve position for 4:2:0 chroma; note '+' binds tighter than
         * '>>', so this is (mx + round_bias) >> 1 — intentional */
        mx     = mx + (mx < (int) x) >> 1;
        my     = my + (my < (int) y) >> 1;
        width  = width >> 1;
        height = height >> 1;
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
            src  = pic->f->data[i] + mx + my * uvlinesize;

            if (emu) {
                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                         uvlinesize, uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                                         s->v_edge_pos >> 1);
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->tdsp.avg_tpel_pixels_tab
                     : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
                                                         uvlinesize,
                                                         width, height);
            else
                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
                                                               uvlinesize,
                                                               height);
        }
    }
}
499
/**
 * Decode the motion vectors of one macroblock and perform motion
 * compensation for each partition.  MV arithmetic is done in units of
 * 1/6 pixel so that half-pel (1/2 = 3/6) and third-pel (1/3 = 2/6)
 * candidates share one representation.
 *
 * @param size partition layout code (selects part_width/part_height)
 * @param mode FULLPEL/HALFPEL/THIRDPEL/PREDICT_MODE
 * @param dir  prediction direction (0 = from last_pic, 1 = from next_pic)
 * @param avg  nonzero to average into the already-predicted destination
 * @return 0 on success, -1 on invalid MV data
 */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    const int h_edge_pos  = 6 * (s->h_edge_pos - part_width) - extra_width;
    const int v_edge_pos  = 6 * (s->v_edge_pos - part_height) - extra_width;

    /* iterate over all partitions of the 16x16 macroblock */
    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * s->mb_x + (j >> 2)) +
                             (4 * s->mb_y + (i >> 2)) * s->b_stride;
            int dxy;
            x = 16 * s->mb_x + j;
            y = 16 * s->mb_y + i;
            /* scan8-compatible sub-block index of this partition */
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                /* PREDICT_MODE: scale the co-located MV of next_pic by the
                 * temporal distance (direct-mode style prediction) */
                mx = s->next_pic->motion_val[0][b_xy][0] * 2;
                my = s->next_pic->motion_val[0][b_xy][1] * 2;

                if (dir == 0) {
                    mx = mx * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = get_interleaved_se_golomb(&s->gb_slice);
                dx = get_interleaved_se_golomb(&s->gb_slice);

                if (dx != (int16_t)dx || dy != (int16_t)dy) {
                    av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                /* round 1/6-pel prediction to 1/3 pel, add differential;
                 * the +0x30000 bias makes the division round like floor */
                mx  = (mx + 1 >> 1) + dx;
                my  = (my + 1 >> 1) + dy;
                fx  = (unsigned)(mx + 0x30000) / 3 - 0x10000;
                fy  = (unsigned)(my + 0x30000) / 3 - 0x10000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                /* store back in 1/6-pel units */
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
                my  = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
                my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, s->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}
612
hl_decode_mb_idct_luma(SVQ3Context * s,int mb_type,const int * block_offset,int linesize,uint8_t * dest_y)613 static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s,
614 int mb_type, const int *block_offset,
615 int linesize, uint8_t *dest_y)
616 {
617 int i;
618 if (!IS_INTRA4x4(mb_type)) {
619 for (i = 0; i < 16; i++)
620 if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
621 uint8_t *const ptr = dest_y + block_offset[i];
622 svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
623 s->qscale, IS_INTRA(mb_type) ? 1 : 0);
624 }
625 }
626 }
627
/**
 * Run intra luma prediction for the current macroblock and, for
 * intra-4x4 MBs, immediately add each block's residual.
 */
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s,
                                                       int mb_type,
                                                       const int *block_offset,
                                                       int linesize,
                                                       uint8_t *dest_y)
{
    int i;
    int qscale = s->qscale;

    if (IS_INTRA4x4(mb_type)) {
        for (i = 0; i < 16; i++) {
            uint8_t *const ptr = dest_y + block_offset[i];
            const int dir      = s->intra4x4_pred_mode_cache[scan8[i]];

            uint8_t *topright;
            int nnz, tr;
            if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                /* these modes read top-right samples; when unavailable,
                 * replicate the last available top sample four times */
                const int topright_avail = (s->topright_samples_available << i) & 0x8000;
                av_assert2(s->mb_y || linesize <= block_offset[i]);
                if (!topright_avail) {
                    tr       = ptr[3 - linesize] * 0x01010101u; /* 4 copies of one byte */
                    topright = (uint8_t *)&tr;
                } else
                    topright = ptr + 4 - linesize;
            } else
                topright = NULL;

            s->hpc.pred4x4[dir](ptr, topright, linesize);
            nnz = s->non_zero_count_cache[scan8[i]];
            if (nnz) {
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
            }
        }
    } else {
        /* intra 16x16: whole-MB prediction, then transform the DC plane */
        s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
        svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
    }
}
666
/**
 * Reconstruct the current macroblock's pixels: intra prediction (if any),
 * luma IDCT, and chroma dequant/IDCT, using the state filled in by
 * svq3_decode_mb().
 */
static void hl_decode_mb(SVQ3Context *s)
{
    const int mb_x    = s->mb_x;
    const int mb_y    = s->mb_y;
    const int mb_xy   = s->mb_xy;
    const int mb_type = s->cur_pic->mb_type[mb_xy];
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize;
    int i, j;
    const int *block_offset = &s->block_offset[0];
    const int block_h       = 16 >> 1; /* chroma rows per MB (4:2:0) */

    linesize   = s->cur_pic->f->linesize[0];
    uvlinesize = s->cur_pic->f->linesize[1];

    dest_y  = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
    dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
    dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;

    s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
    s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);

    if (IS_INTRA(mb_type)) {
        /* chroma prediction for both planes, then luma prediction */
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);

        hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
    }

    hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);

    /* cbp bits 4/5 signal coded chroma */
    if (s->cbp & 0x30) {
        uint8_t *dest[2] = { dest_cb, dest_cr };
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
                                               s->dequant4_coeff[4][0]);
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
                                               s->dequant4_coeff[4][0]);
        for (j = 1; j < 3; j++) {
            for (i = j * 16; i < j * 16 + 4; i++)
                if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                    uint8_t *const ptr = dest[j - 1] + block_offset[i];
                    svq3_add_idct_c(ptr, s->mb + i * 16,
                                    uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
                }
        }
    }
}
714
/**
 * Decode one macroblock: parse its mode, motion vectors or intra modes,
 * cbp and residual coefficients, and run motion compensation.
 *
 * @param mb_type raw type from the bitstream: 0 = skip, 1..7 = inter,
 *                8 or 33 = intra 4x4, otherwise intra 16x16
 * @return 0 on success, negative on bitstream error
 */
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
{
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    const int mb_xy = s->mb_xy;
    const int b_xy  = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;

    s->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    s->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    s->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->mb_type[mb_xy] == -1) {
            /* plain skip: copy co-located block with zero MV */
            svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                             0, 0, 0, 0, 0, 0);

            if (s->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                                 0, 0, 0, 0, 1, 1);

            mb_type = MB_TYPE_SKIP;
        } else {
            /* B-frame direct-style skip: derive MVs from next_pic */
            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        /* one flag selects between the two enabled sub-pel precisions */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
            mode = THIRDPEL_MODE;
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&s->gb_slice))
            mode = HALFPEL_MODE;
        else
            mode = FULLPEL_MODE;

        /* fill caches */
        /* note ref_cache should contain here:
         *           ????????
         *           ???11111
         *           N??11111
         *           N??11111
         *           N??11111
         */

        for (m = 0; m < 2; m++) {
            /* left neighbour MVs (4 rows); intra4x4_pred_mode == -1
             * marks the neighbour as intra/unavailable */
            if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
                              s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
            } else {
                for (i = 0; i < 4; i++)
                    AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
            }
            if (s->mb_y > 0) {
                /* top row of neighbour MVs and refs */
                memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
                       s->cur_pic->motion_val[m][b_xy - s->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&s->ref_cache[m][scan8[0] - 1 * 8],
                       (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                /* top-right neighbour */
                if (s->mb_x < s->mb_width - 1) {
                    AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
                         s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
                /* top-left neighbour */
                if (s->mb_x > 0) {
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
            } else
                memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            /* only B-frames need the second (forward) direction */
            if (s->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
                return -1;
        } else {        /* AV_PICTURE_TYPE_B */
            /* mb_type 2: backward-only; mb_type 1: forward-only;
             * mb_type 3: bidirectional (second pass averages) */
            if (mb_type != 2) {
                if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        /* per-MB storage of the 8 boundary prediction modes */
        int8_t *i4x4       = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
        int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;

        memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

        if (mb_type == 8) {
            /* import neighbour prediction modes into the cache */
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++)
                    s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
                if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                    s->left_samples_available = 0x5F5F;
            }
            if (s->mb_y > 0) {
                s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
                s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
                s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
                s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];

                if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                    s->top_samples_available = 0x33FF;
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                vlc = get_interleaved_ue_golomb(&s->gb_slice);

                if (vlc >= 25U) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "luma prediction:%"PRIu32"\n", vlc);
                    return -1;
                }

                left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &s->intra4x4_pred_mode_cache[scan8[i] - 8];

                /* each VLC codes two modes, context-selected by the
                 * top and left neighbour modes */
                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {        /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
        }

        /* store the bottom row and right column for future neighbours */
        AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
        i4x4[4] = i4x4_cache[7 + 8 * 3];
        i4x4[5] = i4x4_cache[7 + 8 * 2];
        i4x4[6] = i4x4_cache[7 + 8 * 1];

        if (mb_type == 8) {
            ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
                                             s->avctx, s->top_samples_available,
                                             s->left_samples_available);

            s->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            s->top_samples_available  = 0x33FF;
            s->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1; /* remap to SVQ3 mode order */

        if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
                                                                     s->left_samples_available, dir, 0)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
            return s->intra16x16_pred_mode;
        }

        cbp     = ff_h264_i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    /* intra MBs in P/B frames: clear the MVs so later MBs predict from 0 */
    if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                       0, 4 * 2 * sizeof(int16_t));
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
        memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
    }

    /* coded block pattern (not transmitted for intra 16x16 or plain skip) */
    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
            av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
                                : ff_h264_golomb_to_inter_cbp[vlc];
    }
    /* optional per-MB quantizer delta */
    if (IS_INTRA16x16(mb_type) ||
        (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        s->qscale += get_interleaved_se_golomb(&s->gb_slice);

        if (s->qscale > 31u) {
            av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(s->mb_luma_dc[0] + 0);
        AV_ZERO128(s->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        /* index 1 skips the DC coefficient (coded separately above);
         * type 2 selects the dual scan for low-qp intra 4x4 blocks */
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        /* luma: one cbp bit per 8x8 quadrant, four 4x4 blocks each */
        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                              : (4 * i + j);
                    s->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "error while decoding block\n");
                        return -1;
                    }
                }
            }

        /* chroma: DC blocks, then (bit 0x20) AC blocks */
        if ((cbp & 0x30)) {
            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "error while decoding chroma dc block\n");
                    return -1;
                }

            if ((cbp & 0x20)) {
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k                                 = 16 * i + j;
                        s->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
                            av_log(s->avctx, AV_LOG_ERROR,
                                   "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    s->cbp                     = cbp;
    s->cur_pic->mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
                                                            s->left_samples_available, DC_PRED8x8, 1);

    return 0;
}
1013
/**
 * Parse an SVQ3 slice header from s->gb and prepare s->gb_slice for
 * reading the slice payload.
 *
 * The payload bytes are copied into s->slice_buf (and de-watermarked if a
 * watermark key is present) so that s->gb_slice can consume them
 * independently of the packet-level bit reader.  On success the slice
 * parameters (slice_type, slice_num, qscale, adaptive_quant) are stored in
 * the context and the intra-prediction / motion-vector caches along the
 * slice boundary are invalidated.
 *
 * @return 0 on success, a negative error code or -1 on failure
 */
static int svq3_decode_slice_header(AVCodecContext *avctx)
{
    SVQ3Context *s = avctx->priv_data;
    const int mb_xy = s->mb_xy;
    int i, header;
    unsigned slice_id;

    header = get_bits(&s->gb, 8);

    /* low bits select the slice header variant (1 or 2); bits 5-6 must be
     * non-zero since they encode the size of the length field below */
    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int slice_bits, slice_bytes, slice_length;
        /* number of bytes used to encode the slice length (1..3) */
        int length = header >> 5 & 3;

        slice_length = show_bits(&s->gb, 8 * length);
        slice_bits   = slice_length * 8;
        slice_bytes  = slice_length + length - 1;

        skip_bits(&s->gb, 8);

        av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!s->slice_buf)
            return AVERROR(ENOMEM);

        /* 8LL keeps the comparison from overflowing int for large slices */
        if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
            av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return AVERROR_INVALIDDATA;
        }
        memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);

        if (s->watermark_key) {
            /* undo the watermark scrambling of bytes 1..4 of the payload */
            uint32_t header = AV_RL32(&s->slice_buf[1]);
            AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
        }
        init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);

        /* NOTE(review): moves the trailing (length - 1) bytes to the front of
         * slice_buf after gb_slice was initialized on it — presumably staging
         * data for the next slice header; confirm against the format spec */
        if (length > 0) {
            memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
        }
        skip_bits_long(&s->gb, slice_bytes * 8);
    }

    if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
        av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
        return -1;
    }

    s->slice_type = ff_h264_golomb_to_pict_type[slice_id];

    if ((header & 0x9F) == 2) {
        /* variant 2 carries an explicit macroblock address of
         * ceil(log2(mb_num)) bits, which is read and discarded here */
        i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
        get_bits(&s->gb_slice, i);
    } else if (get_bits1(&s->gb_slice)) {
        avpriv_report_missing_feature(s->avctx, "Media key encryption");
        return AVERROR_PATCHWELCOME;
    }

    s->slice_num      = get_bits(&s->gb_slice, 8);
    s->qscale         = get_bits(&s->gb_slice, 5);
    s->adaptive_quant = get_bits1(&s->gb_slice);

    /* unknown fields */
    skip_bits1(&s->gb_slice);

    if (s->has_watermark)
        skip_bits1(&s->gb_slice);

    skip_bits1(&s->gb_slice);
    skip_bits(&s->gb_slice, 2);

    if (skip_1stop_8data_bits(&s->gb_slice) < 0)
        return AVERROR_INVALIDDATA;

    /* reset intra predictors and invalidate motion vector references */
    if (s->mb_x > 0) {
        /* left edge of the current row, plus the whole row to the left */
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
               -1, 4 * sizeof(int8_t));
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
               -1, 8 * sizeof(int8_t) * s->mb_x);
    }
    if (s->mb_y > 0) {
        /* remainder of the row above, up to the right picture edge */
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
               -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));

        if (s->mb_x > 0)
            s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
    }

    return 0;
}
1107
/* Precompute the 4x4 dequantization coefficient table for every QP
 * value (0..51) from the shared H.264 quant tables. */
static void init_dequant4_coeff_table(SVQ3Context *s)
{
    const int max_qp = 51;
    int qp, i;

    for (qp = 0; qp <= max_qp; qp++) {
        const int shift = ff_h264_quant_div6[qp] + 2;
        const int rem   = ff_h264_quant_rem6[qp];

        for (i = 0; i < 16; i++) {
            /* transposed position of coefficient i within the 4x4 block */
            const int pos = (i >> 2) | ((i << 2) & 0xF);

            s->dequant4_coeff[qp][pos] =
                ((uint32_t)ff_h264_dequant4_coeff_init[rem][(i & 1) + ((i >> 2) & 1)] * 16) << shift;
        }
    }
}
1121
/**
 * Initialize the SVQ3 decoder: allocate the three rotating reference
 * frames, set up the DSP helper contexts, locate and parse the "SEQH"
 * header inside the stsd extradata (frame size code, half/third-pel
 * flags, low-delay flag, optional zlib-compressed watermark), and
 * allocate the per-macroblock side-data arrays.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int svq3_decode_init(AVCodecContext *avctx)
{
    SVQ3Context *s = avctx->priv_data;
    int m, x, y;
    unsigned char *extradata;
    unsigned char *extradata_end;
    unsigned int size;
    int marker_found = 0;
    int ret;

    /* three-frame rotation: current, past reference, future reference */
    s->cur_pic  = &s->frames[0];
    s->last_pic = &s->frames[1];
    s->next_pic = &s->frames[2];

    s->cur_pic->f  = av_frame_alloc();
    s->last_pic->f = av_frame_alloc();
    s->next_pic->f = av_frame_alloc();
    if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
        return AVERROR(ENOMEM);

    ff_h264dsp_init(&s->h264dsp, 8, 1);
    ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
    ff_videodsp_init(&s->vdsp, 8);

    avctx->bits_per_raw_sample = 8;

    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    ff_tpeldsp_init(&s->tdsp);

    avctx->pix_fmt     = AV_PIX_FMT_YUVJ420P;
    avctx->color_range = AVCOL_RANGE_JPEG;

    s->avctx         = avctx;
    s->halfpel_flag  = 1;   /* defaults used when no SEQH header is found */
    s->thirdpel_flag = 1;
    s->has_watermark = 0;

    /* prowl for the "SEQH" marker in the extradata */
    extradata     = (unsigned char *)avctx->extradata;
    extradata_end = avctx->extradata + avctx->extradata_size;
    if (extradata) {
        /* m + 8 keeps at least the fourcc plus the 32-bit size in range */
        for (m = 0; m + 8 < avctx->extradata_size; m++) {
            if (!memcmp(extradata, "SEQH", 4)) {
                marker_found = 1;
                break;
            }
            extradata++;
        }
    }

    /* if a match was found, parse the extra data */
    if (marker_found) {
        GetBitContext gb;
        int frame_size_code;
        int unk0, unk1, unk2, unk3, unk4;
        int w, h;

        size = AV_RB32(&extradata[4]);
        if (size > extradata_end - extradata - 8)
            return AVERROR_INVALIDDATA;
        init_get_bits(&gb, extradata + 8, size * 8);

        /* 'frame size code' and optional 'width, height' */
        frame_size_code = get_bits(&gb, 3);
        switch (frame_size_code) {
        case 0:
            w = 160;
            h = 120;
            break;
        case 1:
            w = 128;
            h = 96;
            break;
        case 2:
            w = 176;
            h = 144;
            break;
        case 3:
            w = 352;
            h = 288;
            break;
        case 4:
            w = 704;
            h = 576;
            break;
        case 5:
            w = 240;
            h = 180;
            break;
        case 6:
            w = 320;
            h = 240;
            break;
        case 7:
            /* explicit dimensions, 12 bits each */
            w = get_bits(&gb, 12);
            h = get_bits(&gb, 12);
            break;
        }
        ret = ff_set_dimensions(avctx, w, h);
        if (ret < 0)
            return ret;

        s->halfpel_flag  = get_bits1(&gb);
        s->thirdpel_flag = get_bits1(&gb);

        /* unknown fields */
        unk0 = get_bits1(&gb);
        unk1 = get_bits1(&gb);
        unk2 = get_bits1(&gb);
        unk3 = get_bits1(&gb);

        s->low_delay = get_bits1(&gb);

        /* unknown field */
        unk4 = get_bits1(&gb);

        av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
               unk0, unk1, unk2, unk3, unk4);

        if (skip_1stop_8data_bits(&gb) < 0)
            return AVERROR_INVALIDDATA;

        s->has_watermark  = get_bits1(&gb);
        avctx->has_b_frames = !s->low_delay;
        if (s->has_watermark) {
#if CONFIG_ZLIB
            /* the watermark logo is stored zlib-compressed after the header;
             * the descrambling key is derived from a CRC of the inflated image */
            unsigned watermark_width  = get_interleaved_ue_golomb(&gb);
            unsigned watermark_height = get_interleaved_ue_golomb(&gb);
            int u1                    = get_interleaved_ue_golomb(&gb);
            int u2                    = get_bits(&gb, 8);
            int u3                    = get_bits(&gb, 2);
            int u4                    = get_interleaved_ue_golomb(&gb);
            unsigned long buf_len     = watermark_width *
                                        watermark_height * 4;
            /* byte offset of the compressed data, rounded up from bits */
            int offset                = get_bits_count(&gb) + 7 >> 3;
            uint8_t *buf;

            /* reject sizes whose 4*w*h product would overflow */
            if (watermark_height <= 0 ||
                (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
                return AVERROR_INVALIDDATA;

            buf = av_malloc(buf_len);
            if (!buf)
                return AVERROR(ENOMEM);

            av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
                   watermark_width, watermark_height);
            av_log(avctx, AV_LOG_DEBUG,
                   "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
                   u1, u2, u3, u4, offset);
            if (uncompress(buf, &buf_len, extradata + 8 + offset,
                           size - offset) != Z_OK) {
                av_log(avctx, AV_LOG_ERROR,
                       "could not uncompress watermark logo\n");
                av_free(buf);
                return -1;
            }
            s->watermark_key = av_bswap16(av_crc(av_crc_get_table(AV_CRC_16_CCITT), 0, buf, buf_len));

            /* replicate the 16-bit key into both halves of a 32-bit word */
            s->watermark_key = s->watermark_key << 16 | s->watermark_key;
            av_log(avctx, AV_LOG_DEBUG,
                   "watermark key %#"PRIx32"\n", s->watermark_key);
            av_free(buf);
#else
            av_log(avctx, AV_LOG_ERROR,
                   "this svq3 file contains watermark which need zlib support compiled in\n");
            return AVERROR(ENOSYS);
#endif
        }
    }

    /* macroblock geometry derived from the (16-aligned) picture size */
    s->mb_width   = (avctx->width + 15) / 16;
    s->mb_height  = (avctx->height + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->mb_num     = s->mb_width * s->mb_height;
    s->b_stride   = 4 * s->mb_width;
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
    if (!s->intra4x4_pred_mode)
        return AVERROR(ENOMEM);

    s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
                             sizeof(*s->mb2br_xy));
    if (!s->mb2br_xy)
        return AVERROR(ENOMEM);

    /* map each macroblock to its slot in the two-row intra4x4 ring buffer */
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++) {
            const int mb_xy = x + y * s->mb_stride;

            s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
        }

    init_dequant4_coeff_table(s);

    return 0;
}
1322
/* Release the per-macroblock side data of a picture and drop its
 * frame buffer reference.  The AVFrame structure itself stays allocated. */
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
{
    av_freep(&pic->motion_val_buf[0]);
    av_freep(&pic->motion_val_buf[1]);
    av_freep(&pic->mb_type_buf);

    av_frame_unref(pic->f);
}
1333
/**
 * Allocate the frame data buffer and the per-macroblock side data
 * (mb_type and motion values) for @p pic.
 *
 * The side-data arrays are allocated lazily on first use and reused for
 * subsequent frames.  On failure the picture's side data is freed via
 * free_picture() before returning.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
{
    SVQ3Context *s = avctx->priv_data;
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int b4_stride     = s->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * s->mb_height * 4;
    int ret;

    if (!pic->motion_val_buf[0]) {
        int i;

        pic->mb_type_buf = av_calloc(big_mb_num + s->mb_stride, sizeof(uint32_t));
        if (!pic->mb_type_buf)
            return AVERROR(ENOMEM);
        /* offset into the buffer so that negative neighbour offsets used
         * during prediction remain inside the allocation */
        pic->mb_type = pic->mb_type_buf + 2 * s->mb_stride + 1;

        /* one motion value array per prediction direction */
        for (i = 0; i < 2; i++) {
            pic->motion_val_buf[i] = av_calloc(b4_array_size + 4, 2 * sizeof(int16_t));
            if (!pic->motion_val_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            pic->motion_val[i] = pic->motion_val_buf[i] + 4;
        }
    }

    /* non-B pictures serve as references and must outlive this decode call */
    ret = ff_get_buffer(avctx, pic->f,
                        (s->pict_type != AV_PICTURE_TYPE_B) ?
                        AV_GET_BUFFER_FLAG_REF : 0);
    if (ret < 0)
        goto fail;

    if (!s->edge_emu_buffer) {
        /* scratch area for edge emulation: 17 lines of one luma stride */
        s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
        if (!s->edge_emu_buffer)
            return AVERROR(ENOMEM);
    }

    return 0;
fail:
    free_picture(avctx, pic);
    return ret;
}
1378
/**
 * Decode one SVQ3 frame from @p avpkt.
 *
 * Handles the end-of-stream flush (empty packet), optional packet
 * de-watermarking, slice-header parsing, reference-frame management with
 * grey-frame concealment for missing references, and the per-macroblock
 * decode loop.
 *
 * @return number of consumed bytes (buf_size) on success, 0 for a flush
 *         or skipped frame, a negative value on error
 */
static int svq3_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    SVQ3Context *s = avctx->priv_data;
    int buf_size = avpkt->size;
    int left;
    uint8_t *buf;
    int ret, m, i;

    /* special case for last picture */
    if (buf_size == 0) {
        /* flush: output the pending future reference exactly once */
        if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
            ret = av_frame_ref(data, s->next_pic->f);
            if (ret < 0)
                return ret;
            s->last_frame_output = 1;
            *got_frame = 1;
        }
        return 0;
    }

    s->mb_x = s->mb_y = s->mb_xy = 0;

    if (s->watermark_key) {
        /* de-watermarking modifies the bitstream, so work on a private copy */
        av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
        if (!s->buf)
            return AVERROR(ENOMEM);
        memcpy(s->buf, avpkt->data, buf_size);
        buf = s->buf;
    } else {
        buf = avpkt->data;
    }

    ret = init_get_bits(&s->gb, buf, 8 * buf_size);
    if (ret < 0)
        return ret;

    if (svq3_decode_slice_header(avctx))
        return -1;

    s->pict_type = s->slice_type;

    /* for I/P pictures, the previous "next" becomes the past reference */
    if (s->pict_type != AV_PICTURE_TYPE_B)
        FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);

    av_frame_unref(s->cur_pic->f);

    /* for skipping the frame */
    s->cur_pic->f->pict_type = s->pict_type;
    s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);

    ret = get_buffer(avctx, s->cur_pic);
    if (ret < 0)
        return ret;

    /* precompute pixel offsets of each 4x4 block; entries 48+ use the
     * doubled (field) line stride */
    for (i = 0; i < 16; i++) {
        s->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
        s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
    }
    for (i = 0; i < 16; i++) {
        s->block_offset[16 + i]      =
        s->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
        s->block_offset[48 + 16 + i] =
        s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
    }

    if (s->pict_type != AV_PICTURE_TYPE_I) {
        if (!s->last_pic->f->data[0]) {
            /* conceal a missing past reference with a mid-grey frame */
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            av_frame_unref(s->last_pic->f);
            ret = get_buffer(avctx, s->last_pic);
            if (ret < 0)
                return ret;
            memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
            memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
                   s->last_pic->f->linesize[1]);
            memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
                   s->last_pic->f->linesize[2]);
        }

        if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
            /* same concealment for a missing future reference of a B frame */
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            av_frame_unref(s->next_pic->f);
            ret = get_buffer(avctx, s->next_pic);
            if (ret < 0)
                return ret;
            memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
            memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
                   s->next_pic->f->linesize[1]);
            memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
                   s->next_pic->f->linesize[2]);
        }
    }

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG,
               "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_picture_type_char(s->pict_type),
               s->halfpel_flag, s->thirdpel_flag,
               s->adaptive_quant, s->qscale, s->slice_num);

    /* honor the user's skip_frame setting */
    if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
        avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    if (s->next_p_frame_damaged) {
        if (s->pict_type == AV_PICTURE_TYPE_B)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    /* track frame-number offsets (slice_num wraps modulo 256) */
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        s->frame_num_offset = s->slice_num - s->prev_frame_num;

        if (s->frame_num_offset < 0)
            s->frame_num_offset += 256;
        if (s->frame_num_offset == 0 ||
            s->frame_num_offset >= s->prev_frame_num_offset) {
            av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        s->prev_frame_num        = s->frame_num;
        s->frame_num             = s->slice_num;
        s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;

        if (s->prev_frame_num_offset < 0)
            s->prev_frame_num_offset += 256;
    }

    /* initialize the reference cache: mark all entries available, except
     * the right-edge column (after the loop j == 4) for rows 0..2 */
    for (m = 0; m < 2; m++) {
        int i;
        for (i = 0; i < 4; i++) {
            int j;
            for (j = -1; j < 4; j++)
                s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
            if (i < 3)
                s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
        }
    }

    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            unsigned mb_type;
            s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;

            /* fewer than 8 bits left: either padding (all zero) or the
             * start of a new slice header within the packet */
            if ((get_bits_left(&s->gb_slice)) <= 7) {
                if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
                     show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {

                    if (svq3_decode_slice_header(avctx))
                        return -1;
                }
                if (s->slice_type != s->pict_type) {
                    avpriv_request_sample(avctx, "non constant slice type");
                }
                /* TODO: support s->mb_skip_run */
            }

            mb_type = get_interleaved_ue_golomb(&s->gb_slice);

            /* remap the coded mb_type into the shared numbering: I-picture
             * types start at 8, B-picture inter types at 4 */
            if (s->pict_type == AV_PICTURE_TYPE_I)
                mb_type += 8;
            else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
                mb_type += 4;
            if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            if (mb_type != 0 || s->cbp)
                hl_decode_mb(s);

            if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
                s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
                    (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
        }

        ff_draw_horiz_band(avctx, s->cur_pic->f,
                           s->last_pic->f->data[0] ? s->last_pic->f : NULL,
                           16 * s->mb_y, 16, PICT_FRAME, 0,
                           s->low_delay);
    }

    left = buf_size * 8 - get_bits_count(&s->gb_slice);

    if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
        av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
        //av_hex_dump(stderr, buf+buf_size-8, 8);
    }

    /* negative remainder means the slice reader overran the packet */
    if (left < 0) {
        av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
        return -1;
    }

    /* output: B frames and low-delay streams emit the current picture,
     * otherwise the (delayed) past reference is emitted.  If neither branch
     * is taken, ret keeps the 0 from the successful get_buffer() above. */
    if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
        ret = av_frame_ref(data, s->cur_pic->f);
    else if (s->last_pic->f->data[0])
        ret = av_frame_ref(data, s->last_pic->f);
    if (ret < 0)
        return ret;

    /* Do not output the last pic after seeking. */
    if (s->last_pic->f->data[0] || s->low_delay)
        *got_frame = 1;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        /* the just-decoded picture becomes the future reference */
        FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
    } else {
        av_frame_unref(s->cur_pic->f);
    }

    return buf_size;
}
1597
svq3_decode_end(AVCodecContext * avctx)1598 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1599 {
1600 SVQ3Context *s = avctx->priv_data;
1601
1602 free_picture(avctx, s->cur_pic);
1603 free_picture(avctx, s->next_pic);
1604 free_picture(avctx, s->last_pic);
1605 av_frame_free(&s->cur_pic->f);
1606 av_frame_free(&s->next_pic->f);
1607 av_frame_free(&s->last_pic->f);
1608 av_freep(&s->slice_buf);
1609 av_freep(&s->intra4x4_pred_mode);
1610 av_freep(&s->edge_emu_buffer);
1611 av_freep(&s->mb2br_xy);
1612
1613
1614 av_freep(&s->buf);
1615 s->buf_size = 0;
1616
1617 return 0;
1618 }
1619
/* Decoder registration: svq3_decode_end is safe to call on a partially
 * initialized context (FF_CODEC_CAP_INIT_CLEANUP). */
AVCodec ff_svq3_decoder = {
    .name           = "svq3",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ3,
    .priv_data_size = sizeof(SVQ3Context),
    .init           = svq3_decode_init,
    .close          = svq3_decode_end,
    .decode         = svq3_decode_frame,
    .capabilities   = AV_CODEC_CAP_DRAW_HORIZ_BAND |
                      AV_CODEC_CAP_DR1 |
                      AV_CODEC_CAP_DELAY,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
                                                     AV_PIX_FMT_NONE},
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};
1636