/*
 * H.263/MPEG-4 backend for encoder and decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * H.263+ support.
 * Copyright (c) 2001 Juan J. Sierralta P
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.263/MPEG-4 codec.
 */

#include <limits.h>

#include "libavutil/thread.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h263.h"
#include "h263data.h"
#include "mathops.h"
#include "mpegutils.h"
#include "flv.h"
#include "mpeg4video.h"

static av_cold void h263_init_rl_inter(void)
{
    static uint8_t h263_rl_inter_table[2][2 * MAX_RUN + MAX_LEVEL + 3];
    ff_rl_init(&ff_h263_rl_inter, h263_rl_inter_table);
}

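/**
 * Initialize the static index tables of ff_h263_rl_inter. Safe to call from
 * multiple threads; the initialization runs exactly once via ff_thread_once().
 */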
av_cold void ff_h263_init_rl_inter(void)
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    ff_thread_once(&init_static_once, h263_init_rl_inter);
}

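/**
 * Store the current macroblock's motion vectors and skip flag in the
 * per-picture arrays (motion_val, mbskip_table and, when encoding, mb_type),
 * so that subsequent frames and the encoder can refer to them.
 */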
void ff_h263_update_motion_val(MpegEncContext * s){
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
               //FIXME a lot of that is only needed for !low_delay
    const int wrap = s->b8_stride;
    const int xy = s->block_index[0];

    s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;

    if(s->mv_type != MV_TYPE_8X8){
        int motion_x, motion_y;
        if (s->mb_intra) {
            motion_x = 0;
            motion_y = 0;
        } else if (s->mv_type == MV_TYPE_16X16) {
            motion_x = s->mv[0][0][0];
            motion_y = s->mv[0][0][1];
        } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
            int i;
            /* combine the two field MVs into a single vector for storage */
            motion_x = s->mv[0][0][0] + s->mv[0][1][0];
            motion_y = s->mv[0][0][1] + s->mv[0][1][1];
            motion_x = (motion_x>>1) | (motion_x&1);
            for(i=0; i<2; i++){
                s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
            }
            s->current_picture.ref_index[0][4*mb_xy    ] =
            s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
            s->current_picture.ref_index[0][4*mb_xy + 2] =
            s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
        }

        /* no update if 8X8 because it has been done during parsing */
        s->current_picture.motion_val[0][xy][0]            = motion_x;
        s->current_picture.motion_val[0][xy][1]            = motion_y;
        s->current_picture.motion_val[0][xy + 1][0]        = motion_x;
        s->current_picture.motion_val[0][xy + 1][1]        = motion_y;
        s->current_picture.motion_val[0][xy + wrap][0]     = motion_x;
        s->current_picture.motion_val[0][xy + wrap][1]     = motion_y;
        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
    }

    if(s->encoding){ //FIXME encoding MUST be cleaned up
        if (s->mv_type == MV_TYPE_8X8)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
        else if(s->mb_intra)
            s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
        else
            s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
    }
}

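/**
 * Predict the DC coefficient of block n from the left (A) and top (C)
 * neighbours. A value of 1024 marks a neighbour with no valid predictor
 * (e.g. across a GOB/slice boundary). Returns the prediction and sets
 * *dc_val_ptr to the slot where the current block's DC must be stored.
 */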
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
    int x, y, wrap, a, c, pred_dc;
    int16_t *dc_val;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + ((n & 2) >> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
    }
    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }
    /* just DC prediction */
    if (a != 1024 && c != 1024)
        pred_dc = (a + c) >> 1;
    else if (a != 1024)
        pred_dc = a;
    else
        pred_dc = c;

    /* we assume pred is positive */
    *dc_val_ptr = &dc_val[x + y * wrap];
    return pred_dc;
}

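/**
 * Apply the H.263 in-loop deblocking filter (Annex J style) to the edges of
 * the current macroblock and of its top/left neighbours. Edges that belong
 * only to skipped macroblocks are left untouched (their QP is treated as 0).
 */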
void ff_h263_loop_filter(MpegEncContext * s){
    int qp_c;
    const int linesize  = s->linesize;
    const int uvlinesize= s->uvlinesize;
    const int xy = s->mb_y * s->mb_stride + s->mb_x;
    uint8_t *dest_y = s->dest[0];
    uint8_t *dest_cb= s->dest[1];
    uint8_t *dest_cr= s->dest[2];

    /*
       Diag Top
       Left Center
    */
    if (!IS_SKIP(s->current_picture.mb_type[xy])) {
        qp_c= s->qscale;
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize,     linesize, qp_c);
        s->h263dsp.h263_v_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }else
        qp_c= 0;

    if(s->mb_y){
        int qp_dt, qp_tt, qp_tc;

        /* qp_tt: QP of the macroblock above (0 if it is skipped) */
        if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
            qp_tt=0;
        else
            qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];

        /* qp_tc: QP for filtering the edge between the current MB and the one above */
        if(qp_c)
            qp_tc= qp_c;
        else
            qp_tc= qp_tt;

        if(qp_tc){
            const int chroma_qp= s->chroma_qscale_table[qp_tc];
            s->h263dsp.h263_v_loop_filter(dest_y,     linesize, qp_tc);
            s->h263dsp.h263_v_loop_filter(dest_y + 8, linesize, qp_tc);

            s->h263dsp.h263_v_loop_filter(dest_cb, uvlinesize, chroma_qp);
            s->h263dsp.h263_v_loop_filter(dest_cr, uvlinesize, chroma_qp);
        }

        if(qp_tt)
            s->h263dsp.h263_h_loop_filter(dest_y - 8 * linesize + 8, linesize, qp_tt);

        if(s->mb_x){
            /* qp_dt: QP for filtering the left edge of the macroblock above */
            if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
                qp_dt= qp_tt;
            else
                qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];

            if(qp_dt){
                const int chroma_qp= s->chroma_qscale_table[qp_dt];
                s->h263dsp.h263_h_loop_filter(dest_y  - 8 * linesize,   linesize,   qp_dt);
                s->h263dsp.h263_h_loop_filter(dest_cb - 8 * uvlinesize, uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr - 8 * uvlinesize, uvlinesize, chroma_qp);
            }
        }
    }

    if(qp_c){
        s->h263dsp.h263_h_loop_filter(dest_y + 8, linesize, qp_c);
        if(s->mb_y + 1 == s->mb_height)
            s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize + 8, linesize, qp_c);
    }

    if(s->mb_x){
        int qp_lc;
        /* qp_lc: QP for filtering the edge between the current MB and the one to its left */
        if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
            qp_lc= qp_c;
        else
            qp_lc = s->current_picture.qscale_table[xy - 1];

        if(qp_lc){
            s->h263dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
            if(s->mb_y + 1 == s->mb_height){
                const int chroma_qp= s->chroma_qscale_table[qp_lc];
                s->h263dsp.h263_h_loop_filter(dest_y + 8 * linesize, linesize, qp_lc);
                s->h263dsp.h263_h_loop_filter(dest_cb, uvlinesize, chroma_qp);
                s->h263dsp.h263_h_loop_filter(dest_cr, uvlinesize, chroma_qp);
            }
        }
    }
}

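/**
 * Perform DC and, when AC prediction is enabled, AC prediction for block n
 * (advanced intra coding): the first row or column of coefficients is
 * predicted from the top or left neighbour block depending on h263_aic_dir,
 * and the stored DC/AC values for this block are updated afterwards.
 */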
void ff_h263_pred_acdc(MpegEncContext * s, int16_t *block, int n)
{
    int x, y, wrap, a, c, pred_dc, scale, i;
    int16_t *dc_val, *ac_val, *ac_val1;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + (n >> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
        ac_val = s->ac_val[0][0];
        scale = s->y_dc_scale;
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
        ac_val = s->ac_val[n - 4 + 1][0];
        scale = s->c_dc_scale;
    }

    ac_val += ((y) * wrap + (x)) * 16;
    ac_val1 = ac_val;

    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }

    if (s->ac_pred) {
        pred_dc = 1024;
        if (s->h263_aic_dir) {
            /* left prediction */
            if (a != 1024) {
                ac_val -= 16;
                for(i=1;i<8;i++) {
                    block[s->idsp.idct_permutation[i << 3]] += ac_val[i];
                }
                pred_dc = a;
            }
        } else {
            /* top prediction */
            if (c != 1024) {
                ac_val -= 16 * wrap;
                for(i=1;i<8;i++) {
                    block[s->idsp.idct_permutation[i]] += ac_val[i + 8];
                }
                pred_dc = c;
            }
        }
    } else {
        /* just DC prediction */
        if (a != 1024 && c != 1024)
            pred_dc = (a + c) >> 1;
        else if (a != 1024)
            pred_dc = a;
        else
            pred_dc = c;
    }

    /* we assume pred is positive */
    block[0]=block[0]*scale + pred_dc;

    if (block[0] < 0)
        block[0] = 0;
    else
        block[0] |= 1;

    /* Update AC/DC tables */
    dc_val[(x) + (y) * wrap] = block[0];

    /* left copy */
    for(i=1;i<8;i++)
        ac_val1[i]     = block[s->idsp.idct_permutation[i << 3]];
    /* top copy */
    for(i=1;i<8;i++)
        ac_val1[8 + i] = block[s->idsp.idct_permutation[i]];
}

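/**
 * Compute the motion vector predictor for the given 8x8 block as the median
 * of the left (A), top (B) and top-right (C) candidates, with the special
 * cases required at slice/GOB boundaries. The prediction is returned in
 * *px/*py; the return value points to the current block's motion_val entry.
 */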
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
                             int *px, int *py)
{
    int wrap;
    int16_t *A, *B, *C, (*mot_val)[2];
    static const int off[4]= {2, 1, 1, -1};

    wrap = s->b8_stride;
    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];

    A = mot_val[ - 1];
    /* special case for first (slice) line */
    if (s->first_slice_line && block<3) {
        // we can't just change some MVs to simulate that as we need them for the B-frames (and ME)
        // and if we ever support non-rectangular objects then we need to do a few ifs here anyway :(
        if(block==0){ //most common case
            if(s->mb_x  == s->resync_mb_x){ //rare
                *px= *py = 0;
            }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                if(s->mb_x==0){
                    *px = C[0];
                    *py = C[1];
                }else{
                    *px = mid_pred(A[0], 0, C[0]);
                    *py = mid_pred(A[1], 0, C[1]);
                }
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else if(block==1){
            if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                *px = mid_pred(A[0], 0, C[0]);
                *py = mid_pred(A[1], 0, C[1]);
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else{ /* block==2*/
            B = mot_val[ - wrap];
            C = mot_val[off[block] - wrap];
            if(s->mb_x == s->resync_mb_x) //rare
                A[0]=A[1]=0;

            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    } else {
        B = mot_val[ - wrap];
        C = mot_val[off[block] - wrap];
        *px = mid_pred(A[0], B[0], C[0]);
        *py = mid_pred(A[1], B[1], C[1]);
    }
    return *mot_val;
}