/*****************************************************************************
 * macroblock.c: macroblock common functions
 *****************************************************************************
 * Copyright (C) 2003-2014 x264 project
 *
 * Authors: Fiona Glaser <fiona@x264.com>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Henrik Gramner <henrik@gramner.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"
#include "encoder/me.h"

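/* Motion-compensate one luma (or 4:4:4 chroma) partition into p_fdec.
 * (x,y) are 4x4-block coordinates inside the MB, width/height are in
 * 4-pixel units; explicit weighted prediction applies to list 0 only. */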
#define MC_LUMA(list,p) \
    h->mc.mc_luma( &h->mb.pic.p_fdec[p][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, \
                   &h->mb.pic.p_fref[list][i_ref][p*4], h->mb.pic.i_stride[p], \
                   mvx, mvy, 4*width, 4*height, \
                   list ? x264_weight_none : &h->sh.weight[i_ref][p] );

static NOINLINE void x264_mb_mc_0xywh( x264_t *h, int x, int y, int width, int height )
{
    int i8    = x264_scan8[0]+x+8*y;
    int i_ref = h->mb.cache.ref[0][i8];
    int mvx   = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvy   = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
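    /* mc_luma addresses the reference plane from the MB origin, so the
     * partition offset (4 pixels = 16 quarter-pel per block unit) is folded
     * into the clipped quarter-pel MV. */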

    MC_LUMA( 0, 0 );

    if( CHROMA444 )
    {
        MC_LUMA( 0, 1 );
        MC_LUMA( 0, 2 );
    }
    else
    {
        int v_shift = CHROMA_V_SHIFT;
        int offset;

        // Chroma in 4:2:0 is offset if MCing from a field of opposite parity
        if( v_shift & MB_INTERLACED & i_ref )
            mvy += (h->mb.i_mb_y & 1)*4 - 2;

        offset = (4*FDEC_STRIDE>>v_shift)*y + 2*x;
        height = 4*height >> v_shift;

        h->mc.mc_chroma( &h->mb.pic.p_fdec[1][offset],
                         &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE,
                         h->mb.pic.p_fref[0][i_ref][4], h->mb.pic.i_stride[1],
                         mvx, 2*mvy>>v_shift, 2*width, height );

        if( h->sh.weight[i_ref][1].weightfn )
            h->sh.weight[i_ref][1].weightfn[width>>1]( &h->mb.pic.p_fdec[1][offset], FDEC_STRIDE,
                                                       &h->mb.pic.p_fdec[1][offset], FDEC_STRIDE,
                                                       &h->sh.weight[i_ref][1], height );
        if( h->sh.weight[i_ref][2].weightfn )
            h->sh.weight[i_ref][2].weightfn[width>>1]( &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE,
                                                       &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE,
                                                       &h->sh.weight[i_ref][2], height );
    }
}

static NOINLINE void x264_mb_mc_1xywh( x264_t *h, int x, int y, int width, int height )
{
    int i8    = x264_scan8[0]+x+8*y;
    int i_ref = h->mb.cache.ref[1][i8];
    int mvx   = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvy   = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;

    MC_LUMA( 1, 0 );

    if( CHROMA444 )
    {
        MC_LUMA( 1, 1 );
        MC_LUMA( 1, 2 );
    }
    else
    {
        int v_shift = CHROMA_V_SHIFT;
        int offset;

        if( v_shift & MB_INTERLACED & i_ref )
            mvy += (h->mb.i_mb_y & 1)*4 - 2;

        offset = (4*FDEC_STRIDE>>v_shift)*y + 2*x;
        h->mc.mc_chroma( &h->mb.pic.p_fdec[1][offset],
                         &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE,
                         h->mb.pic.p_fref[1][i_ref][4], h->mb.pic.i_stride[1],
                         mvx, 2*mvy>>v_shift, 2*width, 4*height>>v_shift );
    }
}

#define MC_LUMA_BI(p) \
    src0 = h->mc.get_ref( tmp0, &i_stride0, &h->mb.pic.p_fref[0][i_ref0][p*4], h->mb.pic.i_stride[p], \
                          mvx0, mvy0, 4*width, 4*height, x264_weight_none ); \
    src1 = h->mc.get_ref( tmp1, &i_stride1, &h->mb.pic.p_fref[1][i_ref1][p*4], h->mb.pic.i_stride[p], \
                          mvx1, mvy1, 4*width, 4*height, x264_weight_none ); \
    h->mc.avg[i_mode]( &h->mb.pic.p_fdec[p][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, \
                       src0, i_stride0, src1, i_stride1, weight );

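/* Bi-predictive MC: produce both list predictions (get_ref may point
 * directly into the reference frame instead of filling the temp buffer),
 * then blend them with the weighted average function. */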
static NOINLINE void x264_mb_mc_01xywh( x264_t *h, int x, int y, int width, int height )
{
    int i8 = x264_scan8[0]+x+8*y;
    int i_ref0 = h->mb.cache.ref[0][i8];
    int i_ref1 = h->mb.cache.ref[1][i8];
    int weight = h->mb.bipred_weight[i_ref0][i_ref1];
    int mvx0   = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvx1   = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvy0   = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
    int mvy1   = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
    int i_mode = x264_size2pixel[height][width];
    intptr_t i_stride0 = 16, i_stride1 = 16;

    ALIGNED_ARRAY_N( pixel, tmp0,[16*16] );
    ALIGNED_ARRAY_N( pixel, tmp1,[16*16] );

    pixel *src0, *src1;

    MC_LUMA_BI( 0 );

    if( CHROMA444 )
    {
        MC_LUMA_BI( 1 );
        MC_LUMA_BI( 2 );
    }
    else
    {
        int v_shift = CHROMA_V_SHIFT;
        int chromapix;
        int offset;

        if( v_shift & MB_INTERLACED & i_ref0 )
            mvy0 += (h->mb.i_mb_y & 1)*4 - 2;
        if( v_shift & MB_INTERLACED & i_ref1 )
            mvy1 += (h->mb.i_mb_y & 1)*4 - 2;

        h->mc.mc_chroma( tmp0, tmp0+8, 16, h->mb.pic.p_fref[0][i_ref0][4], h->mb.pic.i_stride[1],
                         mvx0, 2*mvy0>>v_shift, 2*width, 4*height>>v_shift );
        h->mc.mc_chroma( tmp1, tmp1+8, 16, h->mb.pic.p_fref[1][i_ref1][4], h->mb.pic.i_stride[1],
                         mvx1, 2*mvy1>>v_shift, 2*width, 4*height>>v_shift );

        chromapix = h->luma2chroma_pixel[i_mode];
        offset = (4*FDEC_STRIDE>>v_shift)*y + 2*x;
        h->mc.avg[chromapix]( &h->mb.pic.p_fdec[1][offset], FDEC_STRIDE, tmp0,   16, tmp1,   16, weight );
        h->mc.avg[chromapix]( &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE, tmp0+8, 16, tmp1+8, 16, weight );
    }
}

#undef MC_LUMA
#undef MC_LUMA_BI
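/* Motion-compensate one 8x8 block, dispatching on the P sub-partition type
 * or, in B slices, on which reference lists the block uses. */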
void x264_mb_mc_8x8( x264_t *h, int i8 )
{
    int x = 2*(i8&1);
    int y = 2*(i8>>1);

    if( h->sh.i_type == SLICE_TYPE_P )
    {
        switch( h->mb.i_sub_partition[i8] )
        {
            case D_L0_8x8:
                x264_mb_mc_0xywh( h, x, y, 2, 2 );
                break;
            case D_L0_8x4:
                x264_mb_mc_0xywh( h, x, y+0, 2, 1 );
                x264_mb_mc_0xywh( h, x, y+1, 2, 1 );
                break;
            case D_L0_4x8:
                x264_mb_mc_0xywh( h, x+0, y, 1, 2 );
                x264_mb_mc_0xywh( h, x+1, y, 1, 2 );
                break;
            case D_L0_4x4:
                x264_mb_mc_0xywh( h, x+0, y+0, 1, 1 );
                x264_mb_mc_0xywh( h, x+1, y+0, 1, 1 );
                x264_mb_mc_0xywh( h, x+0, y+1, 1, 1 );
                x264_mb_mc_0xywh( h, x+1, y+1, 1, 1 );
                break;
        }
    }
    else
    {
        int scan8 = x264_scan8[0] + x + 8*y;

        if( h->mb.cache.ref[0][scan8] >= 0 )
            if( h->mb.cache.ref[1][scan8] >= 0 )
                x264_mb_mc_01xywh( h, x, y, 2, 2 );
            else
                x264_mb_mc_0xywh( h, x, y, 2, 2 );
        else
            x264_mb_mc_1xywh( h, x, y, 2, 2 );
    }
}

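/* Motion-compensate the whole macroblock according to its partition mode.
 * A negative cached ref means the list is unused for that partition. */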
void x264_mb_mc( x264_t *h )
{
    if( h->mb.i_partition == D_8x8 )
    {
        int i;

        for( i = 0; i < 4; i++ )
            x264_mb_mc_8x8( h, i );
    }
    else
    {
        int ref0a = h->mb.cache.ref[0][x264_scan8[ 0]];
        int ref0b = h->mb.cache.ref[0][x264_scan8[12]];
        int ref1a = h->mb.cache.ref[1][x264_scan8[ 0]];
        int ref1b = h->mb.cache.ref[1][x264_scan8[12]];

        if( h->mb.i_partition == D_16x16 )
        {
            if( ref0a >= 0 )
                if( ref1a >= 0 ) x264_mb_mc_01xywh( h, 0, 0, 4, 4 );
                else             x264_mb_mc_0xywh ( h, 0, 0, 4, 4 );
            else                 x264_mb_mc_1xywh ( h, 0, 0, 4, 4 );
        }
        else if( h->mb.i_partition == D_16x8 )
        {
            if( ref0a >= 0 )
                if( ref1a >= 0 ) x264_mb_mc_01xywh( h, 0, 0, 4, 2 );
                else             x264_mb_mc_0xywh ( h, 0, 0, 4, 2 );
            else                 x264_mb_mc_1xywh ( h, 0, 0, 4, 2 );

            if( ref0b >= 0 )
                if( ref1b >= 0 ) x264_mb_mc_01xywh( h, 0, 2, 4, 2 );
                else             x264_mb_mc_0xywh ( h, 0, 2, 4, 2 );
            else                 x264_mb_mc_1xywh ( h, 0, 2, 4, 2 );
        }
        else if( h->mb.i_partition == D_8x16 )
        {
            if( ref0a >= 0 )
                if( ref1a >= 0 ) x264_mb_mc_01xywh( h, 0, 0, 2, 4 );
                else             x264_mb_mc_0xywh ( h, 0, 0, 2, 4 );
            else                 x264_mb_mc_1xywh ( h, 0, 0, 2, 4 );

            if( ref0b >= 0 )
                if( ref1b >= 0 ) x264_mb_mc_01xywh( h, 2, 0, 2, 4 );
                else             x264_mb_mc_0xywh ( h, 2, 0, 2, 4 );
            else                 x264_mb_mc_1xywh ( h, 2, 0, 2, 4 );
        }
    }
}

int x264_macroblock_cache_allocate( x264_t *h )
{
    int i_mb_count = h->mb.i_mb_count;
    int prealloc_idx;
    size_t prealloc_size;
    uint8_t **preallocs[PREALLOC_BUF_SIZE];
    int i2;

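    /* Per-frame block strides: each MB row spans two rows of 8x8 blocks and
     * four rows of 4x4 blocks. */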
    h->mb.i_mb_stride = h->mb.i_mb_width;
    h->mb.i_b8_stride = h->mb.i_mb_width * 2;
    h->mb.i_b4_stride = h->mb.i_mb_width * 4;

    h->mb.b_interlaced = PARAM_INTERLACED;

    prealloc_idx = 0;
    prealloc_size = 0;

    /* type pun fixes */
    PREALLOC( h->mb.qp.t_uint8_t, i_mb_count * sizeof(int8_t) );
    PREALLOC( h->mb.cbp.t_uint8_t, i_mb_count * sizeof(int16_t) );
    PREALLOC( h->mb.mb_transform_size.t_uint8_t, i_mb_count * sizeof(int8_t) );
    PREALLOC( h->mb.slice_table.t_uint8_t, i_mb_count * sizeof(uint16_t) );

    /* 0 -> 3 top(4), 4 -> 6 : left(3) */
    /* type pun fix */
    PREALLOC( h->mb.intra4x4_pred_mode.t_uint8_t, i_mb_count * 8 * sizeof(int8_t) );

    /* all coeffs */
    /* type pun fix */
    PREALLOC( h->mb.non_zero_count.t_uint8_t, i_mb_count * 48 * sizeof(uint8_t) );

    if( h->param.b_cabac )
    {
        /* type pun fixes */
        PREALLOC( h->mb.skipbp.t_uint8_t, i_mb_count * sizeof(int8_t) );
        PREALLOC( h->mb.chroma_pred_mode.t_uint8_t, i_mb_count * sizeof(int8_t) );
        PREALLOC( h->mb.mvd[0], i_mb_count * sizeof( **h->mb.mvd ) );
        if( h->param.i_bframe )
            PREALLOC( h->mb.mvd[1], i_mb_count * sizeof( **h->mb.mvd ) );
    }

    for( i2 = 0; i2 < 2; i2++ )
    {
        int i_refs = X264_MIN(X264_REF_MAX, (i2 ? 1 + !!h->param.i_bframe_pyramid : h->param.i_frame_reference) ) << PARAM_INTERLACED;
        int j;

        if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
            i_refs = X264_MIN(X264_REF_MAX, i_refs + 1 + (BIT_DEPTH == 8)); //smart weights add two duplicate frames, one in >8-bit

        for( j = !i2; j < i_refs; j++ )
            PREALLOC( h->mb.mvr[i2][j], 2 * (i_mb_count + 1) * sizeof(int16_t) );
    }

    if( h->param.analyse.i_weighted_pred )
    {
        int i_padv = PADV << PARAM_INTERLACED;
        int luma_plane_size = 0;
        int numweightbuf;
        int i;

        if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_FAKE )
        {
            // only need buffer for lookahead
            if( !h->param.i_sync_lookahead || h == h->thread[h->param.i_threads] )
            {
                // Fake analysis only works on lowres
                luma_plane_size = h->fdec->i_stride_lowres * (h->mb.i_mb_height*8+2*i_padv);
                // Only need 1 buffer for analysis
                numweightbuf = 1;
            }
            else
                numweightbuf = 0;
        }
        else
        {
            /* Both ref and fenc are stored for 4:2:0 and 4:2:2, which means that 4:2:0 and 4:4:4
             * need the same amount of space and 4:2:2 needs twice that much */
            luma_plane_size = h->fdec->i_stride[0] * (h->mb.i_mb_height*(16<<(CHROMA_FORMAT==CHROMA_422))+2*i_padv);

            if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
                //smart can weight one ref and one offset -1 in 8-bit
                numweightbuf = 1 + (BIT_DEPTH == 8);
            else
                //simple only has one weighted ref
                numweightbuf = 1;
        }

        for( i = 0; i < numweightbuf; i++ )
            PREALLOC( h->mb.p_weight_buf[i], luma_plane_size * sizeof(pixel) );
    }

    PREALLOC_END( h->mb.base );

    memset( h->mb.slice_table.t_uint16_t, -1, i_mb_count * sizeof(uint16_t) );

    for( i2 = 0; i2 < 2; i2++ )
    {
        int i_refs = X264_MIN(X264_REF_MAX, (i2 ? 1 + !!h->param.i_bframe_pyramid : h->param.i_frame_reference) ) << PARAM_INTERLACED;
        int j;

        if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
            i_refs = X264_MIN(X264_REF_MAX, i_refs + 1 + (BIT_DEPTH == 8)); //smart weights add two duplicate frames, one in >8-bit

        for( j = !i2; j < i_refs; j++ )
        {
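            /* Zero the first MV entry, then advance the pointer so mvr[-1]
             * reads as a zero vector for out-of-frame neighbours. */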
            M32( h->mb.mvr[i2][j][0] ) = 0;
            h->mb.mvr[i2][j]++;
        }
    }

    return 0;
fail:
    return -1;
}

void x264_macroblock_cache_free( x264_t *h )
{
    x264_free( h->mb.base );
}

int x264_macroblock_thread_allocate( x264_t *h, int b_lookahead )
{
    int scratch_size;
    int buf_mbtree;
    int buf_lookahead_threads;
    int buf_mbtree2;

    if( !b_lookahead )
    {
        int i;
        int j;

        for( i = 0; i < (PARAM_INTERLACED ? 5 : 2); i++ )
            for( j = 0; j < (CHROMA444 ? 3 : 2); j++ )
            {
                CHECKED_MALLOC( h->intra_border_backup[i][j], (h->sps->i_mb_width*16+32) * sizeof(pixel) );
                h->intra_border_backup[i][j] += 16;
            }
        for( i = 0; i <= PARAM_INTERLACED; i++ )
        {
            if( h->param.b_sliced_threads )
            {
                /* Only allocate the first one, and allocate it for the whole frame, because we
                 * won't be deblocking until after the frame is fully encoded. */
                if( h == h->thread[0] && !i )
                    CHECKED_MALLOC( h->deblock_strength[0], sizeof(**h->deblock_strength) * h->mb.i_mb_count );
                else
                    h->deblock_strength[i] = h->thread[0]->deblock_strength[0];
            }
            else
                CHECKED_MALLOC( h->deblock_strength[i], sizeof(**h->deblock_strength) * h->mb.i_mb_width );
            h->deblock_strength[1] = h->deblock_strength[i];
        }
    }

    /* Allocate scratch buffer */
    scratch_size = 0;
    if( !b_lookahead )
    {
        int buf_hpel = (h->thread[0]->fdec->i_width[0]+48+32) * sizeof(int16_t);
        int buf_ssim = h->param.analyse.b_ssim * 8 * (h->param.i_width/4+3) * sizeof(int);
        int me_range = X264_MIN(h->param.analyse.i_me_range, h->param.analyse.i_mv_range);
        int buf_tesa = (h->param.analyse.i_me_method >= X264_ME_ESA) *
            ((me_range*2+24) * sizeof(int16_t) + (me_range+4) * (me_range+1) * 4 * sizeof(mvsad_t));
        scratch_size = X264_MAX3( buf_hpel, buf_ssim, buf_tesa );
    }
    buf_mbtree = h->param.rc.b_mb_tree * ((h->mb.i_mb_width+7)&~7) * sizeof(int16_t);
    scratch_size = X264_MAX( scratch_size, buf_mbtree );
    if( scratch_size )
        CHECKED_MALLOC( h->scratch_buffer, scratch_size );
    else
        h->scratch_buffer = NULL;

    buf_lookahead_threads = (h->mb.i_mb_height + (4 + 32) * h->param.i_lookahead_threads) * sizeof(int) * 2;
    buf_mbtree2 = buf_mbtree * 12; /* size of the internal propagate_list asm buffer */
    scratch_size = X264_MAX( buf_lookahead_threads, buf_mbtree2 );
    CHECKED_MALLOC( h->scratch_buffer2, scratch_size );

    return 0;
fail:
    return -1;
}

void x264_macroblock_thread_free( x264_t *h, int b_lookahead )
{
    if( !b_lookahead )
    {
        int i;
        int j;

        for( i = 0; i <= PARAM_INTERLACED; i++ )
            if( !h->param.b_sliced_threads || (h == h->thread[0] && !i) )
                x264_free( h->deblock_strength[i] );
        for( i = 0; i < (PARAM_INTERLACED ? 5 : 2); i++ )
            for( j = 0; j < (CHROMA444 ? 3 : 2); j++ )
                x264_free( h->intra_border_backup[i][j] - 16 );
    }
    x264_free( h->scratch_buffer );
    x264_free( h->scratch_buffer2 );
}

void x264_macroblock_slice_init( x264_t *h )
{
    int i;

    h->mb.mv[0] = h->fdec->mv[0];
    h->mb.mv[1] = h->fdec->mv[1];
    h->mb.mvr[0][0] = h->fdec->mv16x16.t_int16_t_array;
    h->mb.ref[0] = h->fdec->ref[0];
    h->mb.ref[1] = h->fdec->ref[1];
    h->mb.type = h->fdec->mb_type.t_int8_t;
    h->mb.partition = h->fdec->mb_partition;
    h->mb.field = h->fdec->field;

    h->fdec->i_ref[0] = h->i_ref[0];
    h->fdec->i_ref[1] = h->i_ref[1];
    for( i = 0; i < h->i_ref[0]; i++ )
        h->fdec->ref_poc[0][i] = h->fref[0][i]->i_poc;
    if( h->sh.i_type == SLICE_TYPE_B )
    {
        for( i = 0; i < h->i_ref[1]; i++ )
            h->fdec->ref_poc[1][i] = h->fref[1][i]->i_poc;

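        /* Temporal direct MVs need a mapping from the colocated frame's L0
         * refs to our own L0 list: match by POC, -2 if not present. */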
        map_col_to_list0(-1) = -1;
        map_col_to_list0(-2) = -2;
        for( i = 0; i < h->fref[1][0]->i_ref[0]; i++ )
        {
            int poc = h->fref[1][0]->ref_poc[0][i];
            int j;

            map_col_to_list0(i) = -2;
            for( j = 0; j < h->i_ref[0]; j++ )
                if( h->fref[0][j]->i_poc == poc )
                {
                    map_col_to_list0(i) = j;
                    break;
                }
        }
    }
    else if( h->sh.i_type == SLICE_TYPE_P )
    {
        if( h->sh.i_disable_deblocking_filter_idc != 1 && h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
        {
            deblock_ref_table(-2) = -2;
            deblock_ref_table(-1) = -1;
            for( i = 0; i < h->i_ref[0] << SLICE_MBAFF; i++ )
            {
                /* Mask off high bits to avoid frame num collisions with -1/-2.
                 * In current x264 frame num values don't cover a range of more
                 * than 32, so 6 bits is enough for uniqueness. */
                if( !MB_INTERLACED )
                    deblock_ref_table(i) = h->fref[0][i]->i_frame_num&63;
                else
                    deblock_ref_table(i) = ((h->fref[0][i>>1]->i_frame_num&63)<<1) + (i&1);
            }
        }
    }

    /* init with not available (for top right idx=7,15) */
    memset( h->mb.cache.ref, -2, sizeof( h->mb.cache.ref ) );

    if( h->i_ref[0] > 0 )
    {
        int field;
        for( field = 0; field <= SLICE_MBAFF; field++ )
        {
            int curpoc = h->fdec->i_poc + h->fdec->i_delta_poc[field];
            int refpoc = h->fref[0][0]->i_poc + h->fref[0][0]->i_delta_poc[field];
            int delta = curpoc - refpoc;

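            /* Rounded Q8 fixed-point reciprocal of the POC distance
             * (256/delta), so MV scaling elsewhere can multiply instead of
             * dividing per macroblock. */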
            h->fdec->inv_ref_poc[field] = (256 + delta/2) / delta;
        }
    }
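    /* Neighbour availability for blocks whose neighbours all lie inside the
     * macroblock never changes, so set it once per slice. */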
    h->mb.i_neighbour4[6] =
    h->mb.i_neighbour4[9] =
    h->mb.i_neighbour4[12] =
    h->mb.i_neighbour4[14] = MB_LEFT|MB_TOP|MB_TOPLEFT|MB_TOPRIGHT;
    h->mb.i_neighbour4[3] =
    h->mb.i_neighbour4[7] =
    h->mb.i_neighbour4[11] =
    h->mb.i_neighbour4[13] =
    h->mb.i_neighbour4[15] =
    h->mb.i_neighbour8[3] = MB_LEFT|MB_TOP|MB_TOPLEFT;
}

void x264_macroblock_thread_init( x264_t *h )
{
    h->mb.i_me_method = h->param.analyse.i_me_method;
    h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
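    /* Subme 6 and 8 differ from 5 and 7 only in RD usage that does not apply
     * to B slices, so drop to the equivalent level. */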
    if( h->sh.i_type == SLICE_TYPE_B && (h->mb.i_subpel_refine == 6 || h->mb.i_subpel_refine == 8) )
        h->mb.i_subpel_refine--;
    h->mb.b_chroma_me = h->param.analyse.b_chroma_me &&
                        ((h->sh.i_type == SLICE_TYPE_P && h->mb.i_subpel_refine >= 5) ||
                         (h->sh.i_type == SLICE_TYPE_B && h->mb.i_subpel_refine >= 9));
    h->mb.b_dct_decimate = h->sh.i_type == SLICE_TYPE_B ||
                          (h->param.analyse.b_dct_decimate && h->sh.i_type != SLICE_TYPE_I);
    h->mb.i_mb_prev_xy = -1;

    /*          4:2:0                      4:2:2                      4:4:4
     * fdec            fenc       fdec            fenc       fdec            fenc
     * y y y y y y y   Y Y Y Y    y y y y y y y   Y Y Y Y    y y y y y y y   Y Y Y Y
     * y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y
     * y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y
     * y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y
     * y Y Y Y Y       U U V V    y Y Y Y Y       U U V V    y Y Y Y Y       U U U U
     * u u u   v v v   U U V V    u u u   v v v   U U V V    u u u u u u u   U U U U
     * u U U   v V V              u U U   v V V   U U V V    u U U U U       U U U U
     * u U U   v V V              u U U   v V V   U U V V    u U U U U       U U U U
     *                            u U U   v V V              u U U U U       V V V V
     *                            u U U   v V V              u U U U U       V V V V
     *                                                       v v v v v v v   V V V V
     *                                                       v V V V V       V V V V
     *                                                       v V V V V
     *                                                       v V V V V
     *                                                       v V V V V
     */
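    /* Pointer setup matching the diagram above: each fdec plane keeps at
     * least one border row of already-decoded pixels above it (the lowercase
     * rows), hence the 2-, 19- and 36-row offsets instead of 0, 16 and 32. */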
    h->mb.pic.p_fenc[0] = h->mb.pic.fenc_buf;
    h->mb.pic.p_fdec[0] = h->mb.pic.fdec_buf + 2*FDEC_STRIDE;
    h->mb.pic.p_fenc[1] = h->mb.pic.fenc_buf + 16*FENC_STRIDE;
    h->mb.pic.p_fdec[1] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE;
    if( CHROMA444 )
    {
        h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 32*FENC_STRIDE;
        h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 36*FDEC_STRIDE;
    }
    else
    {
        h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 16*FENC_STRIDE + 8;
        h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE + 16;
    }
}

void x264_prefetch_fenc( x264_t *h, x264_frame_t *fenc, int i_mb_x, int i_mb_y )
{
    int stride_y  = fenc->i_stride[0];
    int stride_uv = fenc->i_stride[1];
    int off_y  = 16 * i_mb_x + 16 * i_mb_y * stride_y;
    int off_uv = 16 * i_mb_x + (16 * i_mb_y * stride_uv >> CHROMA_V_SHIFT);
    h->mc.prefetch_fenc( fenc->plane[0]+off_y, stride_y,
                         fenc->plane[1]+off_uv, stride_uv, i_mb_x );
}

NOINLINE void x264_copy_column8( pixel *dst, pixel *src )
{
    int i;

    // input pointers are offset by 4 rows because that's faster (smaller instruction size on x86)
    for( i = -4; i < 4; i++ )
        dst[i*FDEC_STRIDE] = src[i*FDEC_STRIDE];
}

static void ALWAYS_INLINE x264_macroblock_load_pic_pointers( x264_t *h, int mb_x, int mb_y, int i, int b_chroma, int b_mbaff )
{
    int mb_interlaced = b_mbaff && MB_INTERLACED;
    int height = b_chroma ? 16 >> CHROMA_V_SHIFT : 16;
    int i_stride = h->fdec->i_stride[i];
    int i_stride2 = i_stride << mb_interlaced;
    int i_pix_offset = mb_interlaced
                     ? 16 * mb_x + height * (mb_y&~1) * i_stride + (mb_y&1) * i_stride
                     : 16 * mb_x + height * mb_y * i_stride;
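    /* In MBAFF field mode a macroblock pair interleaves two fields: the pair
     * starts at row height*(mb_y&~1), the low bit of mb_y picks the field's
     * first line, and the doubled stride (i_stride2) skips the other field. */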
    pixel *plane_fdec = &h->fdec->plane[i][i_pix_offset];
    int fdec_idx = b_mbaff ? (mb_interlaced ? (3 + (mb_y&1)) : (mb_y&1) ? 2 : 4) : !(mb_y&1);
    pixel *intra_fdec = &h->intra_border_backup[fdec_idx][i][mb_x*16];
    int ref_pix_offset[2] = { i_pix_offset, i_pix_offset };
    pixel *plane_src;
    pixel **filtered_src;
    int j2;

    /* ref_pix_offset[0] references the current field and [1] the opposite field. */
    if( mb_interlaced )
        ref_pix_offset[1] += (1-2*(mb_y&1)) * i_stride;
    h->mb.pic.i_stride[i] = i_stride2;
    h->mb.pic.p_fenc_plane[i] = &h->fenc->plane[i][i_pix_offset];
    if( b_chroma )
    {
        h->mc.load_deinterleave_chroma_fenc( h->mb.pic.p_fenc[1], h->mb.pic.p_fenc_plane[1], i_stride2, height );
        memcpy( h->mb.pic.p_fdec[1]-FDEC_STRIDE, intra_fdec, 8*sizeof(pixel) );
        memcpy( h->mb.pic.p_fdec[2]-FDEC_STRIDE, intra_fdec+8, 8*sizeof(pixel) );
        h->mb.pic.p_fdec[1][-FDEC_STRIDE-1] = intra_fdec[-1-8];
        h->mb.pic.p_fdec[2][-FDEC_STRIDE-1] = intra_fdec[-1];
    }
    else
    {
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fenc[i], FENC_STRIDE, h->mb.pic.p_fenc_plane[i], i_stride2, 16 );
        memcpy( h->mb.pic.p_fdec[i]-FDEC_STRIDE, intra_fdec, 24*sizeof(pixel) );
        h->mb.pic.p_fdec[i][-FDEC_STRIDE-1] = intra_fdec[-1];
    }
    if( b_mbaff || h->mb.b_reencode_mb )
    {
        int j;
        for( j = 0; j < height; j++ )
            if( b_chroma )
            {
                h->mb.pic.p_fdec[1][-1+j*FDEC_STRIDE] = plane_fdec[-2+j*i_stride2];
                h->mb.pic.p_fdec[2][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];
            }
            else
                h->mb.pic.p_fdec[i][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];
    }
    for( j2 = 0; j2 < h->mb.pic.i_fref[0]; j2++ )
    {
        // Interpolate between pixels in same field.
        if( mb_interlaced )
        {
            plane_src = h->fref[0][j2>>1]->plane_fld[i];
            filtered_src = h->fref[0][j2>>1]->filtered_fld[i];
        }
        else
        {
            plane_src = h->fref[0][j2]->plane[i];
            filtered_src = h->fref[0][j2]->filtered[i];
        }
        h->mb.pic.p_fref[0][j2][i*4] = plane_src + ref_pix_offset[j2&1];

        if( !b_chroma )
        {
            int k;

            for( k = 1; k < 4; k++ )
                h->mb.pic.p_fref[0][j2][i*4+k] = filtered_src[k] + ref_pix_offset[j2&1];
            if( !i )
            {
                if( h->sh.weight[j2][0].weightfn )
                    h->mb.pic.p_fref_w[j2] = &h->fenc->weighted[j2 >> mb_interlaced][ref_pix_offset[j2&1]];
                else
                    h->mb.pic.p_fref_w[j2] = h->mb.pic.p_fref[0][j2][0];
            }
        }
    }
    if( h->sh.i_type == SLICE_TYPE_B )
        for( j2 = 0; j2 < h->mb.pic.i_fref[1]; j2++ )
        {
            if( mb_interlaced )
            {
                plane_src = h->fref[1][j2>>1]->plane_fld[i];
                filtered_src = h->fref[1][j2>>1]->filtered_fld[i];
            }
            else
            {
                plane_src = h->fref[1][j2]->plane[i];
                filtered_src = h->fref[1][j2]->filtered[i];
            }
            h->mb.pic.p_fref[1][j2][i*4] = plane_src + ref_pix_offset[j2&1];

            if( !b_chroma )
            {
                int k;
                for( k = 1; k < 4; k++ )
                    h->mb.pic.p_fref[1][j2][i*4+k] = filtered_src[k] + ref_pix_offset[j2&1];
            }
        }
}

static const x264_left_table_t left_indices[4] =
{
    /* Current is progressive */
    {{ 4, 4, 5, 5}, { 3,  3,  7,  7}, {16+1, 16+1, 32+1, 32+1}, {0, 0, 1, 1}, {0, 0, 0, 0}},
    {{ 6, 6, 3, 3}, {11, 11, 15, 15}, {16+5, 16+5, 32+5, 32+5}, {2, 2, 3, 3}, {1, 1, 1, 1}},
    /* Current is interlaced */
    {{ 4, 6, 4, 6}, { 3, 11,  3, 11}, {16+1, 16+1, 32+1, 32+1}, {0, 2, 0, 2}, {0, 1, 0, 1}},
    /* Both same */
    {{ 4, 5, 6, 3}, { 3,  7, 11, 15}, {16+1, 16+5, 32+1, 32+5}, {0, 1, 2, 3}, {0, 0, 1, 1}}
};

static void ALWAYS_INLINE x264_macroblock_cache_load_neighbours( x264_t *h, int mb_x, int mb_y, int b_interlaced )
{
    const int mb_interlaced = b_interlaced && MB_INTERLACED;
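    /* With MBAFF the neighbouring MB of a field macroblock is a full MB pair
     * (two rows) up, hence the shift by mb_interlaced. */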
    int top_y = mb_y - (1 << mb_interlaced);
    int top = top_y * h->mb.i_mb_stride + mb_x;
    int topleft_y;
    int topright_y;
    int left[2];

    h->mb.i_mb_x = mb_x;
    h->mb.i_mb_y = mb_y;
    h->mb.i_mb_xy = mb_y * h->mb.i_mb_stride + mb_x;
    h->mb.i_b8_xy = 2*(mb_y * h->mb.i_b8_stride + mb_x);
    h->mb.i_b4_xy = 4*(mb_y * h->mb.i_b4_stride + mb_x);
    h->mb.left_b8[0] =
    h->mb.left_b8[1] = -1;
    h->mb.left_b4[0] =
    h->mb.left_b4[1] = -1;
    h->mb.i_neighbour = 0;
    h->mb.i_neighbour_intra = 0;
    h->mb.i_neighbour_frame = 0;
    h->mb.i_mb_top_xy = -1;
    h->mb.i_mb_top_y = -1;
    h->mb.i_mb_left_xy[0] = h->mb.i_mb_left_xy[1] = -1;
    h->mb.i_mb_topleft_xy = -1;
    h->mb.i_mb_topright_xy = -1;
    h->mb.i_mb_type_top = -1;
    h->mb.i_mb_type_left[0] = h->mb.i_mb_type_left[1] = -1;
    h->mb.i_mb_type_topleft = -1;
    h->mb.i_mb_type_topright = -1;
    h->mb.left_index_table = &left_indices[3];
    h->mb.topleft_partition = 0;

    topleft_y = top_y;
    topright_y = top_y;

    left[0] = left[1] = h->mb.i_mb_xy - 1;
    h->mb.left_b8[0] = h->mb.left_b8[1] = h->mb.i_b8_xy - 2;
    h->mb.left_b4[0] = h->mb.left_b4[1] = h->mb.i_b4_xy - 4;

    if( b_interlaced )
    {
        h->mb.i_mb_top_mbpair_xy = h->mb.i_mb_xy - 2*h->mb.i_mb_stride;
        h->mb.i_mb_topleft_y = -1;
        h->mb.i_mb_topright_y = -1;

        if( mb_y&1 )
        {
            if( mb_x && mb_interlaced != h->mb.field[h->mb.i_mb_xy-1] )
            {
                left[0] = left[1] = h->mb.i_mb_xy - 1 - h->mb.i_mb_stride;
                h->mb.left_b8[0] = h->mb.left_b8[1] = h->mb.i_b8_xy - 2 - 2*h->mb.i_b8_stride;
                h->mb.left_b4[0] = h->mb.left_b4[1] = h->mb.i_b4_xy - 4 - 4*h->mb.i_b4_stride;

                if( mb_interlaced )
                {
                    h->mb.left_index_table = &left_indices[2];
                    left[1] += h->mb.i_mb_stride;
                    h->mb.left_b8[1] += 2*h->mb.i_b8_stride;
                    h->mb.left_b4[1] += 4*h->mb.i_b4_stride;
                }
                else
                {
                    h->mb.left_index_table = &left_indices[1];
                    topleft_y++;
                    h->mb.topleft_partition = 1;
                }
            }
            if( !mb_interlaced )
                topright_y = -1;
        }
        else
        {
            if( mb_interlaced && top >= 0 )
            {
                if( !h->mb.field[top] )
                {
                    top += h->mb.i_mb_stride;
                    top_y++;
                }
                if( mb_x )
                    topleft_y += !h->mb.field[h->mb.i_mb_stride*topleft_y + mb_x - 1];
                if( mb_x < h->mb.i_mb_width-1 )
                    topright_y += !h->mb.field[h->mb.i_mb_stride*topright_y + mb_x + 1];
            }
            if( mb_x && mb_interlaced != h->mb.field[h->mb.i_mb_xy-1] )
            {
                if( mb_interlaced )
                {
                    h->mb.left_index_table = &left_indices[2];
                    left[1] += h->mb.i_mb_stride;
                    h->mb.left_b8[1] += 2*h->mb.i_b8_stride;
                    h->mb.left_b4[1] += 4*h->mb.i_b4_stride;
                }
                else
                    h->mb.left_index_table = &left_indices[0];
            }
        }
    }

    if( mb_x > 0 )
    {
        h->mb.i_neighbour_frame |= MB_LEFT;
        h->mb.i_mb_left_xy[0] = left[0];
        h->mb.i_mb_left_xy[1] = left[1];
        h->mb.i_mb_type_left[0] = h->mb.type[h->mb.i_mb_left_xy[0]];
        h->mb.i_mb_type_left[1] = h->mb.type[h->mb.i_mb_left_xy[1]];
        if( h->mb.slice_table.t_uint16_t[left[0]] == h->sh.i_first_mb )
        {
            h->mb.i_neighbour |= MB_LEFT;

            // FIXME: We don't currently support constrained intra + mbaff.
            if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_left[0] ) )
                h->mb.i_neighbour_intra |= MB_LEFT;
        }
    }

    /* We can't predict from the previous threadslice since it hasn't been encoded yet. */
    if( (h->i_threadslice_start >> mb_interlaced) != (mb_y >> mb_interlaced) )
    {
        if( top >= 0 )
        {
            h->mb.i_neighbour_frame |= MB_TOP;
            h->mb.i_mb_top_xy = top;
            h->mb.i_mb_top_y = top_y;
            h->mb.i_mb_type_top = h->mb.type[h->mb.i_mb_top_xy];
            if( h->mb.slice_table.t_uint16_t[top] == h->sh.i_first_mb )
            {
                h->mb.i_neighbour |= MB_TOP;

                if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_top ) )
                    h->mb.i_neighbour_intra |= MB_TOP;

                /* We only need to prefetch the top blocks because the left was just written
                 * to as part of the previous cache_save.  Since most target CPUs use write-allocate
                 * caches, left blocks are near-guaranteed to be in L1 cache.  Top--not so much. */
                x264_prefetch( &h->mb.cbp.t_int16_t[top] );
                x264_prefetch( h->mb.intra4x4_pred_mode.t_int8_t_array[top] );
                x264_prefetch( &h->mb.non_zero_count.t_uint8_t_array[top][12] );
                /* These aren't always allocated, but prefetching an invalid address can't hurt. */
                x264_prefetch( &h->mb.mb_transform_size.t_int8_t[top] );
                x264_prefetch( &h->mb.skipbp.t_int8_t[top] );
            }
        }

        if( mb_x > 0 && topleft_y >= 0 )
        {
            h->mb.i_neighbour_frame |= MB_TOPLEFT;
            h->mb.i_mb_topleft_xy = h->mb.i_mb_stride*topleft_y + mb_x - 1;
            h->mb.i_mb_topleft_y = topleft_y;
            h->mb.i_mb_type_topleft = h->mb.type[h->mb.i_mb_topleft_xy];
            if( h->mb.slice_table.t_uint16_t[h->mb.i_mb_topleft_xy] == h->sh.i_first_mb )
            {
                h->mb.i_neighbour |= MB_TOPLEFT;

                if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_topleft ) )
                    h->mb.i_neighbour_intra |= MB_TOPLEFT;
            }
        }

        if( mb_x < h->mb.i_mb_width - 1 && topright_y >= 0 )
        {
            h->mb.i_neighbour_frame |= MB_TOPRIGHT;
            h->mb.i_mb_topright_xy = h->mb.i_mb_stride*topright_y + mb_x + 1;
            h->mb.i_mb_topright_y = topright_y;
            h->mb.i_mb_type_topright = h->mb.type[h->mb.i_mb_topright_xy];
            if( h->mb.slice_table.t_uint16_t[h->mb.i_mb_topright_xy] == h->sh.i_first_mb )
            {
                h->mb.i_neighbour |= MB_TOPRIGHT;

                if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_topright ) )
                    h->mb.i_neighbour_intra |= MB_TOPRIGHT;
            }
        }
    }
}

#define LTOP 0
#if HAVE_INTERLACED
#   define LBOT 1
#else
#   define LBOT 0
#endif

static void ALWAYS_INLINE x264_macroblock_cache_load( x264_t *h, int mb_x, int mb_y, int b_mbaff )
{
    int *left;
    int top;
    int top_y;
    int s8x8;
    int s4x4;
    int top_8x8;
    int top_4x4;
    int lists;

    int8_t (*i4x4)[8];
    uint8_t (*nnz)[48];
    int16_t *cbp;

    const x264_left_table_t *left_index_table;

    int l;

    x264_macroblock_cache_load_neighbours( h, mb_x, mb_y, b_mbaff );

    left = h->mb.i_mb_left_xy;
    top = h->mb.i_mb_top_xy;
    top_y = h->mb.i_mb_top_y;
    s8x8 = h->mb.i_b8_stride;
    s4x4 = h->mb.i_b4_stride;
    top_8x8 = (2*top_y+1) * s8x8 + 2*mb_x;
    top_4x4 = (4*top_y+3) * s4x4 + 4*mb_x;
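    /* Number of reference lists to load: 1 for P, 2 for B, 0 for I
     * (exploits SLICE_TYPE_P/B/I being 0/1/2). */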
    lists = (1 << h->sh.i_type) & 3;

    /* GCC pessimizes direct loads from heap-allocated arrays due to aliasing. */
    /* By only dereferencing them once, we avoid this issue. */
    i4x4 = h->mb.intra4x4_pred_mode.t_int8_t_array;
    nnz = h->mb.non_zero_count.t_uint8_t_array;
    cbp = h->mb.cbp.t_int16_t;

    left_index_table = h->mb.left_index_table;

    h->mb.cache.deblock_strength = h->deblock_strength[mb_y&1][h->param.b_sliced_threads?h->mb.i_mb_xy:mb_x];

    /* load cache */
    if( h->mb.i_neighbour & MB_TOP )
    {
        h->mb.cache.i_cbp_top = cbp[top];
        /* load intra4x4 */
        CP32( &h->mb.cache.intra4x4_pred_mode[x264_scan8[0] - 8], &i4x4[top][0] );

        /* load non_zero_count */
        CP32( &h->mb.cache.non_zero_count[x264_scan8[ 0] - 8], &nnz[top][12] );
        CP32( &h->mb.cache.non_zero_count[x264_scan8[16] - 8], &nnz[top][16-4 + (16>>CHROMA_V_SHIFT)] );
        CP32( &h->mb.cache.non_zero_count[x264_scan8[32] - 8], &nnz[top][32-4 + (16>>CHROMA_V_SHIFT)] );

        /* Finish the prefetching */
        for( l = 0; l < lists; l++ )
        {
            x264_prefetch( &h->mb.mv[l][top_4x4-1] );
            /* Top right being not in the same cacheline as top left will happen
             * once every 4 MBs, so one extra prefetch is worthwhile */
            x264_prefetch( &h->mb.mv[l][top_4x4+4] );
            x264_prefetch( &h->mb.ref[l][top_8x8-1] );
            x264_prefetch( &h->mb.mvd[l][top] );
        }
    }
    else
    {
        h->mb.cache.i_cbp_top = -1;

        /* load intra4x4 */
        M32( &h->mb.cache.intra4x4_pred_mode[x264_scan8[0] - 8] ) = 0xFFFFFFFFU;

        /* load non_zero_count */
        M32( &h->mb.cache.non_zero_count[x264_scan8[ 0] - 8] ) = 0x80808080U;
        M32( &h->mb.cache.non_zero_count[x264_scan8[16] - 8] ) = 0x80808080U;
        M32( &h->mb.cache.non_zero_count[x264_scan8[32] - 8] ) = 0x80808080U;
    }

    if( h->mb.i_neighbour & MB_LEFT )
    {
        int ltop = left[LTOP];
        int lbot = b_mbaff ? left[LBOT] : ltop;
        if( b_mbaff )
        {
            const int16_t top_luma = (cbp[ltop] >> (left_index_table->mv[0]&(~1))) & 2;
            const int16_t bot_luma = (cbp[lbot] >> (left_index_table->mv[2]&(~1))) & 2;
            h->mb.cache.i_cbp_left = (cbp[ltop] & 0xfff0) | (bot_luma<<2) | top_luma;
        }
        else
            h->mb.cache.i_cbp_left = cbp[ltop];

        /* load intra4x4 */
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 0] - 1] = i4x4[ltop][left_index_table->intra[0]];
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 2] - 1] = i4x4[ltop][left_index_table->intra[1]];
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 8] - 1] = i4x4[lbot][left_index_table->intra[2]];
        h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = i4x4[lbot][left_index_table->intra[3]];

        /* load non_zero_count */
        h->mb.cache.non_zero_count[x264_scan8[ 0] - 1] = nnz[ltop][left_index_table->nnz[0]];
        h->mb.cache.non_zero_count[x264_scan8[ 2] - 1] = nnz[ltop][left_index_table->nnz[1]];
        h->mb.cache.non_zero_count[x264_scan8[ 8] - 1] = nnz[lbot][left_index_table->nnz[2]];
        h->mb.cache.non_zero_count[x264_scan8[10] - 1] = nnz[lbot][left_index_table->nnz[3]];

        if( CHROMA_FORMAT >= CHROMA_422 )
        {
            int offset = (4>>CHROMA_H_SHIFT) - 4;
            h->mb.cache.non_zero_count[x264_scan8[16+ 0] - 1] = nnz[ltop][left_index_table->nnz[0]+16+offset];
            h->mb.cache.non_zero_count[x264_scan8[16+ 2] - 1] = nnz[ltop][left_index_table->nnz[1]+16+offset];
            h->mb.cache.non_zero_count[x264_scan8[16+ 8] - 1] = nnz[lbot][left_index_table->nnz[2]+16+offset];
            h->mb.cache.non_zero_count[x264_scan8[16+10] - 1] = nnz[lbot][left_index_table->nnz[3]+16+offset];
            h->mb.cache.non_zero_count[x264_scan8[32+ 0] - 1] = nnz[ltop][left_index_table->nnz[0]+32+offset];
            h->mb.cache.non_zero_count[x264_scan8[32+ 2] - 1] = nnz[ltop][left_index_table->nnz[1]+32+offset];
            h->mb.cache.non_zero_count[x264_scan8[32+ 8] - 1] = nnz[lbot][left_index_table->nnz[2]+32+offset];
            h->mb.cache.non_zero_count[x264_scan8[32+10] - 1] = nnz[lbot][left_index_table->nnz[3]+32+offset];
        }
        else
        {
            h->mb.cache.non_zero_count[x264_scan8[16+ 0] - 1] = nnz[ltop][left_index_table->nnz_chroma[0]];
            h->mb.cache.non_zero_count[x264_scan8[16+ 2] - 1] = nnz[lbot][left_index_table->nnz_chroma[1]];
            h->mb.cache.non_zero_count[x264_scan8[32+ 0] - 1] = nnz[ltop][left_index_table->nnz_chroma[2]];
            h->mb.cache.non_zero_count[x264_scan8[32+ 2] - 1] = nnz[lbot][left_index_table->nnz_chroma[3]];
        }
    }
    else
    {
        h->mb.cache.i_cbp_left = -1;

        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 0] - 1] =
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 2] - 1] =
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 8] - 1] =
        h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = -1;

        /* load non_zero_count */
        h->mb.cache.non_zero_count[x264_scan8[ 0] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[ 2] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[ 8] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[10] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[16+ 0] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[16+ 2] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[32+ 0] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[32+ 2] - 1] = 0x80;
        if( CHROMA_FORMAT >= CHROMA_422 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16+ 8] - 1] =
            h->mb.cache.non_zero_count[x264_scan8[16+10] - 1] =
            h->mb.cache.non_zero_count[x264_scan8[32+ 8] - 1] =
            h->mb.cache.non_zero_count[x264_scan8[32+10] - 1] = 0x80;
        }
    }

    if( h->pps->b_transform_8x8_mode )
    {
        h->mb.cache.i_neighbour_transform_size =
            ( (h->mb.i_neighbour & MB_LEFT) && h->mb.mb_transform_size.t_int8_t[left[0]] )
          + ( (h->mb.i_neighbour & MB_TOP) && h->mb.mb_transform_size.t_int8_t[top] );
    }

    if( b_mbaff )
    {
        h->mb.pic.i_fref[0] = h->i_ref[0] << MB_INTERLACED;
        h->mb.pic.i_fref[1] = h->i_ref[1] << MB_INTERLACED;
    }

    if( !b_mbaff )
    {
        x264_copy_column8( h->mb.pic.p_fdec[0]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+ 4*FDEC_STRIDE );
        x264_copy_column8( h->mb.pic.p_fdec[0]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+12*FDEC_STRIDE );
        x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 0, 0, 0 );
        if( CHROMA444 )
        {
            x264_copy_column8( h->mb.pic.p_fdec[1]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+15+ 4*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[1]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[1]+15+12*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[2]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+15+ 4*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[2]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[2]+15+12*FDEC_STRIDE );
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 0, 0 );
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 2, 0, 0 );
        }
        else
        {
            x264_copy_column8( h->mb.pic.p_fdec[1]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+ 7+ 4*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[2]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+ 7+ 4*FDEC_STRIDE );
            if( CHROMA_FORMAT == CHROMA_422 )
            {
                x264_copy_column8( h->mb.pic.p_fdec[1]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[1]+ 7+12*FDEC_STRIDE );
                x264_copy_column8( h->mb.pic.p_fdec[2]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[2]+ 7+12*FDEC_STRIDE );
            }
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 1, 0 );
        }
    }
    else
    {
        x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 0, 0, 1 );
        if( CHROMA444 )
        {
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 0, 1 );
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 2, 0, 1 );
        }
        else
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 1, 1 );
    }

    if( h->fdec->integral )
    {
        int offset = 16 * (mb_x + mb_y * h->fdec->i_stride[0]);
        int list;
        int i;

        for( list = 0; list < 2; list++ )
            for( i = 0; i < h->mb.pic.i_fref[list]; i++ )
                h->mb.pic.p_integral[list][i] = &h->fref[list][i]->integral[offset];
    }

    x264_prefetch_fenc( h, h->fenc, mb_x, mb_y );

    /* load ref/mv/mvd */
    for( l = 0; l < lists; l++ )
    {
        int16_t (*mv)[2] = h->mb.mv[l];
        int8_t *ref = h->mb.ref[l];

        int i8 = x264_scan8[0] - 1 - 1*8;
        if( h->mb.i_neighbour & MB_TOPLEFT )
        {
            int ir = b_mbaff ? 2*(s8x8*h->mb.i_mb_topleft_y + mb_x-1)+1+s8x8 : top_8x8 - 1;
            int iv = b_mbaff ? 4*(s4x4*h->mb.i_mb_topleft_y + mb_x-1)+3+3*s4x4 : top_4x4 - 1;
            if( b_mbaff && h->mb.topleft_partition )
            {
                /* Take motion vector from the middle of macroblock instead of
                 * the bottom right as usual. */
                iv -= 2*s4x4;
                ir -= s8x8;
            }
            h->mb.cache.ref[l][i8] = ref[ir];
            CP32( h->mb.cache.mv[l][i8], mv[iv] );
        }
        else
        {
            h->mb.cache.ref[l][i8] = -2;
            M32( h->mb.cache.mv[l][i8] ) = 0;
        }

        i8 = x264_scan8[0] - 8;
        if( h->mb.i_neighbour & MB_TOP )
        {
            h->mb.cache.ref[l][i8+0] =
            h->mb.cache.ref[l][i8+1] = ref[top_8x8 + 0];
            h->mb.cache.ref[l][i8+2] =
            h->mb.cache.ref[l][i8+3] = ref[top_8x8 + 1];
            CP128( h->mb.cache.mv[l][i8], mv[top_4x4] );
        }
        else
        {
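            /* No top neighbour: zero the MV row and broadcast the
             * "unavailable" ref marker (-2) into all four bytes. */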
1155 			M128( h->mb.cache.mv[l][i8] ) = M128_ZERO;
1156 			M32( &h->mb.cache.ref[l][i8] ) = (uint8_t)(-2) * 0x01010101U;
1157         }
1158 
1159         i8 = x264_scan8[0] + 4 - 1*8;
1160         if( h->mb.i_neighbour & MB_TOPRIGHT )
1161         {
1162             int ir = b_mbaff ? 2*(s8x8*h->mb.i_mb_topright_y + (mb_x+1))+s8x8 : top_8x8 + 2;
1163             int iv = b_mbaff ? 4*(s4x4*h->mb.i_mb_topright_y + (mb_x+1))+3*s4x4 : top_4x4 + 4;
1164             h->mb.cache.ref[l][i8] = ref[ir];
1165             CP32( h->mb.cache.mv[l][i8], mv[iv] );
1166         }
1167         else
1168              h->mb.cache.ref[l][i8] = -2;
1169 
1170         i8 = x264_scan8[0] - 1;
1171         if( h->mb.i_neighbour & MB_LEFT )
1172         {
1173             if( b_mbaff )
1174             {
1175                 h->mb.cache.ref[l][i8+0*8] = ref[h->mb.left_b8[LTOP] + 1 + s8x8*left_index_table->ref[0]];
1176                 h->mb.cache.ref[l][i8+1*8] = ref[h->mb.left_b8[LTOP] + 1 + s8x8*left_index_table->ref[1]];
1177                 h->mb.cache.ref[l][i8+2*8] = ref[h->mb.left_b8[LBOT] + 1 + s8x8*left_index_table->ref[2]];
1178                 h->mb.cache.ref[l][i8+3*8] = ref[h->mb.left_b8[LBOT] + 1 + s8x8*left_index_table->ref[3]];
1179 
1180                 CP32( h->mb.cache.mv[l][i8+0*8], mv[h->mb.left_b4[LTOP] + 3 + s4x4*left_index_table->mv[0]] );
1181                 CP32( h->mb.cache.mv[l][i8+1*8], mv[h->mb.left_b4[LTOP] + 3 + s4x4*left_index_table->mv[1]] );
1182                 CP32( h->mb.cache.mv[l][i8+2*8], mv[h->mb.left_b4[LBOT] + 3 + s4x4*left_index_table->mv[2]] );
1183                 CP32( h->mb.cache.mv[l][i8+3*8], mv[h->mb.left_b4[LBOT] + 3 + s4x4*left_index_table->mv[3]] );
1184             }
1185             else
1186             {
1187                 const int ir = h->mb.i_b8_xy - 1;
1188                 const int iv = h->mb.i_b4_xy - 1;
1189                 h->mb.cache.ref[l][i8+0*8] =
1190                 h->mb.cache.ref[l][i8+1*8] = ref[ir + 0*s8x8];
1191                 h->mb.cache.ref[l][i8+2*8] =
1192                 h->mb.cache.ref[l][i8+3*8] = ref[ir + 1*s8x8];
1193 
1194                 CP32( h->mb.cache.mv[l][i8+0*8], mv[iv + 0*s4x4] );
1195                 CP32( h->mb.cache.mv[l][i8+1*8], mv[iv + 1*s4x4] );
1196                 CP32( h->mb.cache.mv[l][i8+2*8], mv[iv + 2*s4x4] );
1197                 CP32( h->mb.cache.mv[l][i8+3*8], mv[iv + 3*s4x4] );
1198             }
1199         }
1200         else
1201         {
1202 			int i;
1203 			for( i = 0; i < 4; i++ )
1204             {
1205                 h->mb.cache.ref[l][i8+i*8] = -2;
1206                 M32( h->mb.cache.mv[l][i8+i*8] ) = 0;
1207             }
1208         }
1209 
1210         /* Extra logic for top right mv in mbaff.
1211          * . . . d  . . a .
1212          * . . . e  . . . .
1213          * . . . f  b . c .
1214          * . . . .  . . . .
1215          *
1216          * If the top right of the 4x4 partitions labeled a, b and c in the
1217          * above diagram do not exist, but the entries d, e and f exist (in
1218          * the macroblock to the left) then use those instead.
1219          */
        if( b_mbaff && (h->mb.i_neighbour & MB_LEFT) )
        {
            if( MB_INTERLACED && !h->mb.field[h->mb.i_mb_xy-1] )
            {
                h->mb.cache.topright_ref[l][0] = ref[h->mb.left_b8[0] + 1 + s8x8*0];
                h->mb.cache.topright_ref[l][1] = ref[h->mb.left_b8[0] + 1 + s8x8*1];
                h->mb.cache.topright_ref[l][2] = ref[h->mb.left_b8[1] + 1 + s8x8*0];
                CP32( h->mb.cache.topright_mv[l][0], mv[h->mb.left_b4[0] + 3 + s4x4*(left_index_table->mv[0]+1)] );
                CP32( h->mb.cache.topright_mv[l][1], mv[h->mb.left_b4[0] + 3 + s4x4*(left_index_table->mv[1]+1)] );
                CP32( h->mb.cache.topright_mv[l][2], mv[h->mb.left_b4[1] + 3 + s4x4*(left_index_table->mv[2]+1)] );
            }
            else if( !MB_INTERLACED && h->mb.field[h->mb.i_mb_xy-1] )
            {
                // Looking at the bottom field so always take the bottom macroblock of the pair.
                h->mb.cache.topright_ref[l][0] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[0]];
                h->mb.cache.topright_ref[l][1] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[0]];
                h->mb.cache.topright_ref[l][2] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[2]];
                CP32( h->mb.cache.topright_mv[l][0], mv[h->mb.left_b4[0] + 3 + s4x4*4 + s4x4*left_index_table->mv[0]] );
                CP32( h->mb.cache.topright_mv[l][1], mv[h->mb.left_b4[0] + 3 + s4x4*4 + s4x4*left_index_table->mv[1]] );
                CP32( h->mb.cache.topright_mv[l][2], mv[h->mb.left_b4[0] + 3 + s4x4*4 + s4x4*left_index_table->mv[2]] );
            }
        }

        if( h->param.b_cabac )
        {
            uint8_t (*mvd)[8][2] = h->mb.mvd[l];
            if( h->mb.i_neighbour & MB_TOP )
                CP64( h->mb.cache.mvd[l][x264_scan8[0] - 8], mvd[top][0] );
            else
                M64( h->mb.cache.mvd[l][x264_scan8[0] - 8] ) = 0;

            if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1] >= 0) )
            {
                CP16( h->mb.cache.mvd[l][x264_scan8[0 ] - 1], mvd[left[LTOP]][left_index_table->intra[0]] );
                CP16( h->mb.cache.mvd[l][x264_scan8[2 ] - 1], mvd[left[LTOP]][left_index_table->intra[1]] );
            }
            else
            {
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+0*8] ) = 0;
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+1*8] ) = 0;
            }
            if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1+2*8] >= 0) )
            {
                CP16( h->mb.cache.mvd[l][x264_scan8[8 ] - 1], mvd[left[LBOT]][left_index_table->intra[2]] );
                CP16( h->mb.cache.mvd[l][x264_scan8[10] - 1], mvd[left[LBOT]][left_index_table->intra[3]] );
            }
            else
            {
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+2*8] ) = 0;
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+3*8] ) = 0;
            }
        }

        /* If motion vectors are cached from frame macroblocks but this
         * macroblock is a field macroblock then the motion vector must be
         * halved. Similarly, motion vectors from field macroblocks are doubled. */
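        /* Illustrative example (added, not in the original source): a frame-MB
         * neighbour vector of (12, 10) maps to (12, 5) when the current
         * macroblock is a field macroblock, since one field row covers two
         * frame rows, and its reference index is doubled because each frame
         * reference splits into two field references; the opposite direction
         * doubles mvy and halves the reference index, exactly as the
         * MAP_F2F macros below do. */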
        if( b_mbaff )
        {
#define MAP_MVS\
                if( FIELD_DIFFERENT(h->mb.i_mb_topleft_xy) )\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 - 1*8)\
                if( FIELD_DIFFERENT(top) )\
                {\
                    MAP_F2F(mv, ref, x264_scan8[0] + 0 - 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] + 1 - 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] + 2 - 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] + 3 - 1*8)\
                }\
                if( FIELD_DIFFERENT(h->mb.i_mb_topright_xy) )\
                    MAP_F2F(mv, ref, x264_scan8[0] + 4 - 1*8)\
                if( FIELD_DIFFERENT(left[0]) )\
                {\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 0*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 2*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 3*8)\
                    MAP_F2F(topright_mv, topright_ref, 0)\
                    MAP_F2F(topright_mv, topright_ref, 1)\
                    MAP_F2F(topright_mv, topright_ref, 2)\
                }

            if( MB_INTERLACED )
            {
#define FIELD_DIFFERENT(macroblock) (macroblock >= 0 && !h->mb.field[macroblock])
#define MAP_F2F(varmv, varref, index)\
                if( h->mb.cache.varref[l][index] >= 0 )\
                {\
                    h->mb.cache.varref[l][index] <<= 1;\
                    h->mb.cache.varmv[l][index][1] /= 2;\
                    h->mb.cache.mvd[l][index][1] >>= 1;\
                }
                MAP_MVS
#undef MAP_F2F
#undef FIELD_DIFFERENT
            }
            else
            {
#define FIELD_DIFFERENT(macroblock) (macroblock >= 0 && h->mb.field[macroblock])
#define MAP_F2F(varmv, varref, index)\
                if( h->mb.cache.varref[l][index] >= 0 )\
                {\
                    h->mb.cache.varref[l][index] >>= 1;\
                    h->mb.cache.varmv[l][index][1] <<= 1;\
                    h->mb.cache.mvd[l][index][1] <<= 1;\
                }
                MAP_MVS
#undef MAP_F2F
#undef FIELD_DIFFERENT
            }
        }
    }

    if( b_mbaff && mb_x == 0 && !(mb_y&1) )
    {
        if( h->mb.i_mb_top_xy >= h->sh.i_first_mb )
            h->mb.field_decoding_flag = h->mb.field[h->mb.i_mb_top_xy];
        else
            h->mb.field_decoding_flag = 0;
    }

    /* Check whether a skip here would cause the decoder to predict the
     * interlace mode incorrectly.
     * FIXME: It might be better to change the interlace type rather than
     * forcing a skip to be non-skip. */
    h->mb.b_allow_skip = 1;
    if( b_mbaff )
    {
        if( MB_INTERLACED != h->mb.field_decoding_flag &&
            (mb_y&1) && IS_SKIP(h->mb.type[h->mb.i_mb_xy - h->mb.i_mb_stride]) )
            h->mb.b_allow_skip = 0;
    }

    if( h->param.b_cabac )
    {
        if( b_mbaff )
        {
            int left_xy, top_xy;
            /* Neighbours here are calculated based on field_decoding_flag */
            int mb_xy = mb_x + (mb_y&~1)*h->mb.i_mb_stride;
            left_xy = mb_xy - 1;
            if( (mb_y&1) && mb_x > 0 && h->mb.field_decoding_flag == h->mb.field[left_xy] )
                left_xy += h->mb.i_mb_stride;
            if( h->mb.field_decoding_flag )
            {
                top_xy = mb_xy - h->mb.i_mb_stride;
                if( !(mb_y&1) && top_xy >= 0 && h->mb.slice_table.t_uint16_t[top_xy] == h->sh.i_first_mb && h->mb.field[top_xy] )
                    top_xy -= h->mb.i_mb_stride;
            }
            else
                top_xy = mb_x + (mb_y-1)*h->mb.i_mb_stride;

            h->mb.cache.i_neighbour_skip =   (mb_x >  0 && h->mb.slice_table.t_uint16_t[left_xy] == h->sh.i_first_mb && !IS_SKIP( h->mb.type[left_xy] ))
                                         + (top_xy >= 0 && h->mb.slice_table.t_uint16_t[top_xy]  == h->sh.i_first_mb && !IS_SKIP( h->mb.type[top_xy] ));
        }
        else
        {
            h->mb.cache.i_neighbour_skip = ((h->mb.i_neighbour & MB_LEFT) && !IS_SKIP( h->mb.i_mb_type_left[0] ))
                                         + ((h->mb.i_neighbour & MB_TOP)  && !IS_SKIP( h->mb.i_mb_type_top ));
        }
    }

    /* load skip */
    if( h->sh.i_type == SLICE_TYPE_B )
    {
        h->mb.bipred_weight = h->mb.bipred_weight_buf[MB_INTERLACED][MB_INTERLACED&(mb_y&1)];
        h->mb.dist_scale_factor = h->mb.dist_scale_factor_buf[MB_INTERLACED][MB_INTERLACED&(mb_y&1)];
        if( h->param.b_cabac )
        {
            uint8_t skipbp;
            x264_macroblock_cache_skip( h, 0, 0, 4, 4, 0 );
            if( b_mbaff )
            {
                skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp.t_int8_t[left[LTOP]] : 0;
                h->mb.cache.skip[x264_scan8[0] - 1] = (skipbp >> (1+(left_index_table->mv[0]&~1))) & 1;
                skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp.t_int8_t[left[LBOT]] : 0;
                h->mb.cache.skip[x264_scan8[8] - 1] = (skipbp >> (1+(left_index_table->mv[2]&~1))) & 1;
            }
            else
            {
                skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp.t_int8_t[left[0]] : 0;
                h->mb.cache.skip[x264_scan8[0] - 1] = skipbp & 0x2;
                h->mb.cache.skip[x264_scan8[8] - 1] = skipbp & 0x8;
            }
            skipbp = (h->mb.i_neighbour & MB_TOP) ? h->mb.skipbp.t_int8_t[top] : 0;
            h->mb.cache.skip[x264_scan8[0] - 8] = skipbp & 0x4;
            h->mb.cache.skip[x264_scan8[4] - 8] = skipbp & 0x8;
        }
    }

    if( h->sh.i_type == SLICE_TYPE_P )
        x264_mb_predict_mv_pskip( h, h->mb.cache.pskip_mv );

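    /* Note (added): precompute per-block neighbour availability masks for
     * intra prediction. For example, 4x4 block 1 always has a left neighbour
     * (block 0 inside this macroblock), so only its top-row availability
     * depends on i_neighbour_intra, while block 0's availability mirrors the
     * macroblock-level neighbours directly. */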
    h->mb.i_neighbour4[0] =
    h->mb.i_neighbour8[0] = (h->mb.i_neighbour_intra & (MB_TOP|MB_LEFT|MB_TOPLEFT))
                            | ((h->mb.i_neighbour_intra & MB_TOP) ? MB_TOPRIGHT : 0);
    h->mb.i_neighbour4[4] =
    h->mb.i_neighbour4[1] = MB_LEFT | ((h->mb.i_neighbour_intra & MB_TOP) ? (MB_TOP|MB_TOPLEFT|MB_TOPRIGHT) : 0);
    h->mb.i_neighbour4[2] =
    h->mb.i_neighbour4[8] =
    h->mb.i_neighbour4[10] =
    h->mb.i_neighbour8[2] = MB_TOP|MB_TOPRIGHT | ((h->mb.i_neighbour_intra & MB_LEFT) ? (MB_LEFT|MB_TOPLEFT) : 0);
    h->mb.i_neighbour4[5] =
    h->mb.i_neighbour8[1] = MB_LEFT | (h->mb.i_neighbour_intra & MB_TOPRIGHT)
                            | ((h->mb.i_neighbour_intra & MB_TOP) ? MB_TOP|MB_TOPLEFT : 0);
}

void x264_macroblock_cache_load_progressive( x264_t *h, int mb_x, int mb_y )
{
    x264_macroblock_cache_load( h, mb_x, mb_y, 0 );
}

void x264_macroblock_cache_load_interlaced( x264_t *h, int mb_x, int mb_y )
{
    x264_macroblock_cache_load( h, mb_x, mb_y, 1 );
}

static void x264_macroblock_deblock_strength_mbaff( x264_t *h, uint8_t (*bs)[8][4] )
{
    if( (h->mb.i_neighbour & MB_LEFT) && h->mb.field[h->mb.i_mb_left_xy[0]] != MB_INTERLACED )
    {
        static const uint8_t offset[2][2][8] =
        {   {   { 0, 0, 0, 0, 1, 1, 1, 1 },
                { 2, 2, 2, 2, 3, 3, 3, 3 }, },
            {   { 0, 1, 2, 3, 0, 1, 2, 3 },
                { 0, 1, 2, 3, 0, 1, 2, 3 }, }
        };

        ALIGNED_ARRAY_8( uint8_t, tmpbs, [8] );

        const uint8_t *off = offset[MB_INTERLACED][h->mb.i_mb_y&1];
        uint8_t (*nnz)[48] = h->mb.non_zero_count.t_uint8_t_array;
        int i;

        for( i = 0; i < 8; i++ )
        {
            int left = h->mb.i_mb_left_xy[MB_INTERLACED ? i>>2 : i&1];
            int nnz_this = h->mb.cache.non_zero_count[x264_scan8[0]+8*(i>>1)];
            int nnz_left = nnz[left][3 + 4*off[i]];
            if( !h->param.b_cabac && h->pps->b_transform_8x8_mode )
            {
                int j = off[i]&~1;
                if( h->mb.mb_transform_size.t_int8_t[left] )
                    nnz_left = !!(M16( &nnz[left][2+4*j] ) | M16( &nnz[left][2+4*(1+j)] ));
            }
            tmpbs[i] = (nnz_left || nnz_this) ? 2 : 1;
        }
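
        /* Note (added, paraphrasing the H.264 bS rules): an edge gets
         * strength 2 when either side has coded residual, and at least 1
         * across a frame/field boundary, where the mv-based comparison
         * cannot be applied cleanly between opposite-parity blocks. */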

        if( MB_INTERLACED )
        {
            CP32( bs[0][0], &tmpbs[0] );
            CP32( bs[0][4], &tmpbs[4] );
        }
        else
        {
            for( i = 0; i < 4; i++ ) bs[0][0][i] = tmpbs[2*i];
            for( i = 0; i < 4; i++ ) bs[0][4][i] = tmpbs[1+2*i];
        }
    }

    if( (h->mb.i_neighbour & MB_TOP) && MB_INTERLACED != h->mb.field[h->mb.i_mb_top_xy] )
    {
        if( !(h->mb.i_mb_y&1) && !MB_INTERLACED )
        {
            /* Need to filter both fields (even for frame macroblocks).
             * Filter top two rows using the top macroblock of the above
             * pair and then the bottom one. */
            int mbn_xy = h->mb.i_mb_xy - 2 * h->mb.i_mb_stride;
            uint8_t *nnz_cur = &h->mb.cache.non_zero_count[x264_scan8[0]];
            int j;

            for( j = 0; j < 2; j++, mbn_xy += h->mb.i_mb_stride )
            {
                uint8_t (*nnz)[48] = h->mb.non_zero_count.t_uint8_t_array;
                int i;

                ALIGNED_4( uint8_t nnz_top[4] );
                CP32( nnz_top, &nnz[mbn_xy][3*4] );

                if( !h->param.b_cabac && h->pps->b_transform_8x8_mode && h->mb.mb_transform_size.t_int8_t[mbn_xy] )
                {
                    nnz_top[0] = nnz_top[1] = M16( &nnz[mbn_xy][ 8] ) || M16( &nnz[mbn_xy][12] );
                    nnz_top[2] = nnz_top[3] = M16( &nnz[mbn_xy][10] ) || M16( &nnz[mbn_xy][14] );
                }

                for( i = 0; i < 4; i++ )
                    bs[1][4*j][i] = (nnz_cur[i] || nnz_top[i]) ? 2 : 1;
            }
        }
        else
        {
            int i;
            for( i = 0; i < 4; i++ )
                bs[1][0][i] = X264_MAX( bs[1][0][i], 1 );
        }
    }
}

void x264_macroblock_deblock_strength( x264_t *h )
{
    uint8_t (*bs)[8][4] = h->mb.cache.deblock_strength;
    int neighbour_changed;

    if( IS_INTRA( h->mb.i_type ) )
    {
        memset( bs[0][1], 3, 3*4*sizeof(uint8_t) );
        memset( bs[1][1], 3, 3*4*sizeof(uint8_t) );
        return;
    }

    /* Early termination: in this case, nnz guarantees all edges use strength 2. */
    if( h->mb.b_transform_8x8 && !CHROMA444 )
    {
        int cbp_mask = 0xf >> CHROMA_V_SHIFT;
        if( (h->mb.i_cbp_luma&cbp_mask) == cbp_mask )
        {
            M32( bs[0][0] ) = 0x02020202;
            M32( bs[0][2] ) = 0x02020202;
            M32( bs[0][4] ) = 0x02020202;
            memset( bs[1][0], 2, 5*4*sizeof(uint8_t) ); /* [1][1] and [1][3] have to be set for 4:2:2 */
            return;
        }
    }

    neighbour_changed = 0;
    if( h->sh.i_disable_deblocking_filter_idc != 2 )
    {
        neighbour_changed = h->mb.i_neighbour_frame&~h->mb.i_neighbour;
        h->mb.i_neighbour = h->mb.i_neighbour_frame;
    }

    /* MBAFF deblock uses different left neighbours from encoding */
    if( SLICE_MBAFF && (h->mb.i_neighbour & MB_LEFT) && (h->mb.field[h->mb.i_mb_xy - 1] != MB_INTERLACED) )
    {
        h->mb.i_mb_left_xy[1] =
        h->mb.i_mb_left_xy[0] = h->mb.i_mb_xy - 1;
        if( h->mb.i_mb_y&1 )
            h->mb.i_mb_left_xy[0] -= h->mb.i_mb_stride;
        else
            h->mb.i_mb_left_xy[1] += h->mb.i_mb_stride;
    }
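    /* Note (added): when the left pair has the opposite parity, deblocking
     * references both macroblocks of that pair (one per field), so the two
     * i_mb_left_xy entries are pointed at the top and bottom macroblocks of
     * the left pair here. */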

    /* If we have multiple slices and we're deblocking on slice edges, we
     * have to reload neighbour data. */
    if( neighbour_changed )
    {
        int top_y = h->mb.i_mb_top_y;
        int top_8x8 = (2*top_y+1) * h->mb.i_b8_stride + 2*h->mb.i_mb_x;
        int top_4x4 = (4*top_y+3) * h->mb.i_b4_stride + 4*h->mb.i_mb_x;
        int s8x8 = h->mb.i_b8_stride;
        int s4x4 = h->mb.i_b4_stride;

        uint8_t (*nnz)[48] = h->mb.non_zero_count.t_uint8_t_array;
        const x264_left_table_t *left_index_table = SLICE_MBAFF ? h->mb.left_index_table : &left_indices[3];
        int l;

        if( neighbour_changed & MB_TOP )
            CP32( &h->mb.cache.non_zero_count[x264_scan8[0] - 8], &nnz[h->mb.i_mb_top_xy][12] );

        if( neighbour_changed & MB_LEFT )
        {
            int *left = h->mb.i_mb_left_xy;
            h->mb.cache.non_zero_count[x264_scan8[0 ] - 1] = nnz[left[0]][left_index_table->nnz[0]];
            h->mb.cache.non_zero_count[x264_scan8[2 ] - 1] = nnz[left[0]][left_index_table->nnz[1]];
            h->mb.cache.non_zero_count[x264_scan8[8 ] - 1] = nnz[left[1]][left_index_table->nnz[2]];
            h->mb.cache.non_zero_count[x264_scan8[10] - 1] = nnz[left[1]][left_index_table->nnz[3]];
        }

        for( l = 0; l <= (h->sh.i_type == SLICE_TYPE_B); l++ )
        {
            int16_t (*mv)[2] = h->mb.mv[l];
            int8_t *ref = h->mb.ref[l];

            int i8 = x264_scan8[0] - 8;
            if( neighbour_changed & MB_TOP )
            {
                h->mb.cache.ref[l][i8+0] =
                h->mb.cache.ref[l][i8+1] = ref[top_8x8 + 0];
                h->mb.cache.ref[l][i8+2] =
                h->mb.cache.ref[l][i8+3] = ref[top_8x8 + 1];
                CP128( h->mb.cache.mv[l][i8], mv[top_4x4] );
            }

            i8 = x264_scan8[0] - 1;
            if( neighbour_changed & MB_LEFT )
            {
                h->mb.cache.ref[l][i8+0*8] =
                h->mb.cache.ref[l][i8+1*8] = ref[h->mb.left_b8[0] + 1 + s8x8*left_index_table->ref[0]];
                h->mb.cache.ref[l][i8+2*8] =
                h->mb.cache.ref[l][i8+3*8] = ref[h->mb.left_b8[1] + 1 + s8x8*left_index_table->ref[2]];

                CP32( h->mb.cache.mv[l][i8+0*8], mv[h->mb.left_b4[0] + 3 + s4x4*left_index_table->mv[0]] );
                CP32( h->mb.cache.mv[l][i8+1*8], mv[h->mb.left_b4[0] + 3 + s4x4*left_index_table->mv[1]] );
                CP32( h->mb.cache.mv[l][i8+2*8], mv[h->mb.left_b4[1] + 3 + s4x4*left_index_table->mv[2]] );
                CP32( h->mb.cache.mv[l][i8+3*8], mv[h->mb.left_b4[1] + 3 + s4x4*left_index_table->mv[3]] );
            }
        }
    }

    if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART && h->sh.i_type == SLICE_TYPE_P )
    {
        /* Handle reference frame duplicates */
        int i8 = x264_scan8[0] - 8;
        int ref0;
        int ref1;
        int ref2;
        int ref3;
        uint32_t reftop;
        uint32_t refbot;

        h->mb.cache.ref[0][i8+0] =
        h->mb.cache.ref[0][i8+1] = deblock_ref_table(h->mb.cache.ref[0][i8+0]);
        h->mb.cache.ref[0][i8+2] =
        h->mb.cache.ref[0][i8+3] = deblock_ref_table(h->mb.cache.ref[0][i8+2]);

        i8 = x264_scan8[0] - 1;
        h->mb.cache.ref[0][i8+0*8] =
        h->mb.cache.ref[0][i8+1*8] = deblock_ref_table(h->mb.cache.ref[0][i8+0*8]);
        h->mb.cache.ref[0][i8+2*8] =
        h->mb.cache.ref[0][i8+3*8] = deblock_ref_table(h->mb.cache.ref[0][i8+2*8]);

        ref0 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[ 0]]);
        ref1 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[ 4]]);
        ref2 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[ 8]]);
        ref3 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[12]]);
        reftop = pack16to32( (uint8_t)ref0, (uint8_t)ref1 ) * 0x0101;
        refbot = pack16to32( (uint8_t)ref2, (uint8_t)ref3 ) * 0x0101;

        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*0] ) = reftop;
        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*1] ) = reftop;
        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*2] ) = refbot;
        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*3] ) = refbot;
    }
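
    /* Note (added): smart weightp can insert weighted duplicates of a
     * reference frame, so two different L0 indices may denote the same
     * physical frame; deblock_ref_table() maps them to one canonical index
     * so the strength calculation below doesn't see a spurious reference
     * mismatch on such edges. */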

    /* Munge NNZ for CAVLC + 8x8dct: for deblocking, an 8x8-transform block
     * counts as nonzero in all four of its 4x4 cells if any of its
     * coefficients are nonzero, which the per-4x4 counts stored for CAVLC
     * don't reflect, so recompute them here. */
    if( !h->param.b_cabac && h->pps->b_transform_8x8_mode )
    {
        uint8_t (*nnz)[48] = h->mb.non_zero_count.t_uint8_t_array;
        int top = h->mb.i_mb_top_xy;
        int *left = h->mb.i_mb_left_xy;

        if( (h->mb.i_neighbour & MB_TOP) && h->mb.mb_transform_size.t_int8_t[top] )
        {
            int i8 = x264_scan8[0] - 8;
            int nnz_top0 = M16( &nnz[top][8] ) | M16( &nnz[top][12] );
            int nnz_top1 = M16( &nnz[top][10] ) | M16( &nnz[top][14] );
            M16( &h->mb.cache.non_zero_count[i8+0] ) = nnz_top0 ? 0x0101 : 0;
            M16( &h->mb.cache.non_zero_count[i8+2] ) = nnz_top1 ? 0x0101 : 0;
        }

        if( h->mb.i_neighbour & MB_LEFT )
        {
            int i8 = x264_scan8[0] - 1;
            if( h->mb.mb_transform_size.t_int8_t[left[0]] )
            {
                int nnz_left0 = M16( &nnz[left[0]][2] ) | M16( &nnz[left[0]][6] );
                h->mb.cache.non_zero_count[i8+8*0] = !!nnz_left0;
                h->mb.cache.non_zero_count[i8+8*1] = !!nnz_left0;
            }
            if( h->mb.mb_transform_size.t_int8_t[left[1]] )
            {
                int nnz_left1 = M16( &nnz[left[1]][10] ) | M16( &nnz[left[1]][14] );
                h->mb.cache.non_zero_count[i8+8*2] = !!nnz_left1;
                h->mb.cache.non_zero_count[i8+8*3] = !!nnz_left1;
            }
        }

        if( h->mb.b_transform_8x8 )
        {
            int nnz0 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
            int nnz1 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 4]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[ 6]] );
            int nnz2 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[10]] );
            int nnz3 = M16( &h->mb.cache.non_zero_count[x264_scan8[12]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[14]] );
            uint32_t nnztop = pack16to32( !!nnz0, !!nnz1 ) * 0x0101;
            uint32_t nnzbot = pack16to32( !!nnz2, !!nnz3 ) * 0x0101;

            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*0] ) = nnztop;
            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*1] ) = nnztop;
            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*2] ) = nnzbot;
            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*3] ) = nnzbot;
        }
    }

    h->loopf.deblock_strength( h->mb.cache.non_zero_count, h->mb.cache.ref, h->mb.cache.mv,
                               bs, 4 >> MB_INTERLACED, h->sh.i_type == SLICE_TYPE_B );

    if( SLICE_MBAFF )
        x264_macroblock_deblock_strength_mbaff( h, bs );
}

static void ALWAYS_INLINE x264_macroblock_store_pic( x264_t *h, int mb_x, int mb_y, int i, int b_chroma, int b_mbaff )
{
    int height = b_chroma ? 16>>CHROMA_V_SHIFT : 16;
    int i_stride = h->fdec->i_stride[i];
    int i_stride2 = i_stride << (b_mbaff && MB_INTERLACED);
    int i_pix_offset = (b_mbaff && MB_INTERLACED)
                     ? 16 * mb_x + height * (mb_y&~1) * i_stride + (mb_y&1) * i_stride
                     : 16 * mb_x + height * mb_y * i_stride;
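    /* Note (added): in MBAFF field mode the two fields are interleaved
     * row-by-row in the frame buffer, so rows of one field sit i_stride*2
     * apart and the bottom field starts one frame row down, hence the
     * doubled stride and the extra (mb_y&1)*i_stride term above. */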
    if( b_chroma )
        h->mc.store_interleave_chroma( &h->fdec->plane[1][i_pix_offset], i_stride2, h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], height );
    else
        h->mc.copy[PIXEL_16x16]( &h->fdec->plane[i][i_pix_offset], i_stride2, h->mb.pic.p_fdec[i], FDEC_STRIDE, 16 );
}

static void ALWAYS_INLINE x264_macroblock_backup_intra( x264_t *h, int mb_x, int mb_y, int b_mbaff )
{
    /* In MBAFF we store the last two rows in intra_border_backup[0] and [1].
     * For progressive mbs this is the bottom two rows, and for interlaced the
     * bottom row of each field. We also store samples needed for the next
     * mbpair in intra_border_backup[2]. */
    int backup_dst = !b_mbaff ? (mb_y&1) : (mb_y&1) ? 1 : MB_INTERLACED ? 0 : 2;
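    /* Note (added): decoded, the ternary means: progressive encodes alternate
     * slots 0/1 by macroblock row; in MBAFF the bottom macroblock of a pair
     * uses slot 1, and the top macroblock uses slot 0 if interlaced, else 2. */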
    memcpy( &h->intra_border_backup[backup_dst][0][mb_x*16  ], h->mb.pic.p_fdec[0]+FDEC_STRIDE*15, 16*sizeof(pixel) );
    if( CHROMA444 )
    {
        memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16  ], h->mb.pic.p_fdec[1]+FDEC_STRIDE*15, 16*sizeof(pixel) );
        memcpy( &h->intra_border_backup[backup_dst][2][mb_x*16  ], h->mb.pic.p_fdec[2]+FDEC_STRIDE*15, 16*sizeof(pixel) );
    }
    else
    {
        int backup_src = (15>>CHROMA_V_SHIFT) * FDEC_STRIDE;
        memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16  ], h->mb.pic.p_fdec[1]+backup_src, 8*sizeof(pixel) );
        memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16+8], h->mb.pic.p_fdec[2]+backup_src, 8*sizeof(pixel) );
    }
    if( b_mbaff )
    {
        if( mb_y&1 )
        {
            int backup_src = (MB_INTERLACED ? 7 : 14) * FDEC_STRIDE;
            backup_dst = MB_INTERLACED ? 2 : 0;
            memcpy( &h->intra_border_backup[backup_dst][0][mb_x*16  ], h->mb.pic.p_fdec[0]+backup_src, 16*sizeof(pixel) );
            if( CHROMA444 )
            {
                memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16  ], h->mb.pic.p_fdec[1]+backup_src, 16*sizeof(pixel) );
                memcpy( &h->intra_border_backup[backup_dst][2][mb_x*16  ], h->mb.pic.p_fdec[2]+backup_src, 16*sizeof(pixel) );
            }
            else
            {
                if( CHROMA_FORMAT == CHROMA_420 )
                    backup_src = (MB_INTERLACED ? 3 : 6) * FDEC_STRIDE;
                memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16  ], h->mb.pic.p_fdec[1]+backup_src,  8*sizeof(pixel) );
                memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16+8], h->mb.pic.p_fdec[2]+backup_src,  8*sizeof(pixel) );
            }
        }
    }
}

void x264_macroblock_cache_save( x264_t *h )
{
    const int i_mb_xy = h->mb.i_mb_xy;
    const int i_mb_type = x264_mb_type_fix[h->mb.i_type];
    const int s8x8 = h->mb.i_b8_stride;
    const int s4x4 = h->mb.i_b4_stride;
    const int i_mb_4x4 = h->mb.i_b4_xy;
    const int i_mb_8x8 = h->mb.i_b8_xy;

    /* GCC pessimizes direct stores to heap-allocated arrays due to aliasing;
     * by only dereferencing them once, we avoid this issue. */
    int8_t *i4x4 = h->mb.intra4x4_pred_mode.t_int8_t_array[i_mb_xy];
    uint8_t *nnz = h->mb.non_zero_count.t_uint8_t_array[i_mb_xy];

    if( SLICE_MBAFF )
    {
        x264_macroblock_backup_intra( h, h->mb.i_mb_x, h->mb.i_mb_y, 1 );
        x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 0, 0, 1 );
        if( CHROMA444 )
        {
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 0, 1 );
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 2, 0, 1 );
        }
        else
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 1, 1 );
    }
    else
    {
        x264_macroblock_backup_intra( h, h->mb.i_mb_x, h->mb.i_mb_y, 0 );
        x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 0, 0, 0 );
        if( CHROMA444 )
        {
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 0, 0 );
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 2, 0, 0 );
        }
        else
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 1, 0 );
    }

    x264_prefetch_fenc( h, h->fdec, h->mb.i_mb_x, h->mb.i_mb_y );

    h->mb.type[i_mb_xy] = i_mb_type;
    h->mb.slice_table.t_uint16_t[i_mb_xy] = h->sh.i_first_mb;
    h->mb.partition[i_mb_xy] = IS_INTRA( i_mb_type ) ? D_16x16 : h->mb.i_partition;
    h->mb.i_mb_prev_xy = i_mb_xy;

    /* save intra4x4 */
    if( i_mb_type == I_4x4 )
    {
        CP32( &i4x4[0], &h->mb.cache.intra4x4_pred_mode[x264_scan8[10]] );
        M32( &i4x4[4] ) = pack8to32( h->mb.cache.intra4x4_pred_mode[x264_scan8[5]],
                                     h->mb.cache.intra4x4_pred_mode[x264_scan8[7]],
                                     h->mb.cache.intra4x4_pred_mode[x264_scan8[13]], 0 );
    }
    else if( !h->param.b_constrained_intra || IS_INTRA(i_mb_type) )
        M64( i4x4 ) = I_PRED_4x4_DC * ULLN(0x0101010101010101);
    else
        M64( i4x4 ) = (uint8_t)(-1) * ULLN(0x0101010101010101);

    if( i_mb_type == I_PCM )
    {
        int i;

        h->mb.qp.t_int8_t[i_mb_xy] = 0;
        h->mb.i_last_dqp = 0;
        h->mb.i_cbp_chroma = CHROMA444 ? 0 : 2;
        h->mb.i_cbp_luma = 0xf;
        h->mb.cbp.t_int16_t[i_mb_xy] = (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma | 0x700;
        h->mb.b_transform_8x8 = 0;
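        /* Note (added): CAVLC codes nnz explicitly through coeff_token, so a
         * PCM block behaves as if all 16 coefficients were present; CABAC
         * only needs a nonzero flag, so 1 suffices. */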
        for( i = 0; i < 48; i++ )
            h->mb.cache.non_zero_count[x264_scan8[i]] = h->param.b_cabac ? 1 : 16;
    }
    else
    {
        if( h->mb.i_type != I_16x16 && h->mb.i_cbp_luma == 0 && h->mb.i_cbp_chroma == 0 )
            h->mb.i_qp = h->mb.i_last_qp;
        h->mb.qp.t_int8_t[i_mb_xy] = h->mb.i_qp;
        h->mb.i_last_dqp = h->mb.i_qp - h->mb.i_last_qp;
        h->mb.i_last_qp = h->mb.i_qp;
    }

    /* save non zero count */
    CP32( &nnz[ 0+0*4], &h->mb.cache.non_zero_count[x264_scan8[ 0]] );
    CP32( &nnz[ 0+1*4], &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
    CP32( &nnz[ 0+2*4], &h->mb.cache.non_zero_count[x264_scan8[ 8]] );
    CP32( &nnz[ 0+3*4], &h->mb.cache.non_zero_count[x264_scan8[10]] );
    CP32( &nnz[16+0*4], &h->mb.cache.non_zero_count[x264_scan8[16+0]] );
    CP32( &nnz[16+1*4], &h->mb.cache.non_zero_count[x264_scan8[16+2]] );
    CP32( &nnz[32+0*4], &h->mb.cache.non_zero_count[x264_scan8[32+0]] );
    CP32( &nnz[32+1*4], &h->mb.cache.non_zero_count[x264_scan8[32+2]] );
    if( CHROMA_FORMAT >= CHROMA_422 )
    {
        CP32( &nnz[16+2*4], &h->mb.cache.non_zero_count[x264_scan8[16+ 8]] );
        CP32( &nnz[16+3*4], &h->mb.cache.non_zero_count[x264_scan8[16+10]] );
        CP32( &nnz[32+2*4], &h->mb.cache.non_zero_count[x264_scan8[32+ 8]] );
        CP32( &nnz[32+3*4], &h->mb.cache.non_zero_count[x264_scan8[32+10]] );
    }

    if( h->mb.i_cbp_luma == 0 && h->mb.i_type != I_8x8 )
        h->mb.b_transform_8x8 = 0;
    h->mb.mb_transform_size.t_int8_t[i_mb_xy] = h->mb.b_transform_8x8;

    if( h->sh.i_type != SLICE_TYPE_I )
    {
        int16_t (*mv0)[2] = &h->mb.mv[0][i_mb_4x4];
        int16_t (*mv1)[2] = &h->mb.mv[1][i_mb_4x4];
        int8_t *ref0 = &h->mb.ref[0][i_mb_8x8];
        int8_t *ref1 = &h->mb.ref[1][i_mb_8x8];
        if( !IS_INTRA( i_mb_type ) )
        {
            ref0[0+0*s8x8] = h->mb.cache.ref[0][x264_scan8[0]];
            ref0[1+0*s8x8] = h->mb.cache.ref[0][x264_scan8[4]];
            ref0[0+1*s8x8] = h->mb.cache.ref[0][x264_scan8[8]];
            ref0[1+1*s8x8] = h->mb.cache.ref[0][x264_scan8[12]];
            CP128( &mv0[0*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*0] );
            CP128( &mv0[1*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*1] );
            CP128( &mv0[2*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*2] );
            CP128( &mv0[3*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*3] );
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                ref1[0+0*s8x8] = h->mb.cache.ref[1][x264_scan8[0]];
                ref1[1+0*s8x8] = h->mb.cache.ref[1][x264_scan8[4]];
                ref1[0+1*s8x8] = h->mb.cache.ref[1][x264_scan8[8]];
                ref1[1+1*s8x8] = h->mb.cache.ref[1][x264_scan8[12]];
                CP128( &mv1[0*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*0] );
                CP128( &mv1[1*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*1] );
                CP128( &mv1[2*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*2] );
                CP128( &mv1[3*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*3] );
            }
        }
        else
        {
            M16( &ref0[0*s8x8] ) = (uint8_t)(-1) * 0x0101;
            M16( &ref0[1*s8x8] ) = (uint8_t)(-1) * 0x0101;
            M128( &mv0[0*s4x4] ) = M128_ZERO;
            M128( &mv0[1*s4x4] ) = M128_ZERO;
            M128( &mv0[2*s4x4] ) = M128_ZERO;
            M128( &mv0[3*s4x4] ) = M128_ZERO;
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                M16( &ref1[0*s8x8] ) = (uint8_t)(-1) * 0x0101;
                M16( &ref1[1*s8x8] ) = (uint8_t)(-1) * 0x0101;
                M128( &mv1[0*s4x4] ) = M128_ZERO;
                M128( &mv1[1*s4x4] ) = M128_ZERO;
                M128( &mv1[2*s4x4] ) = M128_ZERO;
                M128( &mv1[3*s4x4] ) = M128_ZERO;
            }
        }
    }

    if( h->param.b_cabac )
    {
        uint8_t (*mvd0)[2] = h->mb.mvd[0][i_mb_xy];
        uint8_t (*mvd1)[2] = h->mb.mvd[1][i_mb_xy];
        if( IS_INTRA(i_mb_type) && i_mb_type != I_PCM )
            h->mb.chroma_pred_mode.t_int8_t[i_mb_xy] = x264_mb_chroma_pred_mode_fix[h->mb.i_chroma_pred_mode];
        else
            h->mb.chroma_pred_mode.t_int8_t[i_mb_xy] = I_PRED_CHROMA_DC;

        if( (0x3FF30 >> i_mb_type) & 1 ) /* !INTRA && !SKIP && !DIRECT */
        {
            CP64( mvd0[0], h->mb.cache.mvd[0][x264_scan8[10]] );
            CP16( mvd0[4], h->mb.cache.mvd[0][x264_scan8[5 ]] );
            CP16( mvd0[5], h->mb.cache.mvd[0][x264_scan8[7 ]] );
            CP16( mvd0[6], h->mb.cache.mvd[0][x264_scan8[13]] );
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                CP64( mvd1[0], h->mb.cache.mvd[1][x264_scan8[10]] );
                CP16( mvd1[4], h->mb.cache.mvd[1][x264_scan8[5 ]] );
                CP16( mvd1[5], h->mb.cache.mvd[1][x264_scan8[7 ]] );
                CP16( mvd1[6], h->mb.cache.mvd[1][x264_scan8[13]] );
            }
        }
        else
        {
            M128( mvd0[0] ) = M128_ZERO;
            if( h->sh.i_type == SLICE_TYPE_B )
                M128( mvd1[0] ) = M128_ZERO;
        }

        if( h->sh.i_type == SLICE_TYPE_B )
        {
            if( i_mb_type == B_SKIP || i_mb_type == B_DIRECT )
                h->mb.skipbp.t_int8_t[i_mb_xy] = 0xf;
            else if( i_mb_type == B_8x8 )
            {
                int skipbp = ( h->mb.i_sub_partition[0] == D_DIRECT_8x8 ) << 0;
                skipbp    |= ( h->mb.i_sub_partition[1] == D_DIRECT_8x8 ) << 1;
                skipbp    |= ( h->mb.i_sub_partition[2] == D_DIRECT_8x8 ) << 2;
                skipbp    |= ( h->mb.i_sub_partition[3] == D_DIRECT_8x8 ) << 3;
                h->mb.skipbp.t_int8_t[i_mb_xy] = skipbp;
            }
            else
                h->mb.skipbp.t_int8_t[i_mb_xy] = 0;
        }
    }
}

void x264_macroblock_bipred_init( x264_t *h )
{
    int mbfield;
    int field;
    int i_ref0;

    for( mbfield = 0; mbfield <= SLICE_MBAFF; mbfield++ )
        for( field = 0; field <= SLICE_MBAFF; field++ )
            for( i_ref0 = 0; i_ref0 < (h->i_ref[0]<<mbfield); i_ref0++ )
            {
                x264_frame_t *l0 = h->fref[0][i_ref0>>mbfield];
                int poc0 = l0->i_poc + mbfield*l0->i_delta_poc[field^(i_ref0&1)];
                int i_ref1;
                for( i_ref1 = 0; i_ref1 < (h->i_ref[1]<<mbfield); i_ref1++ )
                {
                    int dist_scale_factor;
                    x264_frame_t *l1 = h->fref[1][i_ref1>>mbfield];
                    int cur_poc = h->fdec->i_poc + mbfield*h->fdec->i_delta_poc[field];
                    int poc1 = l1->i_poc + mbfield*l1->i_delta_poc[field^(i_ref1&1)];
                    int td = x264_clip3( poc1 - poc0, -128, 127 );
                    if( td == 0 /* || pic0 is a long-term ref */ )
                        dist_scale_factor = 256;
                    else
                    {
                        int tb = x264_clip3( cur_poc - poc0, -128, 127 );
                        int tx = (16384 + (abs(td) >> 1)) / td;
                        dist_scale_factor = x264_clip3( (tb * tx + 32) >> 6, -1024, 1023 );
                    }
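
                    /* Worked example (added): with poc0=0, poc1=4, cur_poc=2
                     * we get td=4, tb=2, tx=(16384+2)/4=4096 and
                     * dist_scale_factor=(2*4096+32)>>6=128, the neutral value:
                     * below it maps to a bipred weight of 64-(128>>2)=32,
                     * i.e. equal weighting for a temporally centered B-frame. */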

                    h->mb.dist_scale_factor_buf[mbfield][field][i_ref0][i_ref1] = dist_scale_factor;

                    dist_scale_factor >>= 2;
                    if( h->param.analyse.b_weighted_bipred
                          && dist_scale_factor >= -64
                          && dist_scale_factor <= 128 )
                    {
                        h->mb.bipred_weight_buf[mbfield][field][i_ref0][i_ref1] = 64 - dist_scale_factor;
                        // The SSSE3 implementation of biweight doesn't support the extrema;
                        // if we ever generate them, we'll have to drop that optimization.
                        assert( dist_scale_factor >= -63 && dist_scale_factor <= 127 );
                    }
                    else
                        h->mb.bipred_weight_buf[mbfield][field][i_ref0][i_ref1] = 32;
                }
            }
}