/*****************************************************************************
 * macroblock.c: macroblock common functions
 *****************************************************************************
 * Copyright (C) 2003-2021 x264 project
 *
 * Authors: Fiona Glaser <fiona@x264.com>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Henrik Gramner <henrik@gramner.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#define MC_LUMA(list,p) \
    h->mc.mc_luma( &h->mb.pic.p_fdec[p][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, \
                   &h->mb.pic.p_fref[list][i_ref][p*4], h->mb.pic.i_stride[p], \
                   mvx, mvy, 4*width, 4*height, \
                   list ? x264_weight_none : &h->sh.weight[i_ref][p] );

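/* The three mb_mc_*xywh helpers below motion-compensate one partition:
 * list 0 only, list 1 only, or bidirectional.  (x,y) is the partition's
 * position and width/height its size, all in 4x4-block units, so e.g.
 * mb_mc_0xywh( h, 0, 0, 4, 4 ) predicts the whole 16x16 macroblock from
 * list 0.  Note that explicit weighted prediction only applies to list 0:
 * MC_LUMA passes x264_weight_none whenever list != 0. */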
static NOINLINE void mb_mc_0xywh( x264_t *h, int x, int y, int width, int height )
{
    int i8 = x264_scan8[0]+x+8*y;
    int i_ref = h->mb.cache.ref[0][i8];
    int mvx = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvy = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;

    MC_LUMA( 0, 0 );

    if( CHROMA444 )
    {
        MC_LUMA( 0, 1 );
        MC_LUMA( 0, 2 );
    }
    else if( CHROMA_FORMAT )
    {
        int v_shift = CHROMA_V_SHIFT;
        // Chroma in 4:2:0 is offset if MCing from a field of opposite parity
        if( v_shift & MB_INTERLACED & i_ref )
            mvy += (h->mb.i_mb_y & 1)*4 - 2;

        int offset = (4*FDEC_STRIDE>>v_shift)*y + 2*x;
        height = 4*height >> v_shift;

        h->mc.mc_chroma( &h->mb.pic.p_fdec[1][offset],
                         &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE,
                         h->mb.pic.p_fref[0][i_ref][4], h->mb.pic.i_stride[1],
                         mvx, 2*mvy>>v_shift, 2*width, height );

        if( h->sh.weight[i_ref][1].weightfn )
            h->sh.weight[i_ref][1].weightfn[width>>1]( &h->mb.pic.p_fdec[1][offset], FDEC_STRIDE,
                                                       &h->mb.pic.p_fdec[1][offset], FDEC_STRIDE,
                                                       &h->sh.weight[i_ref][1], height );
        if( h->sh.weight[i_ref][2].weightfn )
            h->sh.weight[i_ref][2].weightfn[width>>1]( &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE,
                                                       &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE,
                                                       &h->sh.weight[i_ref][2], height );
    }
}
static NOINLINE void mb_mc_1xywh( x264_t *h, int x, int y, int width, int height )
{
    int i8 = x264_scan8[0]+x+8*y;
    int i_ref = h->mb.cache.ref[1][i8];
    int mvx = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvy = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;

    MC_LUMA( 1, 0 );

    if( CHROMA444 )
    {
        MC_LUMA( 1, 1 );
        MC_LUMA( 1, 2 );
    }
    else if( CHROMA_FORMAT )
    {
        int v_shift = CHROMA_V_SHIFT;
        if( v_shift & MB_INTERLACED & i_ref )
            mvy += (h->mb.i_mb_y & 1)*4 - 2;

        int offset = (4*FDEC_STRIDE>>v_shift)*y + 2*x;
        h->mc.mc_chroma( &h->mb.pic.p_fdec[1][offset],
                         &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE,
                         h->mb.pic.p_fref[1][i_ref][4], h->mb.pic.i_stride[1],
                         mvx, 2*mvy>>v_shift, 2*width, 4*height>>v_shift );
    }
}

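/* Bidirectional variant: get_ref fetches each prediction either directly
 * from the reference plane or, for subpel positions, into the tmp* buffer,
 * returning the pointer and stride actually used; the two predictions are
 * then combined with the (possibly distance-weighted) average. */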
#define MC_LUMA_BI(p) \
    src0 = h->mc.get_ref( tmp0, &i_stride0, &h->mb.pic.p_fref[0][i_ref0][p*4], h->mb.pic.i_stride[p], \
                          mvx0, mvy0, 4*width, 4*height, x264_weight_none ); \
    src1 = h->mc.get_ref( tmp1, &i_stride1, &h->mb.pic.p_fref[1][i_ref1][p*4], h->mb.pic.i_stride[p], \
                          mvx1, mvy1, 4*width, 4*height, x264_weight_none ); \
    h->mc.avg[i_mode]( &h->mb.pic.p_fdec[p][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, \
                       src0, i_stride0, src1, i_stride1, weight );

static NOINLINE void mb_mc_01xywh( x264_t *h, int x, int y, int width, int height )
{
    int i8 = x264_scan8[0]+x+8*y;
    int i_ref0 = h->mb.cache.ref[0][i8];
    int i_ref1 = h->mb.cache.ref[1][i8];
    int weight = h->mb.bipred_weight[i_ref0][i_ref1];
    int mvx0 = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvx1 = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvy0 = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
    int mvy1 = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
    int i_mode = x264_size2pixel[height][width];
    intptr_t i_stride0 = 16, i_stride1 = 16;
    ALIGNED_ARRAY_32( pixel, tmp0,[16*16] );
    ALIGNED_ARRAY_32( pixel, tmp1,[16*16] );
    pixel *src0, *src1;

    MC_LUMA_BI( 0 );

    if( CHROMA444 )
    {
        MC_LUMA_BI( 1 );
        MC_LUMA_BI( 2 );
    }
    else if( CHROMA_FORMAT )
    {
        int v_shift = CHROMA_V_SHIFT;
        if( v_shift & MB_INTERLACED & i_ref0 )
            mvy0 += (h->mb.i_mb_y & 1)*4 - 2;
        if( v_shift & MB_INTERLACED & i_ref1 )
            mvy1 += (h->mb.i_mb_y & 1)*4 - 2;

        h->mc.mc_chroma( tmp0, tmp0+8, 16, h->mb.pic.p_fref[0][i_ref0][4], h->mb.pic.i_stride[1],
                         mvx0, 2*mvy0>>v_shift, 2*width, 4*height>>v_shift );
        h->mc.mc_chroma( tmp1, tmp1+8, 16, h->mb.pic.p_fref[1][i_ref1][4], h->mb.pic.i_stride[1],
                         mvx1, 2*mvy1>>v_shift, 2*width, 4*height>>v_shift );

        int chromapix = h->luma2chroma_pixel[i_mode];
        int offset = (4*FDEC_STRIDE>>v_shift)*y + 2*x;
        h->mc.avg[chromapix]( &h->mb.pic.p_fdec[1][offset], FDEC_STRIDE, tmp0, 16, tmp1, 16, weight );
        h->mc.avg[chromapix]( &h->mb.pic.p_fdec[2][offset], FDEC_STRIDE, tmp0+8, 16, tmp1+8, 16, weight );
    }
}

#undef MC_LUMA
#undef MC_LUMA_BI

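/* Motion-compensate one 8x8 block.  i8 is the raster index of the 8x8 block
 * within the macroblock, so x = 2*(i8&1) and y = 2*(i8>>1) give its position
 * in 4x4 units; e.g. i8 == 3 yields (x,y) == (2,2), the bottom-right block.
 * P slices dispatch on the sub-partition shape, B slices on which reference
 * lists the block actually uses. */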
void x264_mb_mc_8x8( x264_t *h, int i8 )
{
    int x = 2*(i8&1);
    int y = 2*(i8>>1);

    if( h->sh.i_type == SLICE_TYPE_P )
    {
        switch( h->mb.i_sub_partition[i8] )
        {
            case D_L0_8x8:
                mb_mc_0xywh( h, x, y, 2, 2 );
                break;
            case D_L0_8x4:
                mb_mc_0xywh( h, x, y+0, 2, 1 );
                mb_mc_0xywh( h, x, y+1, 2, 1 );
                break;
            case D_L0_4x8:
                mb_mc_0xywh( h, x+0, y, 1, 2 );
                mb_mc_0xywh( h, x+1, y, 1, 2 );
                break;
            case D_L0_4x4:
                mb_mc_0xywh( h, x+0, y+0, 1, 1 );
                mb_mc_0xywh( h, x+1, y+0, 1, 1 );
                mb_mc_0xywh( h, x+0, y+1, 1, 1 );
                mb_mc_0xywh( h, x+1, y+1, 1, 1 );
                break;
        }
    }
    else
    {
        int scan8 = x264_scan8[0] + x + 8*y;

        if( h->mb.cache.ref[0][scan8] >= 0 )
            if( h->mb.cache.ref[1][scan8] >= 0 )
                mb_mc_01xywh( h, x, y, 2, 2 );
            else
                mb_mc_0xywh( h, x, y, 2, 2 );
        else
            mb_mc_1xywh( h, x, y, 2, 2 );
    }
}

void x264_mb_mc( x264_t *h )
{
    if( h->mb.i_partition == D_8x8 )
    {
        for( int i = 0; i < 4; i++ )
            x264_mb_mc_8x8( h, i );
    }
    else
    {
        int ref0a = h->mb.cache.ref[0][x264_scan8[ 0]];
        int ref0b = h->mb.cache.ref[0][x264_scan8[12]];
        int ref1a = h->mb.cache.ref[1][x264_scan8[ 0]];
        int ref1b = h->mb.cache.ref[1][x264_scan8[12]];

        if( h->mb.i_partition == D_16x16 )
        {
            if( ref0a >= 0 )
                if( ref1a >= 0 ) mb_mc_01xywh( h, 0, 0, 4, 4 );
                else             mb_mc_0xywh ( h, 0, 0, 4, 4 );
            else                 mb_mc_1xywh ( h, 0, 0, 4, 4 );
        }
        else if( h->mb.i_partition == D_16x8 )
        {
            if( ref0a >= 0 )
                if( ref1a >= 0 ) mb_mc_01xywh( h, 0, 0, 4, 2 );
                else             mb_mc_0xywh ( h, 0, 0, 4, 2 );
            else                 mb_mc_1xywh ( h, 0, 0, 4, 2 );

            if( ref0b >= 0 )
                if( ref1b >= 0 ) mb_mc_01xywh( h, 0, 2, 4, 2 );
                else             mb_mc_0xywh ( h, 0, 2, 4, 2 );
            else                 mb_mc_1xywh ( h, 0, 2, 4, 2 );
        }
        else if( h->mb.i_partition == D_8x16 )
        {
            if( ref0a >= 0 )
                if( ref1a >= 0 ) mb_mc_01xywh( h, 0, 0, 2, 4 );
                else             mb_mc_0xywh ( h, 0, 0, 2, 4 );
            else                 mb_mc_1xywh ( h, 0, 0, 2, 4 );

            if( ref0b >= 0 )
                if( ref1b >= 0 ) mb_mc_01xywh( h, 2, 0, 2, 4 );
                else             mb_mc_0xywh ( h, 2, 0, 2, 4 );
            else                 mb_mc_1xywh ( h, 2, 0, 2, 4 );
        }
    }
}

int x264_macroblock_cache_allocate( x264_t *h )
{
    int i_mb_count = h->mb.i_mb_count;

    h->mb.i_mb_stride = h->mb.i_mb_width;
    h->mb.i_b8_stride = h->mb.i_mb_width * 2;
    h->mb.i_b4_stride = h->mb.i_mb_width * 4;

    h->mb.b_interlaced = PARAM_INTERLACED;

    PREALLOC_INIT

    PREALLOC( h->mb.qp, i_mb_count * sizeof(int8_t) );
    PREALLOC( h->mb.cbp, i_mb_count * sizeof(int16_t) );
    PREALLOC( h->mb.mb_transform_size, i_mb_count * sizeof(int8_t) );
    PREALLOC( h->mb.slice_table, i_mb_count * sizeof(int32_t) );

    /* 0 -> 3 top(4), 4 -> 6 : left(3) */
    PREALLOC( h->mb.intra4x4_pred_mode, i_mb_count * 8 * sizeof(int8_t) );

    /* all coeffs */
    PREALLOC( h->mb.non_zero_count, i_mb_count * 48 * sizeof(uint8_t) );

    if( h->param.b_cabac )
    {
        PREALLOC( h->mb.skipbp, i_mb_count * sizeof(int8_t) );
        PREALLOC( h->mb.chroma_pred_mode, i_mb_count * sizeof(int8_t) );
        PREALLOC( h->mb.mvd[0], i_mb_count * sizeof( **h->mb.mvd ) );
        if( h->param.i_bframe )
            PREALLOC( h->mb.mvd[1], i_mb_count * sizeof( **h->mb.mvd ) );
    }

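    /* mvr stores one 16x16 mv per macroblock, per list and reference.  Each
     * array is allocated with one extra entry: after PREALLOC_END the
     * pointers are bumped by one mv (see below) so that index -1 is valid
     * and reads back as a zero motion vector. */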
    for( int i = 0; i < 2; i++ )
    {
        int i_refs = X264_MIN(X264_REF_MAX, (i ? 1 + !!h->param.i_bframe_pyramid : h->param.i_frame_reference) ) << PARAM_INTERLACED;
        if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
            i_refs = X264_MIN(X264_REF_MAX, i_refs + 1 + (BIT_DEPTH == 8)); //smart weights add two duplicate frames, one in >8-bit

        for( int j = !i; j < i_refs; j++ )
            PREALLOC( h->mb.mvr[i][j], 2 * (i_mb_count + 1) * sizeof(int16_t) );
    }

    if( h->param.analyse.i_weighted_pred )
    {
        int i_padv = PADV << PARAM_INTERLACED;
        int luma_plane_size = 0;
        int numweightbuf;

        if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_FAKE )
        {
            // only need buffer for lookahead
            if( !h->param.i_sync_lookahead || h == h->thread[h->param.i_threads] )
            {
                // Fake analysis only works on lowres
                luma_plane_size = h->fdec->i_stride_lowres * (h->mb.i_mb_height*8+2*i_padv);
                // Only need 1 buffer for analysis
                numweightbuf = 1;
            }
            else
                numweightbuf = 0;
        }
        else
        {
            /* Both ref and fenc are stored for 4:2:0 and 4:2:2, which means that 4:2:0
             * and 4:4:4 need the same amount of space and 4:2:2 needs twice as much. */
            luma_plane_size = h->fdec->i_stride[0] * (h->mb.i_mb_height*(16<<(CHROMA_FORMAT==CHROMA_422))+2*i_padv);

            if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
                //smart can weight one ref and one offset -1 in 8-bit
                numweightbuf = 1 + (BIT_DEPTH == 8);
            else
                //simple only has one weighted ref
                numweightbuf = 1;
        }

        for( int i = 0; i < numweightbuf; i++ )
            PREALLOC( h->mb.p_weight_buf[i], luma_plane_size * SIZEOF_PIXEL );
    }

    PREALLOC_END( h->mb.base );

    memset( h->mb.slice_table, -1, i_mb_count * sizeof(int32_t) );

    for( int i = 0; i < 2; i++ )
    {
        int i_refs = X264_MIN(X264_REF_MAX, (i ? 1 + !!h->param.i_bframe_pyramid : h->param.i_frame_reference) ) << PARAM_INTERLACED;
        if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
            i_refs = X264_MIN(X264_REF_MAX, i_refs + 1 + (BIT_DEPTH == 8)); //smart weights add two duplicate frames, one in >8-bit

        for( int j = !i; j < i_refs; j++ )
        {
            M32( h->mb.mvr[i][j][0] ) = 0;
            h->mb.mvr[i][j]++;
        }
    }

    return 0;
fail:
    return -1;
}
void x264_macroblock_cache_free( x264_t *h )
{
    x264_free( h->mb.base );
}

int x264_macroblock_thread_allocate( x264_t *h, int b_lookahead )
{
    if( !b_lookahead )
    {
        for( int i = 0; i < (PARAM_INTERLACED ? 5 : 2); i++ )
            for( int j = 0; j < (CHROMA444 ? 3 : 2); j++ )
            {
                CHECKED_MALLOC( h->intra_border_backup[i][j], (h->sps->i_mb_width*16+32) * SIZEOF_PIXEL );
                h->intra_border_backup[i][j] += 16;
            }
        for( int i = 0; i <= PARAM_INTERLACED; i++ )
        {
            if( h->param.b_sliced_threads )
            {
                /* Only allocate the first one, and allocate it for the whole frame, because we
                 * won't be deblocking until after the frame is fully encoded. */
                if( h == h->thread[0] && !i )
                    CHECKED_MALLOC( h->deblock_strength[0], sizeof(**h->deblock_strength) * h->mb.i_mb_count );
                else
                    h->deblock_strength[i] = h->thread[0]->deblock_strength[0];
            }
            else
                CHECKED_MALLOC( h->deblock_strength[i], sizeof(**h->deblock_strength) * h->mb.i_mb_width );
            h->deblock_strength[1] = h->deblock_strength[i];
        }
    }

    /* Allocate scratch buffer */
    int scratch_size = 0;
    if( !b_lookahead )
    {
        int buf_hpel = (h->thread[0]->fdec->i_width[0]+48+32) * sizeof(int16_t);
        int buf_ssim = h->param.analyse.b_ssim * 8 * (h->param.i_width/4+3) * sizeof(int);
        int me_range = X264_MIN(h->param.analyse.i_me_range, h->param.analyse.i_mv_range);
        int buf_tesa = (h->param.analyse.i_me_method >= X264_ME_ESA) *
            ((me_range*2+24) * sizeof(int16_t) + (me_range+4) * (me_range+1) * 4 * sizeof(mvsad_t));
        scratch_size = X264_MAX3( buf_hpel, buf_ssim, buf_tesa );
    }
    int buf_mbtree = h->param.rc.b_mb_tree * ((h->mb.i_mb_width+15)&~15) * sizeof(int16_t);
    scratch_size = X264_MAX( scratch_size, buf_mbtree );
    if( scratch_size )
        CHECKED_MALLOC( h->scratch_buffer, scratch_size );
    else
        h->scratch_buffer = NULL;

    int buf_lookahead_threads = (h->mb.i_mb_height + (4 + 32) * h->param.i_lookahead_threads) * sizeof(int) * 2;
    int buf_mbtree2 = buf_mbtree * 12; /* size of the internal propagate_list asm buffer */
    scratch_size = X264_MAX( buf_lookahead_threads, buf_mbtree2 );
    CHECKED_MALLOC( h->scratch_buffer2, scratch_size );

    return 0;
fail:
    return -1;
}

void x264_macroblock_thread_free( x264_t *h, int b_lookahead )
{
    if( !b_lookahead )
    {
        for( int i = 0; i <= PARAM_INTERLACED; i++ )
            if( !h->param.b_sliced_threads || (h == h->thread[0] && !i) )
                x264_free( h->deblock_strength[i] );
        for( int i = 0; i < (PARAM_INTERLACED ? 5 : 2); i++ )
            for( int j = 0; j < (CHROMA444 ? 3 : 2); j++ )
                x264_free( h->intra_border_backup[i][j] - 16 );
    }
    x264_free( h->scratch_buffer );
    x264_free( h->scratch_buffer2 );
}

void x264_macroblock_slice_init( x264_t *h )
{
    h->mb.mv[0] = h->fdec->mv[0];
    h->mb.mv[1] = h->fdec->mv[1];
    h->mb.mvr[0][0] = h->fdec->mv16x16;
    h->mb.ref[0] = h->fdec->ref[0];
    h->mb.ref[1] = h->fdec->ref[1];
    h->mb.type = h->fdec->mb_type;
    h->mb.partition = h->fdec->mb_partition;
    h->mb.field = h->fdec->field;

    h->fdec->i_ref[0] = h->i_ref[0];
    h->fdec->i_ref[1] = h->i_ref[1];
    for( int i = 0; i < h->i_ref[0]; i++ )
        h->fdec->ref_poc[0][i] = h->fref[0][i]->i_poc;
    if( h->sh.i_type == SLICE_TYPE_B )
    {
        for( int i = 0; i < h->i_ref[1]; i++ )
            h->fdec->ref_poc[1][i] = h->fref[1][i]->i_poc;

        map_col_to_list0(-1) = -1;
        map_col_to_list0(-2) = -2;
        for( int i = 0; i < h->fref[1][0]->i_ref[0]; i++ )
        {
            int poc = h->fref[1][0]->ref_poc[0][i];
            map_col_to_list0(i) = -2;
            for( int j = 0; j < h->i_ref[0]; j++ )
                if( h->fref[0][j]->i_poc == poc )
                {
                    map_col_to_list0(i) = j;
                    break;
                }
        }
    }
    else if( h->sh.i_type == SLICE_TYPE_P )
    {
        if( h->sh.i_disable_deblocking_filter_idc != 1 && h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
        {
            deblock_ref_table(-2) = -2;
            deblock_ref_table(-1) = -1;
            for( int i = 0; i < h->i_ref[0] << SLICE_MBAFF; i++ )
            {
                /* Mask off high bits to avoid frame num collisions with -1/-2.
                 * In current x264 frame num values don't cover a range of more
                 * than 32, so 6 bits is enough for uniqueness. */
                if( !MB_INTERLACED )
                    deblock_ref_table(i) = h->fref[0][i]->i_frame_num&63;
                else
                    deblock_ref_table(i) = ((h->fref[0][i>>1]->i_frame_num&63)<<1) + (i&1);
            }
        }
    }

    /* init with not available (for top right idx=7,15) */
    memset( h->mb.cache.ref, -2, sizeof( h->mb.cache.ref ) );

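    /* inv_ref_poc holds a rounded fixed-point reciprocal (in 1/256 units) of
     * the POC distance to the first list-0 reference: (256 + delta/2) / delta
     * rounds 256/delta to nearest, presumably so that later temporal scaling
     * can be done with a multiply and shift instead of a division. */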
    if( h->i_ref[0] > 0 )
        for( int field = 0; field <= SLICE_MBAFF; field++ )
        {
            int curpoc = h->fdec->i_poc + h->fdec->i_delta_poc[field];
            int refpoc = h->fref[0][0]->i_poc + h->fref[0][0]->i_delta_poc[field];
            int delta = curpoc - refpoc;

            h->fdec->inv_ref_poc[field] = (256 + delta/2) / delta;
        }

    h->mb.i_neighbour4[6] =
    h->mb.i_neighbour4[9] =
    h->mb.i_neighbour4[12] =
    h->mb.i_neighbour4[14] = MB_LEFT|MB_TOP|MB_TOPLEFT|MB_TOPRIGHT;
    h->mb.i_neighbour4[3] =
    h->mb.i_neighbour4[7] =
    h->mb.i_neighbour4[11] =
    h->mb.i_neighbour4[13] =
    h->mb.i_neighbour4[15] =
    h->mb.i_neighbour8[3] = MB_LEFT|MB_TOP|MB_TOPLEFT;
}

void x264_macroblock_thread_init( x264_t *h )
{
    h->mb.i_me_method = h->param.analyse.i_me_method;
    h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
    if( h->sh.i_type == SLICE_TYPE_B && (h->mb.i_subpel_refine == 6 || h->mb.i_subpel_refine == 8) )
        h->mb.i_subpel_refine--;
    h->mb.b_chroma_me = h->param.analyse.b_chroma_me &&
                        ((h->sh.i_type == SLICE_TYPE_P && h->mb.i_subpel_refine >= 5) ||
                         (h->sh.i_type == SLICE_TYPE_B && h->mb.i_subpel_refine >= 9));
    h->mb.b_dct_decimate = h->sh.i_type == SLICE_TYPE_B ||
                          (h->param.analyse.b_dct_decimate && h->sh.i_type != SLICE_TYPE_I);
    h->mb.i_mb_prev_xy = -1;

    /*          4:2:0                      4:2:2                      4:4:4
     * fdec            fenc       fdec            fenc       fdec            fenc
     * y y y y y y y   Y Y Y Y    y y y y y y y   Y Y Y Y    y y y y y y y   Y Y Y Y
     * y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y
     * y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y
     * y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y    y Y Y Y Y       Y Y Y Y
     * y Y Y Y Y       U U V V    y Y Y Y Y       U U V V    y Y Y Y Y       U U U U
     * u u u v v v     U U V V    u u u v v v     U U V V    u u u u u u u   U U U U
     * u U U v V V                u U U v V V     U U V V    u U U U U       U U U U
     * u U U v V V                u U U v V V     U U V V    u U U U U       U U U U
     *                            u U U v V V                u U U U U       V V V V
     *                            u U U v V V                u U U U U       V V V V
     *                                                       v v v v v v v   V V V V
     *                                                       v V V V V       V V V V
     *                                                       v V V V V
     *                                                       v V V V V
     *                                                       v V V V V
     */
    h->mb.pic.p_fenc[0] = h->mb.pic.fenc_buf;
    h->mb.pic.p_fdec[0] = h->mb.pic.fdec_buf + 2*FDEC_STRIDE;
    if( CHROMA_FORMAT )
    {
        h->mb.pic.p_fenc[1] = h->mb.pic.fenc_buf + 16*FENC_STRIDE;
        h->mb.pic.p_fdec[1] = h->mb.pic.fdec_buf + 20*FDEC_STRIDE;
        if( CHROMA444 )
        {
            h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 32*FENC_STRIDE;
            h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 38*FDEC_STRIDE;
        }
        else
        {
            h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 16*FENC_STRIDE + 8;
            h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 20*FDEC_STRIDE + 16;
        }
    }
}

void x264_prefetch_fenc( x264_t *h, x264_frame_t *fenc, int i_mb_x, int i_mb_y )
{
    int stride_y  = fenc->i_stride[0];
    int stride_uv = fenc->i_stride[1];
    int off_y  = 16 * i_mb_x + 16 * i_mb_y * stride_y;
    int off_uv = 16 * i_mb_x + (16 * i_mb_y * stride_uv >> CHROMA_V_SHIFT);
    h->mc.prefetch_fenc( fenc->plane[0]+off_y, stride_y,
                         fenc->plane[1] != NULL ? fenc->plane[1]+off_uv : NULL, stride_uv, i_mb_x );
}

NOINLINE void x264_copy_column8( pixel *dst, pixel *src )
{
    // input pointers are offset by 4 rows because that's faster (smaller instruction size on x86)
    for( int i = -4; i < 4; i++ )
        dst[i*FDEC_STRIDE] = src[i*FDEC_STRIDE];
}

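/* Load the per-plane pixel pointers for one macroblock: copy the encode
 * source into the fenc buffer, restore the intra-prediction border row of
 * the fdec buffer from intra_border_backup, and set up pointers into each
 * reference frame (per-field when b_mbaff and the macroblock is coded
 * interlaced).  b_chroma selects the interleaved-chroma path. */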
static ALWAYS_INLINE void macroblock_load_pic_pointers( x264_t *h, int mb_x, int mb_y, int i, int b_chroma, int b_mbaff )
{
    int mb_interlaced = b_mbaff && MB_INTERLACED;
    int height = b_chroma ? 16 >> CHROMA_V_SHIFT : 16;
    int i_stride = h->fdec->i_stride[i];
    int i_stride2 = i_stride << mb_interlaced;
    int i_pix_offset = mb_interlaced
                     ? 16 * mb_x + height * (mb_y&~1) * i_stride + (mb_y&1) * i_stride
                     : 16 * mb_x + height * mb_y * i_stride;
    pixel *plane_fdec = &h->fdec->plane[i][i_pix_offset];
    int fdec_idx = b_mbaff ? (mb_interlaced ? (3 + (mb_y&1)) : (mb_y&1) ? 2 : 4) : !(mb_y&1);
    pixel *intra_fdec = &h->intra_border_backup[fdec_idx][i][mb_x*16];
    int ref_pix_offset[2] = { i_pix_offset, i_pix_offset };
    /* ref_pix_offset[0] references the current field and [1] the opposite field. */
    if( mb_interlaced )
        ref_pix_offset[1] += (1-2*(mb_y&1)) * i_stride;
    h->mb.pic.i_stride[i] = i_stride2;
    h->mb.pic.p_fenc_plane[i] = &h->fenc->plane[i][i_pix_offset];
    if( b_chroma )
    {
        h->mc.load_deinterleave_chroma_fenc( h->mb.pic.p_fenc[1], h->mb.pic.p_fenc_plane[1], i_stride2, height );
        memcpy( h->mb.pic.p_fdec[1]-FDEC_STRIDE, intra_fdec, 8*SIZEOF_PIXEL );
        memcpy( h->mb.pic.p_fdec[2]-FDEC_STRIDE, intra_fdec+8, 8*SIZEOF_PIXEL );
        h->mb.pic.p_fdec[1][-FDEC_STRIDE-1] = intra_fdec[-1-8];
        h->mb.pic.p_fdec[2][-FDEC_STRIDE-1] = intra_fdec[-1];
    }
    else
    {
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fenc[i], FENC_STRIDE, h->mb.pic.p_fenc_plane[i], i_stride2, 16 );
        memcpy( h->mb.pic.p_fdec[i]-FDEC_STRIDE, intra_fdec, 24*SIZEOF_PIXEL );
        h->mb.pic.p_fdec[i][-FDEC_STRIDE-1] = intra_fdec[-1];
    }
    if( b_mbaff || h->mb.b_reencode_mb )
    {
        for( int j = 0; j < height; j++ )
            if( b_chroma )
            {
                h->mb.pic.p_fdec[1][-1+j*FDEC_STRIDE] = plane_fdec[-2+j*i_stride2];
                h->mb.pic.p_fdec[2][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];
            }
            else
                h->mb.pic.p_fdec[i][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];
    }
    pixel *plane_src, **filtered_src;
    for( int j = 0; j < h->mb.pic.i_fref[0]; j++ )
    {
        // Interpolate between pixels in same field.
        if( mb_interlaced )
        {
            plane_src = h->fref[0][j>>1]->plane_fld[i];
            filtered_src = h->fref[0][j>>1]->filtered_fld[i];
        }
        else
        {
            plane_src = h->fref[0][j]->plane[i];
            filtered_src = h->fref[0][j]->filtered[i];
        }
        h->mb.pic.p_fref[0][j][i*4] = plane_src + ref_pix_offset[j&1];

        if( !b_chroma )
        {
            if( h->param.analyse.i_subpel_refine )
                for( int k = 1; k < 4; k++ )
                    h->mb.pic.p_fref[0][j][i*4+k] = filtered_src[k] + ref_pix_offset[j&1];
            if( !i )
            {
                if( h->sh.weight[j][0].weightfn )
                    h->mb.pic.p_fref_w[j] = &h->fenc->weighted[j >> mb_interlaced][ref_pix_offset[j&1]];
                else
                    h->mb.pic.p_fref_w[j] = h->mb.pic.p_fref[0][j][0];
            }
        }
    }
    if( h->sh.i_type == SLICE_TYPE_B )
        for( int j = 0; j < h->mb.pic.i_fref[1]; j++ )
        {
            if( mb_interlaced )
            {
                plane_src = h->fref[1][j>>1]->plane_fld[i];
                filtered_src = h->fref[1][j>>1]->filtered_fld[i];
            }
            else
            {
                plane_src = h->fref[1][j]->plane[i];
                filtered_src = h->fref[1][j]->filtered[i];
            }
            h->mb.pic.p_fref[1][j][i*4] = plane_src + ref_pix_offset[j&1];

            if( !b_chroma && h->param.analyse.i_subpel_refine )
                for( int k = 1; k < 4; k++ )
                    h->mb.pic.p_fref[1][j][i*4+k] = filtered_src[k] + ref_pix_offset[j&1];
        }
}

static const x264_left_table_t left_indices[4] =
{
    /* Current is progressive */
    {{ 4, 4, 5, 5}, { 3,  3,  7,  7}, {16+1, 16+1, 32+1, 32+1}, {0, 0, 1, 1}, {0, 0, 0, 0}},
    {{ 6, 6, 3, 3}, {11, 11, 15, 15}, {16+5, 16+5, 32+5, 32+5}, {2, 2, 3, 3}, {1, 1, 1, 1}},
    /* Current is interlaced */
    {{ 4, 6, 4, 6}, { 3, 11,  3, 11}, {16+1, 16+1, 32+1, 32+1}, {0, 2, 0, 2}, {0, 1, 0, 1}},
    /* Both same */
    {{ 4, 5, 6, 3}, { 3,  7, 11, 15}, {16+1, 16+5, 32+1, 32+5}, {0, 1, 2, 3}, {0, 0, 1, 1}}
};
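
/* Each entry gives, per 4x4/8x8 row, the indices to read from the left
 * neighbour's per-macroblock arrays (in x264_left_table_t field order:
 * intra, nnz, nnz_chroma, mv, ref).  The four tables cover the possible
 * progressive/interlaced pairings of the current macroblock and its left
 * neighbour under MBAFF. */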

static ALWAYS_INLINE void macroblock_cache_load_neighbours( x264_t *h, int mb_x, int mb_y, int b_interlaced )
{
    const int mb_interlaced = b_interlaced && MB_INTERLACED;
    int top_y = mb_y - (1 << mb_interlaced);
    int top = top_y * h->mb.i_mb_stride + mb_x;

    h->mb.i_mb_x = mb_x;
    h->mb.i_mb_y = mb_y;
    h->mb.i_mb_xy = mb_y * h->mb.i_mb_stride + mb_x;
    h->mb.i_b8_xy = 2*(mb_y * h->mb.i_b8_stride + mb_x);
    h->mb.i_b4_xy = 4*(mb_y * h->mb.i_b4_stride + mb_x);
    h->mb.left_b8[0] =
    h->mb.left_b8[1] = -1;
    h->mb.left_b4[0] =
    h->mb.left_b4[1] = -1;
    h->mb.i_neighbour = 0;
    h->mb.i_neighbour_intra = 0;
    h->mb.i_neighbour_frame = 0;
    h->mb.i_mb_top_xy = -1;
    h->mb.i_mb_top_y = -1;
    h->mb.i_mb_left_xy[0] = h->mb.i_mb_left_xy[1] = -1;
    h->mb.i_mb_topleft_xy = -1;
    h->mb.i_mb_topright_xy = -1;
    h->mb.i_mb_type_top = -1;
    h->mb.i_mb_type_left[0] = h->mb.i_mb_type_left[1] = -1;
    h->mb.i_mb_type_topleft = -1;
    h->mb.i_mb_type_topright = -1;
    h->mb.left_index_table = &left_indices[3];
    h->mb.topleft_partition = 0;

    int topleft_y = top_y;
    int topright_y = top_y;
    int left[2];

    left[0] = left[1] = h->mb.i_mb_xy - 1;
    h->mb.left_b8[0] = h->mb.left_b8[1] = h->mb.i_b8_xy - 2;
    h->mb.left_b4[0] = h->mb.left_b4[1] = h->mb.i_b4_xy - 4;

    if( b_interlaced )
    {
        h->mb.i_mb_top_mbpair_xy = h->mb.i_mb_xy - 2*h->mb.i_mb_stride;
        h->mb.i_mb_topleft_y = -1;
        h->mb.i_mb_topright_y = -1;

        if( mb_y&1 )
        {
            if( mb_x && mb_interlaced != h->mb.field[h->mb.i_mb_xy-1] )
            {
                left[0] = left[1] = h->mb.i_mb_xy - 1 - h->mb.i_mb_stride;
                h->mb.left_b8[0] = h->mb.left_b8[1] = h->mb.i_b8_xy - 2 - 2*h->mb.i_b8_stride;
                h->mb.left_b4[0] = h->mb.left_b4[1] = h->mb.i_b4_xy - 4 - 4*h->mb.i_b4_stride;

                if( mb_interlaced )
                {
                    h->mb.left_index_table = &left_indices[2];
                    left[1] += h->mb.i_mb_stride;
                    h->mb.left_b8[1] += 2*h->mb.i_b8_stride;
                    h->mb.left_b4[1] += 4*h->mb.i_b4_stride;
                }
                else
                {
                    h->mb.left_index_table = &left_indices[1];
                    topleft_y++;
                    h->mb.topleft_partition = 1;
                }
            }
            if( !mb_interlaced )
                topright_y = -1;
        }
        else
        {
            if( mb_interlaced && top >= 0 )
            {
                if( !h->mb.field[top] )
                {
                    top += h->mb.i_mb_stride;
                    top_y++;
                }
                if( mb_x )
                    topleft_y += !h->mb.field[h->mb.i_mb_stride*topleft_y + mb_x - 1];
                if( mb_x < h->mb.i_mb_width-1 )
                    topright_y += !h->mb.field[h->mb.i_mb_stride*topright_y + mb_x + 1];
            }
            if( mb_x && mb_interlaced != h->mb.field[h->mb.i_mb_xy-1] )
            {
                if( mb_interlaced )
                {
                    h->mb.left_index_table = &left_indices[2];
                    left[1] += h->mb.i_mb_stride;
                    h->mb.left_b8[1] += 2*h->mb.i_b8_stride;
                    h->mb.left_b4[1] += 4*h->mb.i_b4_stride;
                }
                else
                    h->mb.left_index_table = &left_indices[0];
            }
        }
    }

    if( mb_x > 0 )
    {
        h->mb.i_neighbour_frame |= MB_LEFT;
        h->mb.i_mb_left_xy[0] = left[0];
        h->mb.i_mb_left_xy[1] = left[1];
        h->mb.i_mb_type_left[0] = h->mb.type[h->mb.i_mb_left_xy[0]];
        h->mb.i_mb_type_left[1] = h->mb.type[h->mb.i_mb_left_xy[1]];
        if( h->mb.slice_table[left[0]] == h->sh.i_first_mb )
        {
            h->mb.i_neighbour |= MB_LEFT;

            // FIXME: We don't currently support constrained intra + mbaff.
            if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_left[0] ) )
                h->mb.i_neighbour_intra |= MB_LEFT;
        }
    }

    /* We can't predict from the previous threadslice since it hasn't been encoded yet. */
    if( (h->i_threadslice_start >> mb_interlaced) != (mb_y >> mb_interlaced) )
    {
        if( top >= 0 )
        {
            h->mb.i_neighbour_frame |= MB_TOP;
            h->mb.i_mb_top_xy = top;
            h->mb.i_mb_top_y = top_y;
            h->mb.i_mb_type_top = h->mb.type[h->mb.i_mb_top_xy];
            if( h->mb.slice_table[top] == h->sh.i_first_mb )
            {
                h->mb.i_neighbour |= MB_TOP;

                if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_top ) )
                    h->mb.i_neighbour_intra |= MB_TOP;

                /* We only need to prefetch the top blocks because the left was just written
                 * to as part of the previous cache_save.  Since most target CPUs use write-allocate
                 * caches, left blocks are near-guaranteed to be in L1 cache.  Top--not so much. */
                x264_prefetch( &h->mb.cbp[top] );
                x264_prefetch( h->mb.intra4x4_pred_mode[top] );
                x264_prefetch( &h->mb.non_zero_count[top][12] );
                x264_prefetch( &h->mb.mb_transform_size[top] );
                if( h->param.b_cabac )
                    x264_prefetch( &h->mb.skipbp[top] );
            }
        }

        if( mb_x > 0 && topleft_y >= 0 )
        {
            h->mb.i_neighbour_frame |= MB_TOPLEFT;
            h->mb.i_mb_topleft_xy = h->mb.i_mb_stride*topleft_y + mb_x - 1;
            h->mb.i_mb_topleft_y = topleft_y;
            h->mb.i_mb_type_topleft = h->mb.type[h->mb.i_mb_topleft_xy];
            if( h->mb.slice_table[h->mb.i_mb_topleft_xy] == h->sh.i_first_mb )
            {
                h->mb.i_neighbour |= MB_TOPLEFT;

                if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_topleft ) )
                    h->mb.i_neighbour_intra |= MB_TOPLEFT;
            }
        }

        if( mb_x < h->mb.i_mb_width - 1 && topright_y >= 0 )
        {
            h->mb.i_neighbour_frame |= MB_TOPRIGHT;
            h->mb.i_mb_topright_xy = h->mb.i_mb_stride*topright_y + mb_x + 1;
            h->mb.i_mb_topright_y = topright_y;
            h->mb.i_mb_type_topright = h->mb.type[h->mb.i_mb_topright_xy];
            if( h->mb.slice_table[h->mb.i_mb_topright_xy] == h->sh.i_first_mb )
            {
                h->mb.i_neighbour |= MB_TOPRIGHT;

                if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_topright ) )
                    h->mb.i_neighbour_intra |= MB_TOPRIGHT;
            }
        }
    }
}

#define LTOP 0
#if HAVE_INTERLACED
#   define LBOT 1
#else
#   define LBOT 0
#endif
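
/* LTOP/LBOT index the top and bottom halves of the left-neighbour arrays
 * under MBAFF; when interlaced support is compiled out they collapse to the
 * same index, so the code below degenerates to the progressive case. */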

static ALWAYS_INLINE void macroblock_cache_load( x264_t *h, int mb_x, int mb_y, int b_mbaff )
{
    macroblock_cache_load_neighbours( h, mb_x, mb_y, b_mbaff );

    int *left = h->mb.i_mb_left_xy;
    int top  = h->mb.i_mb_top_xy;
    int top_y = h->mb.i_mb_top_y;
    int s8x8 = h->mb.i_b8_stride;
    int s4x4 = h->mb.i_b4_stride;
    int top_8x8 = (2*top_y+1) * s8x8 + 2*mb_x;
    int top_4x4 = (4*top_y+3) * s4x4 + 4*mb_x;
    int lists = (1 << h->sh.i_type) & 3;

    /* GCC pessimizes direct loads from heap-allocated arrays due to aliasing. */
    /* By only dereferencing them once, we avoid this issue. */
    int8_t (*i4x4)[8] = h->mb.intra4x4_pred_mode;
    uint8_t (*nnz)[48] = h->mb.non_zero_count;
    int16_t *cbp = h->mb.cbp;

    const x264_left_table_t *left_index_table = h->mb.left_index_table;

    h->mb.cache.deblock_strength = h->deblock_strength[mb_y&1][h->param.b_sliced_threads?h->mb.i_mb_xy:mb_x];

    /* load cache */
    if( h->mb.i_neighbour & MB_TOP )
    {
        h->mb.cache.i_cbp_top = cbp[top];
        /* load intra4x4 */
        CP32( &h->mb.cache.intra4x4_pred_mode[x264_scan8[0] - 8], &i4x4[top][0] );

        /* load non_zero_count */
        CP32( &h->mb.cache.non_zero_count[x264_scan8[ 0] - 8], &nnz[top][12] );
        CP32( &h->mb.cache.non_zero_count[x264_scan8[16] - 8], &nnz[top][16-4 + (16>>CHROMA_V_SHIFT)] );
        CP32( &h->mb.cache.non_zero_count[x264_scan8[32] - 8], &nnz[top][32-4 + (16>>CHROMA_V_SHIFT)] );

        /* Finish the prefetching */
        for( int l = 0; l < lists; l++ )
        {
            x264_prefetch( &h->mb.mv[l][top_4x4-1] );
            /* Top right being not in the same cacheline as top left will happen
             * once every 4 MBs, so one extra prefetch is worthwhile */
            x264_prefetch( &h->mb.mv[l][top_4x4+4] );
            x264_prefetch( &h->mb.ref[l][top_8x8-1] );
            if( h->param.b_cabac )
                x264_prefetch( &h->mb.mvd[l][top] );
        }
    }
    else
    {
        h->mb.cache.i_cbp_top = -1;

        /* load intra4x4 */
        M32( &h->mb.cache.intra4x4_pred_mode[x264_scan8[0] - 8] ) = 0xFFFFFFFFU;

        /* load non_zero_count */
        M32( &h->mb.cache.non_zero_count[x264_scan8[ 0] - 8] ) = 0x80808080U;
        M32( &h->mb.cache.non_zero_count[x264_scan8[16] - 8] ) = 0x80808080U;
        M32( &h->mb.cache.non_zero_count[x264_scan8[32] - 8] ) = 0x80808080U;
    }

    if( h->mb.i_neighbour & MB_LEFT )
    {
        int ltop = left[LTOP];
        int lbot = b_mbaff ? left[LBOT] : ltop;
        if( b_mbaff )
        {
            const int16_t top_luma = (cbp[ltop] >> (left_index_table->mv[0]&(~1))) & 2;
            const int16_t bot_luma = (cbp[lbot] >> (left_index_table->mv[2]&(~1))) & 2;
            h->mb.cache.i_cbp_left = (cbp[ltop] & 0xfff0) | (bot_luma<<2) | top_luma;
        }
        else
            h->mb.cache.i_cbp_left = cbp[ltop];

        /* load intra4x4 */
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 0] - 1] = i4x4[ltop][left_index_table->intra[0]];
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 2] - 1] = i4x4[ltop][left_index_table->intra[1]];
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 8] - 1] = i4x4[lbot][left_index_table->intra[2]];
        h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = i4x4[lbot][left_index_table->intra[3]];

        /* load non_zero_count */
        h->mb.cache.non_zero_count[x264_scan8[ 0] - 1] = nnz[ltop][left_index_table->nnz[0]];
        h->mb.cache.non_zero_count[x264_scan8[ 2] - 1] = nnz[ltop][left_index_table->nnz[1]];
        h->mb.cache.non_zero_count[x264_scan8[ 8] - 1] = nnz[lbot][left_index_table->nnz[2]];
        h->mb.cache.non_zero_count[x264_scan8[10] - 1] = nnz[lbot][left_index_table->nnz[3]];

        if( CHROMA_FORMAT >= CHROMA_422 )
        {
            int offset = (4>>CHROMA_H_SHIFT) - 4;
            h->mb.cache.non_zero_count[x264_scan8[16+ 0] - 1] = nnz[ltop][left_index_table->nnz[0]+16+offset];
            h->mb.cache.non_zero_count[x264_scan8[16+ 2] - 1] = nnz[ltop][left_index_table->nnz[1]+16+offset];
            h->mb.cache.non_zero_count[x264_scan8[16+ 8] - 1] = nnz[lbot][left_index_table->nnz[2]+16+offset];
            h->mb.cache.non_zero_count[x264_scan8[16+10] - 1] = nnz[lbot][left_index_table->nnz[3]+16+offset];
            h->mb.cache.non_zero_count[x264_scan8[32+ 0] - 1] = nnz[ltop][left_index_table->nnz[0]+32+offset];
            h->mb.cache.non_zero_count[x264_scan8[32+ 2] - 1] = nnz[ltop][left_index_table->nnz[1]+32+offset];
            h->mb.cache.non_zero_count[x264_scan8[32+ 8] - 1] = nnz[lbot][left_index_table->nnz[2]+32+offset];
            h->mb.cache.non_zero_count[x264_scan8[32+10] - 1] = nnz[lbot][left_index_table->nnz[3]+32+offset];
        }
        else
        {
            h->mb.cache.non_zero_count[x264_scan8[16+ 0] - 1] = nnz[ltop][left_index_table->nnz_chroma[0]];
            h->mb.cache.non_zero_count[x264_scan8[16+ 2] - 1] = nnz[lbot][left_index_table->nnz_chroma[1]];
            h->mb.cache.non_zero_count[x264_scan8[32+ 0] - 1] = nnz[ltop][left_index_table->nnz_chroma[2]];
            h->mb.cache.non_zero_count[x264_scan8[32+ 2] - 1] = nnz[lbot][left_index_table->nnz_chroma[3]];
        }
    }
    else
    {
        h->mb.cache.i_cbp_left = -1;

        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 0] - 1] =
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 2] - 1] =
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 8] - 1] =
        h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = -1;

        /* load non_zero_count */
        h->mb.cache.non_zero_count[x264_scan8[ 0] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[ 2] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[ 8] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[10] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[16+ 0] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[16+ 2] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[32+ 0] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[32+ 2] - 1] = 0x80;
        if( CHROMA_FORMAT >= CHROMA_422 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16+ 8] - 1] =
            h->mb.cache.non_zero_count[x264_scan8[16+10] - 1] =
            h->mb.cache.non_zero_count[x264_scan8[32+ 8] - 1] =
            h->mb.cache.non_zero_count[x264_scan8[32+10] - 1] = 0x80;
        }
    }

    if( h->pps->b_transform_8x8_mode )
    {
        h->mb.cache.i_neighbour_transform_size =
            ( (h->mb.i_neighbour & MB_LEFT) && h->mb.mb_transform_size[left[0]] )
          + ( (h->mb.i_neighbour & MB_TOP) && h->mb.mb_transform_size[top] );
    }

    if( b_mbaff )
    {
        h->mb.pic.i_fref[0] = h->i_ref[0] << MB_INTERLACED;
        h->mb.pic.i_fref[1] = h->i_ref[1] << MB_INTERLACED;
    }

    if( !b_mbaff )
    {
        x264_copy_column8( h->mb.pic.p_fdec[0]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+ 4*FDEC_STRIDE );
        x264_copy_column8( h->mb.pic.p_fdec[0]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+12*FDEC_STRIDE );
        macroblock_load_pic_pointers( h, mb_x, mb_y, 0, 0, 0 );
        if( CHROMA444 )
        {
            x264_copy_column8( h->mb.pic.p_fdec[1]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+15+ 4*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[1]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[1]+15+12*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[2]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+15+ 4*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[2]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[2]+15+12*FDEC_STRIDE );
            macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 0, 0 );
            macroblock_load_pic_pointers( h, mb_x, mb_y, 2, 0, 0 );
        }
        else if( CHROMA_FORMAT )
        {
            x264_copy_column8( h->mb.pic.p_fdec[1]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+ 7+ 4*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[2]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+ 7+ 4*FDEC_STRIDE );
            if( CHROMA_FORMAT == CHROMA_422 )
            {
                x264_copy_column8( h->mb.pic.p_fdec[1]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[1]+ 7+12*FDEC_STRIDE );
                x264_copy_column8( h->mb.pic.p_fdec[2]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[2]+ 7+12*FDEC_STRIDE );
            }
            macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 1, 0 );
        }
    }
    else
    {
        macroblock_load_pic_pointers( h, mb_x, mb_y, 0, 0, 1 );
        if( CHROMA444 )
        {
            macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 0, 1 );
            macroblock_load_pic_pointers( h, mb_x, mb_y, 2, 0, 1 );
        }
        else if( CHROMA_FORMAT )
            macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 1, 1 );
    }

    if( h->fdec->integral )
    {
        int offset = 16 * (mb_x + mb_y * h->fdec->i_stride[0]);
        for( int list = 0; list < 2; list++ )
            for( int i = 0; i < h->mb.pic.i_fref[list]; i++ )
                h->mb.pic.p_integral[list][i] = &h->fref[list][i]->integral[offset];
    }

    x264_prefetch_fenc( h, h->fenc, mb_x, mb_y );

    /* load ref/mv/mvd */
    for( int l = 0; l < lists; l++ )
    {
        int16_t (*mv)[2] = h->mb.mv[l];
        int8_t *ref = h->mb.ref[l];

        int i8 = x264_scan8[0] - 1 - 1*8;
        if( h->mb.i_neighbour & MB_TOPLEFT )
        {
            int ir = b_mbaff ? 2*(s8x8*h->mb.i_mb_topleft_y + mb_x-1)+1+s8x8 : top_8x8 - 1;
            int iv = b_mbaff ? 4*(s4x4*h->mb.i_mb_topleft_y + mb_x-1)+3+3*s4x4 : top_4x4 - 1;
            if( b_mbaff && h->mb.topleft_partition )
            {
                /* Take motion vector from the middle of macroblock instead of
                 * the bottom right as usual. */
                iv -= 2*s4x4;
                ir -= s8x8;
            }
            h->mb.cache.ref[l][i8] = ref[ir];
            CP32( h->mb.cache.mv[l][i8], mv[iv] );
        }
        else
        {
            h->mb.cache.ref[l][i8] = -2;
            M32( h->mb.cache.mv[l][i8] ) = 0;
        }

        i8 = x264_scan8[0] - 8;
        if( h->mb.i_neighbour & MB_TOP )
        {
            h->mb.cache.ref[l][i8+0] =
            h->mb.cache.ref[l][i8+1] = ref[top_8x8 + 0];
            h->mb.cache.ref[l][i8+2] =
            h->mb.cache.ref[l][i8+3] = ref[top_8x8 + 1];
            CP128( h->mb.cache.mv[l][i8], mv[top_4x4] );
        }
        else
        {
            M128( h->mb.cache.mv[l][i8] ) = M128_ZERO;
            M32( &h->mb.cache.ref[l][i8] ) = (uint8_t)(-2) * 0x01010101U;
        }

        i8 = x264_scan8[0] + 4 - 1*8;
        if( h->mb.i_neighbour & MB_TOPRIGHT )
        {
            int ir = b_mbaff ? 2*(s8x8*h->mb.i_mb_topright_y + (mb_x+1))+s8x8 : top_8x8 + 2;
            int iv = b_mbaff ? 4*(s4x4*h->mb.i_mb_topright_y + (mb_x+1))+3*s4x4 : top_4x4 + 4;
            h->mb.cache.ref[l][i8] = ref[ir];
            CP32( h->mb.cache.mv[l][i8], mv[iv] );
        }
        else
            h->mb.cache.ref[l][i8] = -2;

        i8 = x264_scan8[0] - 1;
        if( h->mb.i_neighbour & MB_LEFT )
        {
            if( b_mbaff )
            {
                h->mb.cache.ref[l][i8+0*8] = ref[h->mb.left_b8[LTOP] + 1 + s8x8*left_index_table->ref[0]];
                h->mb.cache.ref[l][i8+1*8] = ref[h->mb.left_b8[LTOP] + 1 + s8x8*left_index_table->ref[1]];
                h->mb.cache.ref[l][i8+2*8] = ref[h->mb.left_b8[LBOT] + 1 + s8x8*left_index_table->ref[2]];
                h->mb.cache.ref[l][i8+3*8] = ref[h->mb.left_b8[LBOT] + 1 + s8x8*left_index_table->ref[3]];

                CP32( h->mb.cache.mv[l][i8+0*8], mv[h->mb.left_b4[LTOP] + 3 + s4x4*left_index_table->mv[0]] );
                CP32( h->mb.cache.mv[l][i8+1*8], mv[h->mb.left_b4[LTOP] + 3 + s4x4*left_index_table->mv[1]] );
                CP32( h->mb.cache.mv[l][i8+2*8], mv[h->mb.left_b4[LBOT] + 3 + s4x4*left_index_table->mv[2]] );
                CP32( h->mb.cache.mv[l][i8+3*8], mv[h->mb.left_b4[LBOT] + 3 + s4x4*left_index_table->mv[3]] );
            }
            else
            {
                const int ir = h->mb.i_b8_xy - 1;
                const int iv = h->mb.i_b4_xy - 1;
                h->mb.cache.ref[l][i8+0*8] =
                h->mb.cache.ref[l][i8+1*8] = ref[ir + 0*s8x8];
                h->mb.cache.ref[l][i8+2*8] =
                h->mb.cache.ref[l][i8+3*8] = ref[ir + 1*s8x8];

                CP32( h->mb.cache.mv[l][i8+0*8], mv[iv + 0*s4x4] );
                CP32( h->mb.cache.mv[l][i8+1*8], mv[iv + 1*s4x4] );
                CP32( h->mb.cache.mv[l][i8+2*8], mv[iv + 2*s4x4] );
                CP32( h->mb.cache.mv[l][i8+3*8], mv[iv + 3*s4x4] );
            }
        }
        else
        {
            for( int i = 0; i < 4; i++ )
            {
                h->mb.cache.ref[l][i8+i*8] = -2;
                M32( h->mb.cache.mv[l][i8+i*8] ) = 0;
            }
        }

        /* Extra logic for top right mv in mbaff.
         * . . . d . . a .
         * . . . e . . . .
         * . . . f b . c .
         * . . . . . . . .
         *
         * If the top right of the 4x4 partitions labeled a, b and c in the
         * above diagram do not exist, but the entries d, e and f exist (in
         * the macroblock to the left) then use those instead.
         */
        if( b_mbaff && (h->mb.i_neighbour & MB_LEFT) )
        {
            if( MB_INTERLACED && !h->mb.field[h->mb.i_mb_xy-1] )
            {
                h->mb.cache.topright_ref[l][0] = ref[h->mb.left_b8[0] + 1 + s8x8*0];
                h->mb.cache.topright_ref[l][1] = ref[h->mb.left_b8[0] + 1 + s8x8*1];
                h->mb.cache.topright_ref[l][2] = ref[h->mb.left_b8[1] + 1 + s8x8*0];
                CP32( h->mb.cache.topright_mv[l][0], mv[h->mb.left_b4[0] + 3 + s4x4*(left_index_table->mv[0]+1)] );
                CP32( h->mb.cache.topright_mv[l][1], mv[h->mb.left_b4[0] + 3 + s4x4*(left_index_table->mv[1]+1)] );
                CP32( h->mb.cache.topright_mv[l][2], mv[h->mb.left_b4[1] + 3 + s4x4*(left_index_table->mv[2]+1)] );
            }
            else if( !MB_INTERLACED && h->mb.field[h->mb.i_mb_xy-1] )
            {
                // Looking at the bottom field so always take the bottom macroblock of the pair.
                h->mb.cache.topright_ref[l][0] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[0]];
                h->mb.cache.topright_ref[l][1] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[1]];
                h->mb.cache.topright_ref[l][2] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[2]];
                CP32( h->mb.cache.topright_mv[l][0], mv[h->mb.left_b4[0] + 3 + s4x4*4 + s4x4*left_index_table->mv[0]] );
                CP32( h->mb.cache.topright_mv[l][1], mv[h->mb.left_b4[0] + 3 + s4x4*4 + s4x4*left_index_table->mv[1]] );
                CP32( h->mb.cache.topright_mv[l][2], mv[h->mb.left_b4[0] + 3 + s4x4*4 + s4x4*left_index_table->mv[2]] );
            }
        }

        if( h->param.b_cabac )
        {
            uint8_t (*mvd)[8][2] = h->mb.mvd[l];
            if( h->mb.i_neighbour & MB_TOP )
                CP64( h->mb.cache.mvd[l][x264_scan8[0] - 8], mvd[top][0] );
            else
                M64( h->mb.cache.mvd[l][x264_scan8[0] - 8] ) = 0;

            if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1] >= 0) )
            {
                CP16( h->mb.cache.mvd[l][x264_scan8[0 ] - 1], mvd[left[LTOP]][left_index_table->intra[0]] );
                CP16( h->mb.cache.mvd[l][x264_scan8[2 ] - 1], mvd[left[LTOP]][left_index_table->intra[1]] );
            }
            else
            {
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+0*8] ) = 0;
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+1*8] ) = 0;
            }
            if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1+2*8] >= 0) )
            {
                CP16( h->mb.cache.mvd[l][x264_scan8[8 ] - 1], mvd[left[LBOT]][left_index_table->intra[2]] );
                CP16( h->mb.cache.mvd[l][x264_scan8[10] - 1], mvd[left[LBOT]][left_index_table->intra[3]] );
            }
            else
            {
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+2*8] ) = 0;
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+3*8] ) = 0;
            }
        }

        /* If motion vectors are cached from frame macroblocks but this
         * macroblock is a field macroblock then the motion vector must be
         * halved. Similarly, motion vectors from field macroblocks are doubled. */
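        /* For example, a field macroblock reading a frame neighbour (the
         * MB_INTERLACED case below) doubles the reference index, since field
         * reference lists contain both parities, and halves the vertical
         * motion vector, since field coordinates span half the frame height;
         * the frame-from-field case is the exact inverse. */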
        if( b_mbaff )
        {
#define MAP_MVS\
                if( FIELD_DIFFERENT(h->mb.i_mb_topleft_xy) )\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 - 1*8)\
                if( FIELD_DIFFERENT(top) )\
                {\
                    MAP_F2F(mv, ref, x264_scan8[0] + 0 - 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] + 1 - 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] + 2 - 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] + 3 - 1*8)\
                }\
                if( FIELD_DIFFERENT(h->mb.i_mb_topright_xy) )\
                    MAP_F2F(mv, ref, x264_scan8[0] + 4 - 1*8)\
                if( FIELD_DIFFERENT(left[0]) )\
                {\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 0*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 2*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 3*8)\
                    MAP_F2F(topright_mv, topright_ref, 0)\
                    MAP_F2F(topright_mv, topright_ref, 1)\
                    MAP_F2F(topright_mv, topright_ref, 2)\
                }

            if( MB_INTERLACED )
            {
#define FIELD_DIFFERENT(macroblock) (macroblock >= 0 && !h->mb.field[macroblock])
#define MAP_F2F(varmv, varref, index)\
                if( h->mb.cache.varref[l][index] >= 0 )\
                {\
                    h->mb.cache.varref[l][index] <<= 1;\
                    h->mb.cache.varmv[l][index][1] /= 2;\
                    h->mb.cache.mvd[l][index][1] >>= 1;\
                }
                MAP_MVS
#undef MAP_F2F
#undef FIELD_DIFFERENT
            }
            else
            {
#define FIELD_DIFFERENT(macroblock) (macroblock >= 0 && h->mb.field[macroblock])
#define MAP_F2F(varmv, varref, index)\
                if( h->mb.cache.varref[l][index] >= 0 )\
                {\
                    h->mb.cache.varref[l][index] >>= 1;\
                    h->mb.cache.varmv[l][index][1] <<= 1;\
                    h->mb.cache.mvd[l][index][1] <<= 1;\
                }
                MAP_MVS
#undef MAP_F2F
#undef FIELD_DIFFERENT
            }
        }
    }

    if( b_mbaff && mb_x == 0 && !(mb_y&1) )
    {
        if( h->mb.i_mb_top_xy >= h->sh.i_first_mb )
            h->mb.field_decoding_flag = h->mb.field[h->mb.i_mb_top_xy];
        else
            h->mb.field_decoding_flag = 0;
    }

    /* Check whether skip here would cause decoder to predict interlace mode incorrectly.
     * FIXME: It might be better to change the interlace type rather than forcing a skip to be non-skip. */
    h->mb.b_allow_skip = 1;
    if( b_mbaff )
    {
        if( MB_INTERLACED != h->mb.field_decoding_flag &&
            (mb_y&1) && IS_SKIP(h->mb.type[h->mb.i_mb_xy - h->mb.i_mb_stride]) )
            h->mb.b_allow_skip = 0;
    }

    if( h->param.b_cabac )
    {
        if( b_mbaff )
        {
            int left_xy, top_xy;
            /* Neighbours here are calculated based on field_decoding_flag */
            int mb_xy = mb_x + (mb_y&~1)*h->mb.i_mb_stride;
            left_xy = mb_xy - 1;
            if( (mb_y&1) && mb_x > 0 && h->mb.field_decoding_flag == h->mb.field[left_xy] )
                left_xy += h->mb.i_mb_stride;
            if( h->mb.field_decoding_flag )
            {
                top_xy = mb_xy - h->mb.i_mb_stride;
                if( !(mb_y&1) && top_xy >= 0 && h->mb.slice_table[top_xy] == h->sh.i_first_mb && h->mb.field[top_xy] )
                    top_xy -= h->mb.i_mb_stride;
            }
            else
                top_xy = mb_x + (mb_y-1)*h->mb.i_mb_stride;

            h->mb.cache.i_neighbour_skip = (mb_x > 0 && h->mb.slice_table[left_xy] == h->sh.i_first_mb && !IS_SKIP( h->mb.type[left_xy] ))
                                         + (top_xy >= 0 && h->mb.slice_table[top_xy] == h->sh.i_first_mb && !IS_SKIP( h->mb.type[top_xy] ));
        }
        else
        {
            h->mb.cache.i_neighbour_skip = ((h->mb.i_neighbour & MB_LEFT) && !IS_SKIP( h->mb.i_mb_type_left[0] ))
                                         + ((h->mb.i_neighbour & MB_TOP) && !IS_SKIP( h->mb.i_mb_type_top ));
        }
    }

    /* load skip */
    if( h->sh.i_type == SLICE_TYPE_B )
    {
        h->mb.bipred_weight = h->mb.bipred_weight_buf[MB_INTERLACED][MB_INTERLACED&(mb_y&1)];
        h->mb.dist_scale_factor = h->mb.dist_scale_factor_buf[MB_INTERLACED][MB_INTERLACED&(mb_y&1)];
        if( h->param.b_cabac )
        {
            uint8_t skipbp;
            x264_macroblock_cache_skip( h, 0, 0, 4, 4, 0 );
            if( b_mbaff )
            {
                skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[LTOP]] : 0;
                h->mb.cache.skip[x264_scan8[0] - 1] = (skipbp >> (1+(left_index_table->mv[0]&~1))) & 1;
                skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[LBOT]] : 0;
                h->mb.cache.skip[x264_scan8[8] - 1] = (skipbp >> (1+(left_index_table->mv[2]&~1))) & 1;
            }
            else
            {
                skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[0]] : 0;
                h->mb.cache.skip[x264_scan8[0] - 1] = skipbp & 0x2;
                h->mb.cache.skip[x264_scan8[8] - 1] = skipbp & 0x8;
            }
            skipbp = (h->mb.i_neighbour & MB_TOP) ? h->mb.skipbp[top] : 0;
            h->mb.cache.skip[x264_scan8[0] - 8] = skipbp & 0x4;
            h->mb.cache.skip[x264_scan8[4] - 8] = skipbp & 0x8;
        }
    }

    if( h->sh.i_type == SLICE_TYPE_P )
        x264_mb_predict_mv_pskip( h, h->mb.cache.pskip_mv );

    h->mb.i_neighbour4[0] =
    h->mb.i_neighbour8[0] = (h->mb.i_neighbour_intra & (MB_TOP|MB_LEFT|MB_TOPLEFT))
                            | ((h->mb.i_neighbour_intra & MB_TOP) ? MB_TOPRIGHT : 0);
    h->mb.i_neighbour4[4] =
    h->mb.i_neighbour4[1] = MB_LEFT | ((h->mb.i_neighbour_intra & MB_TOP) ? (MB_TOP|MB_TOPLEFT|MB_TOPRIGHT) : 0);
    h->mb.i_neighbour4[2] =
    h->mb.i_neighbour4[8] =
    h->mb.i_neighbour4[10] =
    h->mb.i_neighbour8[2] = MB_TOP|MB_TOPRIGHT | ((h->mb.i_neighbour_intra & MB_LEFT) ? (MB_LEFT|MB_TOPLEFT) : 0);
    h->mb.i_neighbour4[5] =
    h->mb.i_neighbour8[1] = MB_LEFT | (h->mb.i_neighbour_intra & MB_TOPRIGHT)
                            | ((h->mb.i_neighbour_intra & MB_TOP) ? MB_TOP|MB_TOPLEFT : 0);
}

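/* Thin wrappers: macroblock_cache_load is ALWAYS_INLINE with b_mbaff passed
 * as a compile-time constant, so each wrapper gets a specialized copy with
 * the unused (MBAFF or progressive) paths eliminated. */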
void x264_macroblock_cache_load_progressive( x264_t *h, int mb_x, int mb_y )
{
    macroblock_cache_load( h, mb_x, mb_y, 0 );
}

void x264_macroblock_cache_load_interlaced( x264_t *h, int mb_x, int mb_y )
{
    macroblock_cache_load( h, mb_x, mb_y, 1 );
}

static void macroblock_deblock_strength_mbaff( x264_t *h, uint8_t (*bs)[8][4] )
{
    if( (h->mb.i_neighbour & MB_LEFT) && h->mb.field[h->mb.i_mb_left_xy[0]] != MB_INTERLACED )
    {
        static const uint8_t offset[2][2][8] =
        {   {   { 0, 0, 0, 0, 1, 1, 1, 1 },
                { 2, 2, 2, 2, 3, 3, 3, 3 }, },
            {   { 0, 1, 2, 3, 0, 1, 2, 3 },
                { 0, 1, 2, 3, 0, 1, 2, 3 }, }
        };
        ALIGNED_ARRAY_8( uint8_t, tmpbs, [8] );

        const uint8_t *off = offset[MB_INTERLACED][h->mb.i_mb_y&1];
        uint8_t (*nnz)[48] = h->mb.non_zero_count;

        for( int i = 0; i < 8; i++ )
        {
            int left = h->mb.i_mb_left_xy[MB_INTERLACED ? i>>2 : i&1];
            int nnz_this = h->mb.cache.non_zero_count[x264_scan8[0]+8*(i>>1)];
            int nnz_left = nnz[left][3 + 4*off[i]];
            if( !h->param.b_cabac && h->pps->b_transform_8x8_mode )
            {
                int j = off[i]&~1;
                if( h->mb.mb_transform_size[left] )
                    nnz_left = !!(M16( &nnz[left][2+4*j] ) | M16( &nnz[left][2+4*(1+j)] ));
            }
            tmpbs[i] = (nnz_left || nnz_this) ? 2 : 1;
        }

        if( MB_INTERLACED )
        {
            CP32( bs[0][0], &tmpbs[0] );
            CP32( bs[0][4], &tmpbs[4] );
        }
        else
        {
            for( int i = 0; i < 4; i++ ) bs[0][0][i] = tmpbs[2*i];
            for( int i = 0; i < 4; i++ ) bs[0][4][i] = tmpbs[1+2*i];
        }
    }

    if( (h->mb.i_neighbour & MB_TOP) && MB_INTERLACED != h->mb.field[h->mb.i_mb_top_xy] )
    {
        if( !(h->mb.i_mb_y&1) && !MB_INTERLACED )
        {
            /* Need to filter both fields (even for frame macroblocks).
             * Filter top two rows using the top macroblock of the above
             * pair and then the bottom one. */
            int mbn_xy = h->mb.i_mb_xy - 2 * h->mb.i_mb_stride;
            uint8_t *nnz_cur = &h->mb.cache.non_zero_count[x264_scan8[0]];

            for( int j = 0; j < 2; j++, mbn_xy += h->mb.i_mb_stride )
            {
                uint8_t (*nnz)[48] = h->mb.non_zero_count;

                ALIGNED_4( uint8_t nnz_top[4] );
                CP32( nnz_top, &nnz[mbn_xy][3*4] );

                if( !h->param.b_cabac && h->pps->b_transform_8x8_mode && h->mb.mb_transform_size[mbn_xy] )
                {
                    nnz_top[0] = nnz_top[1] = M16( &nnz[mbn_xy][ 8] ) || M16( &nnz[mbn_xy][12] );
                    nnz_top[2] = nnz_top[3] = M16( &nnz[mbn_xy][10] ) || M16( &nnz[mbn_xy][14] );
                }

                for( int i = 0; i < 4; i++ )
                    bs[1][4*j][i] = (nnz_cur[i] || nnz_top[i]) ? 2 : 1;
            }
        }
        else
            for( int i = 0; i < 4; i++ )
                bs[1][0][i] = X264_MAX( bs[1][0][i], 1 );
    }
}

x264_macroblock_deblock_strength(x264_t * h)1438 void x264_macroblock_deblock_strength( x264_t *h )
1439 {
1440 uint8_t (*bs)[8][4] = h->mb.cache.deblock_strength;
1441 if( IS_INTRA( h->mb.i_type ) )
1442 {
1443 M32( bs[0][1] ) = 0x03030303;
1444 M64( bs[0][2] ) = 0x0303030303030303ULL;
1445 M32( bs[1][1] ) = 0x03030303;
1446 M64( bs[1][2] ) = 0x0303030303030303ULL;
1447 return;
1448 }
1449
1450 /* Early termination: in this case, nnz guarantees all edges use strength 2.*/
1451 if( h->mb.b_transform_8x8 && !CHROMA444 )
1452 {
1453 int cbp_mask = 0xf >> CHROMA_V_SHIFT;
1454 if( (h->mb.i_cbp_luma&cbp_mask) == cbp_mask )
1455 {
1456 M32( bs[0][0] ) = 0x02020202;
1457 M32( bs[0][2] ) = 0x02020202;
1458 M32( bs[0][4] ) = 0x02020202;
1459 M64( bs[1][0] ) = 0x0202020202020202ULL; /* [1][1] and [1][3] has to be set for 4:2:2 */
1460 M64( bs[1][2] ) = 0x0202020202020202ULL;
1461 M32( bs[1][4] ) = 0x02020202;
1462 return;
1463 }
1464 }
1465
1466 int neighbour_changed = 0;
1467 if( h->sh.i_disable_deblocking_filter_idc != 2 )
1468 {
1469 neighbour_changed = h->mb.i_neighbour_frame&~h->mb.i_neighbour;
1470 h->mb.i_neighbour = h->mb.i_neighbour_frame;
1471 }
1472
1473 /* MBAFF deblock uses different left neighbors from encoding */
1474 if( SLICE_MBAFF && (h->mb.i_neighbour & MB_LEFT) && (h->mb.field[h->mb.i_mb_xy - 1] != MB_INTERLACED) )
1475 {
1476 h->mb.i_mb_left_xy[1] =
1477 h->mb.i_mb_left_xy[0] = h->mb.i_mb_xy - 1;
1478 if( h->mb.i_mb_y&1 )
1479 h->mb.i_mb_left_xy[0] -= h->mb.i_mb_stride;
1480 else
1481 h->mb.i_mb_left_xy[1] += h->mb.i_mb_stride;
1482 }
1483
1484 /* If we have multiple slices and we're deblocking on slice edges, we
1485 * have to reload neighbour data. */
1486 if( neighbour_changed )
1487 {
1488 int top_y = h->mb.i_mb_top_y;
1489 int top_8x8 = (2*top_y+1) * h->mb.i_b8_stride + 2*h->mb.i_mb_x;
1490 int top_4x4 = (4*top_y+3) * h->mb.i_b4_stride + 4*h->mb.i_mb_x;
1491 int s8x8 = h->mb.i_b8_stride;
1492 int s4x4 = h->mb.i_b4_stride;
1493
1494 uint8_t (*nnz)[48] = h->mb.non_zero_count;
1495 const x264_left_table_t *left_index_table = SLICE_MBAFF ? h->mb.left_index_table : &left_indices[3];
1496
1497 if( neighbour_changed & MB_TOP )
1498 CP32( &h->mb.cache.non_zero_count[x264_scan8[0] - 8], &nnz[h->mb.i_mb_top_xy][12] );
1499
1500 if( neighbour_changed & MB_LEFT )
1501 {
1502 int *left = h->mb.i_mb_left_xy;
1503 h->mb.cache.non_zero_count[x264_scan8[0 ] - 1] = nnz[left[0]][left_index_table->nnz[0]];
1504 h->mb.cache.non_zero_count[x264_scan8[2 ] - 1] = nnz[left[0]][left_index_table->nnz[1]];
1505 h->mb.cache.non_zero_count[x264_scan8[8 ] - 1] = nnz[left[1]][left_index_table->nnz[2]];
1506 h->mb.cache.non_zero_count[x264_scan8[10] - 1] = nnz[left[1]][left_index_table->nnz[3]];
1507 }
1508
        for( int l = 0; l <= (h->sh.i_type == SLICE_TYPE_B); l++ )
        {
            int16_t (*mv)[2] = h->mb.mv[l];
            int8_t *ref = h->mb.ref[l];

            int i8 = x264_scan8[0] - 8;
            if( neighbour_changed & MB_TOP )
            {
                h->mb.cache.ref[l][i8+0] =
                h->mb.cache.ref[l][i8+1] = ref[top_8x8 + 0];
                h->mb.cache.ref[l][i8+2] =
                h->mb.cache.ref[l][i8+3] = ref[top_8x8 + 1];
                CP128( h->mb.cache.mv[l][i8], mv[top_4x4] );
            }

            i8 = x264_scan8[0] - 1;
            if( neighbour_changed & MB_LEFT )
            {
                h->mb.cache.ref[l][i8+0*8] =
                h->mb.cache.ref[l][i8+1*8] = ref[h->mb.left_b8[0] + 1 + s8x8*left_index_table->ref[0]];
                h->mb.cache.ref[l][i8+2*8] =
                h->mb.cache.ref[l][i8+3*8] = ref[h->mb.left_b8[1] + 1 + s8x8*left_index_table->ref[2]];

                CP32( h->mb.cache.mv[l][i8+0*8], mv[h->mb.left_b4[0] + 3 + s4x4*left_index_table->mv[0]] );
                CP32( h->mb.cache.mv[l][i8+1*8], mv[h->mb.left_b4[0] + 3 + s4x4*left_index_table->mv[1]] );
                CP32( h->mb.cache.mv[l][i8+2*8], mv[h->mb.left_b4[1] + 3 + s4x4*left_index_table->mv[2]] );
                CP32( h->mb.cache.mv[l][i8+3*8], mv[h->mb.left_b4[1] + 3 + s4x4*left_index_table->mv[3]] );
            }
        }
    }

    if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART && h->sh.i_type == SLICE_TYPE_P )
    {
        /* Handle reference frame duplicates */
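        /* Smart weightp may duplicate a reference so that the same physical
         * frame appears with different weights. Deblock strength must treat
         * those duplicates as the same reference, so remap every cached ref
         * through deblock_ref_table before comparing. */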
        int i8 = x264_scan8[0] - 8;
        h->mb.cache.ref[0][i8+0] =
        h->mb.cache.ref[0][i8+1] = deblock_ref_table(h->mb.cache.ref[0][i8+0]);
        h->mb.cache.ref[0][i8+2] =
        h->mb.cache.ref[0][i8+3] = deblock_ref_table(h->mb.cache.ref[0][i8+2]);

        i8 = x264_scan8[0] - 1;
        h->mb.cache.ref[0][i8+0*8] =
        h->mb.cache.ref[0][i8+1*8] = deblock_ref_table(h->mb.cache.ref[0][i8+0*8]);
        h->mb.cache.ref[0][i8+2*8] =
        h->mb.cache.ref[0][i8+3*8] = deblock_ref_table(h->mb.cache.ref[0][i8+2*8]);

        int ref0 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[ 0]]);
        int ref1 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[ 4]]);
        int ref2 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[ 8]]);
        int ref3 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[12]]);
        uint32_t reftop = pack16to32( (uint8_t)ref0, (uint8_t)ref1 ) * 0x0101;
        uint32_t refbot = pack16to32( (uint8_t)ref2, (uint8_t)ref3 ) * 0x0101;

        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*0] ) = reftop;
        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*1] ) = reftop;
        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*2] ) = refbot;
        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*3] ) = refbot;
    }

    /* Munge NNZ for cavlc + 8x8dct */
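    /* With CAVLC + 8x8 DCT, nnz is stored as per-4x4 counts for coeff_token
     * context, but deblocking needs a per-4x4 "coded" flag of the containing
     * 8x8 transform block. OR the four counts of each 8x8 block and replicate
     * the flag across its 4x4s, for the neighbours and then for this MB. */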
    if( !h->param.b_cabac && h->pps->b_transform_8x8_mode )
    {
        uint8_t (*nnz)[48] = h->mb.non_zero_count;
        int top = h->mb.i_mb_top_xy;
        int *left = h->mb.i_mb_left_xy;

        if( (h->mb.i_neighbour & MB_TOP) && h->mb.mb_transform_size[top] )
        {
            int i8 = x264_scan8[0] - 8;
            int nnz_top0 = M16( &nnz[top][8] ) | M16( &nnz[top][12] );
            int nnz_top1 = M16( &nnz[top][10] ) | M16( &nnz[top][14] );
            M16( &h->mb.cache.non_zero_count[i8+0] ) = nnz_top0 ? 0x0101 : 0;
            M16( &h->mb.cache.non_zero_count[i8+2] ) = nnz_top1 ? 0x0101 : 0;
        }

        if( h->mb.i_neighbour & MB_LEFT )
        {
            int i8 = x264_scan8[0] - 1;
            if( h->mb.mb_transform_size[left[0]] )
            {
                int nnz_left0 = M16( &nnz[left[0]][2] ) | M16( &nnz[left[0]][6] );
                h->mb.cache.non_zero_count[i8+8*0] = !!nnz_left0;
                h->mb.cache.non_zero_count[i8+8*1] = !!nnz_left0;
            }
            if( h->mb.mb_transform_size[left[1]] )
            {
                int nnz_left1 = M16( &nnz[left[1]][10] ) | M16( &nnz[left[1]][14] );
                h->mb.cache.non_zero_count[i8+8*2] = !!nnz_left1;
                h->mb.cache.non_zero_count[i8+8*3] = !!nnz_left1;
            }
        }

        if( h->mb.b_transform_8x8 )
        {
            int nnz0 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
            int nnz1 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 4]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[ 6]] );
            int nnz2 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[10]] );
            int nnz3 = M16( &h->mb.cache.non_zero_count[x264_scan8[12]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[14]] );
            uint32_t nnztop = pack16to32( !!nnz0, !!nnz1 ) * 0x0101;
            uint32_t nnzbot = pack16to32( !!nnz2, !!nnz3 ) * 0x0101;

            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*0] ) = nnztop;
            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*1] ) = nnztop;
            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*2] ) = nnzbot;
            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*3] ) = nnzbot;
        }
    }

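    /* All inputs are assembled in the cache: compute the per-edge strengths
     * (possibly with an asm implementation), then patch up MBAFF specifics. */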
    h->loopf.deblock_strength( h->mb.cache.non_zero_count, h->mb.cache.ref, h->mb.cache.mv,
                               bs, 4 >> MB_INTERLACED, h->sh.i_type == SLICE_TYPE_B );

    if( SLICE_MBAFF )
        macroblock_deblock_strength_mbaff( h, bs );
}

static ALWAYS_INLINE void macroblock_store_pic( x264_t *h, int mb_x, int mb_y, int i, int b_chroma, int b_mbaff )
{
    int height = b_chroma ? 16>>CHROMA_V_SHIFT : 16;
    int i_stride = h->fdec->i_stride[i];
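    /* In MBAFF, the rows of the two fields are interleaved in the frame
     * buffer: double the stride and pick the field by the parity of mb_y. */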
    int i_stride2 = i_stride << (b_mbaff && MB_INTERLACED);
    int i_pix_offset = (b_mbaff && MB_INTERLACED)
                     ? 16 * mb_x + height * (mb_y&~1) * i_stride + (mb_y&1) * i_stride
                     : 16 * mb_x + height * mb_y * i_stride;
    if( b_chroma )
        h->mc.store_interleave_chroma( &h->fdec->plane[1][i_pix_offset], i_stride2, h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], height );
    else
        h->mc.copy[PIXEL_16x16]( &h->fdec->plane[i][i_pix_offset], i_stride2, h->mb.pic.p_fdec[i], FDEC_STRIDE, 16 );
}

static ALWAYS_INLINE void macroblock_backup_intra( x264_t *h, int mb_x, int mb_y, int b_mbaff )
{
    /* In MBAFF we store the last two rows in intra_border_backup[0] and [1].
     * For progressive mbs this is the bottom two rows, and for interlaced the
     * bottom row of each field. We also store samples needed for the next
     * mbpair in intra_border_backup[2]. */
    int backup_dst = !b_mbaff ? (mb_y&1) : (mb_y&1) ? 1 : MB_INTERLACED ? 0 : 2;
    memcpy( &h->intra_border_backup[backup_dst][0][mb_x*16 ], h->mb.pic.p_fdec[0]+FDEC_STRIDE*15, 16*SIZEOF_PIXEL );
    if( CHROMA444 )
    {
        memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+FDEC_STRIDE*15, 16*SIZEOF_PIXEL );
        memcpy( &h->intra_border_backup[backup_dst][2][mb_x*16 ], h->mb.pic.p_fdec[2]+FDEC_STRIDE*15, 16*SIZEOF_PIXEL );
    }
    else if( CHROMA_FORMAT )
    {
        int backup_src = (15>>CHROMA_V_SHIFT) * FDEC_STRIDE;
        memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+backup_src, 8*SIZEOF_PIXEL );
        memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16+8], h->mb.pic.p_fdec[2]+backup_src, 8*SIZEOF_PIXEL );
    }
    if( b_mbaff )
    {
        if( mb_y&1 )
        {
            int backup_src = (MB_INTERLACED ? 7 : 14) * FDEC_STRIDE;
            backup_dst = MB_INTERLACED ? 2 : 0;
            memcpy( &h->intra_border_backup[backup_dst][0][mb_x*16 ], h->mb.pic.p_fdec[0]+backup_src, 16*SIZEOF_PIXEL );
            if( CHROMA444 )
            {
                memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+backup_src, 16*SIZEOF_PIXEL );
                memcpy( &h->intra_border_backup[backup_dst][2][mb_x*16 ], h->mb.pic.p_fdec[2]+backup_src, 16*SIZEOF_PIXEL );
            }
            else if( CHROMA_FORMAT )
            {
                if( CHROMA_FORMAT == CHROMA_420 )
                    backup_src = (MB_INTERLACED ? 3 : 6) * FDEC_STRIDE;
                memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+backup_src, 8*SIZEOF_PIXEL );
                memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16+8], h->mb.pic.p_fdec[2]+backup_src, 8*SIZEOF_PIXEL );
            }
        }
    }
}

void x264_macroblock_cache_save( x264_t *h )
{
    const int i_mb_xy = h->mb.i_mb_xy;
    const int i_mb_type = x264_mb_type_fix[h->mb.i_type];
    const int s8x8 = h->mb.i_b8_stride;
    const int s4x4 = h->mb.i_b4_stride;
    const int i_mb_4x4 = h->mb.i_b4_xy;
    const int i_mb_8x8 = h->mb.i_b8_xy;

    /* GCC pessimizes direct stores to heap-allocated arrays due to aliasing. */
    /* By only dereferencing them once, we avoid this issue. */
    int8_t *i4x4 = h->mb.intra4x4_pred_mode[i_mb_xy];
    uint8_t *nnz = h->mb.non_zero_count[i_mb_xy];

    if( SLICE_MBAFF )
    {
        macroblock_backup_intra( h, h->mb.i_mb_x, h->mb.i_mb_y, 1 );
        macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 0, 0, 1 );
        if( CHROMA444 )
        {
            macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 0, 1 );
            macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 2, 0, 1 );
        }
        else if( CHROMA_FORMAT )
            macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 1, 1 );
    }
    else
    {
        macroblock_backup_intra( h, h->mb.i_mb_x, h->mb.i_mb_y, 0 );
        macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 0, 0, 0 );
        if( CHROMA444 )
        {
            macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 0, 0 );
            macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 2, 0, 0 );
        }
        else if( CHROMA_FORMAT )
            macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 1, 0 );
    }

    x264_prefetch_fenc( h, h->fdec, h->mb.i_mb_x, h->mb.i_mb_y );

    h->mb.type[i_mb_xy] = i_mb_type;
    h->mb.slice_table[i_mb_xy] = h->sh.i_first_mb;
    h->mb.partition[i_mb_xy] = IS_INTRA( i_mb_type ) ? D_16x16 : h->mb.i_partition;
    h->mb.i_mb_prev_xy = i_mb_xy;

    /* save intra4x4 */
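    /* For other MB types, fill in DC so neighbours can still predict; with
     * constrained intra pred, inter MBs are marked unavailable (-1) instead. */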
    if( i_mb_type == I_4x4 )
    {
        CP32( &i4x4[0], &h->mb.cache.intra4x4_pred_mode[x264_scan8[10]] );
        M32( &i4x4[4] ) = pack8to32( h->mb.cache.intra4x4_pred_mode[x264_scan8[5] ],
                                     h->mb.cache.intra4x4_pred_mode[x264_scan8[7] ],
                                     h->mb.cache.intra4x4_pred_mode[x264_scan8[13] ], 0);
    }
    else if( !h->param.b_constrained_intra || IS_INTRA(i_mb_type) )
        M64( i4x4 ) = I_PRED_4x4_DC * 0x0101010101010101ULL;
    else
        M64( i4x4 ) = (uint8_t)(-1) * 0x0101010101010101ULL;


    if( i_mb_type == I_PCM )
    {
        h->mb.qp[i_mb_xy] = 0;
        h->mb.i_last_dqp = 0;
        h->mb.i_cbp_chroma = CHROMA444 ? 0 : 2;
        h->mb.i_cbp_luma = 0xf;
        h->mb.cbp[i_mb_xy] = (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma | 0x1700;
        h->mb.b_transform_8x8 = 0;
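        /* PCM blocks count as fully coded: CABAC only needs a coded flag,
         * while CAVLC stores actual coefficient counts (16 per 4x4 block). */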
        for( int i = 0; i < 48; i++ )
            h->mb.cache.non_zero_count[x264_scan8[i]] = h->param.b_cabac ? 1 : 16;
    }
    else
    {
        if( h->mb.i_type != I_16x16 && h->mb.i_cbp_luma == 0 && h->mb.i_cbp_chroma == 0 )
            h->mb.i_qp = h->mb.i_last_qp;
        h->mb.qp[i_mb_xy] = h->mb.i_qp;
        h->mb.i_last_dqp = h->mb.i_qp - h->mb.i_last_qp;
        h->mb.i_last_qp = h->mb.i_qp;
    }

    /* save non zero count */
    CP32( &nnz[ 0+0*4], &h->mb.cache.non_zero_count[x264_scan8[ 0]] );
    CP32( &nnz[ 0+1*4], &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
    CP32( &nnz[ 0+2*4], &h->mb.cache.non_zero_count[x264_scan8[ 8]] );
    CP32( &nnz[ 0+3*4], &h->mb.cache.non_zero_count[x264_scan8[10]] );
    CP32( &nnz[16+0*4], &h->mb.cache.non_zero_count[x264_scan8[16+0]] );
    CP32( &nnz[16+1*4], &h->mb.cache.non_zero_count[x264_scan8[16+2]] );
    CP32( &nnz[32+0*4], &h->mb.cache.non_zero_count[x264_scan8[32+0]] );
    CP32( &nnz[32+1*4], &h->mb.cache.non_zero_count[x264_scan8[32+2]] );
    if( CHROMA_FORMAT >= CHROMA_422 )
    {
        CP32( &nnz[16+2*4], &h->mb.cache.non_zero_count[x264_scan8[16+ 8]] );
        CP32( &nnz[16+3*4], &h->mb.cache.non_zero_count[x264_scan8[16+10]] );
        CP32( &nnz[32+2*4], &h->mb.cache.non_zero_count[x264_scan8[32+ 8]] );
        CP32( &nnz[32+3*4], &h->mb.cache.non_zero_count[x264_scan8[32+10]] );
    }

    if( h->mb.i_cbp_luma == 0 && h->mb.i_type != I_8x8 )
        h->mb.b_transform_8x8 = 0;
    h->mb.mb_transform_size[i_mb_xy] = h->mb.b_transform_8x8;

    if( h->sh.i_type != SLICE_TYPE_I )
    {
        int16_t (*mv0)[2] = &h->mb.mv[0][i_mb_4x4];
        int8_t *ref0 = &h->mb.ref[0][i_mb_8x8];
        if( !IS_INTRA( i_mb_type ) )
        {
            ref0[0+0*s8x8] = h->mb.cache.ref[0][x264_scan8[0]];
            ref0[1+0*s8x8] = h->mb.cache.ref[0][x264_scan8[4]];
            ref0[0+1*s8x8] = h->mb.cache.ref[0][x264_scan8[8]];
            ref0[1+1*s8x8] = h->mb.cache.ref[0][x264_scan8[12]];
            CP128( &mv0[0*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*0] );
            CP128( &mv0[1*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*1] );
            CP128( &mv0[2*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*2] );
            CP128( &mv0[3*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*3] );
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                int16_t (*mv1)[2] = &h->mb.mv[1][i_mb_4x4];
                int8_t *ref1 = &h->mb.ref[1][i_mb_8x8];
                ref1[0+0*s8x8] = h->mb.cache.ref[1][x264_scan8[0]];
                ref1[1+0*s8x8] = h->mb.cache.ref[1][x264_scan8[4]];
                ref1[0+1*s8x8] = h->mb.cache.ref[1][x264_scan8[8]];
                ref1[1+1*s8x8] = h->mb.cache.ref[1][x264_scan8[12]];
                CP128( &mv1[0*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*0] );
                CP128( &mv1[1*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*1] );
                CP128( &mv1[2*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*2] );
                CP128( &mv1[3*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*3] );
            }
        }
        else
        {
            M16( &ref0[0*s8x8] ) = (uint8_t)(-1) * 0x0101;
            M16( &ref0[1*s8x8] ) = (uint8_t)(-1) * 0x0101;
            M128( &mv0[0*s4x4] ) = M128_ZERO;
            M128( &mv0[1*s4x4] ) = M128_ZERO;
            M128( &mv0[2*s4x4] ) = M128_ZERO;
            M128( &mv0[3*s4x4] ) = M128_ZERO;
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                int16_t (*mv1)[2] = &h->mb.mv[1][i_mb_4x4];
                int8_t *ref1 = &h->mb.ref[1][i_mb_8x8];
                M16( &ref1[0*s8x8] ) = (uint8_t)(-1) * 0x0101;
                M16( &ref1[1*s8x8] ) = (uint8_t)(-1) * 0x0101;
                M128( &mv1[0*s4x4] ) = M128_ZERO;
                M128( &mv1[1*s4x4] ) = M128_ZERO;
                M128( &mv1[2*s4x4] ) = M128_ZERO;
                M128( &mv1[3*s4x4] ) = M128_ZERO;
            }
        }
    }

    if( h->param.b_cabac )
    {
        uint8_t (*mvd0)[2] = h->mb.mvd[0][i_mb_xy];
        if( IS_INTRA(i_mb_type) && i_mb_type != I_PCM )
            h->mb.chroma_pred_mode[i_mb_xy] = x264_mb_chroma_pred_mode_fix[h->mb.i_chroma_pred_mode];
        else
            h->mb.chroma_pred_mode[i_mb_xy] = I_PRED_CHROMA_DC;

        if( (0x3FF30 >> i_mb_type) & 1 ) /* !INTRA && !SKIP && !DIRECT */
        {
            CP64( mvd0[0], h->mb.cache.mvd[0][x264_scan8[10]] );
            CP16( mvd0[4], h->mb.cache.mvd[0][x264_scan8[5 ]] );
            CP16( mvd0[5], h->mb.cache.mvd[0][x264_scan8[7 ]] );
            CP16( mvd0[6], h->mb.cache.mvd[0][x264_scan8[13]] );
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                uint8_t (*mvd1)[2] = h->mb.mvd[1][i_mb_xy];
                CP64( mvd1[0], h->mb.cache.mvd[1][x264_scan8[10]] );
                CP16( mvd1[4], h->mb.cache.mvd[1][x264_scan8[5 ]] );
                CP16( mvd1[5], h->mb.cache.mvd[1][x264_scan8[7 ]] );
                CP16( mvd1[6], h->mb.cache.mvd[1][x264_scan8[13]] );
            }
        }
        else
        {
            M128( mvd0[0] ) = M128_ZERO;
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                uint8_t (*mvd1)[2] = h->mb.mvd[1][i_mb_xy];
                M128( mvd1[0] ) = M128_ZERO;
            }
        }

        if( h->sh.i_type == SLICE_TYPE_B )
        {
            if( i_mb_type == B_SKIP || i_mb_type == B_DIRECT )
                h->mb.skipbp[i_mb_xy] = 0xf;
            else if( i_mb_type == B_8x8 )
            {
                int skipbp = ( h->mb.i_sub_partition[0] == D_DIRECT_8x8 ) << 0;
                skipbp |= ( h->mb.i_sub_partition[1] == D_DIRECT_8x8 ) << 1;
                skipbp |= ( h->mb.i_sub_partition[2] == D_DIRECT_8x8 ) << 2;
                skipbp |= ( h->mb.i_sub_partition[3] == D_DIRECT_8x8 ) << 3;
                h->mb.skipbp[i_mb_xy] = skipbp;
            }
            else
                h->mb.skipbp[i_mb_xy] = 0;
        }
    }
}


void x264_macroblock_bipred_init( x264_t *h )
{
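    /* Precompute, for every (ref0, ref1) pair, the temporal-direct distance
     * scale factor and the implicit bipred weight, both derived from POC
     * distances as in the H.264 spec:
     *   td = clip3( poc1 - poc0, -128, 127 )
     *   tb = clip3( cur_poc - poc0, -128, 127 )
     *   tx = ( 16384 + |td|/2 ) / td
     *   dist_scale_factor = clip3( (tb * tx + 32) >> 6, -1024, 1023 )
     * MBAFF needs per-field POCs, hence the extra mbfield/field dimensions. */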
    for( int mbfield = 0; mbfield <= SLICE_MBAFF; mbfield++ )
        for( int field = 0; field <= SLICE_MBAFF; field++ )
            for( int i_ref0 = 0; i_ref0 < (h->i_ref[0]<<mbfield); i_ref0++ )
            {
                x264_frame_t *l0 = h->fref[0][i_ref0>>mbfield];
                int poc0 = l0->i_poc + mbfield*l0->i_delta_poc[field^(i_ref0&1)];
                for( int i_ref1 = 0; i_ref1 < (h->i_ref[1]<<mbfield); i_ref1++ )
                {
                    x264_frame_t *l1 = h->fref[1][i_ref1>>mbfield];
                    int cur_poc = h->fdec->i_poc + mbfield*h->fdec->i_delta_poc[field];
                    int poc1 = l1->i_poc + mbfield*l1->i_delta_poc[field^(i_ref1&1)];
                    int td = x264_clip3( poc1 - poc0, -128, 127 );
                    if( td == 0 /* || pic0 is a long-term ref */ )
                    {
                        h->mb.dist_scale_factor_buf[mbfield][field][i_ref0][i_ref1] = 256;
                        h->mb.bipred_weight_buf[mbfield][field][i_ref0][i_ref1] = 32;
                    }
                    else
                    {
                        int tb = x264_clip3( cur_poc - poc0, -128, 127 );
                        int tx = (16384 + (abs(td) >> 1)) / td;
                        int dist_scale_factor = x264_clip3( (tb * tx + 32) >> 6, -1024, 1023 );

                        h->mb.dist_scale_factor_buf[mbfield][field][i_ref0][i_ref1] = dist_scale_factor;

                        dist_scale_factor >>= 2;
                        if( h->param.analyse.b_weighted_bipred /* && pic1 is not a long-term ref */
                            && dist_scale_factor >= -64
                            && dist_scale_factor <= 128 )
                        {
                            h->mb.bipred_weight_buf[mbfield][field][i_ref0][i_ref1] = 64 - dist_scale_factor;
                            // ssse3 implementation of biweight doesn't support the extrema.
                            // if we ever generate them, we'll have to drop that optimization.
                            assert( dist_scale_factor >= -63 && dist_scale_factor <= 127 );
                        }
                        else
                            h->mb.bipred_weight_buf[mbfield][field][i_ref0][i_ref1] = 32;
                    }
                }
            }
}