/*****************************************************************************
 * analyse.c: macroblock analysis
 *****************************************************************************
 * Copyright (C) 2003-2021 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
27
#include "common/common.h"
#include "macroblock.h"
#include "me.h"
#include "ratecontrol.h"
#include "analyse.h"
/* rdo.c is deliberately included as a translation unit so its static
 * functions can be inlined into the analysis hot path. */
#include "rdo.c"
34
35 typedef struct
36 {
37 x264_me_t me16x16;
38 x264_me_t bi16x16; /* for b16x16 BI mode, since MVs can differ from l0/l1 */
39 x264_me_t me8x8[4];
40 x264_me_t me4x4[4][4];
41 x264_me_t me8x4[4][2];
42 x264_me_t me4x8[4][2];
43 x264_me_t me16x8[2];
44 x264_me_t me8x16[2];
45 int i_rd16x16;
46 int i_cost8x8;
47 int i_cost4x4[4]; /* cost per 8x8 partition */
48 int i_cost8x4[4]; /* cost per 8x8 partition */
49 int i_cost4x8[4]; /* cost per 8x8 partition */
50 int i_cost16x8;
51 int i_cost8x16;
52 /* [ref][0] is 16x16 mv, [ref][1..4] are 8x8 mv from partition [0..3], [ref][5] is for alignment */
53 ALIGNED_8( int16_t mvc[32][6][2] );
54 } x264_mb_analysis_list_t;
55
56 typedef struct
57 {
58 /* conduct the analysis using this lamda and QP */
59 int i_lambda;
60 int i_lambda2;
61 int i_qp;
62 uint16_t *p_cost_mv;
63 uint16_t *p_cost_ref[2];
64 int i_mbrd;
65
66
67 /* I: Intra part */
68 /* Take some shortcuts in intra search if intra is deemed unlikely */
69 int b_fast_intra;
70 int b_force_intra; /* For Periodic Intra Refresh. Only supported in P-frames. */
71 int b_avoid_topright; /* For Periodic Intra Refresh: don't predict from top-right pixels. */
72 int b_try_skip;
73
74 /* Luma part */
75 int i_satd_i16x16;
76 int i_satd_i16x16_dir[7];
77 int i_predict16x16;
78
79 int i_satd_i8x8;
80 int i_cbp_i8x8_luma;
81 ALIGNED_16( uint16_t i_satd_i8x8_dir[4][16] );
82 int i_predict8x8[4];
83
84 int i_satd_i4x4;
85 int i_predict4x4[16];
86
87 int i_satd_pcm;
88
89 /* Chroma part */
90 int i_satd_chroma;
91 int i_satd_chroma_dir[7];
92 int i_predict8x8chroma;
93
94 /* II: Inter part P/B frame */
95 x264_mb_analysis_list_t l0;
96 x264_mb_analysis_list_t l1;
97
98 int i_cost16x16bi; /* used the same ref and mv as l0 and l1 (at least for now) */
99 int i_cost16x16direct;
100 int i_cost8x8bi;
101 int i_cost8x8direct[4];
102 int i_satd8x8[3][4]; /* [L0,L1,BI][8x8 0..3] SATD only */
103 int i_cost_est16x8[2]; /* Per-partition estimated cost */
104 int i_cost_est8x16[2];
105 int i_cost16x8bi;
106 int i_cost8x16bi;
107 int i_rd16x16bi;
108 int i_rd16x16direct;
109 int i_rd16x8bi;
110 int i_rd8x16bi;
111 int i_rd8x8bi;
112
113 int i_mb_partition16x8[2]; /* mb_partition_e */
114 int i_mb_partition8x16[2];
115 int i_mb_type16x8; /* mb_class_e */
116 int i_mb_type8x16;
117
118 int b_direct_available;
119 int b_early_terminate;
120
121 } x264_mb_analysis_t;
122
123 /* TODO: calculate CABAC costs */
124 static const uint8_t i_mb_b_cost_table[X264_MBTYPE_MAX] =
125 {
126 9, 9, 9, 9, 0, 0, 0, 1, 3, 7, 7, 7, 3, 7, 7, 7, 5, 9, 0
127 };
128 static const uint8_t i_mb_b16x8_cost_table[17] =
129 {
130 0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 7, 7, 5, 7, 9, 9, 9
131 };
132 static const uint8_t i_sub_mb_b_cost_table[13] =
133 {
134 7, 5, 5, 3, 7, 5, 7, 3, 7, 7, 7, 5, 1
135 };
136 static const uint8_t i_sub_mb_p_cost_table[4] =
137 {
138 5, 3, 3, 1
139 };
140
141 static void analyse_update_cache( x264_t *h, x264_mb_analysis_t *a );
142
/* Allocate and fill the cost tables for one QP: MV bit costs (qpel and,
 * for exhaustive search, per-subpel-phase fpel tables), reference-index
 * costs, and i4x4 mode-signalling costs.
 * logs[] holds precomputed log2-based bit estimates (see caller).
 * Returns 0 on success (or if already initialized), -1 on OOM. */
static int init_costs( x264_t *h, float *logs, int qp )
{
    if( h->cost_mv[qp] )
        return 0;

    int mv_range = h->param.analyse.i_mv_range << PARAM_INTERLACED;
    int lambda = x264_lambda_tab[qp];
    /* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */
    CHECKED_MALLOC( h->cost_mv[qp], (4*4*mv_range + 1) * sizeof(uint16_t) );
    h->cost_mv[qp] += 2*4*mv_range; /* bias pointer so it can be indexed by signed MV delta */
    for( int i = 0; i <= 2*4*mv_range; i++ )
    {
        h->cost_mv[qp][-i] =
        h->cost_mv[qp][i]  = X264_MIN( (int)(lambda * logs[i] + .5f), UINT16_MAX );
    }
    for( int i = 0; i < 3; i++ )
        for( int j = 0; j < 33; j++ )
            h->cost_table->ref[qp][i][j] = i ? X264_MIN( lambda * bs_size_te( i, j ), UINT16_MAX ) : 0;
    /* ESA/TESA need fullpel cost tables split by qpel phase. */
    if( h->param.analyse.i_me_method >= X264_ME_ESA && !h->cost_mv_fpel[qp][0] )
    {
        for( int j = 0; j < 4; j++ )
        {
            CHECKED_MALLOC( h->cost_mv_fpel[qp][j], (4*mv_range + 1) * sizeof(uint16_t) );
            h->cost_mv_fpel[qp][j] += 2*mv_range;
            for( int i = -2*mv_range; i < 2*mv_range; i++ )
                h->cost_mv_fpel[qp][j][i] = h->cost_mv[qp][i*4+j];
        }
    }
    uint16_t *cost_i4x4_mode = h->cost_table->i4x4_mode[qp];
    /* Index 8 corresponds to the predicted mode, which costs 0 extra bits. */
    for( int i = 0; i < 17; i++ )
        cost_i4x4_mode[i] = 3*lambda*(i!=8);
    return 0;
fail:
    return -1;
}
178
/* Build the MV/ref/intra-mode cost tables for every QP this encode can
 * use, plus the fixed lookahead QP.
 * Returns 0 on success, -1 on allocation failure. */
int x264_analyse_init_costs( x264_t *h )
{
    int mv_range = h->param.analyse.i_mv_range << PARAM_INTERLACED;
    /* Precompute the log2-based bit-cost estimates once; shared by all QPs. */
    float *logs = x264_malloc( (2*4*mv_range+1) * sizeof(float) );
    if( !logs )
        return -1;

    logs[0] = 0.718f;
    for( int i = 1; i <= 2*4*mv_range; i++ )
        logs[i] = log2f( i+1 ) * 2.0f + 1.718f;

    for( int qp = X264_MIN( h->param.rc.i_qp_min, QP_MAX_SPEC ); qp <= h->param.rc.i_qp_max; qp++ )
        if( init_costs( h, logs, qp ) )
            goto fail;

    if( init_costs( h, logs, X264_LOOKAHEAD_QP ) )
        goto fail;

    x264_free( logs );
    return 0;
fail:
    x264_free( logs );
    return -1;
}
203
/* Free all per-QP cost tables allocated by init_costs().
 * The stored pointers were biased for signed indexing, so un-bias them
 * before freeing (mirrors the offsets applied at allocation time). */
void x264_analyse_free_costs( x264_t *h )
{
    int mv_range = h->param.analyse.i_mv_range << PARAM_INTERLACED;
    for( int i = 0; i < QP_MAX+1; i++ )
    {
        if( h->cost_mv[i] )
            x264_free( h->cost_mv[i] - 2*4*mv_range );
        for( int j = 0; j < 4; j++ )
        {
            if( h->cost_mv_fpel[i][j] )
                x264_free( h->cost_mv_fpel[i][j] - 2*mv_range );
        }
    }
}
218
/* Incrementally apply weighted prediction to reference planes, up to row
 * `end` of the current frame. Only the rows not yet weighted (tracked by
 * h->fenc->i_lines_weighted) are processed; the first weighted reference
 * found drives the row bookkeeping for all weighted references. */
void x264_analyse_weight_frame( x264_t *h, int end )
{
    for( int j = 0; j < h->i_ref[0]; j++ )
    {
        if( h->sh.weight[j][0].weightfn )
        {
            x264_frame_t *frame = h->fref[0][j];
            int width = frame->i_width[0] + PADH2;
            int i_padv = PADV << PARAM_INTERLACED;
            int offset, height;
            /* Start from the padded top-left corner of the filtered plane. */
            pixel *src = frame->filtered[0][0] - frame->i_stride[0]*i_padv - PADH_ALIGN;
            height = X264_MIN( 16 + end + i_padv, h->fref[0][j]->i_lines[0] + i_padv*2 ) - h->fenc->i_lines_weighted;
            offset = h->fenc->i_lines_weighted*frame->i_stride[0];
            h->fenc->i_lines_weighted += height;
            if( height )
                for( int k = j; k < h->i_ref[0]; k++ )
                    if( h->sh.weight[k][0].weightfn )
                    {
                        pixel *dst = h->fenc->weighted[k] - h->fenc->i_stride[0]*i_padv - PADH_ALIGN;
                        x264_weight_scale_plane( h, dst + offset, frame->i_stride[0],
                                                 src + offset, frame->i_stride[0],
                                                 width, height, &h->sh.weight[k][0] );
                    }
            break; /* row bookkeeping done once; inner loop covered all lists */
        }
    }
}
246
247 /* initialize an array of lambda*nbits for all possible mvs */
mb_analyse_load_costs(x264_t * h,x264_mb_analysis_t * a)248 static void mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
249 {
250 a->p_cost_mv = h->cost_mv[a->i_qp];
251 a->p_cost_ref[0] = h->cost_table->ref[a->i_qp][x264_clip3(h->sh.i_num_ref_idx_l0_active-1,0,2)];
252 a->p_cost_ref[1] = h->cost_table->ref[a->i_qp][x264_clip3(h->sh.i_num_ref_idx_l1_active-1,0,2)];
253 }
254
/* Set all QP-derived state for the current macroblock: lambdas, trellis
 * lambdas, psy/chroma lambda offsets, and noise-reduction buffers.
 * QPs above QP_MAX_SPEC are "emergency" QPs used only for lambda math;
 * the actual coded QP is clamped to QP_MAX_SPEC. */
static void mb_analyse_init_qp( x264_t *h, x264_mb_analysis_t *a, int qp )
{
    int effective_chroma_qp = h->chroma_qp_table[SPEC_QP(qp)] + X264_MAX( qp - QP_MAX_SPEC, 0 );
    a->i_lambda = x264_lambda_tab[qp];
    a->i_lambda2 = x264_lambda2_tab[qp];

    h->mb.b_trellis = h->param.analyse.i_trellis > 1 && a->i_mbrd;
    if( h->param.analyse.i_trellis )
    {
        h->mb.i_trellis_lambda2[0][0] = x264_trellis_lambda2_tab[0][qp];
        h->mb.i_trellis_lambda2[0][1] = x264_trellis_lambda2_tab[1][qp];
        h->mb.i_trellis_lambda2[1][0] = x264_trellis_lambda2_tab[0][effective_chroma_qp];
        h->mb.i_trellis_lambda2[1][1] = x264_trellis_lambda2_tab[1][effective_chroma_qp];
    }
    h->mb.i_psy_rd_lambda = a->i_lambda;
    /* Adjusting chroma lambda based on QP offset hurts PSNR but improves visual quality. */
    int chroma_offset_idx = X264_MIN( qp-effective_chroma_qp+12, MAX_CHROMA_LAMBDA_OFFSET );
    h->mb.i_chroma_lambda2_offset = h->param.analyse.b_psy ? x264_chroma_lambda2_offset_tab[chroma_offset_idx] : 256;

    if( qp > QP_MAX_SPEC )
    {
        h->nr_offset = h->nr_offset_emergency[qp-QP_MAX_SPEC-1];
        h->nr_residual_sum = h->nr_residual_sum_buf[1];
        h->nr_count = h->nr_count_buf[1];
        h->mb.b_noise_reduction = 1;
        qp = QP_MAX_SPEC; /* Out-of-spec QPs are just used for calculating lambda values. */
    }
    else
    {
        h->nr_offset = h->nr_offset_denoise;
        h->nr_residual_sum = h->nr_residual_sum_buf[0];
        h->nr_count = h->nr_count_buf[0];
        h->mb.b_noise_reduction = 0;
    }

    a->i_qp = h->mb.i_qp = qp;
    h->mb.i_chroma_qp = h->chroma_qp_table[qp];
}
293
/* Per-macroblock analysis setup: choose the RD level, reset intra/inter
 * cost fields to COST_MAX, clamp the MV search range to the frame,
 * thread-sync and Periodic-Intra-Refresh limits, and decide the
 * fast-intra / forced-intra shortcuts. */
static void mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int qp )
{
    int subme = h->param.analyse.i_subpel_refine - (h->sh.i_type == SLICE_TYPE_B);

    /* mbrd == 1 -> RD mode decision */
    /* mbrd == 2 -> RD refinement */
    /* mbrd == 3 -> QPRD */
    a->i_mbrd = (subme>=6) + (subme>=8) + (h->param.analyse.i_subpel_refine>=10);
    h->mb.b_deblock_rdo = h->param.analyse.i_subpel_refine >= 9 && h->sh.i_disable_deblocking_filter_idc != 1;
    a->b_early_terminate = h->param.analyse.i_subpel_refine < 11;

    mb_analyse_init_qp( h, a, qp );

    h->mb.b_transform_8x8 = 0;

    /* I: Intra part */
    a->i_satd_i16x16 =
    a->i_satd_i8x8   =
    a->i_satd_i4x4   = COST_MAX;
    a->i_satd_chroma = CHROMA_FORMAT ? COST_MAX : 0;

    /* non-RD PCM decision is inaccurate (as is psy-rd), so don't do it.
     * PCM cost can overflow with high lambda2, so cap it at COST_MAX. */
    uint64_t pcm_cost = ((uint64_t)X264_PCM_COST*a->i_lambda2 + 128) >> 8;
    a->i_satd_pcm = !h->param.i_avcintra_class && !h->mb.i_psy_rd && a->i_mbrd && pcm_cost < COST_MAX ? pcm_cost : COST_MAX;

    a->b_fast_intra = 0;
    a->b_avoid_topright = 0;
    h->mb.i_skip_intra =
        h->mb.b_lossless ? 0 :
        a->i_mbrd ? 2 :
        !h->param.analyse.i_trellis && !h->param.analyse.i_noise_reduction;

    /* II: Inter part P/B frame */
    if( h->sh.i_type != SLICE_TYPE_I )
    {
        int i_fmv_range = 4 * h->param.analyse.i_mv_range;
        // limit motion search to a slightly smaller range than the theoretical limit,
        // since the search may go a few iterations past its given range
        int i_fpel_border = 6; // umh: 1 for diamond, 2 for octagon, 2 for hpel

        /* Calculate max allowed MV range */
        h->mb.mv_min[0] = 4*( -16*h->mb.i_mb_x - 24 );
        h->mb.mv_max[0] = 4*( 16*( h->mb.i_mb_width - h->mb.i_mb_x - 1 ) + 24 );
        h->mb.mv_min_spel[0] = X264_MAX( h->mb.mv_min[0], -i_fmv_range );
        h->mb.mv_max_spel[0] = X264_MIN( h->mb.mv_max[0], i_fmv_range-1 );
        if( h->param.b_intra_refresh && h->sh.i_type == SLICE_TYPE_P )
        {
            int max_x = (h->fref[0][0]->i_pir_end_col * 16 - 3)*4; /* 3 pixels of hpel border */
            int max_mv = max_x - 4*16*h->mb.i_mb_x;
            /* If we're left of the refresh bar, don't reference right of it. */
            if( max_mv > 0 && h->mb.i_mb_x < h->fdec->i_pir_start_col )
                h->mb.mv_max_spel[0] = X264_MIN( h->mb.mv_max_spel[0], max_mv );
        }
        h->mb.mv_limit_fpel[0][0] = (h->mb.mv_min_spel[0]>>2) + i_fpel_border;
        h->mb.mv_limit_fpel[1][0] = (h->mb.mv_max_spel[0]>>2) - i_fpel_border;
        /* Vertical limits only need recomputing once per row (pair). */
        if( h->mb.i_mb_x == 0 && !(h->mb.i_mb_y & PARAM_INTERLACED) )
        {
            int mb_y = h->mb.i_mb_y >> SLICE_MBAFF;
            int thread_mvy_range = i_fmv_range;

            if( h->i_thread_frames > 1 )
            {
                /* With frame-parallel threading, wait until the reference
                 * frames are reconstructed far enough down to be searched. */
                int pix_y = (h->mb.i_mb_y | PARAM_INTERLACED) * 16;
                int thresh = pix_y + h->param.analyse.i_mv_range_thread;
                for( int i = (h->sh.i_type == SLICE_TYPE_B); i >= 0; i-- )
                    for( int j = 0; j < h->i_ref[i]; j++ )
                    {
                        int completed = x264_frame_cond_wait( h->fref[i][j]->orig, thresh );
                        thread_mvy_range = X264_MIN( thread_mvy_range, completed - pix_y );
                    }

                if( h->param.b_deterministic )
                    thread_mvy_range = h->param.analyse.i_mv_range_thread;
                if( PARAM_INTERLACED )
                    thread_mvy_range >>= 1;

                x264_analyse_weight_frame( h, pix_y + thread_mvy_range );
            }

            if( PARAM_INTERLACED )
            {
                /* 0 == top progressive, 1 == bot progressive, 2 == interlaced */
                for( int i = 0; i < 3; i++ )
                {
                    int j = i == 2;
                    mb_y = (h->mb.i_mb_y >> j) + (i == 1);
                    h->mb.mv_miny_row[i] = 4*( -16*mb_y - 24 );
                    h->mb.mv_maxy_row[i] = 4*( 16*( (h->mb.i_mb_height>>j) - mb_y - 1 ) + 24 );
                    h->mb.mv_miny_spel_row[i] = X264_MAX( h->mb.mv_miny_row[i], -i_fmv_range );
                    h->mb.mv_maxy_spel_row[i] = X264_MIN3( h->mb.mv_maxy_row[i], i_fmv_range-1, 4*thread_mvy_range );
                    h->mb.mv_miny_fpel_row[i] = (h->mb.mv_miny_spel_row[i]>>2) + i_fpel_border;
                    h->mb.mv_maxy_fpel_row[i] = (h->mb.mv_maxy_spel_row[i]>>2) - i_fpel_border;
                }
            }
            else
            {
                h->mb.mv_min[1] = 4*( -16*mb_y - 24 );
                h->mb.mv_max[1] = 4*( 16*( h->mb.i_mb_height - mb_y - 1 ) + 24 );
                h->mb.mv_min_spel[1] = X264_MAX( h->mb.mv_min[1], -i_fmv_range );
                h->mb.mv_max_spel[1] = X264_MIN3( h->mb.mv_max[1], i_fmv_range-1, 4*thread_mvy_range );
                h->mb.mv_limit_fpel[0][1] = (h->mb.mv_min_spel[1]>>2) + i_fpel_border;
                h->mb.mv_limit_fpel[1][1] = (h->mb.mv_max_spel[1]>>2) - i_fpel_border;
            }
        }
        if( PARAM_INTERLACED )
        {
            /* Pick the per-row limits matching this MB's field parity. */
            int i = MB_INTERLACED ? 2 : h->mb.i_mb_y&1;
            h->mb.mv_min[1] = h->mb.mv_miny_row[i];
            h->mb.mv_max[1] = h->mb.mv_maxy_row[i];
            h->mb.mv_min_spel[1] = h->mb.mv_miny_spel_row[i];
            h->mb.mv_max_spel[1] = h->mb.mv_maxy_spel_row[i];
            h->mb.mv_limit_fpel[0][1] = h->mb.mv_miny_fpel_row[i];
            h->mb.mv_limit_fpel[1][1] = h->mb.mv_maxy_fpel_row[i];
        }

        a->l0.me16x16.cost =
        a->l0.i_rd16x16    =
        a->l0.i_cost8x8    =
        a->l0.i_cost16x8   =
        a->l0.i_cost8x16   = COST_MAX;
        if( h->sh.i_type == SLICE_TYPE_B )
        {
            a->l1.me16x16.cost =
            a->l1.i_rd16x16    =
            a->l1.i_cost8x8    =
            a->i_cost8x8direct[0] =
            a->i_cost8x8direct[1] =
            a->i_cost8x8direct[2] =
            a->i_cost8x8direct[3] =
            a->l1.i_cost16x8   =
            a->l1.i_cost8x16   =
            a->i_rd16x16bi     =
            a->i_rd16x16direct =
            a->i_rd8x8bi       =
            a->i_rd16x8bi      =
            a->i_rd8x16bi      =
            a->i_cost16x16bi   =
            a->i_cost16x16direct =
            a->i_cost8x8bi     =
            a->i_cost16x8bi    =
            a->i_cost8x16bi    = COST_MAX;
        }
        else if( h->param.analyse.inter & X264_ANALYSE_PSUB8x8 )
            for( int i = 0; i < 4; i++ )
            {
                a->l0.i_cost4x4[i] =
                a->l0.i_cost8x4[i] =
                a->l0.i_cost4x8[i] = COST_MAX;
            }

        /* Fast intra decision */
        if( a->b_early_terminate && h->mb.i_mb_xy - h->sh.i_first_mb > 4 )
        {
            /* Intra is likely if any decoded neighbor (or, for P slices, the
             * co-located MB in the reference) was intra, or if a large share
             * of the frame so far is intra; otherwise take intra shortcuts. */
            if( IS_INTRA( h->mb.i_mb_type_left[0] ) ||
                IS_INTRA( h->mb.i_mb_type_top ) ||
                IS_INTRA( h->mb.i_mb_type_topleft ) ||
                IS_INTRA( h->mb.i_mb_type_topright ) ||
                (h->sh.i_type == SLICE_TYPE_P && IS_INTRA( h->fref[0][0]->mb_type[h->mb.i_mb_xy] )) ||
                (h->mb.i_mb_xy - h->sh.i_first_mb < 3*(h->stat.frame.i_mb_count[I_4x4] + h->stat.frame.i_mb_count[I_8x8] + h->stat.frame.i_mb_count[I_16x16] + h->stat.frame.i_mb_count[I_PCM])) )
            { /* intra is likely */ }
            else
            {
                a->b_fast_intra = 1;
            }
        }
        h->mb.b_skip_mc = 0;
        /* Inside the Periodic Intra Refresh column range: force intra. */
        if( h->param.b_intra_refresh && h->sh.i_type == SLICE_TYPE_P &&
            h->mb.i_mb_x >= h->fdec->i_pir_start_col && h->mb.i_mb_x <= h->fdec->i_pir_end_col )
        {
            a->b_force_intra = 1;
            a->b_fast_intra = 0;
            a->b_avoid_topright = h->mb.i_mb_x == h->fdec->i_pir_end_col;
        }
        else
            a->b_force_intra = 0;
    }
}
472
473 /* Prediction modes allowed for various combinations of neighbors. */
474 /* Terminated by a -1. */
475 /* In order, no neighbors, left, top, top/left, top/left/topleft */
476 static const int8_t i16x16_mode_available[5][5] =
477 {
478 {I_PRED_16x16_DC_128, -1, -1, -1, -1},
479 {I_PRED_16x16_DC_LEFT, I_PRED_16x16_H, -1, -1, -1},
480 {I_PRED_16x16_DC_TOP, I_PRED_16x16_V, -1, -1, -1},
481 {I_PRED_16x16_V, I_PRED_16x16_H, I_PRED_16x16_DC, -1, -1},
482 {I_PRED_16x16_V, I_PRED_16x16_H, I_PRED_16x16_DC, I_PRED_16x16_P, -1},
483 };
484
485 static const int8_t chroma_mode_available[5][5] =
486 {
487 {I_PRED_CHROMA_DC_128, -1, -1, -1, -1},
488 {I_PRED_CHROMA_DC_LEFT, I_PRED_CHROMA_H, -1, -1, -1},
489 {I_PRED_CHROMA_DC_TOP, I_PRED_CHROMA_V, -1, -1, -1},
490 {I_PRED_CHROMA_V, I_PRED_CHROMA_H, I_PRED_CHROMA_DC, -1, -1},
491 {I_PRED_CHROMA_V, I_PRED_CHROMA_H, I_PRED_CHROMA_DC, I_PRED_CHROMA_P, -1},
492 };
493
494 static const int8_t i8x8_mode_available[2][5][10] =
495 {
496 {
497 {I_PRED_4x4_DC_128, -1, -1, -1, -1, -1, -1, -1, -1, -1},
498 {I_PRED_4x4_DC_LEFT, I_PRED_4x4_H, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1, -1},
499 {I_PRED_4x4_DC_TOP, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_VL, -1, -1, -1, -1, -1, -1},
500 {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_VL, I_PRED_4x4_HU, -1, -1, -1, -1},
501 {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_DDR, I_PRED_4x4_VR, I_PRED_4x4_HD, I_PRED_4x4_VL, I_PRED_4x4_HU, -1},
502 },
503 {
504 {I_PRED_4x4_DC_128, -1, -1, -1, -1, -1, -1, -1, -1, -1},
505 {I_PRED_4x4_DC_LEFT, I_PRED_4x4_H, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1, -1},
506 {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
507 {I_PRED_4x4_H, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1, -1, -1},
508 {I_PRED_4x4_H, I_PRED_4x4_HD, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1, -1},
509 }
510 };
511
512 static const int8_t i4x4_mode_available[2][5][10] =
513 {
514 {
515 {I_PRED_4x4_DC_128, -1, -1, -1, -1, -1, -1, -1, -1, -1},
516 {I_PRED_4x4_DC_LEFT, I_PRED_4x4_H, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1, -1},
517 {I_PRED_4x4_DC_TOP, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_VL, -1, -1, -1, -1, -1, -1},
518 {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_VL, I_PRED_4x4_HU, -1, -1, -1, -1},
519 {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_DDR, I_PRED_4x4_VR, I_PRED_4x4_HD, I_PRED_4x4_VL, I_PRED_4x4_HU, -1},
520 },
521 {
522 {I_PRED_4x4_DC_128, -1, -1, -1, -1, -1, -1, -1, -1, -1},
523 {I_PRED_4x4_DC_LEFT, I_PRED_4x4_H, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1, -1},
524 {I_PRED_4x4_DC_TOP, I_PRED_4x4_V, -1, -1, -1, -1, -1, -1, -1, -1},
525 {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1},
526 {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDR, I_PRED_4x4_VR, I_PRED_4x4_HD, I_PRED_4x4_HU, -1, -1, -1},
527 }
528 };
529
predict_16x16_mode_available(int i_neighbour)530 static ALWAYS_INLINE const int8_t *predict_16x16_mode_available( int i_neighbour )
531 {
532 int idx = i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT);
533 idx = (idx == (MB_TOP|MB_LEFT|MB_TOPLEFT)) ? 4 : idx & (MB_TOP|MB_LEFT);
534 return i16x16_mode_available[idx];
535 }
536
predict_chroma_mode_available(int i_neighbour)537 static ALWAYS_INLINE const int8_t *predict_chroma_mode_available( int i_neighbour )
538 {
539 int idx = i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT);
540 idx = (idx == (MB_TOP|MB_LEFT|MB_TOPLEFT)) ? 4 : idx & (MB_TOP|MB_LEFT);
541 return chroma_mode_available[idx];
542 }
543
predict_8x8_mode_available(int force_intra,int i_neighbour,int i)544 static ALWAYS_INLINE const int8_t *predict_8x8_mode_available( int force_intra, int i_neighbour, int i )
545 {
546 int avoid_topright = force_intra && (i&1);
547 int idx = i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT);
548 idx = (idx == (MB_TOP|MB_LEFT|MB_TOPLEFT)) ? 4 : idx & (MB_TOP|MB_LEFT);
549 return i8x8_mode_available[avoid_topright][idx];
550 }
551
predict_4x4_mode_available(int force_intra,int i_neighbour,int i)552 static ALWAYS_INLINE const int8_t *predict_4x4_mode_available( int force_intra, int i_neighbour, int i )
553 {
554 int avoid_topright = force_intra && ((i&5) == 5);
555 int idx = i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT);
556 idx = (idx == (MB_TOP|MB_LEFT|MB_TOPLEFT)) ? 4 : idx & (MB_TOP|MB_LEFT);
557 return i4x4_mode_available[avoid_topright][idx];
558 }
559
560 /* For trellis=2, we need to do this for both sizes of DCT, for trellis=1 we only need to use it on the chosen mode. */
psy_trellis_init(x264_t * h,int do_both_dct)561 static inline void psy_trellis_init( x264_t *h, int do_both_dct )
562 {
563 if( do_both_dct || h->mb.b_transform_8x8 )
564 h->dctf.sub16x16_dct8( h->mb.pic.fenc_dct8, h->mb.pic.p_fenc[0], (pixel*)x264_zero );
565 if( do_both_dct || !h->mb.b_transform_8x8 )
566 h->dctf.sub16x16_dct( h->mb.pic.fenc_dct4, h->mb.pic.p_fenc[0], (pixel*)x264_zero );
567 }
568
569 /* Reset fenc satd scores cache for psy RD */
mb_init_fenc_cache(x264_t * h,int b_satd)570 static inline void mb_init_fenc_cache( x264_t *h, int b_satd )
571 {
572 if( h->param.analyse.i_trellis == 2 && h->mb.i_psy_trellis )
573 psy_trellis_init( h, h->param.analyse.b_transform_8x8 );
574 if( !h->mb.i_psy_rd )
575 return;
576
577 M128( &h->mb.pic.fenc_hadamard_cache[0] ) = M128_ZERO;
578 M128( &h->mb.pic.fenc_hadamard_cache[2] ) = M128_ZERO;
579 M128( &h->mb.pic.fenc_hadamard_cache[4] ) = M128_ZERO;
580 M128( &h->mb.pic.fenc_hadamard_cache[6] ) = M128_ZERO;
581 h->mb.pic.fenc_hadamard_cache[8] = 0;
582 if( b_satd )
583 h->mc.memzero_aligned( h->mb.pic.fenc_satd_cache, sizeof(h->mb.pic.fenc_satd_cache) );
584 }
585
/* Select the best intra chroma prediction mode for the current MB and
 * store its cost in a->i_satd_chroma (skipped if already computed).
 * For 4:4:4, chroma cost is approximated by reusing the chosen luma
 * 16x16 mode rather than running a full per-mode search. */
static void mb_analyse_intra_chroma( x264_t *h, x264_mb_analysis_t *a )
{
    if( a->i_satd_chroma < COST_MAX )
        return;

    if( CHROMA444 )
    {
        if( !h->mb.b_chroma_me )
        {
            a->i_satd_chroma = 0;
            return;
        }

        /* Cheap approximation of chroma costs to avoid a full i4x4/i8x8 analysis. */
        if( h->mb.b_lossless )
        {
            x264_predict_lossless_16x16( h, 1, a->i_predict16x16 );
            x264_predict_lossless_16x16( h, 2, a->i_predict16x16 );
        }
        else
        {
            h->predict_16x16[a->i_predict16x16]( h->mb.pic.p_fdec[1] );
            h->predict_16x16[a->i_predict16x16]( h->mb.pic.p_fdec[2] );
        }
        a->i_satd_chroma = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE )
                         + h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE );
        return;
    }

    const int8_t *predict_mode = predict_chroma_mode_available( h->mb.i_neighbour_intra );
    int chromapix = h->luma2chroma_pixel[PIXEL_16x16];

    /* Prediction selection for chroma */
    if( predict_mode[3] >= 0 && !h->mb.b_lossless )
    {
        /* All four basic modes available: use the batched x3 comparison,
         * then evaluate the (expensive) planar mode separately. */
        int satdu[4], satdv[4];
        h->pixf.intra_mbcmp_x3_chroma( h->mb.pic.p_fenc[1], h->mb.pic.p_fdec[1], satdu );
        h->pixf.intra_mbcmp_x3_chroma( h->mb.pic.p_fenc[2], h->mb.pic.p_fdec[2], satdv );
        h->predict_chroma[I_PRED_CHROMA_P]( h->mb.pic.p_fdec[1] );
        h->predict_chroma[I_PRED_CHROMA_P]( h->mb.pic.p_fdec[2] );
        satdu[I_PRED_CHROMA_P] = h->pixf.mbcmp[chromapix]( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE );
        satdv[I_PRED_CHROMA_P] = h->pixf.mbcmp[chromapix]( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE );

        for( ; *predict_mode >= 0; predict_mode++ )
        {
            int i_mode = *predict_mode;
            int i_satd = satdu[i_mode] + satdv[i_mode] + a->i_lambda * bs_size_ue( i_mode );

            a->i_satd_chroma_dir[i_mode] = i_satd;
            COPY2_IF_LT( a->i_satd_chroma, i_satd, a->i_predict8x8chroma, i_mode );
        }
    }
    else
    {
        /* Restricted neighbor set or lossless: evaluate each mode one by one. */
        for( ; *predict_mode >= 0; predict_mode++ )
        {
            int i_satd;
            int i_mode = *predict_mode;

            /* we do the prediction */
            if( h->mb.b_lossless )
                x264_predict_lossless_chroma( h, i_mode );
            else
            {
                h->predict_chroma[i_mode]( h->mb.pic.p_fdec[1] );
                h->predict_chroma[i_mode]( h->mb.pic.p_fdec[2] );
            }

            /* we calculate the cost */
            i_satd = h->pixf.mbcmp[chromapix]( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE ) +
                     h->pixf.mbcmp[chromapix]( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE ) +
                     a->i_lambda * bs_size_ue( x264_mb_chroma_pred_mode_fix[i_mode] );

            a->i_satd_chroma_dir[i_mode] = i_satd;
            COPY2_IF_LT( a->i_satd_chroma, i_satd, a->i_predict8x8chroma, i_mode );
        }
    }

    h->mb.i_chroma_pred_mode = a->i_predict8x8chroma;
}
666
667 /* FIXME: should we do any sort of merged chroma analysis with 4:4:4? */
mb_analyse_intra(x264_t * h,x264_mb_analysis_t * a,int i_satd_inter)668 static void mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_inter )
669 {
670 const unsigned int flags = h->sh.i_type == SLICE_TYPE_I ? h->param.analyse.intra : h->param.analyse.inter;
671 pixel *p_src = h->mb.pic.p_fenc[0];
672 pixel *p_dst = h->mb.pic.p_fdec[0];
673 static const int8_t intra_analysis_shortcut[2][2][2][5] =
674 {
675 {{{I_PRED_4x4_HU, -1, -1, -1, -1},
676 {I_PRED_4x4_DDL, I_PRED_4x4_VL, -1, -1, -1}},
677 {{I_PRED_4x4_DDR, I_PRED_4x4_HD, I_PRED_4x4_HU, -1, -1},
678 {I_PRED_4x4_DDL, I_PRED_4x4_DDR, I_PRED_4x4_VR, I_PRED_4x4_VL, -1}}},
679 {{{I_PRED_4x4_HU, -1, -1, -1, -1},
680 {-1, -1, -1, -1, -1}},
681 {{I_PRED_4x4_DDR, I_PRED_4x4_HD, I_PRED_4x4_HU, -1, -1},
682 {I_PRED_4x4_DDR, I_PRED_4x4_VR, -1, -1, -1}}},
683 };
684
685 int idx;
686 int lambda = a->i_lambda;
687
688 /*---------------- Try all mode and calculate their score ---------------*/
689 /* Disabled i16x16 for AVC-Intra compat */
690 if( !h->param.i_avcintra_class )
691 {
692 const int8_t *predict_mode = predict_16x16_mode_available( h->mb.i_neighbour_intra );
693
694 /* Not heavily tuned */
695 static const uint8_t i16x16_thresh_lut[11] = { 2, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4 };
696 int i16x16_thresh = a->b_fast_intra ? (i16x16_thresh_lut[h->mb.i_subpel_refine]*i_satd_inter)>>1 : COST_MAX;
697
698 if( !h->mb.b_lossless && predict_mode[3] >= 0 )
699 {
700 h->pixf.intra_mbcmp_x3_16x16( p_src, p_dst, a->i_satd_i16x16_dir );
701 a->i_satd_i16x16_dir[0] += lambda * bs_size_ue(0);
702 a->i_satd_i16x16_dir[1] += lambda * bs_size_ue(1);
703 a->i_satd_i16x16_dir[2] += lambda * bs_size_ue(2);
704 COPY2_IF_LT( a->i_satd_i16x16, a->i_satd_i16x16_dir[0], a->i_predict16x16, 0 );
705 COPY2_IF_LT( a->i_satd_i16x16, a->i_satd_i16x16_dir[1], a->i_predict16x16, 1 );
706 COPY2_IF_LT( a->i_satd_i16x16, a->i_satd_i16x16_dir[2], a->i_predict16x16, 2 );
707
708 /* Plane is expensive, so don't check it unless one of the previous modes was useful. */
709 if( a->i_satd_i16x16 <= i16x16_thresh )
710 {
711 h->predict_16x16[I_PRED_16x16_P]( p_dst );
712 a->i_satd_i16x16_dir[I_PRED_16x16_P] = h->pixf.mbcmp[PIXEL_16x16]( p_src, FENC_STRIDE, p_dst, FDEC_STRIDE );
713 a->i_satd_i16x16_dir[I_PRED_16x16_P] += lambda * bs_size_ue(3);
714 COPY2_IF_LT( a->i_satd_i16x16, a->i_satd_i16x16_dir[I_PRED_16x16_P], a->i_predict16x16, 3 );
715 }
716 }
717 else
718 {
719 for( ; *predict_mode >= 0; predict_mode++ )
720 {
721 int i_satd;
722 int i_mode = *predict_mode;
723
724 if( h->mb.b_lossless )
725 x264_predict_lossless_16x16( h, 0, i_mode );
726 else
727 h->predict_16x16[i_mode]( p_dst );
728
729 i_satd = h->pixf.mbcmp[PIXEL_16x16]( p_src, FENC_STRIDE, p_dst, FDEC_STRIDE ) +
730 lambda * bs_size_ue( x264_mb_pred_mode16x16_fix[i_mode] );
731 COPY2_IF_LT( a->i_satd_i16x16, i_satd, a->i_predict16x16, i_mode );
732 a->i_satd_i16x16_dir[i_mode] = i_satd;
733 }
734 }
735
736 if( h->sh.i_type == SLICE_TYPE_B )
737 /* cavlc mb type prefix */
738 a->i_satd_i16x16 += lambda * i_mb_b_cost_table[I_16x16];
739
740 if( a->i_satd_i16x16 > i16x16_thresh )
741 return;
742 }
743
744 uint16_t *cost_i4x4_mode = h->cost_table->i4x4_mode[a->i_qp] + 8;
745 /* 8x8 prediction selection */
746 if( flags & X264_ANALYSE_I8x8 )
747 {
748 ALIGNED_ARRAY_32( pixel, edge,[36] );
749 x264_pixel_cmp_t sa8d = (h->pixf.mbcmp[0] == h->pixf.satd[0]) ? h->pixf.sa8d[PIXEL_8x8] : h->pixf.mbcmp[PIXEL_8x8];
750 int i_satd_thresh = a->i_mbrd ? COST_MAX : X264_MIN( i_satd_inter, a->i_satd_i16x16 );
751
752 // FIXME some bias like in i4x4?
753 int i_cost = lambda * 4; /* base predmode costs */
754 h->mb.i_cbp_luma = 0;
755
756 if( h->sh.i_type == SLICE_TYPE_B )
757 i_cost += lambda * i_mb_b_cost_table[I_8x8];
758
759 for( idx = 0;; idx++ )
760 {
761 int x = idx&1;
762 int y = idx>>1;
763 pixel *p_src_by = p_src + 8*x + 8*y*FENC_STRIDE;
764 pixel *p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE;
765 int i_best = COST_MAX;
766 int i_pred_mode = x264_mb_predict_intra4x4_mode( h, 4*idx );
767
768 const int8_t *predict_mode = predict_8x8_mode_available( a->b_avoid_topright, h->mb.i_neighbour8[idx], idx );
769 h->predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS );
770
771 if( h->pixf.intra_mbcmp_x9_8x8 && predict_mode[8] >= 0 )
772 {
773 /* No shortcuts here. The SSSE3 implementation of intra_mbcmp_x9 is fast enough. */
774 i_best = h->pixf.intra_mbcmp_x9_8x8( p_src_by, p_dst_by, edge, cost_i4x4_mode-i_pred_mode, a->i_satd_i8x8_dir[idx] );
775 i_cost += i_best & 0xffff;
776 i_best >>= 16;
777 a->i_predict8x8[idx] = i_best;
778 if( idx == 3 || i_cost > i_satd_thresh )
779 break;
780 x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, i_best );
781 }
782 else
783 {
784 if( !h->mb.b_lossless && predict_mode[5] >= 0 )
785 {
786 ALIGNED_ARRAY_16( int32_t, satd,[4] );
787 h->pixf.intra_mbcmp_x3_8x8( p_src_by, edge, satd );
788 int favor_vertical = satd[I_PRED_4x4_H] > satd[I_PRED_4x4_V];
789 if( i_pred_mode < 3 )
790 satd[i_pred_mode] -= 3 * lambda;
791 for( int i = 2; i >= 0; i-- )
792 {
793 int cost = satd[i];
794 a->i_satd_i8x8_dir[idx][i] = cost + 4 * lambda;
795 COPY2_IF_LT( i_best, cost, a->i_predict8x8[idx], i );
796 }
797
798 /* Take analysis shortcuts: don't analyse modes that are too
799 * far away direction-wise from the favored mode. */
800 if( a->i_mbrd < 1 + a->b_fast_intra )
801 predict_mode = intra_analysis_shortcut[a->b_avoid_topright][predict_mode[8] >= 0][favor_vertical];
802 else
803 predict_mode += 3;
804 }
805
806 for( ; *predict_mode >= 0 && (i_best >= 0 || a->i_mbrd >= 2); predict_mode++ )
807 {
808 int i_satd;
809 int i_mode = *predict_mode;
810
811 if( h->mb.b_lossless )
812 x264_predict_lossless_8x8( h, p_dst_by, 0, idx, i_mode, edge );
813 else
814 h->predict_8x8[i_mode]( p_dst_by, edge );
815
816 i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE );
817 if( i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) )
818 i_satd -= 3 * lambda;
819
820 COPY2_IF_LT( i_best, i_satd, a->i_predict8x8[idx], i_mode );
821 a->i_satd_i8x8_dir[idx][i_mode] = i_satd + 4 * lambda;
822 }
823 i_cost += i_best + 3*lambda;
824
825 if( idx == 3 || i_cost > i_satd_thresh )
826 break;
827 if( h->mb.b_lossless )
828 x264_predict_lossless_8x8( h, p_dst_by, 0, idx, a->i_predict8x8[idx], edge );
829 else
830 h->predict_8x8[a->i_predict8x8[idx]]( p_dst_by, edge );
831 x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] );
832 }
833 /* we need to encode this block now (for next ones) */
834 x264_mb_encode_i8x8( h, 0, idx, a->i_qp, a->i_predict8x8[idx], edge, 0 );
835 }
836
837 if( idx == 3 )
838 {
839 a->i_satd_i8x8 = i_cost;
840 if( h->mb.i_skip_intra )
841 {
842 h->mc.copy[PIXEL_16x16]( h->mb.pic.i8x8_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 );
843 h->mb.pic.i8x8_nnz_buf[0] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] );
844 h->mb.pic.i8x8_nnz_buf[1] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
845 h->mb.pic.i8x8_nnz_buf[2] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] );
846 h->mb.pic.i8x8_nnz_buf[3] = M32( &h->mb.cache.non_zero_count[x264_scan8[10]] );
847 h->mb.pic.i8x8_cbp = h->mb.i_cbp_luma;
848 if( h->mb.i_skip_intra == 2 )
849 h->mc.memcpy_aligned( h->mb.pic.i8x8_dct_buf, h->dct.luma8x8, sizeof(h->mb.pic.i8x8_dct_buf) );
850 }
851 }
852 else
853 {
854 static const uint16_t cost_div_fix8[3] = {1024,512,341};
855 a->i_satd_i8x8 = COST_MAX;
856 i_cost = (i_cost * cost_div_fix8[idx]) >> 8;
857 }
858 /* Not heavily tuned */
859 static const uint8_t i8x8_thresh[11] = { 4, 4, 4, 5, 5, 5, 6, 6, 6, 6, 6 };
860 if( a->b_early_terminate && X264_MIN(i_cost, a->i_satd_i16x16) > (i_satd_inter*i8x8_thresh[h->mb.i_subpel_refine])>>2 )
861 return;
862 }
863
864 /* 4x4 prediction selection */
865 if( flags & X264_ANALYSE_I4x4 )
866 {
867 int i_cost = lambda * (24+16); /* 24from JVT (SATD0), 16 from base predmode costs */
868 int i_satd_thresh = a->b_early_terminate ? X264_MIN3( i_satd_inter, a->i_satd_i16x16, a->i_satd_i8x8 ) : COST_MAX;
869 h->mb.i_cbp_luma = 0;
870
871 if( a->b_early_terminate && a->i_mbrd )
872 i_satd_thresh = i_satd_thresh * (10-a->b_fast_intra)/8;
873
874 if( h->sh.i_type == SLICE_TYPE_B )
875 i_cost += lambda * i_mb_b_cost_table[I_4x4];
876
877 for( idx = 0;; idx++ )
878 {
879 pixel *p_src_by = p_src + block_idx_xy_fenc[idx];
880 pixel *p_dst_by = p_dst + block_idx_xy_fdec[idx];
881 int i_best = COST_MAX;
882 int i_pred_mode = x264_mb_predict_intra4x4_mode( h, idx );
883
884 const int8_t *predict_mode = predict_4x4_mode_available( a->b_avoid_topright, h->mb.i_neighbour4[idx], idx );
885
886 if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
887 /* emulate missing topright samples */
888 MPIXEL_X4( &p_dst_by[4 - FDEC_STRIDE] ) = PIXEL_SPLAT_X4( p_dst_by[3 - FDEC_STRIDE] );
889
890 if( h->pixf.intra_mbcmp_x9_4x4 && predict_mode[8] >= 0 )
891 {
892 /* No shortcuts here. The SSSE3 implementation of intra_mbcmp_x9 is fast enough. */
893 i_best = h->pixf.intra_mbcmp_x9_4x4( p_src_by, p_dst_by, cost_i4x4_mode-i_pred_mode );
894 i_cost += i_best & 0xffff;
895 i_best >>= 16;
896 a->i_predict4x4[idx] = i_best;
897 if( i_cost > i_satd_thresh || idx == 15 )
898 break;
899 h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = i_best;
900 }
901 else
902 {
903 if( !h->mb.b_lossless && predict_mode[5] >= 0 )
904 {
905 ALIGNED_ARRAY_16( int32_t, satd,[4] );
906 h->pixf.intra_mbcmp_x3_4x4( p_src_by, p_dst_by, satd );
907 int favor_vertical = satd[I_PRED_4x4_H] > satd[I_PRED_4x4_V];
908 if( i_pred_mode < 3 )
909 satd[i_pred_mode] -= 3 * lambda;
910 i_best = satd[I_PRED_4x4_DC]; a->i_predict4x4[idx] = I_PRED_4x4_DC;
911 COPY2_IF_LT( i_best, satd[I_PRED_4x4_H], a->i_predict4x4[idx], I_PRED_4x4_H );
912 COPY2_IF_LT( i_best, satd[I_PRED_4x4_V], a->i_predict4x4[idx], I_PRED_4x4_V );
913
914 /* Take analysis shortcuts: don't analyse modes that are too
915 * far away direction-wise from the favored mode. */
916 if( a->i_mbrd < 1 + a->b_fast_intra )
917 predict_mode = intra_analysis_shortcut[a->b_avoid_topright][predict_mode[8] >= 0][favor_vertical];
918 else
919 predict_mode += 3;
920 }
921
922 if( i_best > 0 )
923 {
924 for( ; *predict_mode >= 0; predict_mode++ )
925 {
926 int i_satd;
927 int i_mode = *predict_mode;
928
929 if( h->mb.b_lossless )
930 x264_predict_lossless_4x4( h, p_dst_by, 0, idx, i_mode );
931 else
932 h->predict_4x4[i_mode]( p_dst_by );
933
934 i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_src_by, FENC_STRIDE, p_dst_by, FDEC_STRIDE );
935 if( i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) )
936 {
937 i_satd -= lambda * 3;
938 if( i_satd <= 0 )
939 {
940 i_best = i_satd;
941 a->i_predict4x4[idx] = i_mode;
942 break;
943 }
944 }
945
946 COPY2_IF_LT( i_best, i_satd, a->i_predict4x4[idx], i_mode );
947 }
948 }
949
950 i_cost += i_best + 3 * lambda;
951 if( i_cost > i_satd_thresh || idx == 15 )
952 break;
953 if( h->mb.b_lossless )
954 x264_predict_lossless_4x4( h, p_dst_by, 0, idx, a->i_predict4x4[idx] );
955 else
956 h->predict_4x4[a->i_predict4x4[idx]]( p_dst_by );
957 h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = a->i_predict4x4[idx];
958 }
959 /* we need to encode this block now (for next ones) */
960 x264_mb_encode_i4x4( h, 0, idx, a->i_qp, a->i_predict4x4[idx], 0 );
961 }
962 if( idx == 15 )
963 {
964 a->i_satd_i4x4 = i_cost;
965 if( h->mb.i_skip_intra )
966 {
967 h->mc.copy[PIXEL_16x16]( h->mb.pic.i4x4_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 );
968 h->mb.pic.i4x4_nnz_buf[0] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] );
969 h->mb.pic.i4x4_nnz_buf[1] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
970 h->mb.pic.i4x4_nnz_buf[2] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] );
971 h->mb.pic.i4x4_nnz_buf[3] = M32( &h->mb.cache.non_zero_count[x264_scan8[10]] );
972 h->mb.pic.i4x4_cbp = h->mb.i_cbp_luma;
973 if( h->mb.i_skip_intra == 2 )
974 h->mc.memcpy_aligned( h->mb.pic.i4x4_dct_buf, h->dct.luma4x4, sizeof(h->mb.pic.i4x4_dct_buf) );
975 }
976 }
977 else
978 a->i_satd_i4x4 = COST_MAX;
979 }
980 }
981
/* Re-score the intra partition candidates with true RD cost.
 * For each intra type (16x16, 4x4, 8x8) whose SATD estimate beat
 * i_satd_thresh, encode the macroblock as that type and replace the SATD
 * score with the rate-distortion cost; otherwise mark it COST_MAX so later
 * comparisons skip it.  With early termination disabled, every type is
 * evaluated.  Note: the three evaluations must stay in this order, since
 * rd_cost_mb() operates on the h->mb state set up just before it. */
static void intra_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_thresh )
{
    if( !a->b_early_terminate )
        i_satd_thresh = COST_MAX;

    /* I_16x16 */
    if( a->i_satd_i16x16 >= i_satd_thresh )
        a->i_satd_i16x16 = COST_MAX;
    else
    {
        h->mb.i_type = I_16x16;
        analyse_update_cache( h, a );
        a->i_satd_i16x16 = rd_cost_mb( h, a->i_lambda2 );
    }

    /* I_4x4 */
    if( a->i_satd_i4x4 >= i_satd_thresh )
        a->i_satd_i4x4 = COST_MAX;
    else
    {
        h->mb.i_type = I_4x4;
        analyse_update_cache( h, a );
        a->i_satd_i4x4 = rd_cost_mb( h, a->i_lambda2 );
    }

    /* I_8x8: additionally remember the luma CBP for later 8x8 refinement. */
    if( a->i_satd_i8x8 >= i_satd_thresh )
        a->i_satd_i8x8 = COST_MAX;
    else
    {
        h->mb.i_type = I_8x8;
        analyse_update_cache( h, a );
        a->i_satd_i8x8 = rd_cost_mb( h, a->i_lambda2 );
        a->i_cbp_i8x8_luma = h->mb.i_cbp_luma;
    }
}
1015
/* RD refinement of the already-chosen intra mode.
 * Given that h->mb.i_type has been decided (I_16x16, I_4x4 or I_8x8), retry
 * the per-block prediction modes with full RD cost and keep the best ones,
 * updating a->i_predict* and the relevant caches.  Early-terminate thresholds
 * derived from the stored SATD scores prune modes that are unlikely to win.
 * Also performs RD selection of the chroma prediction mode for 4:2:0/4:2:2. */
static void intra_rd_refine( x264_t *h, x264_mb_analysis_t *a )
{
    uint64_t i_satd, i_best;
    int plane_count = CHROMA444 ? 3 : 1;
    h->mb.i_skip_intra = 0;

    if( h->mb.i_type == I_16x16 )
    {
        int old_pred_mode = a->i_predict16x16;
        const int8_t *predict_mode = predict_16x16_mode_available( h->mb.i_neighbour_intra );
        /* Only re-test modes whose SATD is within 9/8 of the current winner. */
        int i_thresh = a->b_early_terminate ? a->i_satd_i16x16_dir[old_pred_mode] * 9/8 : COST_MAX;
        i_best = a->i_satd_i16x16;
        for( ; *predict_mode >= 0; predict_mode++ )
        {
            int i_mode = *predict_mode;
            if( i_mode == old_pred_mode || a->i_satd_i16x16_dir[i_mode] > i_thresh )
                continue;
            h->mb.i_intra16x16_pred_mode = i_mode;
            i_satd = rd_cost_mb( h, a->i_lambda2 );
            COPY2_IF_LT( i_best, i_satd, a->i_predict16x16, i_mode );
        }
    }

    /* RD selection for chroma prediction */
    if( CHROMA_FORMAT == CHROMA_420 || CHROMA_FORMAT == CHROMA_422 )
    {
        const int8_t *predict_mode = predict_chroma_mode_available( h->mb.i_neighbour_intra );
        if( predict_mode[1] >= 0 ) /* more than one candidate mode available */
        {
            int8_t predict_mode_sorted[4];
            int i_max;
            int i_thresh = a->b_early_terminate ? a->i_satd_chroma * 5/4 : COST_MAX;

            /* Collect the modes worth re-testing (cheap enough, not the current one). */
            for( i_max = 0; *predict_mode >= 0; predict_mode++ )
            {
                int i_mode = *predict_mode;
                if( a->i_satd_chroma_dir[i_mode] < i_thresh && i_mode != a->i_predict8x8chroma )
                    predict_mode_sorted[i_max++] = i_mode;
            }

            if( i_max > 0 )
            {
                int i_cbp_chroma_best = h->mb.i_cbp_chroma;
                int i_chroma_lambda = x264_lambda2_tab[h->mb.i_chroma_qp];
                /* the previous thing encoded was intra_rd(), so the pixels and
                 * coefs for the current chroma mode are still around, so we only
                 * have to recount the bits. */
                i_best = rd_cost_chroma( h, i_chroma_lambda, a->i_predict8x8chroma, 0 );
                for( int i = 0; i < i_max; i++ )
                {
                    int i_mode = predict_mode_sorted[i];
                    if( h->mb.b_lossless )
                        x264_predict_lossless_chroma( h, i_mode );
                    else
                    {
                        h->predict_chroma[i_mode]( h->mb.pic.p_fdec[1] );
                        h->predict_chroma[i_mode]( h->mb.pic.p_fdec[2] );
                    }
                    /* if we've already found a mode that needs no residual, then
                     * probably any mode with a residual will be worse.
                     * so avoid dct on the remaining modes to improve speed. */
                    i_satd = rd_cost_chroma( h, i_chroma_lambda, i_mode, h->mb.i_cbp_chroma != 0x00 );
                    COPY3_IF_LT( i_best, i_satd, a->i_predict8x8chroma, i_mode, i_cbp_chroma_best, h->mb.i_cbp_chroma );
                }
                h->mb.i_chroma_pred_mode = a->i_predict8x8chroma;
                h->mb.i_cbp_chroma = i_cbp_chroma_best;
            }
        }
    }

    if( h->mb.i_type == I_4x4 )
    {
        pixel4 pels[3][4] = {{0}}; // doesn't need initting, just shuts up a gcc warning
        int nnz[3] = {0};
        for( int idx = 0; idx < 16; idx++ )
        {
            pixel *dst[3] = {h->mb.pic.p_fdec[0] + block_idx_xy_fdec[idx],
                             CHROMA_FORMAT ? h->mb.pic.p_fdec[1] + block_idx_xy_fdec[idx] : NULL,
                             CHROMA_FORMAT ? h->mb.pic.p_fdec[2] + block_idx_xy_fdec[idx] : NULL};
            i_best = COST_MAX64;

            const int8_t *predict_mode = predict_4x4_mode_available( a->b_avoid_topright, h->mb.i_neighbour4[idx], idx );

            if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                for( int p = 0; p < plane_count; p++ )
                    /* emulate missing topright samples */
                    MPIXEL_X4( dst[p]+4-FDEC_STRIDE ) = PIXEL_SPLAT_X4( dst[p][3-FDEC_STRIDE] );

            for( ; *predict_mode >= 0; predict_mode++ )
            {
                int i_mode = *predict_mode;
                i_satd = rd_cost_i4x4( h, a->i_lambda2, idx, i_mode );

                if( i_best > i_satd )
                {
                    a->i_predict4x4[idx] = i_mode;
                    i_best = i_satd;
                    /* Snapshot the reconstructed 4x4 block and its nnz so the
                     * winning mode's pixels can be restored after the loop. */
                    for( int p = 0; p < plane_count; p++ )
                    {
                        pels[p][0] = MPIXEL_X4( dst[p]+0*FDEC_STRIDE );
                        pels[p][1] = MPIXEL_X4( dst[p]+1*FDEC_STRIDE );
                        pels[p][2] = MPIXEL_X4( dst[p]+2*FDEC_STRIDE );
                        pels[p][3] = MPIXEL_X4( dst[p]+3*FDEC_STRIDE );
                        nnz[p] = h->mb.cache.non_zero_count[x264_scan8[idx+p*16]];
                    }
                }
            }

            /* Restore the best mode's reconstruction (later blocks predict from it). */
            for( int p = 0; p < plane_count; p++ )
            {
                MPIXEL_X4( dst[p]+0*FDEC_STRIDE ) = pels[p][0];
                MPIXEL_X4( dst[p]+1*FDEC_STRIDE ) = pels[p][1];
                MPIXEL_X4( dst[p]+2*FDEC_STRIDE ) = pels[p][2];
                MPIXEL_X4( dst[p]+3*FDEC_STRIDE ) = pels[p][3];
                h->mb.cache.non_zero_count[x264_scan8[idx+p*16]] = nnz[p];
            }

            h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = a->i_predict4x4[idx];
        }
    }
    else if( h->mb.i_type == I_8x8 )
    {
        ALIGNED_ARRAY_32( pixel, edge,[4],[32] ); // really [3][36], but they can overlap
        pixel4 pels_h[3][2] = {{0}};
        pixel pels_v[3][7] = {{0}};
        uint16_t nnz[3][2] = {{0}}; //shut up gcc
        for( int idx = 0; idx < 4; idx++ )
        {
            int x = idx&1;
            int y = idx>>1;
            int s8 = X264_SCAN8_0 + 2*x + 16*y;
            pixel *dst[3] = {h->mb.pic.p_fdec[0] + 8*x + 8*y*FDEC_STRIDE,
                             CHROMA_FORMAT ? h->mb.pic.p_fdec[1] + 8*x + 8*y*FDEC_STRIDE : NULL,
                             CHROMA_FORMAT ? h->mb.pic.p_fdec[2] + 8*x + 8*y*FDEC_STRIDE : NULL};
            int cbp_luma_new = 0;
            /* 8x8 has a looser SATD prune (11/8) than 16x16's 9/8. */
            int i_thresh = a->b_early_terminate ? a->i_satd_i8x8_dir[idx][a->i_predict8x8[idx]] * 11/8 : COST_MAX;

            i_best = COST_MAX64;

            const int8_t *predict_mode = predict_8x8_mode_available( a->b_avoid_topright, h->mb.i_neighbour8[idx], idx );
            for( int p = 0; p < plane_count; p++ )
                h->predict_8x8_filter( dst[p], edge[p], h->mb.i_neighbour8[idx], ALL_NEIGHBORS );

            for( ; *predict_mode >= 0; predict_mode++ )
            {
                int i_mode = *predict_mode;
                if( a->i_satd_i8x8_dir[idx][i_mode] > i_thresh )
                    continue;

                h->mb.i_cbp_luma = a->i_cbp_i8x8_luma;
                i_satd = rd_cost_i8x8( h, a->i_lambda2, idx, i_mode, edge );

                if( i_best > i_satd )
                {
                    a->i_predict8x8[idx] = i_mode;
                    cbp_luma_new = h->mb.i_cbp_luma;
                    i_best = i_satd;

                    /* Snapshot only the bottom row and (for left-column blocks)
                     * right edge — the parts neighbouring blocks predict from. */
                    for( int p = 0; p < plane_count; p++ )
                    {
                        pels_h[p][0] = MPIXEL_X4( dst[p]+7*FDEC_STRIDE+0 );
                        pels_h[p][1] = MPIXEL_X4( dst[p]+7*FDEC_STRIDE+4 );
                        if( !(idx&1) )
                            for( int j = 0; j < 7; j++ )
                                pels_v[p][j] = dst[p][7+j*FDEC_STRIDE];
                        nnz[p][0] = M16( &h->mb.cache.non_zero_count[s8 + 0*8 + p*16] );
                        nnz[p][1] = M16( &h->mb.cache.non_zero_count[s8 + 1*8 + p*16] );
                    }
                }
            }
            a->i_cbp_i8x8_luma = cbp_luma_new;
            /* Restore the winning mode's boundary pixels and nnz flags. */
            for( int p = 0; p < plane_count; p++ )
            {
                MPIXEL_X4( dst[p]+7*FDEC_STRIDE+0 ) = pels_h[p][0];
                MPIXEL_X4( dst[p]+7*FDEC_STRIDE+4 ) = pels_h[p][1];
                if( !(idx&1) )
                    for( int j = 0; j < 7; j++ )
                        dst[p][7+j*FDEC_STRIDE] = pels_v[p][j];
                M16( &h->mb.cache.non_zero_count[s8 + 0*8 + p*16] ) = nnz[p][0];
                M16( &h->mb.cache.non_zero_count[s8 + 1*8 + p*16] ) = nnz[p][1];
            }

            x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] );
        }
    }
}
1202
/* Initialize an x264_me_t's encode-side (fenc) plane pointers and strides for
 * the sub-partition at (xoff,yoff), plus the shared MV cost table.  Chroma
 * offsets are scaled by the chroma subsampling shifts when chroma exists. */
#define LOAD_FENC(m, src, xoff, yoff) \
{ \
    (m)->p_cost_mv = a->p_cost_mv; \
    (m)->i_stride[0] = h->mb.pic.i_stride[0]; \
    (m)->i_stride[1] = h->mb.pic.i_stride[1]; \
    (m)->i_stride[2] = h->mb.pic.i_stride[2]; \
    (m)->p_fenc[0] = &(src)[0][(xoff)+(yoff)*FENC_STRIDE]; \
    if( CHROMA_FORMAT ) \
    { \
        (m)->p_fenc[1] = &(src)[1][((xoff)>>CHROMA_H_SHIFT)+((yoff)>>CHROMA_V_SHIFT)*FENC_STRIDE]; \
        (m)->p_fenc[2] = &(src)[2][((xoff)>>CHROMA_H_SHIFT)+((yoff)>>CHROMA_V_SHIFT)*FENC_STRIDE]; \
    } \
}

/* Initialize an x264_me_t's reference-side (fref) plane pointers for the given
 * list/ref at (xoff,yoff).  Subpel (half-pel) planes are loaded only when
 * subpel refinement is enabled; layout differs between 4:4:4 (planes 4-11) and
 * subsampled chroma (single NV-interleaved plane 4).  Also sets the integral
 * image pointer for ESA/TESA search, a default (no-op) weight, and i_ref. */
#define LOAD_HPELS(m, src, list, ref, xoff, yoff) \
{ \
    (m)->p_fref_w = (m)->p_fref[0] = &(src)[0][(xoff)+(yoff)*(m)->i_stride[0]]; \
    if( h->param.analyse.i_subpel_refine ) \
    { \
        (m)->p_fref[1] = &(src)[1][(xoff)+(yoff)*(m)->i_stride[0]]; \
        (m)->p_fref[2] = &(src)[2][(xoff)+(yoff)*(m)->i_stride[0]]; \
        (m)->p_fref[3] = &(src)[3][(xoff)+(yoff)*(m)->i_stride[0]]; \
    } \
    if( CHROMA444 ) \
    { \
        (m)->p_fref[ 4] = &(src)[ 4][(xoff)+(yoff)*(m)->i_stride[1]]; \
        (m)->p_fref[ 8] = &(src)[ 8][(xoff)+(yoff)*(m)->i_stride[2]]; \
        if( h->param.analyse.i_subpel_refine ) \
        { \
            (m)->p_fref[ 5] = &(src)[ 5][(xoff)+(yoff)*(m)->i_stride[1]]; \
            (m)->p_fref[ 6] = &(src)[ 6][(xoff)+(yoff)*(m)->i_stride[1]]; \
            (m)->p_fref[ 7] = &(src)[ 7][(xoff)+(yoff)*(m)->i_stride[1]]; \
            (m)->p_fref[ 9] = &(src)[ 9][(xoff)+(yoff)*(m)->i_stride[2]]; \
            (m)->p_fref[10] = &(src)[10][(xoff)+(yoff)*(m)->i_stride[2]]; \
            (m)->p_fref[11] = &(src)[11][(xoff)+(yoff)*(m)->i_stride[2]]; \
        } \
    } \
    else if( CHROMA_FORMAT ) \
        (m)->p_fref[4] = &(src)[4][(xoff)+((yoff)>>CHROMA_V_SHIFT)*(m)->i_stride[1]]; \
    if( h->param.analyse.i_me_method >= X264_ME_ESA ) \
        (m)->integral = &h->mb.pic.p_integral[list][ref][(xoff)+(yoff)*(m)->i_stride[0]]; \
    (m)->weight = x264_weight_none; \
    (m)->i_ref = ref; \
}

/* Override p_fref_w/weight with the weighted-prediction reference plane.
 * NOTE: the body reads the call-site variable `i_ref`, not the `ref`
 * argument — callers must have a local named i_ref in scope. */
#define LOAD_WPELS(m, src, list, ref, xoff, yoff) \
    (m)->p_fref_w = &(src)[(xoff)+(yoff)*(m)->i_stride[0]]; \
    (m)->weight = h->sh.weight[i_ref];

/* Bit cost (scaled by lambda, precomputed in a->p_cost_ref) of coding a
 * reference index for the given list. */
#define REF_COST(list, ref) \
    (a->p_cost_ref[list][ref])
1254
/* P-slice 16x16 inter analysis: search every available L0 reference frame and
 * keep the cheapest motion result in a->l0.me16x16.  May early-exit with
 * h->mb.i_type = P_SKIP when ref 0's result is near the predicted skip MV and
 * the skip probe succeeds; with mbrd it may also downgrade a zero-residual
 * 16x16 result to P_SKIP after an RD check.  Otherwise leaves i_type = P_L0. */
static void mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a )
{
    x264_me_t m;
    int i_mvc;
    ALIGNED_ARRAY_8( int16_t, mvc,[8],[2] );
    int i_halfpel_thresh = INT_MAX;
    /* The halfpel early-out threshold is only useful with >1 reference. */
    int *p_halfpel_thresh = (a->b_early_terminate && h->mb.pic.i_fref[0]>1) ? &i_halfpel_thresh : NULL;

    /* 16x16 Search on all ref frame */
    m.i_pixel = PIXEL_16x16;
    LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );

    a->l0.me16x16.cost = INT_MAX;
    for( int i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ )
    {
        m.i_ref_cost = REF_COST( 0, i_ref );
        /* Offset the threshold by this ref's coding cost so refs compare fairly;
         * undone after the search below. */
        i_halfpel_thresh -= m.i_ref_cost;

        /* search with ref */
        LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 );
        LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 0, 0 );

        x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );

        if( h->mb.ref_blind_dupe == i_ref )
        {
            /* This ref is a duplicate of one already searched: reuse its MV
             * and only redo subpel refinement. */
            CP32( m.mv, a->l0.mvc[0][0] );
            x264_me_refine_qpel_refdupe( h, &m, p_halfpel_thresh );
        }
        else
        {
            x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
            x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );
        }

        /* save mv for predicting neighbors */
        CP32( h->mb.mvr[0][i_ref][h->mb.i_mb_xy], m.mv );
        CP32( a->l0.mvc[i_ref][0], m.mv );

        /* early termination
         * SSD threshold would probably be better than SATD */
        if( i_ref == 0
            && a->b_try_skip
            && m.cost-m.cost_mv < 300*a->i_lambda
            && abs(m.mv[0]-h->mb.cache.pskip_mv[0])
             + abs(m.mv[1]-h->mb.cache.pskip_mv[1]) <= 1
            && x264_macroblock_probe_pskip( h ) )
        {
            h->mb.i_type = P_SKIP;
            analyse_update_cache( h, a );
            assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 );
            return;
        }

        m.cost += m.i_ref_cost;
        i_halfpel_thresh += m.i_ref_cost;

        if( m.cost < a->l0.me16x16.cost )
            h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) );
    }

    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );
    assert( a->l0.me16x16.mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 );

    h->mb.i_type = P_L0;
    if( a->i_mbrd )
    {
        mb_init_fenc_cache( h, a->i_mbrd >= 2 || h->param.analyse.inter & X264_ANALYSE_PSUB8x8 );
        /* If the winner is ref 0 with exactly the predicted skip MV, compute
         * the real RD cost and use P_SKIP when no residual is coded. */
        if( a->l0.me16x16.i_ref == 0 && M32( a->l0.me16x16.mv ) == M32( h->mb.cache.pskip_mv ) && !a->b_force_intra )
        {
            h->mb.i_partition = D_16x16;
            x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );
            a->l0.i_rd16x16 = rd_cost_mb( h, a->i_lambda2 );
            if( !(h->mb.i_cbp_luma|h->mb.i_cbp_chroma) )
                h->mb.i_type = P_SKIP;
        }
    }
}
1333
/* P-slice 8x8 inter analysis with per-partition (mixed) reference selection:
 * each of the four 8x8 partitions searches refs 0..i_maxref (plus the blind
 * duplicate ref, if any) and keeps its own best ref/MV in a->l0.me8x8[i].
 * Fills a->i_satd8x8[0][] and a->l0.i_cost8x8; sets all sub-partitions to
 * D_L0_8x8. */
static void mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t *a )
{
    x264_me_t m;
    pixel **p_fenc = h->mb.pic.p_fenc;
    int i_maxref = h->mb.pic.i_fref[0]-1;

    h->mb.i_partition = D_8x8;

/* Raise i_maxref to cover the ref used by cached neighbour block i
 * (duplicate refs are excluded — they'd be redundant to search). */
#define CHECK_NEIGHBOUR(i)\
{\
    int ref = h->mb.cache.ref[0][X264_SCAN8_0+i];\
    if( ref > i_maxref && ref != h->mb.ref_blind_dupe )\
        i_maxref = ref;\
}

    /* early termination: if 16x16 chose ref 0, then evalute no refs older
     * than those used by the neighbors */
    if( a->b_early_terminate && (i_maxref > 0 && (a->l0.me16x16.i_ref == 0 || a->l0.me16x16.i_ref == h->mb.ref_blind_dupe) &&
        h->mb.i_mb_type_top > 0 && h->mb.i_mb_type_left[0] > 0) )
    {
        i_maxref = 0;
        CHECK_NEIGHBOUR(  -8 - 1 );
        CHECK_NEIGHBOUR(  -8 + 0 );
        CHECK_NEIGHBOUR(  -8 + 2 );
        CHECK_NEIGHBOUR(  -8 + 4 );
        CHECK_NEIGHBOUR(   0 - 1 );
        CHECK_NEIGHBOUR( 2*8 - 1 );
    }
#undef CHECK_NEIGHBOUR

    /* Seed each ref's MV candidate list with its stored 16x16 MV. */
    for( int i_ref = 0; i_ref <= i_maxref; i_ref++ )
        CP32( a->l0.mvc[i_ref][0], h->mb.mvr[0][i_ref][h->mb.i_mb_xy] );

    for( int i = 0; i < 4; i++ )
    {
        x264_me_t *l0m = &a->l0.me8x8[i];
        int x8 = i&1;
        int y8 = i>>1;

        m.i_pixel = PIXEL_8x8;

        LOAD_FENC( &m, p_fenc, 8*x8, 8*y8 );
        l0m->cost = INT_MAX;
        for( int i_ref = 0; i_ref <= i_maxref || i_ref == h->mb.ref_blind_dupe; )
        {
            m.i_ref_cost = REF_COST( 0, i_ref );

            LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 );
            LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 8*x8, 8*y8 );

            x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref );
            x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
            if( h->mb.ref_blind_dupe == i_ref )
            {
                /* Duplicate of ref 0: reuse ref 0's MV, only refine subpel. */
                CP32( m.mv, a->l0.mvc[0][i+1] );
                x264_me_refine_qpel_refdupe( h, &m, NULL );
            }
            else
                x264_me_search( h, &m, a->l0.mvc[i_ref], i+1 );

            m.cost += m.i_ref_cost;

            CP32( a->l0.mvc[i_ref][i+1], m.mv );

            if( m.cost < l0m->cost )
                h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) );
            /* After the last regular ref, jump to the blind dupe if it exists. */
            if( i_ref == i_maxref && i_maxref < h->mb.ref_blind_dupe )
                i_ref = h->mb.ref_blind_dupe;
            else
                i_ref++;
        }
        x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, l0m->mv );
        x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, l0m->i_ref );

        /* SATD-only residual cost, excluding MV and ref coding costs. */
        a->i_satd8x8[0][i] = l0m->cost - ( l0m->cost_mv + l0m->i_ref_cost );

        /* If CABAC is on and we're not doing sub-8x8 analysis, the costs
           are effectively zero. */
        if( !h->param.b_cabac || (h->param.analyse.inter & X264_ANALYSE_PSUB8x8) )
            l0m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
    }

    a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
                      a->l0.me8x8[2].cost + a->l0.me8x8[3].cost;
    /* P_8x8 ref0 has no ref cost */
    if( !h->param.b_cabac && !(a->l0.me8x8[0].i_ref | a->l0.me8x8[1].i_ref |
                               a->l0.me8x8[2].i_ref | a->l0.me8x8[3].i_ref) )
        a->l0.i_cost8x8 -= REF_COST( 0, 0 ) * 4;
    M32( h->mb.i_sub_partition ) = D_L0_8x8 * 0x01010101;
}
1424
/* P-slice 8x8 inter analysis with a single shared reference (the one chosen
 * by the 16x16 search).  Each 8x8 partition is motion-searched against that
 * ref using the accumulated MV candidates; results go to a->l0.me8x8[] and
 * the totals to a->i_satd8x8[0][] / a->l0.i_cost8x8. */
static void mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a )
{
    /* Duplicate refs are rarely useful in p8x8 due to the high cost of the
     * reference frame flags. Thus, if we're not doing mixedrefs, just
     * don't bother analysing the dupes. */
    const int i_ref = h->mb.ref_blind_dupe == a->l0.me16x16.i_ref ? 0 : a->l0.me16x16.i_ref;
    /* CAVLC codes ref 0 for free at the MB level, hence cost 0 in that case. */
    const int i_ref_cost = h->param.b_cabac || i_ref ? REF_COST( 0, i_ref ) : 0;
    pixel **p_fenc = h->mb.pic.p_fenc;
    int i_mvc;
    int16_t (*mvc)[2] = a->l0.mvc[i_ref];

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    /* Candidate list starts with the 16x16 MV; each searched partition's MV
     * is appended for the following partitions. */
    i_mvc = 1;
    CP32( mvc[0], a->l0.me16x16.mv );

    for( int i = 0; i < 4; i++ )
    {
        x264_me_t *m = &a->l0.me8x8[i];
        int x8 = i&1;
        int y8 = i>>1;

        m->i_pixel = PIXEL_8x8;
        m->i_ref_cost = i_ref_cost;

        LOAD_FENC( m, p_fenc, 8*x8, 8*y8 );
        LOAD_HPELS( m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 );
        LOAD_WPELS( m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 8*x8, 8*y8 );

        x264_mb_predict_mv( h, 0, 4*i, 2, m->mvp );
        x264_me_search( h, m, mvc, i_mvc );

        /* Cache the MV before searching the next partition — its MV
         * prediction depends on this one. */
        x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, m->mv );

        CP32( mvc[i_mvc], m->mv );
        i_mvc++;

        a->i_satd8x8[0][i] = m->cost - m->cost_mv;

        /* mb type cost */
        m->cost += i_ref_cost;
        if( !h->param.b_cabac || (h->param.analyse.inter & X264_ANALYSE_PSUB8x8) )
            m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
    }

    a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
                      a->l0.me8x8[2].cost + a->l0.me8x8[3].cost;
    /* theoretically this should include 4*ref_cost,
     * but 3 seems a better approximation of cabac. */
    if( h->param.b_cabac )
        a->l0.i_cost8x8 -= i_ref_cost;
    M32( h->mb.i_sub_partition ) = D_L0_8x8 * 0x01010101;
}
1479
/* P-slice 16x8 inter analysis.  Each of the two 16x8 partitions tries only
 * the references chosen by its two underlying 8x8 partitions (1 or 2 refs).
 * i_best_satd (best cost found so far) feeds an early-termination estimate
 * after partition 0.  Results go to a->l0.me16x8[] and a->l0.i_cost16x8. */
static void mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a, int i_best_satd )
{
    x264_me_t m;
    pixel **p_fenc = h->mb.pic.p_fenc;
    ALIGNED_ARRAY_8( int16_t, mvc,[3],[2] );

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_16x8;

    for( int i = 0; i < 2; i++ )
    {
        x264_me_t *l0m = &a->l0.me16x8[i];
        /* Candidate refs: those of the two 8x8 halves of this 16x8 strip. */
        const int minref = X264_MIN( a->l0.me8x8[2*i].i_ref, a->l0.me8x8[2*i+1].i_ref );
        const int maxref = X264_MAX( a->l0.me8x8[2*i].i_ref, a->l0.me8x8[2*i+1].i_ref );
        const int ref8[2] = { minref, maxref };
        const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;

        m.i_pixel = PIXEL_16x8;

        LOAD_FENC( &m, p_fenc, 0, 8*i );
        l0m->cost = INT_MAX;
        for( int j = 0; j < i_ref8s; j++ )
        {
            const int i_ref = ref8[j];
            m.i_ref_cost = REF_COST( 0, i_ref );

            /* if we skipped the 16x16 predictor, we wouldn't have to copy anything... */
            CP32( mvc[0], a->l0.mvc[i_ref][0] );
            CP32( mvc[1], a->l0.mvc[i_ref][2*i+1] );
            CP32( mvc[2], a->l0.mvc[i_ref][2*i+2] );

            LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 8*i );
            LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 0, 8*i );

            x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, i_ref );
            x264_mb_predict_mv( h, 0, 8*i, 4, m.mvp );
            /* We can only take this shortcut if the first search was performed on ref0. */
            if( h->mb.ref_blind_dupe == i_ref && !ref8[0] )
            {
                /* We can just leave the MV from the previous ref search. */
                x264_me_refine_qpel_refdupe( h, &m, NULL );
            }
            else
                x264_me_search( h, &m, mvc, 3 );

            m.cost += m.i_ref_cost;

            if( m.cost < l0m->cost )
                h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) );
        }

        /* Early termination based on the current SATD score of partition[0]
           plus the estimated SATD score of partition[1] */
        if( a->b_early_terminate && (!i && l0m->cost + a->i_cost_est16x8[1] > i_best_satd * (4 + !!a->i_mbrd) / 4) )
        {
            a->l0.i_cost16x8 = COST_MAX;
            return;
        }

        x264_macroblock_cache_mv_ptr( h, 0, 2*i, 4, 2, 0, l0m->mv );
        x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, l0m->i_ref );
    }

    a->l0.i_cost16x8 = a->l0.me16x8[0].cost + a->l0.me16x8[1].cost;
}
1545
/* P-slice 8x16 inter analysis — mirror of mb_analyse_inter_p16x8 with the
 * partitions arranged vertically.  Each 8x16 partition tries the refs of its
 * two underlying 8x8 partitions; i_best_satd feeds the early-termination
 * estimate.  Results go to a->l0.me8x16[] and a->l0.i_cost8x16. */
static void mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a, int i_best_satd )
{
    x264_me_t m;
    pixel **p_fenc = h->mb.pic.p_fenc;
    ALIGNED_ARRAY_8( int16_t, mvc,[3],[2] );

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x16;

    for( int i = 0; i < 2; i++ )
    {
        x264_me_t *l0m = &a->l0.me8x16[i];
        /* Candidate refs: those of the two 8x8 halves of this 8x16 column. */
        const int minref = X264_MIN( a->l0.me8x8[i].i_ref, a->l0.me8x8[i+2].i_ref );
        const int maxref = X264_MAX( a->l0.me8x8[i].i_ref, a->l0.me8x8[i+2].i_ref );
        const int ref8[2] = { minref, maxref };
        const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;

        m.i_pixel = PIXEL_8x16;

        LOAD_FENC( &m, p_fenc, 8*i, 0 );
        l0m->cost = INT_MAX;
        for( int j = 0; j < i_ref8s; j++ )
        {
            const int i_ref = ref8[j];
            m.i_ref_cost = REF_COST( 0, i_ref );

            CP32( mvc[0], a->l0.mvc[i_ref][0] );
            CP32( mvc[1], a->l0.mvc[i_ref][i+1] );
            CP32( mvc[2], a->l0.mvc[i_ref][i+3] );

            LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*i, 0 );
            LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 8*i, 0 );

            x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, i_ref );
            x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
            /* We can only take this shortcut if the first search was performed on ref0. */
            if( h->mb.ref_blind_dupe == i_ref && !ref8[0] )
            {
                /* We can just leave the MV from the previous ref search. */
                x264_me_refine_qpel_refdupe( h, &m, NULL );
            }
            else
                x264_me_search( h, &m, mvc, 3 );

            m.cost += m.i_ref_cost;

            if( m.cost < l0m->cost )
                h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) );
        }

        /* Early termination based on the current SATD score of partition[0]
           plus the estimated SATD score of partition[1] */
        if( a->b_early_terminate && (!i && l0m->cost + a->i_cost_est8x16[1] > i_best_satd * (4 + !!a->i_mbrd) / 4) )
        {
            a->l0.i_cost8x16 = COST_MAX;
            return;
        }

        x264_macroblock_cache_mv_ptr( h, 2*i, 0, 2, 4, 0, l0m->mv );
        x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, l0m->i_ref );
    }

    a->l0.i_cost8x16 = a->l0.me8x16[0].cost + a->l0.me8x16[1].cost;
}
1610
/* Compute the chroma mbcmp cost of the sub-8x8 partitioning `size`
 * (PIXEL_4x4 / 8x4 / 4x8) of 8x8 block i8x8, by motion-compensating each
 * sub-block's MV into temporary buffers and comparing against fenc chroma.
 * `chroma` is a compile-time constant (the function is ALWAYS_INLINE) so
 * each chroma format gets its own specialized body.  Returns the summed
 * mbcmp score for both chroma planes. */
static ALWAYS_INLINE int mb_analyse_inter_p4x4_chroma_internal( x264_t *h, x264_mb_analysis_t *a,
                                                                pixel **p_fref, int i8x8, int size, int chroma )
{
    ALIGNED_ARRAY_32( pixel, pix1,[16*16] );
    pixel *pix2 = pix1+8; /* second chroma plane shares the buffer, offset by 8 */
    int i_stride = h->mb.pic.i_stride[1];
    int chroma_h_shift = chroma <= CHROMA_422;
    int chroma_v_shift = chroma == CHROMA_420;
    /* Reference-plane offset of this 8x8 block in chroma coordinates. */
    int or = 8*(i8x8&1) + (4>>chroma_v_shift)*(i8x8&2)*i_stride;
    int i_ref = a->l0.me8x8[i8x8].i_ref;
    /* MBAFF-style vertical chroma MV adjustment — only for 4:2:0 interlaced refs. */
    int mvy_offset = chroma_v_shift && MB_INTERLACED & i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
    x264_weight_t *weight = h->sh.weight[i_ref];

    // FIXME weight can be done on 4x4 blocks even if mc is smaller
/* Motion-compensate one sub-block (luma-coordinate size width x height at
 * (x,y)) for both chroma planes, applying weighted prediction if enabled.
 * For 4:4:4, chroma planes are full-size and use the luma MC path. */
#define CHROMA4x4MC( width, height, me, x, y ) \
    if( chroma == CHROMA_444 ) \
    { \
        int mvx = (me).mv[0] + 4*2*x; \
        int mvy = (me).mv[1] + 4*2*y; \
        h->mc.mc_luma( &pix1[2*x+2*y*16], 16, &h->mb.pic.p_fref[0][i_ref][4], i_stride, \
                       mvx, mvy, 2*width, 2*height, &h->sh.weight[i_ref][1] ); \
        h->mc.mc_luma( &pix2[2*x+2*y*16], 16, &h->mb.pic.p_fref[0][i_ref][8], i_stride, \
                       mvx, mvy, 2*width, 2*height, &h->sh.weight[i_ref][2] ); \
    } \
    else \
    { \
        int offset = x + (2>>chroma_v_shift)*16*y; \
        int chroma_height = (2>>chroma_v_shift)*height; \
        h->mc.mc_chroma( &pix1[offset], &pix2[offset], 16, &p_fref[4][or+2*x+(2>>chroma_v_shift)*y*i_stride], i_stride, \
                         (me).mv[0], (2>>chroma_v_shift)*((me).mv[1]+mvy_offset), width, chroma_height ); \
        if( weight[1].weightfn ) \
            weight[1].weightfn[width>>2]( &pix1[offset], 16, &pix1[offset], 16, &weight[1], chroma_height ); \
        if( weight[2].weightfn ) \
            weight[2].weightfn[width>>2]( &pix2[offset], 16, &pix2[offset], 16, &weight[2], chroma_height ); \
    }

    if( size == PIXEL_4x4 )
    {
        x264_me_t *m = a->l0.me4x4[i8x8];
        CHROMA4x4MC( 2,2, m[0], 0,0 );
        CHROMA4x4MC( 2,2, m[1], 2,0 );
        CHROMA4x4MC( 2,2, m[2], 0,2 );
        CHROMA4x4MC( 2,2, m[3], 2,2 );
    }
    else if( size == PIXEL_8x4 )
    {
        x264_me_t *m = a->l0.me8x4[i8x8];
        CHROMA4x4MC( 4,2, m[0], 0,0 );
        CHROMA4x4MC( 4,2, m[1], 0,2 );
    }
    else
    {
        x264_me_t *m = a->l0.me4x8[i8x8];
        CHROMA4x4MC( 2,4, m[0], 0,0 );
        CHROMA4x4MC( 2,4, m[1], 2,0 );
    }
#undef CHROMA4x4MC

    /* Offset of this 8x8 block within the fenc chroma planes. */
    int oe = (8>>chroma_h_shift)*(i8x8&1) + (4>>chroma_v_shift)*(i8x8&2)*FENC_STRIDE;
    int chromapix = chroma == CHROMA_444 ? PIXEL_8x8 : chroma == CHROMA_422 ? PIXEL_4x8 : PIXEL_4x4;
    return h->pixf.mbcmp[chromapix]( &h->mb.pic.p_fenc[1][oe], FENC_STRIDE, pix1, 16 )
         + h->pixf.mbcmp[chromapix]( &h->mb.pic.p_fenc[2][oe], FENC_STRIDE, pix2, 16 );
}
1674
/* Dispatch to the chroma-format-specialized internal implementation.  Each
 * call site passes the format as a literal constant so the ALWAYS_INLINE
 * body is compiled with `chroma` known at compile time. */
static int mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, pixel **p_fref, int i8x8, int size )
{
    if( CHROMA_FORMAT == CHROMA_444 )
        return mb_analyse_inter_p4x4_chroma_internal( h, a, p_fref, i8x8, size, CHROMA_444 );
    if( CHROMA_FORMAT == CHROMA_422 )
        return mb_analyse_inter_p4x4_chroma_internal( h, a, p_fref, i8x8, size, CHROMA_422 );
    return mb_analyse_inter_p4x4_chroma_internal( h, a, p_fref, i8x8, size, CHROMA_420 );
}
1684
/* Analyse splitting 8x8 block i8x8 into four 4x4 partitions, using the 8x8
 * partition's reference.  Each 4x4 is searched seeded by the parent 8x8 MV;
 * the summed cost (plus ref and sub-mb-type costs, and chroma cost when
 * chroma ME is on) goes to a->l0.i_cost4x4[i8x8]. */
static void mb_analyse_inter_p4x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
    pixel **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
    pixel **p_fenc = h->mb.pic.p_fenc;
    const int i_ref = a->l0.me8x8[i8x8].i_ref;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    for( int i4x4 = 0; i4x4 < 4; i4x4++ )
    {
        const int idx = 4*i8x8 + i4x4;
        const int x4 = block_idx_x[idx];
        const int y4 = block_idx_y[idx];
        /* Only the first 4x4 uses the parent MV as an explicit candidate. */
        const int i_mvc = (i4x4 == 0);

        x264_me_t *m = &a->l0.me4x4[i8x8][i4x4];

        m->i_pixel = PIXEL_4x4;

        LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
        LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
        LOAD_WPELS( m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 4*x4, 4*y4 );

        x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
        x264_me_search( h, m, &a->l0.me8x8[i8x8].mv, i_mvc );

        /* Cache immediately: the next 4x4's MV prediction depends on it. */
        x264_macroblock_cache_mv_ptr( h, x4, y4, 1, 1, 0, m->mv );
    }
    a->l0.i_cost4x4[i8x8] = a->l0.me4x4[i8x8][0].cost +
                            a->l0.me4x4[i8x8][1].cost +
                            a->l0.me4x4[i8x8][2].cost +
                            a->l0.me4x4[i8x8][3].cost +
                            REF_COST( 0, i_ref ) +
                            a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x4];
    /* Luma-only mbcmp misses chroma cost for subsampled formats; add it here. */
    if( h->mb.b_chroma_me && !CHROMA444 )
        a->l0.i_cost4x4[i8x8] += mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x4 );
}
1723
/* Motion estimation for the two 8x4 sub-partitions of 8x8 block i8x8,
 * using the reference chosen by the 8x8 search.  Fills
 * a->l0.me8x4[i8x8][0..1] and the summed cost a->l0.i_cost8x4[i8x8]. */
static void mb_analyse_inter_p8x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
    pixel **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
    pixel **p_fenc = h->mb.pic.p_fenc;
    const int i_ref = a->l0.me8x8[i8x8].i_ref;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    for( int i8x4 = 0; i8x4 < 2; i8x4++ )
    {
        const int idx = 4*i8x8 + 2*i8x4;
        const int x4 = block_idx_x[idx];
        const int y4 = block_idx_y[idx];
        /* Only the first sub-block passes the extra MV candidate below. */
        const int i_mvc = (i8x4 == 0);

        x264_me_t *m = &a->l0.me8x4[i8x8][i8x4];

        m->i_pixel = PIXEL_8x4;

        LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
        LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
        LOAD_WPELS( m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 4*x4, 4*y4 );

        x264_mb_predict_mv( h, 0, idx, 2, m->mvp );
        /* Seeded with the first 4x4 MV of this 8x8 block — relies on the
         * 4x4 analysis having been run for this block beforehand. */
        x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );

        x264_macroblock_cache_mv_ptr( h, x4, y4, 2, 1, 0, m->mv );
    }
    a->l0.i_cost8x4[i8x8] = a->l0.me8x4[i8x8][0].cost + a->l0.me8x4[i8x8][1].cost +
                            REF_COST( 0, i_ref ) +
                            a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x4];
    if( h->mb.b_chroma_me && !CHROMA444 )
        a->l0.i_cost8x4[i8x8] += mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_8x4 );
}
1759
/* Motion estimation for the two 4x8 sub-partitions of 8x8 block i8x8,
 * using the reference chosen by the 8x8 search.  Fills
 * a->l0.me4x8[i8x8][0..1] and the summed cost a->l0.i_cost4x8[i8x8]. */
static void mb_analyse_inter_p4x8( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
{
    pixel **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
    pixel **p_fenc = h->mb.pic.p_fenc;
    const int i_ref = a->l0.me8x8[i8x8].i_ref;

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    for( int i4x8 = 0; i4x8 < 2; i4x8++ )
    {
        const int idx = 4*i8x8 + i4x8;
        const int x4 = block_idx_x[idx];
        const int y4 = block_idx_y[idx];
        /* Only the first sub-block passes the extra MV candidate below. */
        const int i_mvc = (i4x8 == 0);

        x264_me_t *m = &a->l0.me4x8[i8x8][i4x8];

        m->i_pixel = PIXEL_4x8;

        LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
        LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
        LOAD_WPELS( m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 4*x4, 4*y4 );

        x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
        /* Seeded with the first 4x4 MV of this 8x8 block — relies on the
         * 4x4 analysis having been run for this block beforehand. */
        x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );

        x264_macroblock_cache_mv_ptr( h, x4, y4, 1, 2, 0, m->mv );
    }
    a->l0.i_cost4x8[i8x8] = a->l0.me4x8[i8x8][0].cost + a->l0.me4x8[i8x8][1].cost +
                            REF_COST( 0, i_ref ) +
                            a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x8];
    if( h->mb.b_chroma_me && !CHROMA444 )
        a->l0.i_cost4x8[i8x8] += mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x8 );
}
1795
/* Chroma cost of bidirectional (BI) prediction for one partition.
 * i_pixel selects the partition shape and which l0/l1 ME results to read;
 * idx selects the partition index for 16x8/8x16 halves and 8x8 quadrants.
 * Both lists' chroma predictions are generated, averaged with the bipred
 * weights, and compared against the encoded chroma planes via mbcmp. */
static ALWAYS_INLINE int analyse_bi_chroma( x264_t *h, x264_mb_analysis_t *a, int idx, int i_pixel )
{
    ALIGNED_ARRAY_32( pixel, pix, [4],[16*16] );
    ALIGNED_ARRAY_32( pixel, bi, [2],[16*16] );
    int i_chroma_cost = 0;
    int chromapix = h->luma2chroma_pixel[i_pixel];

/* Build the four one-way chroma predictions (2 planes x 2 lists):
 * 4:4:4 uses full-resolution mc_luma on the chroma planes; subsampled
 * formats use mc_chroma with the MV scaled by the vertical shift and an
 * interlace-dependent vertical offset.  The pairs are then averaged with
 * the bipred weight and compared against the fenc chroma. */
#define COST_BI_CHROMA( m0, m1, width, height ) \
{ \
    if( CHROMA444 ) \
    { \
        h->mc.mc_luma( pix[0], 16, &m0.p_fref[4], m0.i_stride[1], \
                       m0.mv[0], m0.mv[1], width, height, x264_weight_none ); \
        h->mc.mc_luma( pix[1], 16, &m0.p_fref[8], m0.i_stride[2], \
                       m0.mv[0], m0.mv[1], width, height, x264_weight_none ); \
        h->mc.mc_luma( pix[2], 16, &m1.p_fref[4], m1.i_stride[1], \
                       m1.mv[0], m1.mv[1], width, height, x264_weight_none ); \
        h->mc.mc_luma( pix[3], 16, &m1.p_fref[8], m1.i_stride[2], \
                       m1.mv[0], m1.mv[1], width, height, x264_weight_none ); \
    } \
    else \
    { \
        int v_shift = CHROMA_V_SHIFT; \
        int l0_mvy_offset = v_shift & MB_INTERLACED & m0.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0; \
        int l1_mvy_offset = v_shift & MB_INTERLACED & m1.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0; \
        h->mc.mc_chroma( pix[0], pix[1], 16, m0.p_fref[4], m0.i_stride[1], \
                         m0.mv[0], 2*(m0.mv[1]+l0_mvy_offset)>>v_shift, width>>1, height>>v_shift ); \
        h->mc.mc_chroma( pix[2], pix[3], 16, m1.p_fref[4], m1.i_stride[1], \
                         m1.mv[0], 2*(m1.mv[1]+l1_mvy_offset)>>v_shift, width>>1, height>>v_shift ); \
    } \
    h->mc.avg[chromapix]( bi[0], 16, pix[0], 16, pix[2], 16, h->mb.bipred_weight[m0.i_ref][m1.i_ref] ); \
    h->mc.avg[chromapix]( bi[1], 16, pix[1], 16, pix[3], 16, h->mb.bipred_weight[m0.i_ref][m1.i_ref] ); \
    i_chroma_cost = h->pixf.mbcmp[chromapix]( m0.p_fenc[1], FENC_STRIDE, bi[0], 16 ) \
                  + h->pixf.mbcmp[chromapix]( m0.p_fenc[2], FENC_STRIDE, bi[1], 16 ); \
}

    if( i_pixel == PIXEL_16x16 )
        COST_BI_CHROMA( a->l0.bi16x16, a->l1.bi16x16, 16, 16 )
    else if( i_pixel == PIXEL_16x8 )
        COST_BI_CHROMA( a->l0.me16x8[idx], a->l1.me16x8[idx], 16, 8 )
    else if( i_pixel == PIXEL_8x16 )
        COST_BI_CHROMA( a->l0.me8x16[idx], a->l1.me8x16[idx], 8, 16 )
    else
        COST_BI_CHROMA( a->l0.me8x8[idx], a->l1.me8x8[idx], 8, 8 )

    return i_chroma_cost;
}
1843
/* Cost of B_DIRECT for the whole macroblock, comparing fenc against the
 * direct prediction already present in fdec.  When BSUB16x16 analysis is
 * enabled, the cost is accumulated per 8x8 block and the per-block costs
 * (plus sub-partition-type bits) are kept for the later 8x8 decision. */
static void mb_analyse_inter_direct( x264_t *h, x264_mb_analysis_t *a )
{
    /* Assumes that fdec still contains the results of
     * x264_mb_predict_mv_direct16x16 and x264_mb_mc */

    pixel *p_fenc = h->mb.pic.p_fenc[0];
    pixel *p_fdec = h->mb.pic.p_fdec[0];

    a->i_cost16x16direct = a->i_lambda * i_mb_b_cost_table[B_DIRECT];
    if( h->param.analyse.inter & X264_ANALYSE_BSUB16x16 )
    {
        int chromapix = h->luma2chroma_pixel[PIXEL_8x8];

        for( int i = 0; i < 4; i++ )
        {
            const int x = (i&1)*8;
            const int y = (i>>1)*8;
            a->i_cost8x8direct[i] = h->pixf.mbcmp[PIXEL_8x8]( &p_fenc[x+y*FENC_STRIDE], FENC_STRIDE,
                                                              &p_fdec[x+y*FDEC_STRIDE], FDEC_STRIDE );
            if( h->mb.b_chroma_me )
            {
                /* Scale the luma offset down to chroma resolution. */
                int fenc_offset = (x>>CHROMA_H_SHIFT) + (y>>CHROMA_V_SHIFT)*FENC_STRIDE;
                int fdec_offset = (x>>CHROMA_H_SHIFT) + (y>>CHROMA_V_SHIFT)*FDEC_STRIDE;
                a->i_cost8x8direct[i] += h->pixf.mbcmp[chromapix]( &h->mb.pic.p_fenc[1][fenc_offset], FENC_STRIDE,
                                                                   &h->mb.pic.p_fdec[1][fdec_offset], FDEC_STRIDE )
                                      +  h->pixf.mbcmp[chromapix]( &h->mb.pic.p_fenc[2][fenc_offset], FENC_STRIDE,
                                                                   &h->mb.pic.p_fdec[2][fdec_offset], FDEC_STRIDE );
            }
            /* The 16x16 total excludes the per-block type bits added below. */
            a->i_cost16x16direct += a->i_cost8x8direct[i];

            /* mb type cost */
            a->i_cost8x8direct[i] += a->i_lambda * i_sub_mb_b_cost_table[D_DIRECT_8x8];
        }
    }
    else
    {
        a->i_cost16x16direct += h->pixf.mbcmp[PIXEL_16x16]( p_fenc, FENC_STRIDE, p_fdec, FDEC_STRIDE );
        if( h->mb.b_chroma_me )
        {
            int chromapix = h->luma2chroma_pixel[PIXEL_16x16];
            a->i_cost16x16direct += h->pixf.mbcmp[chromapix]( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE )
                                 +  h->pixf.mbcmp[chromapix]( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE );
        }
    }
}
1889
/* B-frame 16x16 analysis: searches list0 and list1 over all their refs
 * (in an order arranged to allow fast skip detection), then evaluates the
 * BI (weighted average) mode including a forced try of the zero MV pair.
 * May exit early by setting h->mb.i_type = B_SKIP. */
static void mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a )
{
    ALIGNED_ARRAY_32( pixel, pix0,[16*16] );
    ALIGNED_ARRAY_32( pixel, pix1,[16*16] );
    pixel *src0, *src1;
    intptr_t stride0 = 16, stride1 = 16;
    int i_ref, i_mvc;
    ALIGNED_ARRAY_8( int16_t, mvc,[9],[2] );
    int try_skip = a->b_try_skip;
    int list1_skipped = 0;
    /* Half-pel thresholds allow the ref loop to terminate early, but only
     * with early termination enabled and more than one ref in the list. */
    int i_halfpel_thresh[2] = {INT_MAX, INT_MAX};
    int *p_halfpel_thresh[2] = {(a->b_early_terminate && h->mb.pic.i_fref[0]>1) ? &i_halfpel_thresh[0] : NULL,
                                (a->b_early_terminate && h->mb.pic.i_fref[1]>1) ? &i_halfpel_thresh[1] : NULL};

    x264_me_t m;
    m.i_pixel = PIXEL_16x16;

    LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );

    /* 16x16 Search on list 0 and list 1 */
    a->l0.me16x16.cost = INT_MAX;
    a->l1.me16x16.cost = INT_MAX;
    for( int l = 1; l >= 0; )
    {
        x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;

        /* This loop is extremely munged in order to facilitate the following order of operations,
         * necessary for an efficient fast skip.
         * 1.  Search list1 ref0.
         * 2.  Search list0 ref0.
         * 3.  Try skip.
         * 4.  Search the rest of list0.
         * 5.  Go back and finish list1.
         */
        for( i_ref = (list1_skipped && l == 1) ? 1 : 0; i_ref < h->mb.pic.i_fref[l]; i_ref++ )
        {
            if( try_skip && l == 1 && i_ref > 0 )
            {
                list1_skipped = 1;
                break;
            }

            m.i_ref_cost = REF_COST( l, i_ref );

            /* search with ref */
            LOAD_HPELS( &m, h->mb.pic.p_fref[l][i_ref], l, i_ref, 0, 0 );
            x264_mb_predict_mv_16x16( h, l, i_ref, m.mvp );
            x264_mb_predict_mv_ref16x16( h, l, i_ref, mvc, &i_mvc );
            x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh[l] );

            /* add ref cost */
            m.cost += m.i_ref_cost;

            if( m.cost < lX->me16x16.cost )
                h->mc.memcpy_aligned( &lX->me16x16, &m, sizeof(x264_me_t) );

            /* save mv for predicting neighbors */
            CP32( lX->mvc[i_ref][0], m.mv );
            CP32( h->mb.mvr[l][i_ref][h->mb.i_mb_xy], m.mv );

            /* Fast skip detection. */
            if( i_ref == 0 && try_skip )
            {
                /* If the best MV strays more than 1 (quarter-pel) from the
                 * direct MV, skip is unlikely — stop testing for it. */
                if( abs(lX->me16x16.mv[0]-h->mb.cache.direct_mv[l][0][0]) +
                    abs(lX->me16x16.mv[1]-h->mb.cache.direct_mv[l][0][1]) > 1 )
                {
                    try_skip = 0;
                }
                else if( !l )
                {
                    /* We already tested skip */
                    h->mb.i_type = B_SKIP;
                    analyse_update_cache( h, a );
                    return;
                }
            }
        }
        if( list1_skipped && l == 1 && i_ref == h->mb.pic.i_fref[1] )
            break;
        if( list1_skipped && l == 0 )
            l = 1;
        else
            l--;
    }

    /* get cost of BI mode */
    h->mc.memcpy_aligned( &a->l0.bi16x16, &a->l0.me16x16, sizeof(x264_me_t) );
    h->mc.memcpy_aligned( &a->l1.bi16x16, &a->l1.me16x16, sizeof(x264_me_t) );
    int ref_costs = REF_COST( 0, a->l0.bi16x16.i_ref ) + REF_COST( 1, a->l1.bi16x16.i_ref );
    src0 = h->mc.get_ref( pix0, &stride0,
                          h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref], h->mb.pic.i_stride[0],
                          a->l0.bi16x16.mv[0], a->l0.bi16x16.mv[1], 16, 16, x264_weight_none );
    src1 = h->mc.get_ref( pix1, &stride1,
                          h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref], h->mb.pic.i_stride[0],
                          a->l1.bi16x16.mv[0], a->l1.bi16x16.mv[1], 16, 16, x264_weight_none );

    h->mc.avg[PIXEL_16x16]( pix0, 16, src0, stride0, src1, stride1, h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] );

    a->i_cost16x16bi = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, pix0, 16 )
                     + ref_costs
                     + a->l0.bi16x16.cost_mv
                     + a->l1.bi16x16.cost_mv;

    if( h->mb.b_chroma_me )
        a->i_cost16x16bi += analyse_bi_chroma( h, a, 0, PIXEL_16x16 );

    /* Always try the 0,0,0,0 vector; helps avoid errant motion vectors in fades */
    if( M32( a->l0.bi16x16.mv ) | M32( a->l1.bi16x16.mv ) )
    {
        int l0_mv_cost = a->l0.bi16x16.p_cost_mv[-a->l0.bi16x16.mvp[0]]
                       + a->l0.bi16x16.p_cost_mv[-a->l0.bi16x16.mvp[1]];
        int l1_mv_cost = a->l1.bi16x16.p_cost_mv[-a->l1.bi16x16.mvp[0]]
                       + a->l1.bi16x16.p_cost_mv[-a->l1.bi16x16.mvp[1]];
        /* Zero MV: average the co-located reference pixels directly. */
        h->mc.avg[PIXEL_16x16]( pix0, 16, h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][0], h->mb.pic.i_stride[0],
                                h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref][0], h->mb.pic.i_stride[0],
                                h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] );
        int cost00 = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, pix0, 16 )
                   + ref_costs + l0_mv_cost + l1_mv_cost;

        /* Chroma cost only matters if luma+mv already beats the best BI. */
        if( h->mb.b_chroma_me && cost00 < a->i_cost16x16bi )
        {
            ALIGNED_ARRAY_16( pixel, bi, [16*FENC_STRIDE] );

            if( CHROMA444 )
            {
                h->mc.avg[PIXEL_16x16]( bi, FENC_STRIDE, h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][4], h->mb.pic.i_stride[1],
                                        h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref][4], h->mb.pic.i_stride[1],
                                        h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] );
                cost00 += h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[1], FENC_STRIDE, bi, FENC_STRIDE );
                h->mc.avg[PIXEL_16x16]( bi, FENC_STRIDE, h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][8], h->mb.pic.i_stride[2],
                                        h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref][8], h->mb.pic.i_stride[2],
                                        h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] );
                cost00 += h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[2], FENC_STRIDE, bi, FENC_STRIDE );
            }
            else
            {
                ALIGNED_ARRAY_64( pixel, pixuv, [2],[16*FENC_STRIDE] );
                int chromapix = h->luma2chroma_pixel[PIXEL_16x16];
                int v_shift = CHROMA_V_SHIFT;

                if( v_shift & MB_INTERLACED & a->l0.bi16x16.i_ref )
                {
                    /* Interlaced field offset: full chroma MC is needed
                     * even for the zero MV. */
                    int l0_mvy_offset = (h->mb.i_mb_y & 1)*4 - 2;
                    h->mc.mc_chroma( pixuv[0], pixuv[0]+8, FENC_STRIDE, h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][4],
                                     h->mb.pic.i_stride[1], 0, 0 + l0_mvy_offset, 8, 8 );
                }
                else
                    h->mc.load_deinterleave_chroma_fenc( pixuv[0], h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][4],
                                                         h->mb.pic.i_stride[1], 16>>v_shift );

                if( v_shift & MB_INTERLACED & a->l1.bi16x16.i_ref )
                {
                    int l1_mvy_offset = (h->mb.i_mb_y & 1)*4 - 2;
                    h->mc.mc_chroma( pixuv[1], pixuv[1]+8, FENC_STRIDE, h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref][4],
                                     h->mb.pic.i_stride[1], 0, 0 + l1_mvy_offset, 8, 8 );
                }
                else
                    h->mc.load_deinterleave_chroma_fenc( pixuv[1], h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref][4],
                                                         h->mb.pic.i_stride[1], 16>>v_shift );

                h->mc.avg[chromapix]( bi, FENC_STRIDE, pixuv[0], FENC_STRIDE, pixuv[1], FENC_STRIDE,
                                      h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] );
                h->mc.avg[chromapix]( bi+8, FENC_STRIDE, pixuv[0]+8, FENC_STRIDE, pixuv[1]+8, FENC_STRIDE,
                                      h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] );

                cost00 += h->pixf.mbcmp[chromapix]( h->mb.pic.p_fenc[1], FENC_STRIDE, bi, FENC_STRIDE )
                       +  h->pixf.mbcmp[chromapix]( h->mb.pic.p_fenc[2], FENC_STRIDE, bi+8, FENC_STRIDE );
            }
        }

        if( cost00 < a->i_cost16x16bi )
        {
            M32( a->l0.bi16x16.mv ) = 0;
            M32( a->l1.bi16x16.mv ) = 0;
            a->l0.bi16x16.cost_mv = l0_mv_cost;
            a->l1.bi16x16.cost_mv = l1_mv_cost;
            a->i_cost16x16bi = cost00;
        }
    }

    /* mb type cost */
    a->i_cost16x16bi   += a->i_lambda * i_mb_b_cost_table[B_BI_BI];
    a->l0.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L0_L0];
    a->l1.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L1_L1];
}
2075
/* Write the MVs of P 8x8 partition i into the macroblock cache, according
 * to the sub-partition type chosen by the analysis. */
static inline void mb_cache_mv_p8x8( x264_t *h, x264_mb_analysis_t *a, int i )
{
    /* Top-left 4x4-block coordinates of 8x8 partition i within the MB. */
    int bx = 2*(i&1);
    int by = i&2;
    int part = h->mb.i_sub_partition[i];

    if( part == D_L0_8x8 )
        x264_macroblock_cache_mv_ptr( h, bx, by, 2, 2, 0, a->l0.me8x8[i].mv );
    else if( part == D_L0_8x4 )
    {
        x264_macroblock_cache_mv_ptr( h, bx, by+0, 2, 1, 0, a->l0.me8x4[i][0].mv );
        x264_macroblock_cache_mv_ptr( h, bx, by+1, 2, 1, 0, a->l0.me8x4[i][1].mv );
    }
    else if( part == D_L0_4x8 )
    {
        x264_macroblock_cache_mv_ptr( h, bx+0, by, 1, 2, 0, a->l0.me4x8[i][0].mv );
        x264_macroblock_cache_mv_ptr( h, bx+1, by, 1, 2, 0, a->l0.me4x8[i][1].mv );
    }
    else if( part == D_L0_4x4 )
    {
        /* One cache write per 4x4 sub-block, in raster order. */
        for( int j = 0; j < 4; j++ )
            x264_macroblock_cache_mv_ptr( h, bx+(j&1), by+(j>>1), 1, 1, 0, a->l0.me4x4[i][j].mv );
    }
    else
        x264_log( h, X264_LOG_ERROR, "internal error\n" );
}
2105
/* Load the precomputed direct refs and MVs for 8x8 partition idx into the
 * macroblock cache, for both reference lists. */
static void mb_load_mv_direct8x8( x264_t *h, int idx )
{
    int bx = 2*(idx&1);
    int by = idx&2;
    for( int l = 0; l < 2; l++ )
        x264_macroblock_cache_ref( h, bx, by, 2, 2, l, h->mb.cache.direct_ref[l][idx] );
    for( int l = 0; l < 2; l++ )
        x264_macroblock_cache_mv_ptr( h, bx, by, 2, 2, l, h->mb.cache.direct_mv[l][idx] );
}
2115
/* Fill the MB cache for one bi-predictable partition: for each list the
 * partition type actually uses, store its ref and MV; otherwise mark the
 * list unused (ref -1, zero MV) and, when b_mvd is set, clear the cached
 * MVD as well.  Expects a variable `b_mvd` in scope at the expansion site. */
#define CACHE_MV_BI(x,y,dx,dy,me0,me1,part) \
    if( x264_mb_partition_listX_table[0][part] ) \
    { \
        x264_macroblock_cache_ref( h, x,y,dx,dy, 0, me0.i_ref ); \
        x264_macroblock_cache_mv_ptr( h, x,y,dx,dy, 0, me0.mv ); \
    } \
    else \
    { \
        x264_macroblock_cache_ref( h, x,y,dx,dy, 0, -1 ); \
        x264_macroblock_cache_mv(  h, x,y,dx,dy, 0, 0 ); \
        if( b_mvd ) \
            x264_macroblock_cache_mvd( h, x,y,dx,dy, 0, 0 ); \
    } \
    if( x264_mb_partition_listX_table[1][part] ) \
    { \
        x264_macroblock_cache_ref( h, x,y,dx,dy, 1, me1.i_ref ); \
        x264_macroblock_cache_mv_ptr( h, x,y,dx,dy, 1, me1.mv ); \
    } \
    else \
    { \
        x264_macroblock_cache_ref( h, x,y,dx,dy, 1, -1 ); \
        x264_macroblock_cache_mv(  h, x,y,dx,dy, 1, 0 ); \
        if( b_mvd ) \
            x264_macroblock_cache_mvd( h, x,y,dx,dy, 1, 0 ); \
    }
2141
/* Write the chosen B 8x8 sub-partition's refs/MVs into the MB cache.
 * DIRECT blocks load the precomputed direct MVs; with b_mvd set the MVD
 * entries are cleared and the block is marked skippable. */
static inline void mb_cache_mv_b8x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
    int x = 2*(i&1);
    int y = i&2;
    if( h->mb.i_sub_partition[i] == D_DIRECT_8x8 )
    {
        mb_load_mv_direct8x8( h, i );
        if( b_mvd )
        {
            x264_macroblock_cache_mvd(  h, x, y, 2, 2, 0, 0 );
            x264_macroblock_cache_mvd(  h, x, y, 2, 2, 1, 0 );
            x264_macroblock_cache_skip( h, x, y, 2, 2, 1 );
        }
    }
    else
    {
        CACHE_MV_BI( x, y, 2, 2, a->l0.me8x8[i], a->l1.me8x8[i], h->mb.i_sub_partition[i] );
    }
}
/* Write the chosen 16x8 half-partition i's refs/MVs into the MB cache. */
static inline void mb_cache_mv_b16x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
    CACHE_MV_BI( 0, 2*i, 4, 2, a->l0.me16x8[i], a->l1.me16x8[i], a->i_mb_partition16x8[i] );
}
/* Write the chosen 8x16 half-partition i's refs/MVs into the MB cache. */
static inline void mb_cache_mv_b8x16( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
{
    CACHE_MV_BI( 2*i, 0, 2, 4, a->l0.me8x16[i], a->l1.me8x16[i], a->i_mb_partition8x16[i] );
}
2169 #undef CACHE_MV_BI
2170
/* B-frame 8x8 analysis with per-partition reference selection: for each
 * 8x8 block, searches every candidate ref in both lists, evaluates BI
 * prediction, and picks the cheapest of L0/L1/BI/DIRECT per block.
 * Accumulates the total in a->i_cost8x8bi. */
static void mb_analyse_inter_b8x8_mixed_ref( x264_t *h, x264_mb_analysis_t *a )
{
    ALIGNED_ARRAY_16( pixel, pix,[2],[8*8] );
    int i_maxref[2] = {h->mb.pic.i_fref[0]-1, h->mb.pic.i_fref[1]-1};

    /* early termination: if 16x16 chose ref 0, then evalute no refs older
     * than those used by the neighbors */
#define CHECK_NEIGHBOUR(i)\
{\
    int ref = h->mb.cache.ref[l][X264_SCAN8_0+i];\
    if( ref > i_maxref[l] )\
        i_maxref[l] = ref;\
}

    for( int l = 0; l < 2; l++ )
    {
        x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
        if( i_maxref[l] > 0 && lX->me16x16.i_ref == 0 &&
            h->mb.i_mb_type_top > 0 && h->mb.i_mb_type_left[0] > 0 )
        {
            i_maxref[l] = 0;
            CHECK_NEIGHBOUR(  -8 - 1 );
            CHECK_NEIGHBOUR(  -8 + 0 );
            CHECK_NEIGHBOUR(  -8 + 2 );
            CHECK_NEIGHBOUR(  -8 + 4 );
            CHECK_NEIGHBOUR(   0 - 1 );
            CHECK_NEIGHBOUR( 2*8 - 1 );
        }
    }

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    a->i_cost8x8bi = 0;

    for( int i = 0; i < 4; i++ )
    {
        int x8 = i&1;
        int y8 = i>>1;
        int i_part_cost;
        int i_part_cost_bi;
        intptr_t stride[2] = {8,8};
        pixel *src[2];
        x264_me_t m;
        m.i_pixel = PIXEL_8x8;
        LOAD_FENC( &m, h->mb.pic.p_fenc, 8*x8, 8*y8 );

        for( int l = 0; l < 2; l++ )
        {
            x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;

            lX->me8x8[i].cost = INT_MAX;
            for( int i_ref = 0; i_ref <= i_maxref[l]; i_ref++ )
            {
                m.i_ref_cost = REF_COST( l, i_ref );

                LOAD_HPELS( &m, h->mb.pic.p_fref[l][i_ref], l, i_ref, 8*x8, 8*y8 );

                x264_macroblock_cache_ref( h, x8*2, y8*2, 2, 2, l, i_ref );
                x264_mb_predict_mv( h, l, 4*i, 2, m.mvp );
                x264_me_search( h, &m, lX->mvc[i_ref], i+1 );
                m.cost += m.i_ref_cost;

                if( m.cost < lX->me8x8[i].cost )
                {
                    h->mc.memcpy_aligned( &lX->me8x8[i], &m, sizeof(x264_me_t) );
                    /* Keep the pure distortion (cost without mv/ref bits)
                     * for later SATD-based decisions. */
                    a->i_satd8x8[l][i] = m.cost - ( m.cost_mv + m.i_ref_cost );
                }

                /* save mv for predicting other partitions within this MB */
                CP32( lX->mvc[i_ref][i+1], m.mv );
            }
        }

        /* BI mode */
        src[0] = h->mc.get_ref( pix[0], &stride[0], a->l0.me8x8[i].p_fref, a->l0.me8x8[i].i_stride[0],
                                a->l0.me8x8[i].mv[0], a->l0.me8x8[i].mv[1], 8, 8, x264_weight_none );
        src[1] = h->mc.get_ref( pix[1], &stride[1], a->l1.me8x8[i].p_fref, a->l1.me8x8[i].i_stride[0],
                                a->l1.me8x8[i].mv[0], a->l1.me8x8[i].mv[1], 8, 8, x264_weight_none );
        h->mc.avg[PIXEL_8x8]( pix[0], 8, src[0], stride[0], src[1], stride[1],
                              h->mb.bipred_weight[a->l0.me8x8[i].i_ref][a->l1.me8x8[i].i_ref] );

        a->i_satd8x8[2][i] = h->pixf.mbcmp[PIXEL_8x8]( a->l0.me8x8[i].p_fenc[0], FENC_STRIDE, pix[0], 8 );
        i_part_cost_bi = a->i_satd8x8[2][i] + a->l0.me8x8[i].cost_mv + a->l1.me8x8[i].cost_mv
                         + a->l0.me8x8[i].i_ref_cost + a->l1.me8x8[i].i_ref_cost
                         + a->i_lambda * i_sub_mb_b_cost_table[D_BI_8x8];

        if( h->mb.b_chroma_me )
        {
            int i_chroma_cost = analyse_bi_chroma( h, a, i, PIXEL_8x8 );
            i_part_cost_bi += i_chroma_cost;
            a->i_satd8x8[2][i] += i_chroma_cost;
        }

        /* Add the sub-partition-type bit costs to the one-way candidates. */
        a->l0.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L0_8x8];
        a->l1.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L1_8x8];

        /* Pick the cheapest of L0 / L1 / BI / DIRECT for this block. */
        i_part_cost = a->l0.me8x8[i].cost;
        h->mb.i_sub_partition[i] = D_L0_8x8;
        COPY2_IF_LT( i_part_cost, a->l1.me8x8[i].cost, h->mb.i_sub_partition[i], D_L1_8x8 );
        COPY2_IF_LT( i_part_cost, i_part_cost_bi, h->mb.i_sub_partition[i], D_BI_8x8 );
        COPY2_IF_LT( i_part_cost, a->i_cost8x8direct[i], h->mb.i_sub_partition[i], D_DIRECT_8x8 );
        a->i_cost8x8bi += i_part_cost;

        /* XXX Needed for x264_mb_predict_mv */
        mb_cache_mv_b8x8( h, a, i, 0 );
    }

    /* mb type cost */
    a->i_cost8x8bi += a->i_lambda * i_mb_b_cost_table[B_8x8];
}
2282
/* B-frame 8x8 analysis with the reference fixed to each list's 16x16
 * choice (no per-partition ref search, unlike the mixed-ref variant).
 * Evaluates L0/L1/BI/DIRECT per block and accumulates a->i_cost8x8bi. */
static void mb_analyse_inter_b8x8( x264_t *h, x264_mb_analysis_t *a )
{
    pixel **p_fref[2] =
        { h->mb.pic.p_fref[0][a->l0.me16x16.i_ref],
          h->mb.pic.p_fref[1][a->l1.me16x16.i_ref] };
    ALIGNED_ARRAY_16( pixel, pix,[2],[8*8] );

    /* XXX Needed for x264_mb_predict_mv */
    h->mb.i_partition = D_8x8;

    a->i_cost8x8bi = 0;

    for( int i = 0; i < 4; i++ )
    {
        int x8 = i&1;
        int y8 = i>>1;
        int i_part_cost;
        int i_part_cost_bi = 0;
        intptr_t stride[2] = {8,8};
        pixel *src[2];

        for( int l = 0; l < 2; l++ )
        {
            x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
            x264_me_t *m = &lX->me8x8[i];
            m->i_pixel = PIXEL_8x8;
            LOAD_FENC( m, h->mb.pic.p_fenc, 8*x8, 8*y8 );

            m->i_ref_cost = REF_COST( l, lX->me16x16.i_ref );
            m->i_ref = lX->me16x16.i_ref;

            LOAD_HPELS( m, p_fref[l], l, lX->me16x16.i_ref, 8*x8, 8*y8 );

            x264_macroblock_cache_ref( h, x8*2, y8*2, 2, 2, l, lX->me16x16.i_ref );
            x264_mb_predict_mv( h, l, 4*i, 2, m->mvp );
            /* Seed the search with this list's 16x16 MV. */
            x264_me_search( h, m, &lX->me16x16.mv, 1 );
            /* Pure distortion (excludes mv bits) for later decisions. */
            a->i_satd8x8[l][i] = m->cost - m->cost_mv;
            m->cost += m->i_ref_cost;

            x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, l, m->mv );

            /* save mv for predicting other partitions within this MB */
            CP32( lX->mvc[lX->me16x16.i_ref][i+1], m->mv );

            /* BI mode */
            src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0],
                                    m->mv[0], m->mv[1], 8, 8, x264_weight_none );
            i_part_cost_bi += m->cost_mv + m->i_ref_cost;
        }
        h->mc.avg[PIXEL_8x8]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.me16x16.i_ref][a->l1.me16x16.i_ref] );
        a->i_satd8x8[2][i] = h->pixf.mbcmp[PIXEL_8x8]( a->l0.me8x8[i].p_fenc[0], FENC_STRIDE, pix[0], 8 );
        i_part_cost_bi += a->i_satd8x8[2][i] + a->i_lambda * i_sub_mb_b_cost_table[D_BI_8x8];
        a->l0.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L0_8x8];
        a->l1.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L1_8x8];

        if( h->mb.b_chroma_me )
        {
            int i_chroma_cost = analyse_bi_chroma( h, a, i, PIXEL_8x8 );
            i_part_cost_bi += i_chroma_cost;
            a->i_satd8x8[2][i] += i_chroma_cost;
        }

        /* Pick the cheapest of L0 / L1 / BI / DIRECT for this block. */
        i_part_cost = a->l0.me8x8[i].cost;
        h->mb.i_sub_partition[i] = D_L0_8x8;
        COPY2_IF_LT( i_part_cost, a->l1.me8x8[i].cost, h->mb.i_sub_partition[i], D_L1_8x8 );
        COPY2_IF_LT( i_part_cost, i_part_cost_bi, h->mb.i_sub_partition[i], D_BI_8x8 );
        COPY2_IF_LT( i_part_cost, a->i_cost8x8direct[i], h->mb.i_sub_partition[i], D_DIRECT_8x8 );
        a->i_cost8x8bi += i_part_cost;

        /* XXX Needed for x264_mb_predict_mv */
        mb_cache_mv_b8x8( h, a, i, 0 );
    }

    /* mb type cost */
    a->i_cost8x8bi += a->i_lambda * i_mb_b_cost_table[B_8x8];
}
2359
/* B-frame 16x8 analysis: for each half, searches only the refs already
 * chosen by the covering 8x8 blocks, evaluates BI, and picks L0/L1/BI.
 * May abandon early (cost set to COST_MAX) if the first half plus the
 * estimated second half already exceeds i_best_satd by a margin. */
static void mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a, int i_best_satd )
{
    ALIGNED_ARRAY_32( pixel, pix,[2],[16*8] );
    ALIGNED_ARRAY_8( int16_t, mvc,[3],[2] );

    h->mb.i_partition = D_16x8;
    a->i_cost16x8bi = 0;

    for( int i = 0; i < 2; i++ )
    {
        int i_part_cost;
        int i_part_cost_bi = 0;
        intptr_t stride[2] = {16,16};
        pixel *src[2];
        x264_me_t m;
        m.i_pixel = PIXEL_16x8;
        LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 8*i );

        for( int l = 0; l < 2; l++ )
        {
            x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
            /* Candidate refs: those picked by the two 8x8 blocks covering
             * this half; search each distinct one. */
            int ref8[2] = { lX->me8x8[2*i].i_ref, lX->me8x8[2*i+1].i_ref };
            int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;
            lX->me16x8[i].cost = INT_MAX;
            for( int j = 0; j < i_ref8s; j++ )
            {
                int i_ref = ref8[j];
                m.i_ref_cost = REF_COST( l, i_ref );

                LOAD_HPELS( &m, h->mb.pic.p_fref[l][i_ref], l, i_ref, 0, 8*i );

                /* MV candidates: the 16x16 MV and the two 8x8 MVs. */
                CP32( mvc[0], lX->mvc[i_ref][0] );
                CP32( mvc[1], lX->mvc[i_ref][2*i+1] );
                CP32( mvc[2], lX->mvc[i_ref][2*i+2] );

                x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, l, i_ref );
                x264_mb_predict_mv( h, l, 8*i, 4, m.mvp );
                x264_me_search( h, &m, mvc, 3 );
                m.cost += m.i_ref_cost;

                if( m.cost < lX->me16x8[i].cost )
                    h->mc.memcpy_aligned( &lX->me16x8[i], &m, sizeof(x264_me_t) );
            }
        }

        /* BI mode */
        src[0] = h->mc.get_ref( pix[0], &stride[0], a->l0.me16x8[i].p_fref, a->l0.me16x8[i].i_stride[0],
                                a->l0.me16x8[i].mv[0], a->l0.me16x8[i].mv[1], 16, 8, x264_weight_none );
        src[1] = h->mc.get_ref( pix[1], &stride[1], a->l1.me16x8[i].p_fref, a->l1.me16x8[i].i_stride[0],
                                a->l1.me16x8[i].mv[0], a->l1.me16x8[i].mv[1], 16, 8, x264_weight_none );
        h->mc.avg[PIXEL_16x8]( pix[0], 16, src[0], stride[0], src[1], stride[1],
                               h->mb.bipred_weight[a->l0.me16x8[i].i_ref][a->l1.me16x8[i].i_ref] );

        i_part_cost_bi = h->pixf.mbcmp[PIXEL_16x8]( a->l0.me16x8[i].p_fenc[0], FENC_STRIDE, pix[0], 16 )
                        + a->l0.me16x8[i].cost_mv + a->l1.me16x8[i].cost_mv + a->l0.me16x8[i].i_ref_cost
                        + a->l1.me16x8[i].i_ref_cost;

        if( h->mb.b_chroma_me )
            i_part_cost_bi += analyse_bi_chroma( h, a, i, PIXEL_16x8 );

        i_part_cost = a->l0.me16x8[i].cost;
        a->i_mb_partition16x8[i] = D_L0_8x8; /* not actually 8x8, only the L0 matters */

        if( a->l1.me16x8[i].cost < i_part_cost )
        {
            i_part_cost = a->l1.me16x8[i].cost;
            a->i_mb_partition16x8[i] = D_L1_8x8;
        }
        if( i_part_cost_bi + a->i_lambda * 1 < i_part_cost )
        {
            i_part_cost = i_part_cost_bi;
            a->i_mb_partition16x8[i] = D_BI_8x8;
        }
        a->i_cost16x8bi += i_part_cost;

        /* Early termination based on the current SATD score of partition[0]
           plus the estimated SATD score of partition[1] */
        if( a->b_early_terminate && (!i && i_part_cost + a->i_cost_est16x8[1] > i_best_satd
            * (16 + (!!a->i_mbrd + !!h->mb.i_psy_rd))/16) )
        {
            a->i_cost16x8bi = COST_MAX;
            return;
        }

        mb_cache_mv_b16x8( h, a, i, 0 );
    }

    /* mb type cost */
    a->i_mb_type16x8 = B_L0_L0
        + (a->i_mb_partition16x8[0]>>2) * 3
        + (a->i_mb_partition16x8[1]>>2);
    a->i_cost16x8bi += a->i_lambda * i_mb_b16x8_cost_table[a->i_mb_type16x8];
}
2453
/* B-frame 8x16 analysis — vertical-half counterpart of
 * mb_analyse_inter_b16x8: candidate refs come from the covering 8x8
 * blocks, BI is evaluated, and the cheapest of L0/L1/BI is chosen per
 * half, with the same early-termination rule. */
static void mb_analyse_inter_b8x16( x264_t *h, x264_mb_analysis_t *a, int i_best_satd )
{
    ALIGNED_ARRAY_16( pixel, pix,[2],[8*16] );
    ALIGNED_ARRAY_8( int16_t, mvc,[3],[2] );

    h->mb.i_partition = D_8x16;
    a->i_cost8x16bi = 0;

    for( int i = 0; i < 2; i++ )
    {
        int i_part_cost;
        int i_part_cost_bi = 0;
        intptr_t stride[2] = {8,8};
        pixel *src[2];
        x264_me_t m;
        m.i_pixel = PIXEL_8x16;
        LOAD_FENC( &m, h->mb.pic.p_fenc, 8*i, 0 );

        for( int l = 0; l < 2; l++ )
        {
            x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
            /* Candidate refs: those picked by the two vertically-stacked
             * 8x8 blocks covering this half. */
            int ref8[2] = { lX->me8x8[i].i_ref, lX->me8x8[i+2].i_ref };
            int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;
            lX->me8x16[i].cost = INT_MAX;
            for( int j = 0; j < i_ref8s; j++ )
            {
                int i_ref = ref8[j];
                m.i_ref_cost = REF_COST( l, i_ref );

                LOAD_HPELS( &m, h->mb.pic.p_fref[l][i_ref], l, i_ref, 8*i, 0 );

                /* MV candidates: the 16x16 MV and the two 8x8 MVs. */
                CP32( mvc[0], lX->mvc[i_ref][0] );
                CP32( mvc[1], lX->mvc[i_ref][i+1] );
                CP32( mvc[2], lX->mvc[i_ref][i+3] );

                x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, l, i_ref );
                x264_mb_predict_mv( h, l, 4*i, 2, m.mvp );
                x264_me_search( h, &m, mvc, 3 );
                m.cost += m.i_ref_cost;

                if( m.cost < lX->me8x16[i].cost )
                    h->mc.memcpy_aligned( &lX->me8x16[i], &m, sizeof(x264_me_t) );
            }
        }

        /* BI mode */
        src[0] = h->mc.get_ref( pix[0], &stride[0], a->l0.me8x16[i].p_fref, a->l0.me8x16[i].i_stride[0],
                                a->l0.me8x16[i].mv[0], a->l0.me8x16[i].mv[1], 8, 16, x264_weight_none );
        src[1] = h->mc.get_ref( pix[1], &stride[1], a->l1.me8x16[i].p_fref, a->l1.me8x16[i].i_stride[0],
                                a->l1.me8x16[i].mv[0], a->l1.me8x16[i].mv[1], 8, 16, x264_weight_none );
        h->mc.avg[PIXEL_8x16]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.me8x16[i].i_ref][a->l1.me8x16[i].i_ref] );

        i_part_cost_bi = h->pixf.mbcmp[PIXEL_8x16]( a->l0.me8x16[i].p_fenc[0], FENC_STRIDE, pix[0], 8 )
                        + a->l0.me8x16[i].cost_mv + a->l1.me8x16[i].cost_mv + a->l0.me8x16[i].i_ref_cost
                        + a->l1.me8x16[i].i_ref_cost;

        if( h->mb.b_chroma_me )
            i_part_cost_bi += analyse_bi_chroma( h, a, i, PIXEL_8x16 );

        i_part_cost = a->l0.me8x16[i].cost;
        a->i_mb_partition8x16[i] = D_L0_8x8;

        if( a->l1.me8x16[i].cost < i_part_cost )
        {
            i_part_cost = a->l1.me8x16[i].cost;
            a->i_mb_partition8x16[i] = D_L1_8x8;
        }
        if( i_part_cost_bi + a->i_lambda * 1 < i_part_cost )
        {
            i_part_cost = i_part_cost_bi;
            a->i_mb_partition8x16[i] = D_BI_8x8;
        }
        a->i_cost8x16bi += i_part_cost;

        /* Early termination based on the current SATD score of partition[0]
           plus the estimated SATD score of partition[1] */
        if( a->b_early_terminate && (!i && i_part_cost + a->i_cost_est8x16[1] > i_best_satd
            * (16 + (!!a->i_mbrd + !!h->mb.i_psy_rd))/16) )
        {
            a->i_cost8x16bi = COST_MAX;
            return;
        }

        mb_cache_mv_b8x16( h, a, i, 0 );
    }

    /* mb type cost */
    a->i_mb_type8x16 = B_L0_L0
        + (a->i_mb_partition8x16[0]>>2) * 3
        + (a->i_mb_partition8x16[1]>>2);
    a->i_cost8x16bi += a->i_lambda * i_mb_b16x8_cost_table[a->i_mb_type8x16];
}
2546
/* Refine P-frame partition decisions with true rate-distortion cost.
 * Each candidate partition (16x16, 16x8, 8x16, 8x8) whose SATD-based cost is
 * within the early-termination threshold of the best SATD score i_satd is
 * re-scored with rd_cost_mb() and the RD cost written back into *a.
 * Candidates over threshold are marked COST_MAX so the caller skips them.
 * Side effects: sets h->mb.i_type/i_partition (and i_sub_partition for 8x8)
 * while evaluating; the caller is expected to re-set the final mode. */
static void mb_analyse_p_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd )
{
    /* Only RD-check partitions whose SATD is within 25% of the best score. */
    int thresh = a->b_early_terminate ? i_satd * 5/4 + 1 : COST_MAX;

    h->mb.i_type = P_L0;
    /* 16x16 gets a looser 3/2 threshold and may already have an RD score. */
    if( a->l0.i_rd16x16 == COST_MAX && (!a->b_early_terminate || a->l0.me16x16.cost <= i_satd * 3/2) )
    {
        h->mb.i_partition = D_16x16;
        analyse_update_cache( h, a );
        a->l0.i_rd16x16 = rd_cost_mb( h, a->i_lambda2 );
    }

    if( a->l0.i_cost16x8 < thresh )
    {
        h->mb.i_partition = D_16x8;
        analyse_update_cache( h, a );
        a->l0.i_cost16x8 = rd_cost_mb( h, a->i_lambda2 );
    }
    else
        a->l0.i_cost16x8 = COST_MAX;

    if( a->l0.i_cost8x16 < thresh )
    {
        h->mb.i_partition = D_8x16;
        analyse_update_cache( h, a );
        a->l0.i_cost8x16 = rd_cost_mb( h, a->i_lambda2 );
    }
    else
        a->l0.i_cost8x16 = COST_MAX;

    if( a->l0.i_cost8x8 < thresh )
    {
        h->mb.i_type = P_8x8;
        h->mb.i_partition = D_8x8;
        if( h->param.analyse.inter & X264_ANALYSE_PSUB8x8 )
        {
            x264_macroblock_cache_ref( h, 0, 0, 2, 2, 0, a->l0.me8x8[0].i_ref );
            x264_macroblock_cache_ref( h, 2, 0, 2, 2, 0, a->l0.me8x8[1].i_ref );
            x264_macroblock_cache_ref( h, 0, 2, 2, 2, 0, a->l0.me8x8[2].i_ref );
            x264_macroblock_cache_ref( h, 2, 2, 2, 2, 0, a->l0.me8x8[3].i_ref );
            /* FIXME: In the 8x8 blocks where RDO isn't run, the NNZ values used for context selection
             * for future blocks are those left over from previous RDO calls. */
            for( int i = 0; i < 4; i++ )
            {
                /* SATD costs of the candidate subtypes, indexed D_L0_4x4..D_L0_8x8. */
                int costs[4] = {a->l0.i_cost4x4[i], a->l0.i_cost8x4[i], a->l0.i_cost4x8[i], a->l0.me8x8[i].cost};
                int sub8x8_thresh = a->b_early_terminate ? X264_MIN4( costs[0], costs[1], costs[2], costs[3] ) * 5 / 4 : COST_MAX;
                int subtype, btype = D_L0_8x8;
                uint64_t bcost = COST_MAX64;
                for( subtype = D_L0_4x4; subtype <= D_L0_8x8; subtype++ )
                {
                    uint64_t cost;
                    if( costs[subtype] > sub8x8_thresh )
                        continue;
                    h->mb.i_sub_partition[i] = subtype;
                    mb_cache_mv_p8x8( h, a, i );
                    /* The incumbent subtype still has its MVs cached above, but
                     * is not re-scored with part RD. */
                    if( subtype == btype )
                        continue;
                    cost = x264_rd_cost_part( h, a->i_lambda2, i<<2, PIXEL_8x8 );
                    COPY2_IF_LT( bcost, cost, btype, subtype );
                }
                /* Restore the winning subtype's MVs if the loop left another one cached. */
                if( h->mb.i_sub_partition[i] != btype )
                {
                    h->mb.i_sub_partition[i] = btype;
                    mb_cache_mv_p8x8( h, a, i );
                }
            }
        }
        else
            analyse_update_cache( h, a );
        a->l0.i_cost8x8 = rd_cost_mb( h, a->i_lambda2 );
    }
    else
        a->l0.i_cost8x8 = COST_MAX;
}
2621
/* Refine B-frame mode decisions with true rate-distortion cost.
 * Each candidate mode (direct, L0/L1/BI 16x16, 8x8, 16x8, 8x16) whose SATD
 * cost is under thresh — and that has not already been RD-scored (sentinel
 * COST_MAX) — is re-evaluated with rd_cost_mb(), storing the RD cost in *a.
 * Side effects: sets h->mb.i_type/i_partition for each candidate evaluated. */
static void mb_analyse_b_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_inter )
{
    /* Psy-RD gets a slightly looser threshold (+1/16) on top of the base 17/16. */
    int thresh = a->b_early_terminate ? i_satd_inter * (17 + (!!h->mb.i_psy_rd))/16 + 1 : COST_MAX;

    if( a->b_direct_available && a->i_rd16x16direct == COST_MAX )
    {
        h->mb.i_type = B_DIRECT;
        /* Assumes direct/skip MC is still in fdec */
        /* Requires b-rdo to be done before intra analysis */
        h->mb.b_skip_mc = 1;
        analyse_update_cache( h, a );
        a->i_rd16x16direct = rd_cost_mb( h, a->i_lambda2 );
        h->mb.b_skip_mc = 0;
    }

    //FIXME not all the update_cache calls are needed
    h->mb.i_partition = D_16x16;
    /* L0 */
    if( a->l0.me16x16.cost < thresh && a->l0.i_rd16x16 == COST_MAX )
    {
        h->mb.i_type = B_L0_L0;
        analyse_update_cache( h, a );
        a->l0.i_rd16x16 = rd_cost_mb( h, a->i_lambda2 );
    }

    /* L1 */
    if( a->l1.me16x16.cost < thresh && a->l1.i_rd16x16 == COST_MAX )
    {
        h->mb.i_type = B_L1_L1;
        analyse_update_cache( h, a );
        a->l1.i_rd16x16 = rd_cost_mb( h, a->i_lambda2 );
    }

    /* BI */
    if( a->i_cost16x16bi < thresh && a->i_rd16x16bi == COST_MAX )
    {
        h->mb.i_type = B_BI_BI;
        analyse_update_cache( h, a );
        a->i_rd16x16bi = rd_cost_mb( h, a->i_lambda2 );
    }

    /* 8x8 */
    if( a->i_cost8x8bi < thresh && a->i_rd8x8bi == COST_MAX )
    {
        h->mb.i_type = B_8x8;
        h->mb.i_partition = D_8x8;
        analyse_update_cache( h, a );
        a->i_rd8x8bi = rd_cost_mb( h, a->i_lambda2 );
        x264_macroblock_cache_skip( h, 0, 0, 4, 4, 0 );
    }

    /* 16x8 */
    if( a->i_cost16x8bi < thresh && a->i_rd16x8bi == COST_MAX )
    {
        /* i_mb_type16x8 encodes which of L0/L1/BI each half uses. */
        h->mb.i_type = a->i_mb_type16x8;
        h->mb.i_partition = D_16x8;
        analyse_update_cache( h, a );
        a->i_rd16x8bi = rd_cost_mb( h, a->i_lambda2 );
    }

    /* 8x16 */
    if( a->i_cost8x16bi < thresh && a->i_rd8x16bi == COST_MAX )
    {
        h->mb.i_type = a->i_mb_type8x16;
        h->mb.i_partition = D_8x16;
        analyse_update_cache( h, a );
        a->i_rd8x16bi = rd_cost_mb( h, a->i_lambda2 );
    }
}
2691
/* Bidirectional refinement pass: after the macroblock's final mode has been
 * picked, run SATD-based bidir motion refinement on every partition that
 * ended up using BI prediction.  Intra macroblocks have nothing to refine. */
static void refine_bidir( x264_t *h, x264_mb_analysis_t *a )
{
    if( IS_INTRA(h->mb.i_type) )
        return;

    switch( h->mb.i_partition )
    {
        case D_16x16:
            if( h->mb.i_type == B_BI_BI )
            {
                int weight = h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref];
                x264_me_refine_bidir_satd( h, &a->l0.bi16x16, &a->l1.bi16x16, weight );
            }
            break;
        case D_16x8:
            for( int part = 0; part < 2; part++ )
            {
                if( a->i_mb_partition16x8[part] != D_BI_8x8 )
                    continue;
                int weight = h->mb.bipred_weight[a->l0.me16x8[part].i_ref][a->l1.me16x8[part].i_ref];
                x264_me_refine_bidir_satd( h, &a->l0.me16x8[part], &a->l1.me16x8[part], weight );
            }
            break;
        case D_8x16:
            for( int part = 0; part < 2; part++ )
            {
                if( a->i_mb_partition8x16[part] != D_BI_8x8 )
                    continue;
                int weight = h->mb.bipred_weight[a->l0.me8x16[part].i_ref][a->l1.me8x16[part].i_ref];
                x264_me_refine_bidir_satd( h, &a->l0.me8x16[part], &a->l1.me8x16[part], weight );
            }
            break;
        case D_8x8:
            for( int part = 0; part < 4; part++ )
            {
                if( h->mb.i_sub_partition[part] != D_BI_8x8 )
                    continue;
                int weight = h->mb.bipred_weight[a->l0.me8x8[part].i_ref][a->l1.me8x8[part].i_ref];
                x264_me_refine_bidir_satd( h, &a->l0.me8x8[part], &a->l1.me8x8[part], weight );
            }
            break;
    }
}
2734
/* Decide between the 4x4 and 8x8 transform for the current inter macroblock
 * by comparing SA8D (8x8 transform proxy) against SATD (4x4 proxy) on the
 * motion-compensated prediction.  Sets h->mb.b_transform_8x8 and marks MC as
 * done (b_skip_mc) so macroblock_encode won't redo it. */
static inline void mb_analyse_transform( x264_t *h )
{
    if( !x264_mb_transform_8x8_allowed( h ) || !h->param.analyse.b_transform_8x8 || h->mb.b_lossless )
        return;

    /* Only luma MC is really needed for 4:2:0, but the full MC is re-used in macroblock_encode. */
    x264_mb_mc( h );

    int plane_count = CHROMA444 && h->mb.b_chroma_me ? 3 : 1;
    int cost_sa8d = 0, cost_satd = 0;
    /* Not all platforms have a merged SATD function */
    if( h->pixf.sa8d_satd[PIXEL_16x16] )
    {
        /* Merged function packs SA8D into the low 32 bits, SATD into the high 32. */
        uint64_t merged = 0;
        for( int p = 0; p < plane_count; p++ )
            merged += h->pixf.sa8d_satd[PIXEL_16x16]( h->mb.pic.p_fenc[p], FENC_STRIDE,
                                                      h->mb.pic.p_fdec[p], FDEC_STRIDE );
        cost_sa8d = (uint32_t)merged;
        cost_satd = (uint32_t)(merged >> 32);
    }
    else
    {
        for( int p = 0; p < plane_count; p++ )
        {
            cost_sa8d += h->pixf.sa8d[PIXEL_16x16]( h->mb.pic.p_fenc[p], FENC_STRIDE,
                                                    h->mb.pic.p_fdec[p], FDEC_STRIDE );
            cost_satd += h->pixf.satd[PIXEL_16x16]( h->mb.pic.p_fenc[p], FENC_STRIDE,
                                                    h->mb.pic.p_fdec[p], FDEC_STRIDE );
        }
    }

    h->mb.b_transform_8x8 = cost_sa8d < cost_satd;
    h->mb.b_skip_mc = 1;
}
2772
/* Re-check the transform-size decision with full RD: toggle b_transform_8x8,
 * re-score the macroblock, and keep the toggle only if RD cost does not
 * worsen.  On acceptance, *i_rd is updated and *i_satd is rescaled by the
 * same ratio so later SATD-vs-RD comparisons stay consistent.
 * For P_8x8, sub-partitions are temporarily forced to 8x8 (required for the
 * 8x8 transform) and restored if the toggle is rejected. */
static inline void mb_analyse_transform_rd( x264_t *h, x264_mb_analysis_t *a, int *i_satd, int *i_rd )
{
    if( h->param.analyse.b_transform_8x8 && h->pps->b_transform_8x8_mode )
    {
        /* Save all four sub-partition bytes in one 32-bit word for cheap restore. */
        uint32_t subpart_bak = M32( h->mb.i_sub_partition );
        /* Try switching the subpartitions to 8x8 so that we can use 8x8 transform mode */
        if( h->mb.i_type == P_8x8 )
            M32( h->mb.i_sub_partition ) = D_L0_8x8*0x01010101;
        else if( !x264_transform_allowed[h->mb.i_type] )
            return;

        analyse_update_cache( h, a );
        h->mb.b_transform_8x8 ^= 1;
        /* FIXME only luma is needed for 4:2:0, but the score for comparison already includes chroma */
        int i_rd8 = rd_cost_mb( h, a->i_lambda2 );

        if( *i_rd >= i_rd8 )
        {
            if( *i_rd > 0 )
                /* Scale the SATD estimate by the same ratio the RD cost improved. */
                *i_satd = (int64_t)(*i_satd) * i_rd8 / *i_rd;
            *i_rd = i_rd8;
        }
        else
        {
            /* The toggle lost: restore the transform flag and sub-partitions. */
            h->mb.b_transform_8x8 ^= 1;
            M32( h->mb.i_sub_partition ) = subpart_bak;
        }
    }
}
2802
2803 /* Rate-distortion optimal QP selection.
2804 * FIXME: More than half of the benefit of this function seems to be
2805 * in the way it improves the coding of chroma DC (by decimating or
2806 * finding a better way to code a single DC coefficient.)
2807 * There must be a more efficient way to get that portion of the benefit
2808 * without doing full QP-RD, but RD-decimation doesn't seem to do the
2809 * trick. */
/* Search for the RD-optimal QP for the current macroblock.
 * Walks QP away from the analysis QP in both directions (up first iff the
 * macroblock has a nonzero CBP), allowing a small number of non-improving
 * steps before stopping; always also tries the previous macroblock's QP.
 * On return h->mb.i_qp / i_chroma_qp hold the best QP found, and the 8x8
 * transform decision is re-checked if the QP changed. */
static inline void mb_analyse_qp_rd( x264_t *h, x264_mb_analysis_t *a )
{
    int bcost, cost, failures, prevcost, origcost;
    int orig_qp = h->mb.i_qp, bqp = h->mb.i_qp;
    int last_qp_tried = 0;
    /* Baseline: RD cost at the analysis QP. */
    origcost = bcost = rd_cost_mb( h, a->i_lambda2 );
    int origcbp = h->mb.cbp[h->mb.i_mb_xy];

    /* If CBP is already zero, don't raise the quantizer any higher. */
    for( int direction = origcbp ? 1 : -1; direction >= -1; direction-=2 )
    {
        /* Without psy-RD, require monotonicity when moving quant away from previous
         * macroblock's quant; allow 1 failure when moving quant towards previous quant.
         * With psy-RD, allow 1 failure when moving quant away from previous quant,
         * allow 2 failures when moving quant towards previous quant.
         * Psy-RD generally seems to result in more chaotic RD score-vs-quantizer curves. */
        int threshold = (!!h->mb.i_psy_rd);
        /* Raise the threshold for failures if we're moving towards the last QP. */
        if( ( h->mb.i_last_qp < orig_qp && direction == -1 ) ||
            ( h->mb.i_last_qp > orig_qp && direction == 1 ) )
            threshold++;
        h->mb.i_qp = orig_qp;
        failures = 0;
        prevcost = origcost;

        /* If the current QP results in an empty CBP, it's highly likely that lower QPs
         * (up to a point) will too.  So, jump down to where the threshold will kick in
         * and check the QP there.  If the CBP is still empty, skip the main loop.
         * If it isn't empty, we would have ended up having to check this QP anyways,
         * so as long as we store it for later lookup, we lose nothing. */
        int already_checked_qp = -1;
        int already_checked_cost = COST_MAX;
        if( direction == -1 )
        {
            if( !origcbp )
            {
                h->mb.i_qp = X264_MAX( h->mb.i_qp - threshold - 1, SPEC_QP( h->param.rc.i_qp_min ) );
                h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
                already_checked_cost = rd_cost_mb( h, a->i_lambda2 );
                if( !h->mb.cbp[h->mb.i_mb_xy] )
                {
                    /* If our empty-CBP block is lower QP than the last QP,
                     * the last QP almost surely doesn't have a CBP either. */
                    if( h->mb.i_last_qp > h->mb.i_qp )
                        last_qp_tried = 1;
                    break;
                }
                already_checked_qp = h->mb.i_qp;
                h->mb.i_qp = orig_qp;
            }
        }

        h->mb.i_qp += direction;
        while( h->mb.i_qp >= h->param.rc.i_qp_min && h->mb.i_qp <= SPEC_QP( h->param.rc.i_qp_max ) )
        {
            if( h->mb.i_last_qp == h->mb.i_qp )
                last_qp_tried = 1;
            /* Reuse the cost computed during the empty-CBP jump, if any. */
            if( h->mb.i_qp == already_checked_qp )
                cost = already_checked_cost;
            else
            {
                h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
                cost = rd_cost_mb( h, a->i_lambda2 );
                COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp );
            }

            /* We can't assume that the costs are monotonic over QPs.
             * Tie case-as-failure seems to give better results. */
            if( cost < prevcost )
                failures = 0;
            else
                failures++;
            prevcost = cost;

            if( failures > threshold )
                break;
            /* Once the CBP goes empty while raising QP, stop — no bits left to save. */
            if( direction == 1 && !h->mb.cbp[h->mb.i_mb_xy] )
                break;
            h->mb.i_qp += direction;
        }
    }

    /* Always try the last block's QP. */
    if( !last_qp_tried )
    {
        h->mb.i_qp = h->mb.i_last_qp;
        h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
        cost = rd_cost_mb( h, a->i_lambda2 );
        COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp );
    }

    h->mb.i_qp = bqp;
    h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];

    /* Check transform again; decision from before may no longer be optimal. */
    if( h->mb.i_qp != orig_qp && h->param.analyse.b_transform_8x8 &&
        x264_mb_transform_8x8_allowed( h ) )
    {
        h->mb.b_transform_8x8 ^= 1;
        cost = rd_cost_mb( h, a->i_lambda2 );
        if( cost > bcost )
            h->mb.b_transform_8x8 ^= 1;
    }
}
2914
2915 /*****************************************************************************
2916 * x264_macroblock_analyse:
2917 *****************************************************************************/
x264_macroblock_analyse(x264_t * h)2918 void x264_macroblock_analyse( x264_t *h )
2919 {
2920 x264_mb_analysis_t analysis;
2921 int i_cost = COST_MAX;
2922
2923 h->mb.i_qp = x264_ratecontrol_mb_qp( h );
2924 /* If the QP of this MB is within 1 of the previous MB, code the same QP as the previous MB,
2925 * to lower the bit cost of the qp_delta. Don't do this if QPRD is enabled. */
2926 if( h->param.rc.i_aq_mode && h->param.analyse.i_subpel_refine < 10 )
2927 h->mb.i_qp = abs(h->mb.i_qp - h->mb.i_last_qp) == 1 ? h->mb.i_last_qp : h->mb.i_qp;
2928
2929 if( h->param.analyse.b_mb_info )
2930 h->fdec->effective_qp[h->mb.i_mb_xy] = h->mb.i_qp; /* Store the real analysis QP. */
2931 mb_analyse_init( h, &analysis, h->mb.i_qp );
2932
2933 /*--------------------------- Do the analysis ---------------------------*/
2934 if( h->sh.i_type == SLICE_TYPE_I )
2935 {
2936 intra_analysis:
2937 if( analysis.i_mbrd )
2938 mb_init_fenc_cache( h, analysis.i_mbrd >= 2 );
2939 mb_analyse_intra( h, &analysis, COST_MAX );
2940 if( analysis.i_mbrd )
2941 intra_rd( h, &analysis, COST_MAX );
2942
2943 i_cost = analysis.i_satd_i16x16;
2944 h->mb.i_type = I_16x16;
2945 COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, h->mb.i_type, I_4x4 );
2946 COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, h->mb.i_type, I_8x8 );
2947 if( analysis.i_satd_pcm < i_cost )
2948 h->mb.i_type = I_PCM;
2949
2950 else if( analysis.i_mbrd >= 2 )
2951 intra_rd_refine( h, &analysis );
2952 }
2953 else if( h->sh.i_type == SLICE_TYPE_P )
2954 {
2955 int b_skip = 0;
2956
2957 h->mc.prefetch_ref( h->mb.pic.p_fref[0][0][h->mb.i_mb_x&3], h->mb.pic.i_stride[0], 0 );
2958
2959 analysis.b_try_skip = 0;
2960 if( analysis.b_force_intra )
2961 {
2962 if( !h->param.analyse.b_psy )
2963 {
2964 mb_analyse_init_qp( h, &analysis, X264_MAX( h->mb.i_qp - h->mb.ip_offset, h->param.rc.i_qp_min ) );
2965 goto intra_analysis;
2966 }
2967 }
2968 else
2969 {
2970 /* Special fast-skip logic using information from mb_info. */
2971 if( h->fdec->mb_info && (h->fdec->mb_info[h->mb.i_mb_xy]&X264_MBINFO_CONSTANT) )
2972 {
2973 if( !SLICE_MBAFF && (h->fdec->i_frame - h->fref[0][0]->i_frame) == 1 && !h->sh.b_weighted_pred &&
2974 h->fref[0][0]->effective_qp[h->mb.i_mb_xy] <= h->mb.i_qp )
2975 {
2976 h->mb.i_partition = D_16x16;
2977 /* Use the P-SKIP MV if we can... */
2978 if( !M32(h->mb.cache.pskip_mv) )
2979 {
2980 b_skip = 1;
2981 h->mb.i_type = P_SKIP;
2982 }
2983 /* Otherwise, just force a 16x16 block. */
2984 else
2985 {
2986 h->mb.i_type = P_L0;
2987 analysis.l0.me16x16.i_ref = 0;
2988 M32( analysis.l0.me16x16.mv ) = 0;
2989 }
2990 goto skip_analysis;
2991 }
2992 /* Reset the information accordingly */
2993 else if( h->param.analyse.b_mb_info_update )
2994 h->fdec->mb_info[h->mb.i_mb_xy] &= ~X264_MBINFO_CONSTANT;
2995 }
2996
2997 int skip_invalid = h->i_thread_frames > 1 && h->mb.cache.pskip_mv[1] > h->mb.mv_max_spel[1];
2998 /* If the current macroblock is off the frame, just skip it. */
2999 if( HAVE_INTERLACED && !MB_INTERLACED && h->mb.i_mb_y * 16 >= h->param.i_height && !skip_invalid )
3000 b_skip = 1;
3001 /* Fast P_SKIP detection */
3002 else if( h->param.analyse.b_fast_pskip )
3003 {
3004 if( skip_invalid )
3005 // FIXME don't need to check this if the reference frame is done
3006 {}
3007 else if( h->param.analyse.i_subpel_refine >= 3 )
3008 analysis.b_try_skip = 1;
3009 else if( h->mb.i_mb_type_left[0] == P_SKIP ||
3010 h->mb.i_mb_type_top == P_SKIP ||
3011 h->mb.i_mb_type_topleft == P_SKIP ||
3012 h->mb.i_mb_type_topright == P_SKIP )
3013 b_skip = x264_macroblock_probe_pskip( h );
3014 }
3015 }
3016
3017 h->mc.prefetch_ref( h->mb.pic.p_fref[0][0][h->mb.i_mb_x&3], h->mb.pic.i_stride[0], 1 );
3018
3019 if( b_skip )
3020 {
3021 h->mb.i_type = P_SKIP;
3022 h->mb.i_partition = D_16x16;
3023 assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 );
3024 skip_analysis:
3025 /* Set up MVs for future predictors */
3026 for( int i = 0; i < h->mb.pic.i_fref[0]; i++ )
3027 M32( h->mb.mvr[0][i][h->mb.i_mb_xy] ) = 0;
3028 }
3029 else
3030 {
3031 const unsigned int flags = h->param.analyse.inter;
3032 int i_type;
3033 int i_partition;
3034 int i_satd_inter, i_satd_intra;
3035
3036 mb_analyse_load_costs( h, &analysis );
3037
3038 mb_analyse_inter_p16x16( h, &analysis );
3039
3040 if( h->mb.i_type == P_SKIP )
3041 {
3042 for( int i = 1; i < h->mb.pic.i_fref[0]; i++ )
3043 M32( h->mb.mvr[0][i][h->mb.i_mb_xy] ) = 0;
3044 return;
3045 }
3046
3047 if( flags & X264_ANALYSE_PSUB16x16 )
3048 {
3049 if( h->param.analyse.b_mixed_references )
3050 mb_analyse_inter_p8x8_mixed_ref( h, &analysis );
3051 else
3052 mb_analyse_inter_p8x8( h, &analysis );
3053 }
3054
3055 /* Select best inter mode */
3056 i_type = P_L0;
3057 i_partition = D_16x16;
3058 i_cost = analysis.l0.me16x16.cost;
3059
3060 if( ( flags & X264_ANALYSE_PSUB16x16 ) && (!analysis.b_early_terminate ||
3061 analysis.l0.i_cost8x8 < analysis.l0.me16x16.cost) )
3062 {
3063 i_type = P_8x8;
3064 i_partition = D_8x8;
3065 i_cost = analysis.l0.i_cost8x8;
3066
3067 /* Do sub 8x8 */
3068 if( flags & X264_ANALYSE_PSUB8x8 )
3069 {
3070 for( int i = 0; i < 4; i++ )
3071 {
3072 mb_analyse_inter_p4x4( h, &analysis, i );
3073 int i_thresh8x4 = analysis.l0.me4x4[i][1].cost_mv + analysis.l0.me4x4[i][2].cost_mv;
3074 if( !analysis.b_early_terminate || analysis.l0.i_cost4x4[i] < analysis.l0.me8x8[i].cost + i_thresh8x4 )
3075 {
3076 int i_cost8x8 = analysis.l0.i_cost4x4[i];
3077 h->mb.i_sub_partition[i] = D_L0_4x4;
3078
3079 mb_analyse_inter_p8x4( h, &analysis, i );
3080 COPY2_IF_LT( i_cost8x8, analysis.l0.i_cost8x4[i],
3081 h->mb.i_sub_partition[i], D_L0_8x4 );
3082
3083 mb_analyse_inter_p4x8( h, &analysis, i );
3084 COPY2_IF_LT( i_cost8x8, analysis.l0.i_cost4x8[i],
3085 h->mb.i_sub_partition[i], D_L0_4x8 );
3086
3087 i_cost += i_cost8x8 - analysis.l0.me8x8[i].cost;
3088 }
3089 mb_cache_mv_p8x8( h, &analysis, i );
3090 }
3091 analysis.l0.i_cost8x8 = i_cost;
3092 }
3093 }
3094
3095 /* Now do 16x8/8x16 */
3096 int i_thresh16x8 = analysis.l0.me8x8[1].cost_mv + analysis.l0.me8x8[2].cost_mv;
3097 if( ( flags & X264_ANALYSE_PSUB16x16 ) && (!analysis.b_early_terminate ||
3098 analysis.l0.i_cost8x8 < analysis.l0.me16x16.cost + i_thresh16x8) )
3099 {
3100 int i_avg_mv_ref_cost = (analysis.l0.me8x8[2].cost_mv + analysis.l0.me8x8[2].i_ref_cost
3101 + analysis.l0.me8x8[3].cost_mv + analysis.l0.me8x8[3].i_ref_cost + 1) >> 1;
3102 analysis.i_cost_est16x8[1] = analysis.i_satd8x8[0][2] + analysis.i_satd8x8[0][3] + i_avg_mv_ref_cost;
3103
3104 mb_analyse_inter_p16x8( h, &analysis, i_cost );
3105 COPY3_IF_LT( i_cost, analysis.l0.i_cost16x8, i_type, P_L0, i_partition, D_16x8 );
3106
3107 i_avg_mv_ref_cost = (analysis.l0.me8x8[1].cost_mv + analysis.l0.me8x8[1].i_ref_cost
3108 + analysis.l0.me8x8[3].cost_mv + analysis.l0.me8x8[3].i_ref_cost + 1) >> 1;
3109 analysis.i_cost_est8x16[1] = analysis.i_satd8x8[0][1] + analysis.i_satd8x8[0][3] + i_avg_mv_ref_cost;
3110
3111 mb_analyse_inter_p8x16( h, &analysis, i_cost );
3112 COPY3_IF_LT( i_cost, analysis.l0.i_cost8x16, i_type, P_L0, i_partition, D_8x16 );
3113 }
3114
3115 h->mb.i_partition = i_partition;
3116
3117 /* refine qpel */
3118 //FIXME mb_type costs?
3119 if( analysis.i_mbrd || !h->mb.i_subpel_refine )
3120 {
3121 /* refine later */
3122 }
3123 else if( i_partition == D_16x16 )
3124 {
3125 x264_me_refine_qpel( h, &analysis.l0.me16x16 );
3126 i_cost = analysis.l0.me16x16.cost;
3127 }
3128 else if( i_partition == D_16x8 )
3129 {
3130 x264_me_refine_qpel( h, &analysis.l0.me16x8[0] );
3131 x264_me_refine_qpel( h, &analysis.l0.me16x8[1] );
3132 i_cost = analysis.l0.me16x8[0].cost + analysis.l0.me16x8[1].cost;
3133 }
3134 else if( i_partition == D_8x16 )
3135 {
3136 x264_me_refine_qpel( h, &analysis.l0.me8x16[0] );
3137 x264_me_refine_qpel( h, &analysis.l0.me8x16[1] );
3138 i_cost = analysis.l0.me8x16[0].cost + analysis.l0.me8x16[1].cost;
3139 }
3140 else if( i_partition == D_8x8 )
3141 {
3142 i_cost = 0;
3143 for( int i8x8 = 0; i8x8 < 4; i8x8++ )
3144 {
3145 switch( h->mb.i_sub_partition[i8x8] )
3146 {
3147 case D_L0_8x8:
3148 x264_me_refine_qpel( h, &analysis.l0.me8x8[i8x8] );
3149 i_cost += analysis.l0.me8x8[i8x8].cost;
3150 break;
3151 case D_L0_8x4:
3152 x264_me_refine_qpel( h, &analysis.l0.me8x4[i8x8][0] );
3153 x264_me_refine_qpel( h, &analysis.l0.me8x4[i8x8][1] );
3154 i_cost += analysis.l0.me8x4[i8x8][0].cost +
3155 analysis.l0.me8x4[i8x8][1].cost;
3156 break;
3157 case D_L0_4x8:
3158 x264_me_refine_qpel( h, &analysis.l0.me4x8[i8x8][0] );
3159 x264_me_refine_qpel( h, &analysis.l0.me4x8[i8x8][1] );
3160 i_cost += analysis.l0.me4x8[i8x8][0].cost +
3161 analysis.l0.me4x8[i8x8][1].cost;
3162 break;
3163
3164 case D_L0_4x4:
3165 x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][0] );
3166 x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][1] );
3167 x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][2] );
3168 x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][3] );
3169 i_cost += analysis.l0.me4x4[i8x8][0].cost +
3170 analysis.l0.me4x4[i8x8][1].cost +
3171 analysis.l0.me4x4[i8x8][2].cost +
3172 analysis.l0.me4x4[i8x8][3].cost;
3173 break;
3174 default:
3175 x264_log( h, X264_LOG_ERROR, "internal error (!8x8 && !4x4)\n" );
3176 break;
3177 }
3178 }
3179 }
3180
3181 if( h->mb.b_chroma_me )
3182 {
3183 if( CHROMA444 )
3184 {
3185 mb_analyse_intra( h, &analysis, i_cost );
3186 mb_analyse_intra_chroma( h, &analysis );
3187 }
3188 else
3189 {
3190 mb_analyse_intra_chroma( h, &analysis );
3191 mb_analyse_intra( h, &analysis, i_cost - analysis.i_satd_chroma );
3192 }
3193 analysis.i_satd_i16x16 += analysis.i_satd_chroma;
3194 analysis.i_satd_i8x8 += analysis.i_satd_chroma;
3195 analysis.i_satd_i4x4 += analysis.i_satd_chroma;
3196 }
3197 else
3198 mb_analyse_intra( h, &analysis, i_cost );
3199
3200 i_satd_inter = i_cost;
3201 i_satd_intra = X264_MIN3( analysis.i_satd_i16x16,
3202 analysis.i_satd_i8x8,
3203 analysis.i_satd_i4x4 );
3204
3205 if( analysis.i_mbrd )
3206 {
3207 mb_analyse_p_rd( h, &analysis, X264_MIN(i_satd_inter, i_satd_intra) );
3208 i_type = P_L0;
3209 i_partition = D_16x16;
3210 i_cost = analysis.l0.i_rd16x16;
3211 COPY2_IF_LT( i_cost, analysis.l0.i_cost16x8, i_partition, D_16x8 );
3212 COPY2_IF_LT( i_cost, analysis.l0.i_cost8x16, i_partition, D_8x16 );
3213 COPY3_IF_LT( i_cost, analysis.l0.i_cost8x8, i_partition, D_8x8, i_type, P_8x8 );
3214 h->mb.i_type = i_type;
3215 h->mb.i_partition = i_partition;
3216 if( i_cost < COST_MAX )
3217 mb_analyse_transform_rd( h, &analysis, &i_satd_inter, &i_cost );
3218 intra_rd( h, &analysis, i_satd_inter * 5/4 + 1 );
3219 }
3220
3221 COPY2_IF_LT( i_cost, analysis.i_satd_i16x16, i_type, I_16x16 );
3222 COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, i_type, I_8x8 );
3223 COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, i_type, I_4x4 );
3224 COPY2_IF_LT( i_cost, analysis.i_satd_pcm, i_type, I_PCM );
3225
3226 h->mb.i_type = i_type;
3227
3228 if( analysis.b_force_intra && !IS_INTRA(i_type) )
3229 {
3230 /* Intra masking: copy fdec to fenc and re-encode the block as intra in order to make it appear as if
3231 * it was an inter block. */
3232 analyse_update_cache( h, &analysis );
3233 x264_macroblock_encode( h );
3234 for( int p = 0; p < (CHROMA444 ? 3 : 1); p++ )
3235 h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fenc[p], FENC_STRIDE, h->mb.pic.p_fdec[p], FDEC_STRIDE, 16 );
3236 if( !CHROMA444 )
3237 {
3238 int height = 16 >> CHROMA_V_SHIFT;
3239 h->mc.copy[PIXEL_8x8] ( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, height );
3240 h->mc.copy[PIXEL_8x8] ( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, height );
3241 }
3242 mb_analyse_init_qp( h, &analysis, X264_MAX( h->mb.i_qp - h->mb.ip_offset, h->param.rc.i_qp_min ) );
3243 goto intra_analysis;
3244 }
3245
3246 if( analysis.i_mbrd >= 2 && h->mb.i_type != I_PCM )
3247 {
3248 if( IS_INTRA( h->mb.i_type ) )
3249 {
3250 intra_rd_refine( h, &analysis );
3251 }
3252 else if( i_partition == D_16x16 )
3253 {
3254 x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, analysis.l0.me16x16.i_ref );
3255 analysis.l0.me16x16.cost = i_cost;
3256 x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
3257 }
3258 else if( i_partition == D_16x8 )
3259 {
3260 M32( h->mb.i_sub_partition ) = D_L0_8x8 * 0x01010101;
3261 x264_macroblock_cache_ref( h, 0, 0, 4, 2, 0, analysis.l0.me16x8[0].i_ref );
3262 x264_macroblock_cache_ref( h, 0, 2, 4, 2, 0, analysis.l0.me16x8[1].i_ref );
3263 x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[0], analysis.i_lambda2, 0, 0 );
3264 x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[1], analysis.i_lambda2, 8, 0 );
3265 }
3266 else if( i_partition == D_8x16 )
3267 {
3268 M32( h->mb.i_sub_partition ) = D_L0_8x8 * 0x01010101;
3269 x264_macroblock_cache_ref( h, 0, 0, 2, 4, 0, analysis.l0.me8x16[0].i_ref );
3270 x264_macroblock_cache_ref( h, 2, 0, 2, 4, 0, analysis.l0.me8x16[1].i_ref );
3271 x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[0], analysis.i_lambda2, 0, 0 );
3272 x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[1], analysis.i_lambda2, 4, 0 );
3273 }
3274 else if( i_partition == D_8x8 )
3275 {
3276 analyse_update_cache( h, &analysis );
3277 for( int i8x8 = 0; i8x8 < 4; i8x8++ )
3278 {
3279 if( h->mb.i_sub_partition[i8x8] == D_L0_8x8 )
3280 {
3281 x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i8x8], analysis.i_lambda2, i8x8*4, 0 );
3282 }
3283 else if( h->mb.i_sub_partition[i8x8] == D_L0_8x4 )
3284 {
3285 x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
3286 x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][1], analysis.i_lambda2, i8x8*4+2, 0 );
3287 }
3288 else if( h->mb.i_sub_partition[i8x8] == D_L0_4x8 )
3289 {
3290 x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
3291 x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
3292 }
3293 else if( h->mb.i_sub_partition[i8x8] == D_L0_4x4 )
3294 {
3295 x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
3296 x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
3297 x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][2], analysis.i_lambda2, i8x8*4+2, 0 );
3298 x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][3], analysis.i_lambda2, i8x8*4+3, 0 );
3299 }
3300 }
3301 }
3302 }
3303 }
3304 }
3305 else if( h->sh.i_type == SLICE_TYPE_B )
3306 {
3307 int i_bskip_cost = COST_MAX;
3308 int b_skip = 0;
3309
3310 if( analysis.i_mbrd )
3311 mb_init_fenc_cache( h, analysis.i_mbrd >= 2 );
3312
3313 h->mb.i_type = B_SKIP;
3314 if( h->mb.b_direct_auto_write )
3315 {
3316 /* direct=auto heuristic: prefer whichever mode allows more Skip macroblocks */
3317 for( int i = 0; i < 2; i++ )
3318 {
3319 int b_changed = 1;
3320 h->sh.b_direct_spatial_mv_pred ^= 1;
3321 analysis.b_direct_available = x264_mb_predict_mv_direct16x16( h, i && analysis.b_direct_available ? &b_changed : NULL );
3322 if( analysis.b_direct_available )
3323 {
3324 if( b_changed )
3325 {
3326 x264_mb_mc( h );
3327 b_skip = x264_macroblock_probe_bskip( h );
3328 }
3329 h->stat.frame.i_direct_score[ h->sh.b_direct_spatial_mv_pred ] += b_skip;
3330 }
3331 else
3332 b_skip = 0;
3333 }
3334 }
3335 else
3336 analysis.b_direct_available = x264_mb_predict_mv_direct16x16( h, NULL );
3337
3338 analysis.b_try_skip = 0;
3339 if( analysis.b_direct_available )
3340 {
3341 if( !h->mb.b_direct_auto_write )
3342 x264_mb_mc( h );
3343 /* If the current macroblock is off the frame, just skip it. */
3344 if( HAVE_INTERLACED && !MB_INTERLACED && h->mb.i_mb_y * 16 >= h->param.i_height )
3345 b_skip = 1;
3346 else if( analysis.i_mbrd )
3347 {
3348 i_bskip_cost = ssd_mb( h );
3349 /* 6 = minimum cavlc cost of a non-skipped MB */
3350 b_skip = h->mb.b_skip_mc = i_bskip_cost <= ((6 * analysis.i_lambda2 + 128) >> 8);
3351 }
3352 else if( !h->mb.b_direct_auto_write )
3353 {
3354 /* Conditioning the probe on neighboring block types
3355 * doesn't seem to help speed or quality. */
3356 analysis.b_try_skip = x264_macroblock_probe_bskip( h );
3357 if( h->param.analyse.i_subpel_refine < 3 )
3358 b_skip = analysis.b_try_skip;
3359 }
3360 /* Set up MVs for future predictors */
3361 if( b_skip )
3362 {
3363 for( int i = 0; i < h->mb.pic.i_fref[0]; i++ )
3364 M32( h->mb.mvr[0][i][h->mb.i_mb_xy] ) = 0;
3365 for( int i = 0; i < h->mb.pic.i_fref[1]; i++ )
3366 M32( h->mb.mvr[1][i][h->mb.i_mb_xy] ) = 0;
3367 }
3368 }
3369
3370 if( !b_skip )
3371 {
3372 const unsigned int flags = h->param.analyse.inter;
3373 int i_type;
3374 int i_partition;
3375 int i_satd_inter;
3376 h->mb.b_skip_mc = 0;
3377 h->mb.i_type = B_DIRECT;
3378
3379 mb_analyse_load_costs( h, &analysis );
3380
3381 /* select best inter mode */
3382 /* direct must be first */
3383 if( analysis.b_direct_available )
3384 mb_analyse_inter_direct( h, &analysis );
3385
3386 mb_analyse_inter_b16x16( h, &analysis );
3387
3388 if( h->mb.i_type == B_SKIP )
3389 {
3390 for( int i = 1; i < h->mb.pic.i_fref[0]; i++ )
3391 M32( h->mb.mvr[0][i][h->mb.i_mb_xy] ) = 0;
3392 for( int i = 1; i < h->mb.pic.i_fref[1]; i++ )
3393 M32( h->mb.mvr[1][i][h->mb.i_mb_xy] ) = 0;
3394 return;
3395 }
3396
3397 i_type = B_L0_L0;
3398 i_partition = D_16x16;
3399 i_cost = analysis.l0.me16x16.cost;
3400 COPY2_IF_LT( i_cost, analysis.l1.me16x16.cost, i_type, B_L1_L1 );
3401 COPY2_IF_LT( i_cost, analysis.i_cost16x16bi, i_type, B_BI_BI );
3402 COPY2_IF_LT( i_cost, analysis.i_cost16x16direct, i_type, B_DIRECT );
3403
3404 if( analysis.i_mbrd && analysis.b_early_terminate && analysis.i_cost16x16direct <= i_cost * 33/32 )
3405 {
3406 mb_analyse_b_rd( h, &analysis, i_cost );
3407 if( i_bskip_cost < analysis.i_rd16x16direct &&
3408 i_bskip_cost < analysis.i_rd16x16bi &&
3409 i_bskip_cost < analysis.l0.i_rd16x16 &&
3410 i_bskip_cost < analysis.l1.i_rd16x16 )
3411 {
3412 h->mb.i_type = B_SKIP;
3413 analyse_update_cache( h, &analysis );
3414 return;
3415 }
3416 }
3417
3418 if( flags & X264_ANALYSE_BSUB16x16 )
3419 {
3420 if( h->param.analyse.b_mixed_references )
3421 mb_analyse_inter_b8x8_mixed_ref( h, &analysis );
3422 else
3423 mb_analyse_inter_b8x8( h, &analysis );
3424
3425 COPY3_IF_LT( i_cost, analysis.i_cost8x8bi, i_type, B_8x8, i_partition, D_8x8 );
3426
3427 /* Try to estimate the cost of b16x8/b8x16 based on the satd scores of the b8x8 modes */
3428 int i_cost_est16x8bi_total = 0, i_cost_est8x16bi_total = 0;
3429 int i_mb_type, i_partition16x8[2], i_partition8x16[2];
3430 for( int i = 0; i < 2; i++ )
3431 {
3432 int avg_l0_mv_ref_cost, avg_l1_mv_ref_cost;
3433 int i_l0_satd, i_l1_satd, i_bi_satd, i_best_cost;
3434 // 16x8
3435 i_best_cost = COST_MAX;
3436 i_l0_satd = analysis.i_satd8x8[0][i*2] + analysis.i_satd8x8[0][i*2+1];
3437 i_l1_satd = analysis.i_satd8x8[1][i*2] + analysis.i_satd8x8[1][i*2+1];
3438 i_bi_satd = analysis.i_satd8x8[2][i*2] + analysis.i_satd8x8[2][i*2+1];
3439 avg_l0_mv_ref_cost = ( analysis.l0.me8x8[i*2].cost_mv + analysis.l0.me8x8[i*2].i_ref_cost
3440 + analysis.l0.me8x8[i*2+1].cost_mv + analysis.l0.me8x8[i*2+1].i_ref_cost + 1 ) >> 1;
3441 avg_l1_mv_ref_cost = ( analysis.l1.me8x8[i*2].cost_mv + analysis.l1.me8x8[i*2].i_ref_cost
3442 + analysis.l1.me8x8[i*2+1].cost_mv + analysis.l1.me8x8[i*2+1].i_ref_cost + 1 ) >> 1;
3443 COPY2_IF_LT( i_best_cost, i_l0_satd + avg_l0_mv_ref_cost, i_partition16x8[i], D_L0_8x8 );
3444 COPY2_IF_LT( i_best_cost, i_l1_satd + avg_l1_mv_ref_cost, i_partition16x8[i], D_L1_8x8 );
3445 COPY2_IF_LT( i_best_cost, i_bi_satd + avg_l0_mv_ref_cost + avg_l1_mv_ref_cost, i_partition16x8[i], D_BI_8x8 );
3446 analysis.i_cost_est16x8[i] = i_best_cost;
3447
3448 // 8x16
3449 i_best_cost = COST_MAX;
3450 i_l0_satd = analysis.i_satd8x8[0][i] + analysis.i_satd8x8[0][i+2];
3451 i_l1_satd = analysis.i_satd8x8[1][i] + analysis.i_satd8x8[1][i+2];
3452 i_bi_satd = analysis.i_satd8x8[2][i] + analysis.i_satd8x8[2][i+2];
3453 avg_l0_mv_ref_cost = ( analysis.l0.me8x8[i].cost_mv + analysis.l0.me8x8[i].i_ref_cost
3454 + analysis.l0.me8x8[i+2].cost_mv + analysis.l0.me8x8[i+2].i_ref_cost + 1 ) >> 1;
3455 avg_l1_mv_ref_cost = ( analysis.l1.me8x8[i].cost_mv + analysis.l1.me8x8[i].i_ref_cost
3456 + analysis.l1.me8x8[i+2].cost_mv + analysis.l1.me8x8[i+2].i_ref_cost + 1 ) >> 1;
3457 COPY2_IF_LT( i_best_cost, i_l0_satd + avg_l0_mv_ref_cost, i_partition8x16[i], D_L0_8x8 );
3458 COPY2_IF_LT( i_best_cost, i_l1_satd + avg_l1_mv_ref_cost, i_partition8x16[i], D_L1_8x8 );
3459 COPY2_IF_LT( i_best_cost, i_bi_satd + avg_l0_mv_ref_cost + avg_l1_mv_ref_cost, i_partition8x16[i], D_BI_8x8 );
3460 analysis.i_cost_est8x16[i] = i_best_cost;
3461 }
3462 i_mb_type = B_L0_L0 + (i_partition16x8[0]>>2) * 3 + (i_partition16x8[1]>>2);
3463 analysis.i_cost_est16x8[1] += analysis.i_lambda * i_mb_b16x8_cost_table[i_mb_type];
3464 i_cost_est16x8bi_total = analysis.i_cost_est16x8[0] + analysis.i_cost_est16x8[1];
3465 i_mb_type = B_L0_L0 + (i_partition8x16[0]>>2) * 3 + (i_partition8x16[1]>>2);
3466 analysis.i_cost_est8x16[1] += analysis.i_lambda * i_mb_b16x8_cost_table[i_mb_type];
3467 i_cost_est8x16bi_total = analysis.i_cost_est8x16[0] + analysis.i_cost_est8x16[1];
3468
3469 /* We can gain a little speed by checking the mode with the lowest estimated cost first */
3470 int try_16x8_first = i_cost_est16x8bi_total < i_cost_est8x16bi_total;
3471 if( try_16x8_first && (!analysis.b_early_terminate || i_cost_est16x8bi_total < i_cost) )
3472 {
3473 mb_analyse_inter_b16x8( h, &analysis, i_cost );
3474 COPY3_IF_LT( i_cost, analysis.i_cost16x8bi, i_type, analysis.i_mb_type16x8, i_partition, D_16x8 );
3475 }
3476 if( !analysis.b_early_terminate || i_cost_est8x16bi_total < i_cost )
3477 {
3478 mb_analyse_inter_b8x16( h, &analysis, i_cost );
3479 COPY3_IF_LT( i_cost, analysis.i_cost8x16bi, i_type, analysis.i_mb_type8x16, i_partition, D_8x16 );
3480 }
3481 if( !try_16x8_first && (!analysis.b_early_terminate || i_cost_est16x8bi_total < i_cost) )
3482 {
3483 mb_analyse_inter_b16x8( h, &analysis, i_cost );
3484 COPY3_IF_LT( i_cost, analysis.i_cost16x8bi, i_type, analysis.i_mb_type16x8, i_partition, D_16x8 );
3485 }
3486 }
3487
3488 if( analysis.i_mbrd || !h->mb.i_subpel_refine )
3489 {
3490 /* refine later */
3491 }
3492 /* refine qpel */
3493 else if( i_partition == D_16x16 )
3494 {
3495 analysis.l0.me16x16.cost -= analysis.i_lambda * i_mb_b_cost_table[B_L0_L0];
3496 analysis.l1.me16x16.cost -= analysis.i_lambda * i_mb_b_cost_table[B_L1_L1];
3497 if( i_type == B_L0_L0 )
3498 {
3499 x264_me_refine_qpel( h, &analysis.l0.me16x16 );
3500 i_cost = analysis.l0.me16x16.cost
3501 + analysis.i_lambda * i_mb_b_cost_table[B_L0_L0];
3502 }
3503 else if( i_type == B_L1_L1 )
3504 {
3505 x264_me_refine_qpel( h, &analysis.l1.me16x16 );
3506 i_cost = analysis.l1.me16x16.cost
3507 + analysis.i_lambda * i_mb_b_cost_table[B_L1_L1];
3508 }
3509 else if( i_type == B_BI_BI )
3510 {
3511 x264_me_refine_qpel( h, &analysis.l0.bi16x16 );
3512 x264_me_refine_qpel( h, &analysis.l1.bi16x16 );
3513 }
3514 }
3515 else if( i_partition == D_16x8 )
3516 {
3517 for( int i = 0; i < 2; i++ )
3518 {
3519 if( analysis.i_mb_partition16x8[i] != D_L1_8x8 )
3520 x264_me_refine_qpel( h, &analysis.l0.me16x8[i] );
3521 if( analysis.i_mb_partition16x8[i] != D_L0_8x8 )
3522 x264_me_refine_qpel( h, &analysis.l1.me16x8[i] );
3523 }
3524 }
3525 else if( i_partition == D_8x16 )
3526 {
3527 for( int i = 0; i < 2; i++ )
3528 {
3529 if( analysis.i_mb_partition8x16[i] != D_L1_8x8 )
3530 x264_me_refine_qpel( h, &analysis.l0.me8x16[i] );
3531 if( analysis.i_mb_partition8x16[i] != D_L0_8x8 )
3532 x264_me_refine_qpel( h, &analysis.l1.me8x16[i] );
3533 }
3534 }
3535 else if( i_partition == D_8x8 )
3536 {
3537 for( int i = 0; i < 4; i++ )
3538 {
3539 x264_me_t *m;
3540 int i_part_cost_old;
3541 int i_type_cost;
3542 int i_part_type = h->mb.i_sub_partition[i];
3543 int b_bidir = (i_part_type == D_BI_8x8);
3544
3545 if( i_part_type == D_DIRECT_8x8 )
3546 continue;
3547 if( x264_mb_partition_listX_table[0][i_part_type] )
3548 {
3549 m = &analysis.l0.me8x8[i];
3550 i_part_cost_old = m->cost;
3551 i_type_cost = analysis.i_lambda * i_sub_mb_b_cost_table[D_L0_8x8];
3552 m->cost -= i_type_cost;
3553 x264_me_refine_qpel( h, m );
3554 if( !b_bidir )
3555 analysis.i_cost8x8bi += m->cost + i_type_cost - i_part_cost_old;
3556 }
3557 if( x264_mb_partition_listX_table[1][i_part_type] )
3558 {
3559 m = &analysis.l1.me8x8[i];
3560 i_part_cost_old = m->cost;
3561 i_type_cost = analysis.i_lambda * i_sub_mb_b_cost_table[D_L1_8x8];
3562 m->cost -= i_type_cost;
3563 x264_me_refine_qpel( h, m );
3564 if( !b_bidir )
3565 analysis.i_cost8x8bi += m->cost + i_type_cost - i_part_cost_old;
3566 }
3567 /* TODO: update mvp? */
3568 }
3569 }
3570
3571 i_satd_inter = i_cost;
3572
3573 if( analysis.i_mbrd )
3574 {
3575 mb_analyse_b_rd( h, &analysis, i_satd_inter );
3576 i_type = B_SKIP;
3577 i_cost = i_bskip_cost;
3578 i_partition = D_16x16;
3579 COPY2_IF_LT( i_cost, analysis.l0.i_rd16x16, i_type, B_L0_L0 );
3580 COPY2_IF_LT( i_cost, analysis.l1.i_rd16x16, i_type, B_L1_L1 );
3581 COPY2_IF_LT( i_cost, analysis.i_rd16x16bi, i_type, B_BI_BI );
3582 COPY2_IF_LT( i_cost, analysis.i_rd16x16direct, i_type, B_DIRECT );
3583 COPY3_IF_LT( i_cost, analysis.i_rd16x8bi, i_type, analysis.i_mb_type16x8, i_partition, D_16x8 );
3584 COPY3_IF_LT( i_cost, analysis.i_rd8x16bi, i_type, analysis.i_mb_type8x16, i_partition, D_8x16 );
3585 COPY3_IF_LT( i_cost, analysis.i_rd8x8bi, i_type, B_8x8, i_partition, D_8x8 );
3586
3587 h->mb.i_type = i_type;
3588 h->mb.i_partition = i_partition;
3589 }
3590
3591 if( h->mb.b_chroma_me )
3592 {
3593 if( CHROMA444 )
3594 {
3595 mb_analyse_intra( h, &analysis, i_satd_inter );
3596 mb_analyse_intra_chroma( h, &analysis );
3597 }
3598 else
3599 {
3600 mb_analyse_intra_chroma( h, &analysis );
3601 mb_analyse_intra( h, &analysis, i_satd_inter - analysis.i_satd_chroma );
3602 }
3603 analysis.i_satd_i16x16 += analysis.i_satd_chroma;
3604 analysis.i_satd_i8x8 += analysis.i_satd_chroma;
3605 analysis.i_satd_i4x4 += analysis.i_satd_chroma;
3606 }
3607 else
3608 mb_analyse_intra( h, &analysis, i_satd_inter );
3609
3610 if( analysis.i_mbrd )
3611 {
3612 mb_analyse_transform_rd( h, &analysis, &i_satd_inter, &i_cost );
3613 intra_rd( h, &analysis, i_satd_inter * 17/16 + 1 );
3614 }
3615
3616 COPY2_IF_LT( i_cost, analysis.i_satd_i16x16, i_type, I_16x16 );
3617 COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, i_type, I_8x8 );
3618 COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, i_type, I_4x4 );
3619 COPY2_IF_LT( i_cost, analysis.i_satd_pcm, i_type, I_PCM );
3620
3621 h->mb.i_type = i_type;
3622 h->mb.i_partition = i_partition;
3623
3624 if( analysis.i_mbrd >= 2 && IS_INTRA( i_type ) && i_type != I_PCM )
3625 intra_rd_refine( h, &analysis );
3626 if( h->mb.i_subpel_refine >= 5 )
3627 refine_bidir( h, &analysis );
3628
3629 if( analysis.i_mbrd >= 2 && i_type > B_DIRECT && i_type < B_SKIP )
3630 {
3631 int i_biweight;
3632 analyse_update_cache( h, &analysis );
3633
3634 if( i_partition == D_16x16 )
3635 {
3636 if( i_type == B_L0_L0 )
3637 {
3638 analysis.l0.me16x16.cost = i_cost;
3639 x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
3640 }
3641 else if( i_type == B_L1_L1 )
3642 {
3643 analysis.l1.me16x16.cost = i_cost;
3644 x264_me_refine_qpel_rd( h, &analysis.l1.me16x16, analysis.i_lambda2, 0, 1 );
3645 }
3646 else if( i_type == B_BI_BI )
3647 {
3648 i_biweight = h->mb.bipred_weight[analysis.l0.bi16x16.i_ref][analysis.l1.bi16x16.i_ref];
3649 x264_me_refine_bidir_rd( h, &analysis.l0.bi16x16, &analysis.l1.bi16x16, i_biweight, 0, analysis.i_lambda2 );
3650 }
3651 }
3652 else if( i_partition == D_16x8 )
3653 {
3654 for( int i = 0; i < 2; i++ )
3655 {
3656 h->mb.i_sub_partition[i*2] = h->mb.i_sub_partition[i*2+1] = analysis.i_mb_partition16x8[i];
3657 if( analysis.i_mb_partition16x8[i] == D_L0_8x8 )
3658 x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[i], analysis.i_lambda2, i*8, 0 );
3659 else if( analysis.i_mb_partition16x8[i] == D_L1_8x8 )
3660 x264_me_refine_qpel_rd( h, &analysis.l1.me16x8[i], analysis.i_lambda2, i*8, 1 );
3661 else if( analysis.i_mb_partition16x8[i] == D_BI_8x8 )
3662 {
3663 i_biweight = h->mb.bipred_weight[analysis.l0.me16x8[i].i_ref][analysis.l1.me16x8[i].i_ref];
3664 x264_me_refine_bidir_rd( h, &analysis.l0.me16x8[i], &analysis.l1.me16x8[i], i_biweight, i*2, analysis.i_lambda2 );
3665 }
3666 }
3667 }
3668 else if( i_partition == D_8x16 )
3669 {
3670 for( int i = 0; i < 2; i++ )
3671 {
3672 h->mb.i_sub_partition[i] = h->mb.i_sub_partition[i+2] = analysis.i_mb_partition8x16[i];
3673 if( analysis.i_mb_partition8x16[i] == D_L0_8x8 )
3674 x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[i], analysis.i_lambda2, i*4, 0 );
3675 else if( analysis.i_mb_partition8x16[i] == D_L1_8x8 )
3676 x264_me_refine_qpel_rd( h, &analysis.l1.me8x16[i], analysis.i_lambda2, i*4, 1 );
3677 else if( analysis.i_mb_partition8x16[i] == D_BI_8x8 )
3678 {
3679 i_biweight = h->mb.bipred_weight[analysis.l0.me8x16[i].i_ref][analysis.l1.me8x16[i].i_ref];
3680 x264_me_refine_bidir_rd( h, &analysis.l0.me8x16[i], &analysis.l1.me8x16[i], i_biweight, i, analysis.i_lambda2 );
3681 }
3682 }
3683 }
3684 else if( i_partition == D_8x8 )
3685 {
3686 for( int i = 0; i < 4; i++ )
3687 {
3688 if( h->mb.i_sub_partition[i] == D_L0_8x8 )
3689 x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i], analysis.i_lambda2, i*4, 0 );
3690 else if( h->mb.i_sub_partition[i] == D_L1_8x8 )
3691 x264_me_refine_qpel_rd( h, &analysis.l1.me8x8[i], analysis.i_lambda2, i*4, 1 );
3692 else if( h->mb.i_sub_partition[i] == D_BI_8x8 )
3693 {
3694 i_biweight = h->mb.bipred_weight[analysis.l0.me8x8[i].i_ref][analysis.l1.me8x8[i].i_ref];
3695 x264_me_refine_bidir_rd( h, &analysis.l0.me8x8[i], &analysis.l1.me8x8[i], i_biweight, i, analysis.i_lambda2 );
3696 }
3697 }
3698 }
3699 }
3700 }
3701 }
3702
3703 analyse_update_cache( h, &analysis );
3704
3705 /* In rare cases we can end up qpel-RDing our way back to a larger partition size
3706 * without realizing it. Check for this and account for it if necessary. */
3707 if( analysis.i_mbrd >= 2 )
3708 {
3709 /* Don't bother with bipred or 8x8-and-below, the odds are incredibly low. */
3710 static const uint8_t check_mv_lists[X264_MBTYPE_MAX] = {[P_L0]=1, [B_L0_L0]=1, [B_L1_L1]=2};
3711 int list = check_mv_lists[h->mb.i_type] - 1;
3712 if( list >= 0 && h->mb.i_partition != D_16x16 &&
3713 M32( &h->mb.cache.mv[list][x264_scan8[0]] ) == M32( &h->mb.cache.mv[list][x264_scan8[12]] ) &&
3714 h->mb.cache.ref[list][x264_scan8[0]] == h->mb.cache.ref[list][x264_scan8[12]] )
3715 h->mb.i_partition = D_16x16;
3716 }
3717
3718 if( !analysis.i_mbrd )
3719 mb_analyse_transform( h );
3720
3721 if( analysis.i_mbrd == 3 && !IS_SKIP(h->mb.i_type) )
3722 mb_analyse_qp_rd( h, &analysis );
3723
3724 h->mb.b_trellis = h->param.analyse.i_trellis;
3725 h->mb.b_noise_reduction = h->mb.b_noise_reduction || (!!h->param.analyse.i_noise_reduction && !IS_INTRA( h->mb.i_type ));
3726
3727 if( !IS_SKIP(h->mb.i_type) && h->mb.i_psy_trellis && h->param.analyse.i_trellis == 1 )
3728 psy_trellis_init( h, 0 );
3729 if( h->mb.b_trellis == 1 || h->mb.b_noise_reduction )
3730 h->mb.i_skip_intra = 0;
3731 }
3732
3733 /*-------------------- Update MB from the analysis ----------------------*/
/*-------------------- Update MB from the analysis ----------------------*/
/* Commit the decisions made during macroblock analysis into the MB cache:
 * intra prediction modes, reference indices, motion vectors and (where
 * needed) MVDs, keyed on the final h->mb.i_type / h->mb.i_partition.
 * The encode/reconstruct stage reads this cached state afterwards. */
static void analyse_update_cache( x264_t *h, x264_mb_analysis_t *a )
{
    switch( h->mb.i_type )
    {
        case I_4x4:
            /* One 4x4 luma prediction mode per sub-block, stored at scan8
             * positions so neighbor lookups work during prediction. */
            for( int i = 0; i < 16; i++ )
                h->mb.cache.intra4x4_pred_mode[x264_scan8[i]] = a->i_predict4x4[i];

            mb_analyse_intra_chroma( h, a );
            break;
        case I_8x8:
            /* Four 8x8 luma modes; (2*(i&1), 2*(i>>1)) maps partition index
             * to the top-left 4x4 coordinate of each 8x8 block. */
            for( int i = 0; i < 4; i++ )
                x264_macroblock_cache_intra8x8_pred( h, 2*(i&1), 2*(i>>1), a->i_predict8x8[i] );

            mb_analyse_intra_chroma( h, a );
            break;
        case I_16x16:
            h->mb.i_intra16x16_pred_mode = a->i_predict16x16;
            mb_analyse_intra_chroma( h, a );
            break;

        case I_PCM:
            /* Raw samples are transmitted verbatim; nothing to cache. */
            break;

        case P_L0:
            switch( h->mb.i_partition )
            {
                case D_16x16:
                    /* Single ref/MV covers the whole 4x4 (in 4x4-block units) MB. */
                    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );
                    x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );
                    break;

                case D_16x8:
                    /* Top and bottom 16x8 halves. */
                    x264_macroblock_cache_ref( h, 0, 0, 4, 2, 0, a->l0.me16x8[0].i_ref );
                    x264_macroblock_cache_ref( h, 0, 2, 4, 2, 0, a->l0.me16x8[1].i_ref );
                    x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 2, 0, a->l0.me16x8[0].mv );
                    x264_macroblock_cache_mv_ptr( h, 0, 2, 4, 2, 0, a->l0.me16x8[1].mv );
                    break;

                case D_8x16:
                    /* Left and right 8x16 halves. */
                    x264_macroblock_cache_ref( h, 0, 0, 2, 4, 0, a->l0.me8x16[0].i_ref );
                    x264_macroblock_cache_ref( h, 2, 0, 2, 4, 0, a->l0.me8x16[1].i_ref );
                    x264_macroblock_cache_mv_ptr( h, 0, 0, 2, 4, 0, a->l0.me8x16[0].mv );
                    x264_macroblock_cache_mv_ptr( h, 2, 0, 2, 4, 0, a->l0.me8x16[1].mv );
                    break;

                default:
                    /* P_L0 only pairs with the three partitions above. */
                    x264_log( h, X264_LOG_ERROR, "internal error P_L0 and partition=%d\n", h->mb.i_partition );
                    break;
            }
            break;

        case P_8x8:
            /* Refs are cached per 8x8 block here; MVs (which may be at
             * sub-8x8 granularity) are handled by mb_cache_mv_p8x8. */
            x264_macroblock_cache_ref( h, 0, 0, 2, 2, 0, a->l0.me8x8[0].i_ref );
            x264_macroblock_cache_ref( h, 2, 0, 2, 2, 0, a->l0.me8x8[1].i_ref );
            x264_macroblock_cache_ref( h, 0, 2, 2, 2, 0, a->l0.me8x8[2].i_ref );
            x264_macroblock_cache_ref( h, 2, 2, 2, 2, 0, a->l0.me8x8[3].i_ref );
            for( int i = 0; i < 4; i++ )
                mb_cache_mv_p8x8( h, a, i );
            break;

        case P_SKIP:
        {
            /* P-skip is always 16x16 with ref 0 and the predicted skip MV. */
            h->mb.i_partition = D_16x16;
            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, 0 );
            x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, h->mb.cache.pskip_mv );
            break;
        }

        case B_SKIP:
        case B_DIRECT:
            /* Both use the temporally/spatially derived direct MVs; restore
             * the partition chosen when the direct MVs were computed and
             * reload each 8x8 block's l0/l1 data from the direct buffers. */
            h->mb.i_partition = h->mb.cache.direct_partition;
            mb_load_mv_direct8x8( h, 0 );
            mb_load_mv_direct8x8( h, 1 );
            mb_load_mv_direct8x8( h, 2 );
            mb_load_mv_direct8x8( h, 3 );
            break;

        case B_8x8:
            /* optimize: cache might not need to be rewritten */
            for( int i = 0; i < 4; i++ )
                mb_cache_mv_b8x8( h, a, i, 1 );
            break;

        default: /* the rest of the B types */
            switch( h->mb.i_partition )
            {
                case D_16x16:
                    switch( h->mb.i_type )
                    {
                        case B_L0_L0:
                            /* L0 predicted only: cache l0, and mark l1 unused
                             * (ref -1, zero mv/mvd) so neighbor prediction
                             * sees a consistent empty list. */
                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );
                            x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );

                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, -1 );
                            x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 1, 0 );
                            x264_macroblock_cache_mvd( h, 0, 0, 4, 4, 1, 0 );
                            break;
                        case B_L1_L1:
                            /* Mirror of B_L0_L0: l0 cleared, l1 cached. */
                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, -1 );
                            x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 0, 0 );
                            x264_macroblock_cache_mvd( h, 0, 0, 4, 4, 0, 0 );

                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.me16x16.i_ref );
                            x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 1, a->l1.me16x16.mv );
                            break;
                        case B_BI_BI:
                            /* Bipred: bi16x16 MVs may differ from the
                             * unidirectional me16x16 results. */
                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.bi16x16.i_ref );
                            x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.bi16x16.mv );

                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.bi16x16.i_ref );
                            x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 1, a->l1.bi16x16.mv );
                            break;
                    }
                    break;
                case D_16x8:
                    mb_cache_mv_b16x8( h, a, 0, 1 );
                    mb_cache_mv_b16x8( h, a, 1, 1 );
                    break;
                case D_8x16:
                    mb_cache_mv_b8x16( h, a, 0, 1 );
                    mb_cache_mv_b8x16( h, a, 1, 1 );
                    break;
                default:
                    x264_log( h, X264_LOG_ERROR, "internal error (invalid MB type)\n" );
                    break;
            }
    }

#ifndef NDEBUG
    /* Debug-only sanity check for sliced-threads/frame-threading: verify that
     * no cached MV points into a reference region the other thread has not
     * finished reconstructing yet. On violation, log diagnostics and fall
     * back to I_16x16 intra so the output stays decodable. */
    if( h->i_thread_frames > 1 && !IS_INTRA(h->mb.i_type) )
    {
        for( int l = 0; l <= (h->sh.i_type == SLICE_TYPE_B); l++ )
        {
            int completed;
            int ref = h->mb.cache.ref[l][x264_scan8[0]];
            if( ref < 0 )
                continue;
            /* How many rows of this reference frame are reconstructed so far. */
            completed = x264_frame_cond_wait( h->fref[l][ ref >> MB_INTERLACED ]->orig, -1 );
            /* scan8[15] is the bottom-right 4x4 block; its vertical MV (in
             * qpel, hence the >> (2 - MB_INTERLACED)) plus the MB's pixel row
             * must not reach past the completed region. */
            if( (h->mb.cache.mv[l][x264_scan8[15]][1] >> (2 - MB_INTERLACED)) + h->mb.i_mb_y*16 > completed )
            {
                x264_log( h, X264_LOG_WARNING, "internal error (MV out of thread range)\n");
                x264_log( h, X264_LOG_DEBUG, "mb type: %d \n", h->mb.i_type);
                x264_log( h, X264_LOG_DEBUG, "mv: l%dr%d (%d,%d) \n", l, ref,
                                h->mb.cache.mv[l][x264_scan8[15]][0],
                                h->mb.cache.mv[l][x264_scan8[15]][1] );
                x264_log( h, X264_LOG_DEBUG, "limit: %d \n", h->mb.mv_max_spel[1]);
                x264_log( h, X264_LOG_DEBUG, "mb_xy: %d,%d \n", h->mb.i_mb_x, h->mb.i_mb_y);
                x264_log( h, X264_LOG_DEBUG, "completed: %d \n", completed );
                x264_log( h, X264_LOG_WARNING, "recovering by using intra mode\n");
                mb_analyse_intra( h, a, COST_MAX );
                h->mb.i_type = I_16x16;
                h->mb.i_intra16x16_pred_mode = a->i_predict16x16;
                mb_analyse_intra_chroma( h, a );
            }
        }
    }
#endif
}
3893
3894 #include "slicetype.c"
3895
3896