/*****************************************************************************
 * me.c: motion estimation
 *****************************************************************************
 * Copyright (C) 2003-2014 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"
#include "me.h"

/* presets selected from good points on the speed-vs-quality curve of several test videos
 * subpel_iters[i_subpel_refine] = { refine_hpel, refine_qpel, me_hpel, me_qpel }
 * where me_* are the number of EPZS iterations run on all candidate block types,
 * and refine_* are run only on the winner.
 * the subme=8,9 values are much higher because any amount of satd search makes
 * up its time by reducing the number of qpel-rd iterations. */
static const uint8_t subpel_iterations[][4] =
   {{0,0,0,0},
    {1,1,0,0},
    {0,1,1,0},
    {0,2,1,0},
    {0,2,1,1},
    {0,2,1,2},
    {0,0,2,2},
    {0,0,2,2},
    {0,0,4,10},
    {0,0,4,10},
    {0,0,4,10},
    {0,0,4,10}};

/* (x-1)%6 */
static const uint8_t mod6m1[8] = {5,0,1,2,3,4,5,0};
/* radius 2 hexagon. repeated entries are to avoid having to compute mod6 every time. */
static const int8_t hex2[8][2] = {{-1,-2}, {-2,0}, {-1,2}, {1,2}, {2,0}, {1,-2}, {-1,-2}, {-2,0}};
static const int8_t square1[9][2] = {{0,0}, {0,-1}, {0,1}, {-1,0}, {1,0}, {-1,-1}, {-1,1}, {1,-1}, {1,1}};

static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_iters, int *p_halfpel_thresh, int b_refine_qpel );

#define BITS_MVD( mx, my )\
    (p_cost_mvx[(mx)<<2] + p_cost_mvy[(my)<<2])
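/* p_cost_mvx/p_cost_mvy are biased by -mvp (see x264_me_search_ref), so indexing
 * them with an absolute qpel position yields the bit cost of the resulting MVD. */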
62
63 #define COST_MV( mx, my )\
64 do\
65 {\
66 int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE,\
67 &p_fref_w[(my)*stride+(mx)], stride )\
68 + BITS_MVD(mx,my);\
69 COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my );\
70 } while(0)
71
72 #define COST_MV_HPEL( mx, my, cost )\
73 do\
74 {\
75 intptr_t stride2 = 16;\
76 pixel *src = h->mc.get_ref( pix, &stride2, m->p_fref, stride, mx, my, bw, bh, &m->weight[0] );\
77 cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, src, stride2 )\
78 + p_cost_mvx[ mx ] + p_cost_mvy[ my ];\
79 } while(0)
80
81 #define COST_MV_X3_DIR( m0x, m0y, m1x, m1y, m2x, m2y, costs )\
82 {\
83 pixel *pix_base = p_fref_w + bmx + bmy*stride;\
84 h->pixf.fpelcmp_x3[i_pixel]( p_fenc,\
85 pix_base + (m0x) + (m0y)*stride,\
86 pix_base + (m1x) + (m1y)*stride,\
87 pix_base + (m2x) + (m2y)*stride,\
88 stride, costs );\
89 (costs)[0] += BITS_MVD( bmx+(m0x), bmy+(m0y) );\
90 (costs)[1] += BITS_MVD( bmx+(m1x), bmy+(m1y) );\
91 (costs)[2] += BITS_MVD( bmx+(m2x), bmy+(m2y) );\
92 }
93
94 #define COST_MV_X4_DIR( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y, costs )\
95 {\
96 pixel *pix_base = p_fref_w + bmx + bmy*stride;\
97 h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
98 pix_base + (m0x) + (m0y)*stride,\
99 pix_base + (m1x) + (m1y)*stride,\
100 pix_base + (m2x) + (m2y)*stride,\
101 pix_base + (m3x) + (m3y)*stride,\
102 stride, costs );\
103 (costs)[0] += BITS_MVD( bmx+(m0x), bmy+(m0y) );\
104 (costs)[1] += BITS_MVD( bmx+(m1x), bmy+(m1y) );\
105 (costs)[2] += BITS_MVD( bmx+(m2x), bmy+(m2y) );\
106 (costs)[3] += BITS_MVD( bmx+(m3x), bmy+(m3y) );\
107 }
108
109 #define COST_MV_X4( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y )\
110 {\
111 pixel *pix_base = p_fref_w + omx + omy*stride;\
112 h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
113 pix_base + (m0x) + (m0y)*stride,\
114 pix_base + (m1x) + (m1y)*stride,\
115 pix_base + (m2x) + (m2y)*stride,\
116 pix_base + (m3x) + (m3y)*stride,\
117 stride, costs );\
118 costs[0] += BITS_MVD( omx+(m0x), omy+(m0y) );\
119 costs[1] += BITS_MVD( omx+(m1x), omy+(m1y) );\
120 costs[2] += BITS_MVD( omx+(m2x), omy+(m2y) );\
121 costs[3] += BITS_MVD( omx+(m3x), omy+(m3y) );\
122 COPY3_IF_LT( bcost, costs[0], bmx, omx+(m0x), bmy, omy+(m0y) );\
123 COPY3_IF_LT( bcost, costs[1], bmx, omx+(m1x), bmy, omy+(m1y) );\
124 COPY3_IF_LT( bcost, costs[2], bmx, omx+(m2x), bmy, omy+(m2y) );\
125 COPY3_IF_LT( bcost, costs[3], bmx, omx+(m3x), bmy, omy+(m3y) );\
126 }
127
128 #define COST_MV_X3_ABS( m0x, m0y, m1x, m1y, m2x, m2y )\
129 {\
130 h->pixf.fpelcmp_x3[i_pixel]( p_fenc,\
131 p_fref_w + (m0x) + (m0y)*stride,\
132 p_fref_w + (m1x) + (m1y)*stride,\
133 p_fref_w + (m2x) + (m2y)*stride,\
134 stride, costs );\
135 costs[0] += p_cost_mvx[(m0x)<<2]; /* no cost_mvy */\
136 costs[1] += p_cost_mvx[(m1x)<<2];\
137 costs[2] += p_cost_mvx[(m2x)<<2];\
138 COPY3_IF_LT( bcost, costs[0], bmx, m0x, bmy, m0y );\
139 COPY3_IF_LT( bcost, costs[1], bmx, m1x, bmy, m1y );\
140 COPY3_IF_LT( bcost, costs[2], bmx, m2x, bmy, m2y );\
141 }
142
143 /* 1 */
144 /* 101 */
145 /* 1 */
146 #define DIA1_ITER( mx, my )\
147 {\
148 omx = mx; omy = my;\
149 COST_MV_X4( 0,-1, 0,1, -1,0, 1,0 );\
150 }
151
152 #define CROSS( start, x_max, y_max )\
153 {\
154 int i = start;\
155 if( (x_max) <= X264_MIN(mv_x_max-omx, omx-mv_x_min) )\
156 for( ; i < (x_max)-2; i+=4 )\
157 COST_MV_X4( i,0, -i,0, i+2,0, -i-2,0 );\
158 for( ; i < (x_max); i+=2 )\
159 {\
160 if( omx+i <= mv_x_max )\
161 COST_MV( omx+i, omy );\
162 if( omx-i >= mv_x_min )\
163 COST_MV( omx-i, omy );\
164 }\
165 i = start;\
166 if( (y_max) <= X264_MIN(mv_y_max-omy, omy-mv_y_min) )\
167 for( ; i < (y_max)-2; i+=4 )\
168 COST_MV_X4( 0,i, 0,-i, 0,i+2, 0,-i-2 );\
169 for( ; i < (y_max); i+=2 )\
170 {\
171 if( omy+i <= mv_y_max )\
172 COST_MV( omx, omy+i );\
173 if( omy-i >= mv_y_min )\
174 COST_MV( omx, omy-i );\
175 }\
176 }
177
178 #define FPEL(mv) (((mv)+2)>>2) /* Convert subpel MV to fullpel with rounding... */
179 #define SPEL(mv) ((mv)<<2) /* ... and the reverse. */
180 #define SPELx2(mv) (SPEL(mv)&0xFFFCFFFC) /* for two packed MVs */
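/* SPELx2 shifts a packed pair of 16-bit MVs at once; the mask clears the bits
 * that the low component would otherwise smear across the 16-bit lane boundary. */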

void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc, int *p_halfpel_thresh )
{
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const int i_pixel = m->i_pixel;
    const int stride = m->i_stride[0];
    int i_me_range = h->param.analyse.i_me_range;
    int bmx, bmy, bcost = COST_MAX;
    int bpred_cost = COST_MAX;
    int omx, omy, pmx, pmy;
    pixel *p_fenc = m->p_fenc[0];
    pixel *p_fref_w = m->p_fref_w;

    ALIGNED_ARRAY_N( pixel, pix,[16*16] );
    ALIGNED_ARRAY_8( int16_t, mvc_temp,[16],[2] );
    ALIGNED_ARRAY_16( int, costs,[16] );

    int mv_x_min = h->mb.mv_limit_fpel[0][0];
    int mv_y_min = h->mb.mv_limit_fpel[0][1];
    int mv_x_max = h->mb.mv_limit_fpel[1][0];
    int mv_y_max = h->mb.mv_limit_fpel[1][1];
/* Special version of pack to allow shortcuts in CHECK_MVRANGE */
#define pack16to32_mask2(mx,my) ((mx<<16)|(my&0x7FFF))
    uint32_t mv_min = pack16to32_mask2( -mv_x_min, -mv_y_min );
    uint32_t mv_max = pack16to32_mask2( mv_x_max, mv_y_max )|0x8000;
    uint32_t pmv, bpred_mv = 0;

#define CHECK_MVRANGE(mx,my) (!(((pack16to32_mask2(mx,my) + mv_min) | (mv_max - pack16to32_mask2(mx,my))) & 0x80004000))
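/* With x in the high half and y in the low 15 bits, an out-of-range component
 * makes (mv + mv_min) or (mv_max - mv) carry into a bit selected by 0x80004000;
 * the 0x8000 sentinel in mv_max keeps y's borrow out of the x half, so one AND
 * range-checks both components at once. */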

    const uint16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
    const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
    uint32_t bmv;
    uint32_t bmv_spel;

    /* Try extra predictors if provided. If subme >= 3, check subpel predictors,
     * otherwise round them to fullpel. */
    if( h->mb.i_subpel_refine >= 3 )
    {
        /* Calculate and check the MVP first */
        int bpred_mx = x264_clip3( m->mvp[0], SPEL(mv_x_min), SPEL(mv_x_max) );
        int bpred_my = x264_clip3( m->mvp[1], SPEL(mv_y_min), SPEL(mv_y_max) );
        int pmv_cost;

        pmv = pack16to32_mask( bpred_mx, bpred_my );
        pmx = FPEL( bpred_mx );
        pmy = FPEL( bpred_my );

        COST_MV_HPEL( bpred_mx, bpred_my, bpred_cost );
        pmv_cost = bpred_cost;

        if( i_mvc > 0 )
        {
            /* Clip MV candidates and eliminate those equal to zero and pmv. */
            int valid_mvcs = x264_predictor_clip( mvc_temp+2, mvc, i_mvc, h->mb.mv_limit_fpel, pmv );
            if( valid_mvcs > 0 )
            {
                int i = 1, cost;
                /* We stuff pmv here to branchlessly pick between pmv and the various
                 * MV candidates. [0] gets skipped in order to maintain alignment for
                 * x264_predictor_clip. */
                M32( mvc_temp[1] ) = pmv;
                bpred_cost <<= 4;
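                /* The low 4 bits of the shifted cost carry the index of the winning
                 * candidate; it's extracted with &15 once the loop finishes. */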
                do
                {
                    int mx = mvc_temp[i+1][0];
                    int my = mvc_temp[i+1][1];
                    COST_MV_HPEL( mx, my, cost );
                    COPY1_IF_LT( bpred_cost, (cost << 4) + i );
                } while( ++i <= valid_mvcs );
                bpred_mx = mvc_temp[(bpred_cost&15)+1][0];
                bpred_my = mvc_temp[(bpred_cost&15)+1][1];
                bpred_cost >>= 4;
            }
        }

        /* Round the best predictor back to fullpel and get the cost, since this is where
         * we'll be starting the fullpel motion search. */
        bmx = FPEL( bpred_mx );
        bmy = FPEL( bpred_my );
        bpred_mv = pack16to32_mask(bpred_mx, bpred_my);
        if( bpred_mv&0x00030003 ) /* Only test if the tested predictor is actually subpel... */
            COST_MV( bmx, bmy );
        else /* Otherwise just copy the cost (we already know it) */
            bcost = bpred_cost;

        /* Test the zero vector if it hasn't been tested yet. */
        if( pmv )
        {
            if( bmx|bmy ) COST_MV( 0, 0 );
        }
        /* If a subpel mv candidate was better than the zero vector, the previous
         * fullpel check won't have gotten it even if the pmv was zero. So handle
         * that possibility here. */
        else
        {
            COPY3_IF_LT( bcost, pmv_cost, bmx, 0, bmy, 0 );
        }
    }
    else
    {
        /* Calculate and check the fullpel MVP first */
        bmx = pmx = x264_clip3( FPEL(m->mvp[0]), mv_x_min, mv_x_max );
        bmy = pmy = x264_clip3( FPEL(m->mvp[1]), mv_y_min, mv_y_max );
        pmv = pack16to32_mask( bmx, bmy );

        /* Because we are rounding the predicted motion vector to fullpel, there will be
         * an extra MV cost in 15 out of 16 cases. However, when the predicted MV is
         * chosen as the best predictor, it is often the case that the subpel search will
         * result in a vector at or next to the predicted motion vector. Therefore, we omit
         * the cost of the MV from the rounded MVP to avoid unfairly biasing against use of
         * the predicted motion vector.
         *
         * Disclaimer: this is a post-hoc rationalization for why this hack works. */
        bcost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, &p_fref_w[bmy*stride+bmx], stride );

        if( i_mvc > 0 )
        {
            /* Like in subme>=3, except we also round the candidates to fullpel. */
            int valid_mvcs = x264_predictor_roundclip( mvc_temp+2, mvc, i_mvc, h->mb.mv_limit_fpel, pmv );
            if( valid_mvcs > 0 )
            {
                int i = 1, cost;
                M32( mvc_temp[1] ) = pmv;
                bcost <<= 4;
                do
                {
                    int mx = mvc_temp[i+1][0];
                    int my = mvc_temp[i+1][1];
                    cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, &p_fref_w[my*stride+mx], stride ) + BITS_MVD( mx, my );
                    COPY1_IF_LT( bcost, (cost << 4) + i );
                } while( ++i <= valid_mvcs );
                bmx = mvc_temp[(bcost&15)+1][0];
                bmy = mvc_temp[(bcost&15)+1][1];
                bcost >>= 4;
            }
        }

        /* Same as above, except the condition is simpler. */
        if( pmv )
            COST_MV( 0, 0 );
    }

    switch( h->mb.i_me_method )
    {
        case X264_ME_DIA:
        {
            int i;

            /* diamond search, radius 1 */
            bcost <<= 4;
            i = i_me_range;
            do
            {
                COST_MV_X4_DIR( 0,-1, 0,1, -1,0, 1,0, costs );
                COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
                COPY1_IF_LT( bcost, (costs[1]<<4)+3 );
                COPY1_IF_LT( bcost, (costs[2]<<4)+4 );
                COPY1_IF_LT( bcost, (costs[3]<<4)+12 );
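                /* The constants 1, 3, 4, 12 pack the winning offset into the low 4 bits
                 * of bcost as two signed 2-bit fields (negated dx in bits 3:2, negated dy
                 * in bits 1:0); they're decoded below with arithmetic shifts. */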
                if( !(bcost&15) )
                    break;
                bmx -= (bcost<<28)>>30;
                bmy -= (bcost<<30)>>30;
                bcost &= ~15;
            } while( --i && CHECK_MVRANGE(bmx, bmy) );
            bcost >>= 4;
            break;
        }

        case X264_ME_HEX:
        {
me_hex2:
            /* hexagon search, radius 2 */
#if 0
            for( i = 0; i < i_me_range/2; i++ )
            {
                omx = bmx; omy = bmy;
                COST_MV( omx-2, omy );
                COST_MV( omx-1, omy+2 );
                COST_MV( omx+1, omy+2 );
                COST_MV( omx+2, omy );
                COST_MV( omx+1, omy-2 );
                COST_MV( omx-1, omy-2 );
                if( bmx == omx && bmy == omy )
                    break;
                if( !CHECK_MVRANGE(bmx, bmy) )
                    break;
            }
#else
            /* equivalent to the above, but eliminates duplicate candidates */

            /* hexagon */
            COST_MV_X3_DIR( -2,0, -1, 2, 1, 2, costs );
            COST_MV_X3_DIR( 2,0, 1,-2, -1,-2, costs+4 ); /* +4 for 16-byte alignment */
            bcost <<= 3;
            COPY1_IF_LT( bcost, (costs[0]<<3)+2 );
            COPY1_IF_LT( bcost, (costs[1]<<3)+3 );
            COPY1_IF_LT( bcost, (costs[2]<<3)+4 );
            COPY1_IF_LT( bcost, (costs[4]<<3)+5 );
            COPY1_IF_LT( bcost, (costs[5]<<3)+6 );
            COPY1_IF_LT( bcost, (costs[6]<<3)+7 );

            if( bcost&7 )
            {
                int dir = (bcost&7)-2;
                int i;

                bmx += hex2[dir+1][0];
                bmy += hex2[dir+1][1];

                /* half hexagon, not overlapping the previous iteration */
                for( i = (i_me_range>>1) - 1; i > 0 && CHECK_MVRANGE(bmx, bmy); i-- )
                {
                    COST_MV_X3_DIR( hex2[dir+0][0], hex2[dir+0][1],
                                    hex2[dir+1][0], hex2[dir+1][1],
                                    hex2[dir+2][0], hex2[dir+2][1],
                                    costs );
                    bcost &= ~7;
                    COPY1_IF_LT( bcost, (costs[0]<<3)+1 );
                    COPY1_IF_LT( bcost, (costs[1]<<3)+2 );
                    COPY1_IF_LT( bcost, (costs[2]<<3)+3 );
                    if( !(bcost&7) )
                        break;
                    dir += (bcost&7)-2;
                    dir = mod6m1[dir+1];
                    bmx += hex2[dir+1][0];
                    bmy += hex2[dir+1][1];
                }
            }
            bcost >>= 3;
#endif
            /* square refine */
            bcost <<= 4;
            COST_MV_X4_DIR( 0,-1, 0,1, -1,0, 1,0, costs );
            COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
            COPY1_IF_LT( bcost, (costs[1]<<4)+2 );
            COPY1_IF_LT( bcost, (costs[2]<<4)+3 );
            COPY1_IF_LT( bcost, (costs[3]<<4)+4 );
            COST_MV_X4_DIR( -1,-1, -1,1, 1,-1, 1,1, costs );
            COPY1_IF_LT( bcost, (costs[0]<<4)+5 );
            COPY1_IF_LT( bcost, (costs[1]<<4)+6 );
            COPY1_IF_LT( bcost, (costs[2]<<4)+7 );
            COPY1_IF_LT( bcost, (costs[3]<<4)+8 );
            bmx += square1[bcost&15][0];
            bmy += square1[bcost&15][1];
            bcost >>= 4;
            break;
        }

        case X264_ME_UMH:
        {
            /* Uneven-cross Multi-Hexagon-grid Search
             * as in JM, except with different early termination */

            static const uint8_t x264_pixel_size_shift[7] = { 0, 1, 1, 2, 3, 3, 4 };

            int ucost1, ucost2;
            int cross_start = 1;
            const uint16_t *p_cost_omvx;
            const uint16_t *p_cost_omvy;
            int i2;

            /* refine predictors */
            ucost1 = bcost;
            DIA1_ITER( pmx, pmy );
            if( pmx | pmy )
                DIA1_ITER( 0, 0 );

            if( i_pixel == PIXEL_4x4 )
                goto me_hex2;

            ucost2 = bcost;
            if( (bmx | bmy) && ((bmx-pmx) | (bmy-pmy)) )
                DIA1_ITER( bmx, bmy );
            if( bcost == ucost2 )
                cross_start = 3;
            omx = bmx; omy = bmy;

            /* early termination */
#define SAD_THRESH(v) ( bcost < ( v >> x264_pixel_size_shift[i_pixel] ) )
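            /* SAD_THRESH scales its constant by partition size: the shift grows as
             * the partition shrinks, so one set of thresholds serves all sizes. */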
            if( bcost == ucost2 && SAD_THRESH(2000) )
            {
                COST_MV_X4( 0,-2, -1,-1, 1,-1, -2,0 );
                COST_MV_X4( 2, 0, -1, 1, 1, 1, 0,2 );
                if( bcost == ucost1 && SAD_THRESH(500) )
                    break;
                if( bcost == ucost2 )
                {
                    int range = (i_me_range>>1) | 1;
                    CROSS( 3, range, range );
                    COST_MV_X4( -1,-2, 1,-2, -2,-1, 2,-1 );
                    COST_MV_X4( -2, 1, 2, 1, -1, 2, 1, 2 );
                    if( bcost == ucost2 )
                        break;
                    cross_start = range + 2;
                }
            }

            /* adaptive search range */
            if( i_mvc )
            {
                /* range multipliers based on casual inspection of some statistics of
                 * average distance between current predictor and final mv found by ESA.
                 * these have not been tuned much by actual encoding. */
                static const uint8_t range_mul[4][4] =
                {
                    { 3, 3, 4, 4 },
                    { 3, 4, 4, 4 },
                    { 4, 4, 4, 5 },
                    { 4, 4, 5, 6 },
                };
                int mvd;
                int sad_ctx, mvd_ctx;
                int denom = 1;

                if( i_mvc == 1 )
                {
                    if( i_pixel == PIXEL_16x16 )
                        /* mvc is probably the same as mvp, so the difference isn't meaningful.
                         * but prediction usually isn't too bad, so just use medium range */
                        mvd = 25;
                    else
                        mvd = abs( m->mvp[0] - mvc[0][0] )
                            + abs( m->mvp[1] - mvc[0][1] );
                }
                else
                {
                    /* calculate the degree of agreement between predictors. */
                    /* in 16x16, mvc includes all the neighbors used to make mvp,
                     * so don't count mvp separately. */
                    denom = i_mvc - 1;
                    mvd = 0;
                    if( i_pixel != PIXEL_16x16 )
                    {
                        mvd = abs( m->mvp[0] - mvc[0][0] )
                            + abs( m->mvp[1] - mvc[0][1] );
                        denom++;
                    }
                    mvd += x264_predictor_difference( mvc, i_mvc );
                }

                sad_ctx = SAD_THRESH(1000) ? 0
                        : SAD_THRESH(2000) ? 1
                        : SAD_THRESH(4000) ? 2 : 3;
                mvd_ctx = mvd < 10*denom ? 0
                        : mvd < 20*denom ? 1
                        : mvd < 40*denom ? 2 : 3;

                i_me_range = i_me_range * range_mul[mvd_ctx][sad_ctx] >> 2;
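                /* range_mul is in quarters, so this scales i_me_range by 0.75x-1.5x. */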
            }

            /* FIXME if the above DIA2/OCT2/CROSS found a new mv, it has not updated omx/omy.
             * we are still centered on the same place as the DIA2. is this desirable? */
            CROSS( cross_start, i_me_range, i_me_range>>1 );

            COST_MV_X4( -2,-2, -2,2, 2,-2, 2,2 );

            /* hexagon grid */
            omx = bmx; omy = bmy;
            p_cost_omvx = p_cost_mvx + omx*4;
            p_cost_omvy = p_cost_mvy + omy*4;
            i2 = 1;
            do
            {
                static const int8_t hex4[16][2] = {
                    { 0,-4}, { 0, 4}, {-2,-3}, { 2,-3},
                    {-4,-2}, { 4,-2}, {-4,-1}, { 4,-1},
                    {-4, 0}, { 4, 0}, {-4, 1}, { 4, 1},
                    {-4, 2}, { 4, 2}, {-2, 3}, { 2, 3},
                };

                if( 4*i2 > X264_MIN4( mv_x_max-omx, omx-mv_x_min,
                                      mv_y_max-omy, omy-mv_y_min ) )
                {
                    int j;
                    for( j = 0; j < 16; j++ )
                    {
                        int mx = omx + hex4[j][0]*i2;
                        int my = omy + hex4[j][1]*i2;
                        if( CHECK_MVRANGE(mx, my) )
                            COST_MV( mx, my );
                    }
                }
                else
                {
                    int dir = 0;
                    pixel *pix_base = p_fref_w + omx + (omy-4*i2)*stride;
                    int dy = i2*stride;
#define SADS(k,x0,y0,x1,y1,x2,y2,x3,y3)\
                    h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
                            pix_base x0*i2+(y0-2*k+4)*dy,\
                            pix_base x1*i2+(y1-2*k+4)*dy,\
                            pix_base x2*i2+(y2-2*k+4)*dy,\
                            pix_base x3*i2+(y3-2*k+4)*dy,\
                            stride, costs+4*k );\
                    pix_base += 2*dy;
#define ADD_MVCOST(k,x,y) costs[k] += p_cost_omvx[x*4*i2] + p_cost_omvy[y*4*i2]
#define MIN_MV(k,x,y) COPY2_IF_LT( bcost, costs[k], dir, x*16+(y&15) )
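                    /* MIN_MV packs the winning offset into dir as x*16 + (y&15);
                     * it's unpacked below as (dir>>4, (dir<<28)>>28). */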
                    SADS( 0, +0,-4, +0,+4, -2,-3, +2,-3 );
                    SADS( 1, -4,-2, +4,-2, -4,-1, +4,-1 );
                    SADS( 2, -4,+0, +4,+0, -4,+1, +4,+1 );
                    SADS( 3, -4,+2, +4,+2, -2,+3, +2,+3 );
                    ADD_MVCOST(  0, 0,-4 );
                    ADD_MVCOST(  1, 0, 4 );
                    ADD_MVCOST(  2,-2,-3 );
                    ADD_MVCOST(  3, 2,-3 );
                    ADD_MVCOST(  4,-4,-2 );
                    ADD_MVCOST(  5, 4,-2 );
                    ADD_MVCOST(  6,-4,-1 );
                    ADD_MVCOST(  7, 4,-1 );
                    ADD_MVCOST(  8,-4, 0 );
                    ADD_MVCOST(  9, 4, 0 );
                    ADD_MVCOST( 10,-4, 1 );
                    ADD_MVCOST( 11, 4, 1 );
                    ADD_MVCOST( 12,-4, 2 );
                    ADD_MVCOST( 13, 4, 2 );
                    ADD_MVCOST( 14,-2, 3 );
                    ADD_MVCOST( 15, 2, 3 );
                    MIN_MV(  0, 0,-4 );
                    MIN_MV(  1, 0, 4 );
                    MIN_MV(  2,-2,-3 );
                    MIN_MV(  3, 2,-3 );
                    MIN_MV(  4,-4,-2 );
                    MIN_MV(  5, 4,-2 );
                    MIN_MV(  6,-4,-1 );
                    MIN_MV(  7, 4,-1 );
                    MIN_MV(  8,-4, 0 );
                    MIN_MV(  9, 4, 0 );
                    MIN_MV( 10,-4, 1 );
                    MIN_MV( 11, 4, 1 );
                    MIN_MV( 12,-4, 2 );
                    MIN_MV( 13, 4, 2 );
                    MIN_MV( 14,-2, 3 );
                    MIN_MV( 15, 2, 3 );
#undef SADS
#undef ADD_MVCOST
#undef MIN_MV
                    if(dir)
                    {
                        bmx = omx + i2*(dir>>4);
                        bmy = omy + i2*((dir<<28)>>28);
                    }
                }
            } while( ++i2 <= i_me_range>>2 );
            if( bmy <= mv_y_max && bmy >= mv_y_min && bmx <= mv_x_max && bmx >= mv_x_min )
                goto me_hex2;
            break;
        }

        case X264_ME_ESA:
        case X264_ME_TESA:
        {
            const int min_x = X264_MAX( bmx - i_me_range, mv_x_min );
            const int min_y = X264_MAX( bmy - i_me_range, mv_y_min );
            const int max_x = X264_MIN( bmx + i_me_range, mv_x_max );
            const int max_y = X264_MIN( bmy + i_me_range, mv_y_max );
            /* SEA is fastest in multiples of 4 */
            const int width = (max_x - min_x + 3) & ~3;
#if 0
            /* plain old exhaustive search */
            for( my = min_y; my <= max_y; my++ )
                for( mx = min_x; mx < min_x + width; mx++ )
                    COST_MV( mx, my );
#else
            /* successive elimination by comparing DC before a full SAD,
             * because sum(abs(diff)) >= abs(diff(sum)). */
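            /* pixf.ads[] applies that bound (plus the fullpel x-MV cost) per row,
             * writing the surviving x offsets into xs[] before any SAD is computed. */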
            uint16_t *sums_base = m->integral;

            ALIGNED_16( static pixel zero[8*FENC_STRIDE] ) = {0};
            ALIGNED_ARRAY_16( int, enc_dc,[4] );

            int sad_size = i_pixel <= PIXEL_8x8 ? PIXEL_8x8 : PIXEL_4x4;
            int delta = x264_pixel_size[sad_size].w;
            int16_t *xs = h->scratch_buffer;
            int xn;
            uint16_t *cost_fpel_mvx = h->cost_mv_fpel[h->mb.i_qp][-m->mvp[0]&3] + (-m->mvp[0]>>2);

            h->pixf.sad_x4[sad_size]( zero, p_fenc, p_fenc+delta,
                p_fenc+delta*FENC_STRIDE, p_fenc+delta+delta*FENC_STRIDE,
                FENC_STRIDE, enc_dc );
            if( delta == 4 )
                sums_base += stride * (h->fenc->i_lines[0] + PADV*2);
            if( i_pixel == PIXEL_16x16 || i_pixel == PIXEL_8x16 || i_pixel == PIXEL_4x8 )
                delta *= stride;
            if( i_pixel == PIXEL_8x16 || i_pixel == PIXEL_4x8 )
                enc_dc[1] = enc_dc[2];

            if( h->mb.i_me_method == X264_ME_TESA )
            {
                // ADS threshold, then SAD threshold, then keep the best few SADs, then SATD
                mvsad_t *mvsads = (mvsad_t *)(xs + ((width+31)&~31) + 4);
                int nmvsad = 0, limit;
                int sad_thresh = i_me_range <= 16 ? 10 : i_me_range <= 24 ? 11 : 12;
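                // sad_thresh is in eighths: a candidate is kept only if its SAD is
                // within 1.25x-1.5x of the best SAD seen so far.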
                int bsad = h->pixf.sad[i_pixel]( p_fenc, FENC_STRIDE, p_fref_w+bmy*stride+bmx, stride )
                         + BITS_MVD( bmx, bmy );
                int my;
                int i2;

                for( my = min_y; my <= max_y; my++ )
                {
                    int i;
                    int ycost = p_cost_mvy[my<<2];
                    if( bsad <= ycost )
                        continue;
                    bsad -= ycost;
                    xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
                                               cost_fpel_mvx+min_x, xs, width, bsad * 17 >> 4 );
                    for( i = 0; i < xn-2; i += 3 )
                    {
                        int j;
                        pixel *ref = p_fref_w+min_x+my*stride;

                        ALIGNED_ARRAY_16( int, sads,[4] ); /* padded to [4] for asm */

                        h->pixf.sad_x3[i_pixel]( p_fenc, ref+xs[i], ref+xs[i+1], ref+xs[i+2], stride, sads );
                        for( j = 0; j < 3; j++ )
                        {
                            int sad = sads[j] + cost_fpel_mvx[xs[i+j]];
                            if( sad < bsad*sad_thresh>>3 )
                            {
                                COPY1_IF_LT( bsad, sad );
                                mvsads[nmvsad].sad = sad + ycost;
                                mvsads[nmvsad].mv[0] = min_x+xs[i+j];
                                mvsads[nmvsad].mv[1] = my;
                                nmvsad++;
                            }
                        }
                    }
                    for( ; i < xn; i++ )
                    {
                        int mx = min_x+xs[i];
                        int sad = h->pixf.sad[i_pixel]( p_fenc, FENC_STRIDE, p_fref_w+mx+my*stride, stride )
                                + cost_fpel_mvx[xs[i]];
                        if( sad < bsad*sad_thresh>>3 )
                        {
                            COPY1_IF_LT( bsad, sad );
                            mvsads[nmvsad].sad = sad + ycost;
                            mvsads[nmvsad].mv[0] = mx;
                            mvsads[nmvsad].mv[1] = my;
                            nmvsad++;
                        }
                    }
                    bsad += ycost;
                }

                limit = i_me_range >> 1;
                sad_thresh = bsad*sad_thresh>>3;
                while( nmvsad > limit*2 && sad_thresh > bsad )
                {
                    int j;
                    // halve the range if the domain is too large... eh, close enough
                    sad_thresh = (sad_thresh + bsad) >> 1;
                    for( i2 = 0; i2 < nmvsad && mvsads[i2].sad <= sad_thresh; i2++ );
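                    // Compact the survivors in place: each element is copied down
                    // unconditionally, and the branchless update of i2 below advances
                    // the write index only when the unsigned underflow of
                    // (sad - (sad_thresh+1)) sets the top bit, i.e. sad <= sad_thresh.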
                    for( j = i2; j < nmvsad; j++ )
                    {
                        uint32_t sad;
                        if( WORD_SIZE == 8 && sizeof(mvsad_t) == 8 )
                        {
                            uint64_t mvsad = M64( &mvsads[i2] ) = M64( &mvsads[j] );
#if WORDS_BIGENDIAN
                            mvsad >>= 32;
#endif
                            sad = mvsad;
                        }
                        else
                        {
                            sad = mvsads[j].sad;
                            CP32( mvsads[i2].mv, mvsads[j].mv );
                            mvsads[i2].sad = sad;
                        }
                        i2 += (sad - (sad_thresh+1)) >> 31;
                    }
                    nmvsad = i2;
                }
                while( nmvsad > limit )
                {
                    int bi = 0;
                    int i;
                    for( i = 1; i < nmvsad; i++ )
                        if( mvsads[i].sad > mvsads[bi].sad )
                            bi = i;
                    nmvsad--;
                    if( sizeof( mvsad_t ) == sizeof( uint64_t ) )
                        CP64( &mvsads[bi], &mvsads[nmvsad] );
                    else
                        mvsads[bi] = mvsads[nmvsad];
                }
                for( i2 = 0; i2 < nmvsad; i2++ )
                    COST_MV( mvsads[i2].mv[0], mvsads[i2].mv[1] );
            }
            else
            {
                int my;
                // just ADS and SAD
                for( my = min_y; my <= max_y; my++ )
                {
                    int i;
                    int ycost = p_cost_mvy[my<<2];
                    if( bcost <= ycost )
                        continue;
                    bcost -= ycost;
                    xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
                                               cost_fpel_mvx+min_x, xs, width, bcost );
                    for( i = 0; i < xn-2; i += 3 )
                        COST_MV_X3_ABS( min_x+xs[i],my, min_x+xs[i+1],my, min_x+xs[i+2],my );
                    bcost += ycost;
                    for( ; i < xn; i++ )
                        COST_MV( min_x+xs[i], my );
                }
            }
#endif
        }
        break;
    }

    /* -> qpel mv */
    bmv = pack16to32_mask(bmx,bmy);
    bmv_spel = SPELx2(bmv);
    if( h->mb.i_subpel_refine < 3 )
    {
        m->cost_mv = p_cost_mvx[bmx<<2] + p_cost_mvy[bmy<<2];
        m->cost = bcost;
        /* compute the real cost */
        if( bmv == pmv ) m->cost += m->cost_mv;
        M32( m->mv ) = bmv_spel;
    }
    else
    {
        M32(m->mv) = bpred_cost < bcost ? bpred_mv : bmv_spel;
        m->cost = X264_MIN( bpred_cost, bcost );
    }

    /* subpel refine */
    if( h->mb.i_subpel_refine >= 2 )
    {
        int hpel = subpel_iterations[h->mb.i_subpel_refine][2];
        int qpel = subpel_iterations[h->mb.i_subpel_refine][3];
        refine_subpel( h, m, hpel, qpel, p_halfpel_thresh, 0 );
    }
}
#undef COST_MV

void x264_me_refine_qpel( x264_t *h, x264_me_t *m )
{
    int hpel = subpel_iterations[h->mb.i_subpel_refine][0];
    int qpel = subpel_iterations[h->mb.i_subpel_refine][1];

    if( m->i_pixel <= PIXEL_8x8 )
        m->cost -= m->i_ref_cost;

    refine_subpel( h, m, hpel, qpel, NULL, 1 );
}

void x264_me_refine_qpel_refdupe( x264_t *h, x264_me_t *m, int *p_halfpel_thresh )
{
    refine_subpel( h, m, 0, X264_MIN( 2, subpel_iterations[h->mb.i_subpel_refine][3] ), p_halfpel_thresh, 0 );
}

#define COST_MV_SAD( mx, my ) \
{ \
    intptr_t stride = 16; \
    pixel *src = h->mc.get_ref( pix, &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
    int cost = h->pixf.fpelcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my ); \
}

#define COST_MV_SATD( mx, my, dir ) \
if( b_refine_qpel || (dir^1) != odir ) \
{ \
    intptr_t stride = 16; \
    pixel *src = h->mc.get_ref( pix, &stride, &m->p_fref[0], m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
    int cost = h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    if( b_chroma_me && cost < bcost ) \
    { \
        if( CHROMA444 ) \
        { \
            stride = 16; \
            src = h->mc.get_ref( pix, &stride, &m->p_fref[4], m->i_stride[1], mx, my, bw, bh, &m->weight[1] ); \
            cost += h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[1], FENC_STRIDE, src, stride ); \
            if( cost < bcost ) \
            { \
                stride = 16; \
                src = h->mc.get_ref( pix, &stride, &m->p_fref[8], m->i_stride[2], mx, my, bw, bh, &m->weight[2] ); \
                cost += h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[2], FENC_STRIDE, src, stride ); \
            } \
        } \
        else \
        { \
            h->mc.mc_chroma( pix, pix+8, 16, m->p_fref[4], m->i_stride[1], \
                             mx, 2*(my+mvy_offset)>>chroma_v_shift, bw>>1, bh>>chroma_v_shift ); \
            if( m->weight[1].weightfn ) \
                m->weight[1].weightfn[bw>>3]( pix, 16, pix, 16, &m->weight[1], bh>>chroma_v_shift ); \
            cost += h->pixf.mbcmp[chromapix]( m->p_fenc[1], FENC_STRIDE, pix, 16 ); \
            if( cost < bcost ) \
            { \
                if( m->weight[2].weightfn ) \
                    m->weight[2].weightfn[bw>>3]( pix+8, 16, pix+8, 16, &m->weight[2], bh>>chroma_v_shift ); \
                cost += h->pixf.mbcmp[chromapix]( m->p_fenc[2], FENC_STRIDE, pix+8, 16 ); \
            } \
        } \
    } \
    COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, bdir, dir ); \
}

static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_iters, int *p_halfpel_thresh, int b_refine_qpel )
{
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const uint16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
    const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
    const int i_pixel = m->i_pixel;
    const int b_chroma_me = h->mb.b_chroma_me && (i_pixel <= PIXEL_8x8 || CHROMA444);
    int chromapix = h->luma2chroma_pixel[i_pixel];
    int chroma_v_shift = CHROMA_V_SHIFT;
    int mvy_offset = chroma_v_shift & MB_INTERLACED & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;

    ALIGNED_ARRAY_N( pixel, pix,[64*18] ); // really 17x17x2, but round up for alignment
    ALIGNED_ARRAY_16( int, costs,[4] );

    int bmx = m->mv[0];
    int bmy = m->mv[1];
    int bcost = m->cost;
    int odir = -1, bdir;

    /* halfpel diamond search */
    if( hpel_iters )
    {
        int i;

        /* try the subpel component of the predicted mv */
        if( h->mb.i_subpel_refine < 3 )
        {
            int mx = x264_clip3( m->mvp[0], h->mb.mv_min_spel[0]+2, h->mb.mv_max_spel[0]-2 );
            int my = x264_clip3( m->mvp[1], h->mb.mv_min_spel[1]+2, h->mb.mv_max_spel[1]-2 );
            if( (mx-bmx)|(my-bmy) )
                COST_MV_SAD( mx, my );
        }

        bcost <<= 6;
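        /* The low 6 bits of bcost hold the winning direction as two signed 3-bit
         * fields (halfpel steps are +-2), decoded below with arithmetic shifts;
         * same trick as the fullpel diamond, widened for the larger step. */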
        for( i = hpel_iters; i > 0; i-- )
        {
            int omx = bmx, omy = bmy;
            intptr_t stride = 64; // candidates are either all hpel or all qpel, so one stride is enough
            pixel *src0, *src1, *src2, *src3;
            src0 = h->mc.get_ref( pix,    &stride, m->p_fref, m->i_stride[0], omx, omy-2, bw, bh+1, &m->weight[0] );
            src2 = h->mc.get_ref( pix+32, &stride, m->p_fref, m->i_stride[0], omx-2, omy, bw+4, bh, &m->weight[0] );
            src1 = src0 + stride;
            src3 = src2 + 1;
            h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], src0, src1, src2, src3, stride, costs );
            costs[0] += p_cost_mvx[omx  ] + p_cost_mvy[omy-2];
            costs[1] += p_cost_mvx[omx  ] + p_cost_mvy[omy+2];
            costs[2] += p_cost_mvx[omx-2] + p_cost_mvy[omy  ];
            costs[3] += p_cost_mvx[omx+2] + p_cost_mvy[omy  ];
            COPY1_IF_LT( bcost, (costs[0]<<6)+2 );
            COPY1_IF_LT( bcost, (costs[1]<<6)+6 );
            COPY1_IF_LT( bcost, (costs[2]<<6)+16 );
            COPY1_IF_LT( bcost, (costs[3]<<6)+48 );
            if( !(bcost&63) )
                break;
            bmx -= (bcost<<26)>>29;
            bmy -= (bcost<<29)>>29;
            bcost &= ~63;
        }
        bcost >>= 6;
    }

    if( !b_refine_qpel && (h->pixf.mbcmp_unaligned[0] != h->pixf.fpelcmp[0] || b_chroma_me) )
    {
        bcost = COST_MAX;
        COST_MV_SATD( bmx, bmy, -1 );
    }

    /* early termination when examining multiple reference frames */
    if( p_halfpel_thresh )
    {
        if( (bcost*7)>>3 > *p_halfpel_thresh )
        {
            m->cost = bcost;
            m->mv[0] = bmx;
            m->mv[1] = bmy;
            // don't need cost_mv
            return;
        }
        else if( bcost < *p_halfpel_thresh )
            *p_halfpel_thresh = bcost;
    }

    /* quarterpel diamond search */
    if( h->mb.i_subpel_refine != 1 )
    {
        int i;
        bdir = -1;
        for( i = qpel_iters; i > 0; i-- )
        {
            int omx;
            int omy;
            if( bmy <= h->mb.mv_min_spel[1] || bmy >= h->mb.mv_max_spel[1] || bmx <= h->mb.mv_min_spel[0] || bmx >= h->mb.mv_max_spel[0] )
                break;
            odir = bdir;
            omx = bmx, omy = bmy;
            COST_MV_SATD( omx, omy - 1, 0 );
            COST_MV_SATD( omx, omy + 1, 1 );
            COST_MV_SATD( omx - 1, omy, 2 );
            COST_MV_SATD( omx + 1, omy, 3 );
            if( (bmx == omx) & (bmy == omy) )
                break;
        }
    }
    /* Special simplified case for subme=1 */
    else if( bmy > h->mb.mv_min_spel[1] && bmy < h->mb.mv_max_spel[1] && bmx > h->mb.mv_min_spel[0] && bmx < h->mb.mv_max_spel[0] )
    {
        int omx = bmx, omy = bmy;
        /* We have to use mc_luma because all strides must be the same to use fpelcmp_x4 */
        h->mc.mc_luma( pix   , 64, m->p_fref, m->i_stride[0], omx, omy-1, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+16, 64, m->p_fref, m->i_stride[0], omx, omy+1, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+32, 64, m->p_fref, m->i_stride[0], omx-1, omy, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+48, 64, m->p_fref, m->i_stride[0], omx+1, omy, bw, bh, &m->weight[0] );
        h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], pix, pix+16, pix+32, pix+48, 64, costs );
        costs[0] += p_cost_mvx[omx  ] + p_cost_mvy[omy-1];
        costs[1] += p_cost_mvx[omx  ] + p_cost_mvy[omy+1];
        costs[2] += p_cost_mvx[omx-1] + p_cost_mvy[omy  ];
        costs[3] += p_cost_mvx[omx+1] + p_cost_mvy[omy  ];
        bcost <<= 4;
        COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
        COPY1_IF_LT( bcost, (costs[1]<<4)+3 );
        COPY1_IF_LT( bcost, (costs[2]<<4)+4 );
        COPY1_IF_LT( bcost, (costs[3]<<4)+12 );
        bmx -= (bcost<<28)>>30;
        bmy -= (bcost<<30)>>30;
        bcost >>= 4;
    }

    m->cost = bcost;
    m->mv[0] = bmx;
    m->mv[1] = bmy;
    m->cost_mv = p_cost_mvx[bmx] + p_cost_mvy[bmy];
}

#define BIME_CACHE( dx, dy, list )\
{\
    x264_me_t *m = m##list;\
    int i = 4 + 3*dx + dy;\
    int mvx = bm##list##x+dx;\
    int mvy = bm##list##y+dy;\
    stride[0][list][i] = bw;\
    src[0][list][i] = h->mc.get_ref( pixy_buf[list][i], &stride[0][list][i], &m->p_fref[0],\
                                     m->i_stride[0], mvx, mvy, bw, bh, x264_weight_none );\
    if( rd )\
    {\
        if( CHROMA444 )\
        {\
            stride[1][list][i] = bw;\
            src[1][list][i] = h->mc.get_ref( pixu_buf[list][i], &stride[1][list][i], &m->p_fref[4],\
                                             m->i_stride[1], mvx, mvy, bw, bh, x264_weight_none );\
            stride[2][list][i] = bw;\
            src[2][list][i] = h->mc.get_ref( pixv_buf[list][i], &stride[2][list][i], &m->p_fref[8],\
                                             m->i_stride[2], mvx, mvy, bw, bh, x264_weight_none );\
        }\
        else\
            h->mc.mc_chroma( pixu_buf[list][i], pixv_buf[list][i], 8, m->p_fref[4], m->i_stride[1],\
                             mvx, 2*(mvy+mv##list##y_offset)>>chroma_v_shift, bw>>1, bh>>chroma_v_shift );\
    }\
}

#define SATD_THRESH(cost) (cost+(cost>>4))
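/* i.e. admit any candidate whose SATD cost is within 1/16 (6.25%) of the best
 * seen so far to the more expensive RD evaluation. */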

/* Don't unroll the BIME_CACHE loop. I couldn't find any way to force this
 * other than making its iteration count not a compile-time constant. */
int x264_iter_kludge = 0;

static void ALWAYS_INLINE x264_me_refine_bidir( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2, int rd )
{
    int x = i8&1;
    int y = i8>>1;
    int s8 = X264_SCAN8_0 + 2*x + 16*y;
    int16_t *cache0_mv = h->mb.cache.mv[0][s8];
    int16_t *cache1_mv = h->mb.cache.mv[1][s8];
    const int i_pixel = m0->i_pixel;
    const int bw = x264_pixel_size[i_pixel].w;
    const int bh = x264_pixel_size[i_pixel].h;

    ALIGNED_ARRAY_N( pixel, pixy_buf,[2],[9][16*16] );
    ALIGNED_ARRAY_N( pixel, pixu_buf,[2],[9][16*16] );
    ALIGNED_ARRAY_N( pixel, pixv_buf,[2],[9][16*16] );

    pixel *src[3][2][9];
    int chromapix = h->luma2chroma_pixel[i_pixel];
    int chroma_v_shift = CHROMA_V_SHIFT;
    int chroma_x = (8 >> CHROMA_H_SHIFT) * x;
    int chroma_y = (8 >> chroma_v_shift) * y;
    pixel *pix  = &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE];
    pixel *pixu = &h->mb.pic.p_fdec[1][chroma_x + chroma_y*FDEC_STRIDE];
    pixel *pixv = &h->mb.pic.p_fdec[2][chroma_x + chroma_y*FDEC_STRIDE];
    int ref0 = h->mb.cache.ref[0][s8];
    int ref1 = h->mb.cache.ref[1][s8];
    const int mv0y_offset = chroma_v_shift & MB_INTERLACED & ref0 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
    const int mv1y_offset = chroma_v_shift & MB_INTERLACED & ref1 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
    intptr_t stride[3][2][9];
    int bm0x = m0->mv[0];
    int bm0y = m0->mv[1];
    int bm1x = m1->mv[0];
    int bm1y = m1->mv[1];
    int bcost = COST_MAX;
    int mc_list0 = 1, mc_list1 = 1;
    uint64_t bcostrd = COST_MAX64;
    uint16_t amvd;
    const uint16_t *p_cost_m0x;
    const uint16_t *p_cost_m0y;
    const uint16_t *p_cost_m1x;
    const uint16_t *p_cost_m1y;
    int pass;

    /* each byte of visited represents 8 possible m1y positions, so a 4D array isn't needed */
    ALIGNED_ARRAY_N( uint8_t, visited,[8],[8][8] );
    /* all permutations of an offset in up to 2 of the dimensions */
    ALIGNED_4( static const int8_t dia4d[33][4] ) =
    {
        {0,0,0,0},
        {0,0,0,1}, {0,0,0,-1}, {0,0,1,0}, {0,0,-1,0},
        {0,1,0,0}, {0,-1,0,0}, {1,0,0,0}, {-1,0,0,0},
        {0,0,1,1}, {0,0,-1,-1},{0,1,1,0}, {0,-1,-1,0},
        {1,1,0,0}, {-1,-1,0,0},{1,0,0,1}, {-1,0,0,-1},
        {0,1,0,1}, {0,-1,0,-1},{1,0,1,0}, {-1,0,-1,0},
        {0,0,-1,1},{0,0,1,-1}, {0,-1,1,0},{0,1,-1,0},
        {-1,1,0,0},{1,-1,0,0}, {1,0,0,-1},{-1,0,0,1},
        {0,-1,0,1},{0,1,0,-1}, {-1,0,1,0},{1,0,-1,0},
    };

    if( bm0y < h->mb.mv_min_spel[1] + 8 || bm1y < h->mb.mv_min_spel[1] + 8 ||
        bm0y > h->mb.mv_max_spel[1] - 8 || bm1y > h->mb.mv_max_spel[1] - 8 ||
        bm0x < h->mb.mv_min_spel[0] + 8 || bm1x < h->mb.mv_min_spel[0] + 8 ||
        bm0x > h->mb.mv_max_spel[0] - 8 || bm1x > h->mb.mv_max_spel[0] - 8 )
        return;

    if( rd && m0->i_pixel != PIXEL_16x16 && i8 != 0 )
    {
        x264_mb_predict_mv( h, 0, i8<<2, bw>>2, m0->mvp );
        x264_mb_predict_mv( h, 1, i8<<2, bw>>2, m1->mvp );
    }

    p_cost_m0x = m0->p_cost_mv - m0->mvp[0];
    p_cost_m0y = m0->p_cost_mv - m0->mvp[1];
    p_cost_m1x = m1->p_cost_mv - m1->mvp[0];
    p_cost_m1y = m1->p_cost_mv - m1->mvp[1];

    h->mc.memzero_aligned( visited, sizeof(uint8_t[8][8][8]) );

    for( pass = 0; pass < 8; pass++ )
    {
        int bestj = 0;
        int j;

        /* check all mv pairs that differ in at most 2 components from the current mvs. */
        /* doesn't do chroma ME. this probably doesn't matter, as the gains
         * from bidir ME are the same with and without chroma ME. */

        if( mc_list0 )
            for( j = x264_iter_kludge; j < 9; j++ )
                BIME_CACHE( square1[j][0], square1[j][1], 0 );

        if( mc_list1 )
            for( j = x264_iter_kludge; j < 9; j++ )
                BIME_CACHE( square1[j][0], square1[j][1], 1 );

        for( j = !!pass; j < 33; j++ )
        {
            int m0x = dia4d[j][0] + bm0x;
            int m0y = dia4d[j][1] + bm0y;
            int m1x = dia4d[j][2] + bm1x;
            int m1y = dia4d[j][3] + bm1y;
            if( !pass || !((visited[(m0x)&7][(m0y)&7][(m1x)&7] & (1<<((m1y)&7)))) )
            {
                int i0 = 4 + 3*dia4d[j][0] + dia4d[j][1];
                int i1 = 4 + 3*dia4d[j][2] + dia4d[j][3];
                int cost;

                visited[(m0x)&7][(m0y)&7][(m1x)&7] |= (1<<((m1y)&7));
                h->mc.avg[i_pixel]( pix, FDEC_STRIDE, src[0][0][i0], stride[0][0][i0], src[0][1][i1], stride[0][1][i1], i_weight );
                cost = h->pixf.mbcmp[i_pixel]( m0->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE )
                     + p_cost_m0x[m0x] + p_cost_m0y[m0y] + p_cost_m1x[m1x] + p_cost_m1y[m1y];
                if( rd )
                {
                    if( cost < SATD_THRESH(bcost) )
                    {
                        uint64_t costrd;
                        bcost = X264_MIN( cost, bcost );
                        M32( cache0_mv ) = pack16to32_mask(m0x,m0y);
                        M32( cache1_mv ) = pack16to32_mask(m1x,m1y);
                        if( CHROMA444 )
                        {
                            h->mc.avg[i_pixel]( pixu, FDEC_STRIDE, src[1][0][i0], stride[1][0][i0], src[1][1][i1], stride[1][1][i1], i_weight );
                            h->mc.avg[i_pixel]( pixv, FDEC_STRIDE, src[2][0][i0], stride[2][0][i0], src[2][1][i1], stride[2][1][i1], i_weight );
                        }
                        else
                        {
                            h->mc.avg[chromapix]( pixu, FDEC_STRIDE, pixu_buf[0][i0], 8, pixu_buf[1][i1], 8, i_weight );
                            h->mc.avg[chromapix]( pixv, FDEC_STRIDE, pixv_buf[0][i0], 8, pixv_buf[1][i1], 8, i_weight );
                        }
                        costrd = x264_rd_cost_part( h, i_lambda2, i8*4, m0->i_pixel );
                        COPY2_IF_LT( bcostrd, costrd, bestj, j );
                    }
                }
                else
                    COPY2_IF_LT( bcost, cost, bestj, j );
            }
        }

        if( !bestj )
            break;

        bm0x += dia4d[bestj][0];
        bm0y += dia4d[bestj][1];
        bm1x += dia4d[bestj][2];
        bm1y += dia4d[bestj][3];

        mc_list0 = M16( &dia4d[bestj][0] );
        mc_list1 = M16( &dia4d[bestj][2] );
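        /* M16 reads a list's (dx,dy) pair as one 16-bit word, so motion
         * compensation is redone next pass only for lists whose MV moved. */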
    }

    if( rd )
    {
        x264_macroblock_cache_mv ( h, 2*x, 2*y, bw>>2, bh>>2, 0, pack16to32_mask(bm0x, bm0y) );
        amvd = pack8to16( X264_MIN(abs(bm0x - m0->mvp[0]),33), X264_MIN(abs(bm0y - m0->mvp[1]),33) );
        x264_macroblock_cache_mvd( h, 2*x, 2*y, bw>>2, bh>>2, 0, amvd );

        x264_macroblock_cache_mv ( h, 2*x, 2*y, bw>>2, bh>>2, 1, pack16to32_mask(bm1x, bm1y) );
        amvd = pack8to16( X264_MIN(abs(bm1x - m1->mvp[0]),33), X264_MIN(abs(bm1y - m1->mvp[1]),33) );
        x264_macroblock_cache_mvd( h, 2*x, 2*y, bw>>2, bh>>2, 1, amvd );
    }

    m0->mv[0] = bm0x;
    m0->mv[1] = bm0y;
    m1->mv[0] = bm1x;
    m1->mv[1] = bm1y;
}

void x264_me_refine_bidir_satd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight )
{
    x264_me_refine_bidir( h, m0, m1, i_weight, 0, 0, 0 );
}

void x264_me_refine_bidir_rd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2 )
{
    /* Motion compensation is done as part of bidir_rd; don't repeat
     * it in encoding. */
    h->mb.b_skip_mc = 1;
    x264_me_refine_bidir( h, m0, m1, i_weight, i8, i_lambda2, 1 );
    h->mb.b_skip_mc = 0;
}

#undef COST_MV_SATD
#define COST_MV_SATD( mx, my, dst, avoid_mvp ) \
{ \
    if( !avoid_mvp || !(mx == pmx && my == pmy) ) \
    { \
        h->mc.mc_luma( pix, FDEC_STRIDE, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
        dst = h->pixf.mbcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE ) \
            + p_cost_mvx[mx] + p_cost_mvy[my]; \
        COPY1_IF_LT( bsatd, dst ); \
    } \
    else \
        dst = COST_MAX; \
}

#define COST_MV_RD( mx, my, satd, do_dir, mdir ) \
{ \
    if( satd <= SATD_THRESH(bsatd) ) \
    { \
        uint64_t cost; \
        M32( cache_mv ) = pack16to32_mask(mx,my); \
        if( CHROMA444 ) \
        { \
            h->mc.mc_luma( pixu, FDEC_STRIDE, &m->p_fref[4], m->i_stride[1], mx, my, bw, bh, &m->weight[1] ); \
            h->mc.mc_luma( pixv, FDEC_STRIDE, &m->p_fref[8], m->i_stride[2], mx, my, bw, bh, &m->weight[2] ); \
        } \
        else if( m->i_pixel <= PIXEL_8x8 ) \
        { \
            h->mc.mc_chroma( pixu, pixv, FDEC_STRIDE, m->p_fref[4], m->i_stride[1], \
                             mx, 2*(my+mvy_offset)>>chroma_v_shift, bw>>1, bh>>chroma_v_shift ); \
            if( m->weight[1].weightfn ) \
                m->weight[1].weightfn[bw>>3]( pixu, FDEC_STRIDE, pixu, FDEC_STRIDE, &m->weight[1], bh>>chroma_v_shift ); \
            if( m->weight[2].weightfn ) \
                m->weight[2].weightfn[bw>>3]( pixv, FDEC_STRIDE, pixv, FDEC_STRIDE, &m->weight[2], bh>>chroma_v_shift ); \
        } \
        cost = x264_rd_cost_part( h, i_lambda2, i4, m->i_pixel ); \
        COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, dir, do_dir?mdir:dir ); \
    } \
}

void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i4, int i_list )
{
    int16_t *cache_mv = h->mb.cache.mv[i_list][x264_scan8[i4]];
    const uint16_t *p_cost_mvx, *p_cost_mvy;
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const int i_pixel = m->i_pixel;
    int chroma_v_shift = CHROMA_V_SHIFT;
    int mvy_offset = chroma_v_shift & MB_INTERLACED & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;

    uint64_t bcost = COST_MAX64;
    int bmx = m->mv[0];
    int bmy = m->mv[1];
    int omx, omy, pmx, pmy;
    int satd, bsatd;
    int dir = -2;
    int i8 = i4>>2;
    uint16_t amvd;
    int j;
    int i;

    pixel *pix = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    pixel *pixu, *pixv;
    if( CHROMA444 )
    {
        pixu = &h->mb.pic.p_fdec[1][block_idx_xy_fdec[i4]];
        pixv = &h->mb.pic.p_fdec[2][block_idx_xy_fdec[i4]];
    }
    else
    {
        pixu = &h->mb.pic.p_fdec[1][(i8>>1)*(8*FDEC_STRIDE>>chroma_v_shift)+(i8&1)*4];
        pixv = &h->mb.pic.p_fdec[2][(i8>>1)*(8*FDEC_STRIDE>>chroma_v_shift)+(i8&1)*4];
    }

    h->mb.b_skip_mc = 1;

    if( m->i_pixel != PIXEL_16x16 && i4 != 0 )
        x264_mb_predict_mv( h, i_list, i4, bw>>2, m->mvp );
    pmx = m->mvp[0];
    pmy = m->mvp[1];
    p_cost_mvx = m->p_cost_mv - pmx;
    p_cost_mvy = m->p_cost_mv - pmy;
    COST_MV_SATD( bmx, bmy, bsatd, 0 );
    if( m->i_pixel != PIXEL_16x16 )
        COST_MV_RD( bmx, bmy, 0, 0, 0 )
    else
        bcost = m->cost;

    /* check the predicted mv */
    if( (bmx != pmx || bmy != pmy)
        && pmx >= h->mb.mv_min_spel[0] && pmx <= h->mb.mv_max_spel[0]
        && pmy >= h->mb.mv_min_spel[1] && pmy <= h->mb.mv_max_spel[1] )
    {
        COST_MV_SATD( pmx, pmy, satd, 0 );
        COST_MV_RD ( pmx, pmy, satd, 0, 0 );
        /* The hex motion search is guaranteed to not repeat the center candidate,
         * so if pmv is chosen, set the "MV to avoid checking" to bmv instead. */
        if( bmx == pmx && bmy == pmy )
        {
            pmx = m->mv[0];
            pmy = m->mv[1];
        }
    }

    if( bmy < h->mb.mv_min_spel[1] + 3 || bmy > h->mb.mv_max_spel[1] - 3 ||
        bmx < h->mb.mv_min_spel[0] + 3 || bmx > h->mb.mv_max_spel[0] - 3 )
    {
        h->mb.b_skip_mc = 0;
        return;
    }

    /* subpel hex search, same pattern as ME HEX. */
    dir = -2;
    omx = bmx;
    omy = bmy;
    for( j = 0; j < 6; j++ )
    {
        COST_MV_SATD( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1 );
        COST_MV_RD ( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1, j );
    }

    if( dir != -2 )
    {
        /* half hexagon, not overlapping the previous iteration */
        for( i = 1; i < 10; i++ )
        {
            const int odir = mod6m1[dir+1];
            if( bmy < h->mb.mv_min_spel[1] + 3 ||
                bmy > h->mb.mv_max_spel[1] - 3 )
                break;
            dir = -2;
            omx = bmx;
            omy = bmy;
            for( j = 0; j < 3; j++ )
            {
                COST_MV_SATD( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1 );
                COST_MV_RD ( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1, odir-1+j );
            }
            if( dir == -2 )
                break;
        }
    }

    /* square refine, same pattern as ME HEX. */
    omx = bmx;
    omy = bmy;
    for( i = 0; i < 8; i++ )
    {
        COST_MV_SATD( omx + square1[i+1][0], omy + square1[i+1][1], satd, 1 );
        COST_MV_RD ( omx + square1[i+1][0], omy + square1[i+1][1], satd, 0, 0 );
    }

    m->cost = bcost;
    m->mv[0] = bmx;
    m->mv[1] = bmy;
    x264_macroblock_cache_mv ( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, pack16to32_mask(bmx, bmy) );
    amvd = pack8to16( X264_MIN(abs(bmx - m->mvp[0]),66), X264_MIN(abs(bmy - m->mvp[1]),66) );
    x264_macroblock_cache_mvd( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, amvd );
    h->mb.b_skip_mc = 0;
}