/*****************************************************************************
 * mc.c: motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2021 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#include "x86/mc.h"
#endif
#if HAVE_ALTIVEC
#include "ppc/mc.h"
#endif
#if HAVE_ARMV6
#include "arm/mc.h"
#endif
#if HAVE_AARCH64
#include "aarch64/mc.h"
#endif
#if HAVE_MSA
#include "mips/mc.h"
#endif


static inline void pixel_avg( pixel *dst,  intptr_t i_dst_stride,
                              pixel *src1, intptr_t i_src1_stride,
                              pixel *src2, intptr_t i_src2_stride, int i_width, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < i_width; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst_stride;
        src1 += i_src1_stride;
        src2 += i_src2_stride;
    }
}

static inline void pixel_avg_wxh( pixel *dst,  intptr_t i_dst,
                                  pixel *src1, intptr_t i_src1,
                                  pixel *src2, intptr_t i_src2, int width, int height )
{
    for( int y = 0; y < height; y++ )
    {
        for( int x = 0; x < width; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        src1 += i_src1;
        src2 += i_src2;
        dst  += i_dst;
    }
}

/* Implicit weighted bipred only:
 * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */
static inline void pixel_avg_weight_wxh( pixel *dst,  intptr_t i_dst,
                                         pixel *src1, intptr_t i_src1,
                                         pixel *src2, intptr_t i_src2, int width, int height, int i_weight1 )
{
    int i_weight2 = 64 - i_weight1;
    for( int y = 0; y<height; y++, dst += i_dst, src1 += i_src1, src2 += i_src2 )
        for( int x = 0; x<width; x++ )
            dst[x] = x264_clip_pixel( (src1[x]*i_weight1 + src2[x]*i_weight2 + (1<<5)) >> 6 );
}
#undef op_scale2

#define PIXEL_AVG_C( name, width, height ) \
static void name( pixel *pix1, intptr_t i_stride_pix1, \
                  pixel *pix2, intptr_t i_stride_pix2, \
                  pixel *pix3, intptr_t i_stride_pix3, int weight ) \
{ \
    if( weight == 32 ) \
        pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height ); \
    else \
        pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height, weight ); \
}
PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
PIXEL_AVG_C( pixel_avg_4x16,  4, 16 )
PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
PIXEL_AVG_C( pixel_avg_2x8,   2, 8 )
PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )

static void weight_cache( x264_t *h, x264_weight_t *w )
{
    w->weightfn = h->mc.weight;
}
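/* Explicit weighted prediction: out = clip( ((in * scale + rounding) >> denom) + offset ),
 * with the denom == 0 case handled separately since the rounding term (1 << (denom-1))
 * would be invalid there. */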
#define opscale(x) dst[x] = x264_clip_pixel( ((src[x] * scale + (1<<(denom - 1))) >> denom) + offset )
#define opscale_noden(x) dst[x] = x264_clip_pixel( src[x] * scale + offset )
static void mc_weight( pixel *dst, intptr_t i_dst_stride, pixel *src, intptr_t i_src_stride,
                       const x264_weight_t *weight, int i_width, int i_height )
{
    int offset = weight->i_offset << (BIT_DEPTH-8);
    int scale = weight->i_scale;
    int denom = weight->i_denom;
    if( denom >= 1 )
    {
        for( int y = 0; y < i_height; y++, dst += i_dst_stride, src += i_src_stride )
            for( int x = 0; x < i_width; x++ )
                opscale( x );
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst_stride, src += i_src_stride )
            for( int x = 0; x < i_width; x++ )
                opscale_noden( x );
    }
}

#define MC_WEIGHT_C( name, width ) \
static void name( pixel *dst, intptr_t i_dst_stride, pixel *src, intptr_t i_src_stride, const x264_weight_t *weight, int height ) \
{ \
    mc_weight( dst, i_dst_stride, src, i_src_stride, weight, width, height );\
}

MC_WEIGHT_C( mc_weight_w20, 20 )
MC_WEIGHT_C( mc_weight_w16, 16 )
MC_WEIGHT_C( mc_weight_w12, 12 )
MC_WEIGHT_C( mc_weight_w8,   8 )
MC_WEIGHT_C( mc_weight_w4,   4 )
MC_WEIGHT_C( mc_weight_w2,   2 )

static weight_fn_t mc_weight_wtab[6] =
{
    mc_weight_w2,
    mc_weight_w4,
    mc_weight_w8,
    mc_weight_w12,
    mc_weight_w16,
    mc_weight_w20,
};

static void mc_copy( pixel *src, intptr_t i_src_stride, pixel *dst, intptr_t i_dst_stride, int i_width, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        memcpy( dst, src, i_width * SIZEOF_PIXEL );

        src += i_src_stride;
        dst += i_dst_stride;
    }
}
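
/* Half-pel interpolation with the H.264 6-tap filter (1,-5,20,20,-5,1)/32.
 * TAPFILTER evaluates the unnormalized filter at position x with step d, so the same
 * macro serves the horizontal (d == 1) and vertical (d == stride) passes.  The centre
 * (diagonal) plane is produced by filtering the unnormalized vertical result again
 * horizontally; that intermediate is kept in a 16-bit buffer, biased by `pad` at high
 * bit depths so it fits in an int16_t, with the bias removed again via the -32*pad term. */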
#define TAPFILTER(pix, d) ((pix)[x-2*d] + (pix)[x+3*d] - 5*((pix)[x-d] + (pix)[x+2*d]) + 20*((pix)[x] + (pix)[x+d]))
static void hpel_filter( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
                         intptr_t stride, int width, int height, int16_t *buf )
{
    const int pad = (BIT_DEPTH > 9) ? (-10 * PIXEL_MAX) : 0;
    for( int y = 0; y < height; y++ )
    {
        for( int x = -2; x < width+3; x++ )
        {
            int v = TAPFILTER(src,stride);
            dstv[x] = x264_clip_pixel( (v + 16) >> 5 );
            /* transform v for storage in a 16-bit integer */
            buf[x+2] = v + pad;
        }
        for( int x = 0; x < width; x++ )
            dstc[x] = x264_clip_pixel( (TAPFILTER(buf+2,1) - 32*pad + 512) >> 10 );
        for( int x = 0; x < width; x++ )
            dsth[x] = x264_clip_pixel( (TAPFILTER(src,1) + 16) >> 5 );
        dsth += stride;
        dstv += stride;
        dstc += stride;
        src  += stride;
    }
}
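
/* src[] holds the four precomputed planes (full-pel plus h/v/centre half-pel).  The low
 * two bits of each MV component give the quarter-pel phase; qpel_idx packs them into a
 * 4-bit index used with the x264_hpel_ref0/ref1 tables to pick which plane(s) to read.
 * If either component has an odd phase (qpel_idx & 5), the quarter-pel sample is the
 * average of two neighbouring half-pel planes; otherwise a single plane is copied
 * (or weighted) directly. */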
static void mc_luma( pixel *dst,    intptr_t i_dst_stride,
                     pixel *src[4], intptr_t i_src_stride,
                     int mvx, int mvy,
                     int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        pixel_avg( dst, i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
        if( weight->weightfn )
            mc_weight( dst, i_dst_stride, dst, i_dst_stride, weight, i_width, i_height );
    }
    else if( weight->weightfn )
        mc_weight( dst, i_dst_stride, src1, i_src_stride, weight, i_width, i_height );
    else
        mc_copy( src1, i_src_stride, dst, i_dst_stride, i_width, i_height );
}
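
/* Same interpolation as mc_luma, but when no averaging or weighting is needed the
 * prediction can be read in place: get_ref then returns a pointer into src[]
 * (updating *i_dst_stride to the reference stride) instead of copying into dst. */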
static pixel *get_ref( pixel *dst,    intptr_t *i_dst_stride,
                       pixel *src[4], intptr_t i_src_stride,
                       int mvx, int mvy,
                       int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        pixel_avg( dst, *i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
        if( weight->weightfn )
            mc_weight( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_width, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        mc_weight( dst, *i_dst_stride, src1, i_src_stride, weight, i_width, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

/* full chroma mc (ie until 1/8 pixel)*/
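/* Bilinear interpolation of the interleaved (NV12-style) chroma plane: the 1/8-pel MV
 * phase (d8x, d8y) gives the four corner weights cA..cD, which sum to 64, so the result
 * is renormalized with (+32) >> 6.  U and V samples alternate in src, hence the 2*x
 * indexing. */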
static void mc_chroma( pixel *dstu, pixel *dstv, intptr_t i_dst_stride,
                       pixel *src, intptr_t i_src_stride,
                       int mvx, int mvy,
                       int i_width, int i_height )
{
    pixel *srcp;

    int d8x = mvx&0x07;
    int d8y = mvy&0x07;
    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < i_width; x++ )
        {
            dstu[x] = ( cA*src[2*x]  + cB*src[2*x+2] +
                        cC*srcp[2*x] + cD*srcp[2*x+2] + 32 ) >> 6;
            dstv[x] = ( cA*src[2*x+1]  + cB*src[2*x+3] +
                        cC*srcp[2*x+1] + cD*srcp[2*x+3] + 32 ) >> 6;
        }
        dstu += i_dst_stride;
        dstv += i_dst_stride;
        src   = srcp;
        srcp += i_src_stride;
    }
}

#define MC_COPY(W) \
static void mc_copy_w##W( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int i_height ) \
{ \
    mc_copy( src, i_src, dst, i_dst, W, i_height ); \
}
MC_COPY( 16 )
MC_COPY( 8 )
MC_COPY( 4 )

void x264_plane_copy_c( pixel *dst, intptr_t i_dst,
                        pixel *src, intptr_t i_src, int w, int h )
{
    while( h-- )
    {
        memcpy( dst, src, w * SIZEOF_PIXEL );
        dst += i_dst;
        src += i_src;
    }
}

void x264_plane_copy_swap_c( pixel *dst, intptr_t i_dst,
                             pixel *src, intptr_t i_src, int w, int h )
{
    for( int y=0; y<h; y++, dst+=i_dst, src+=i_src )
        for( int x=0; x<2*w; x+=2 )
        {
            dst[x]   = src[x+1];
            dst[x+1] = src[x];
        }
}

void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
                                   pixel *srcu, intptr_t i_srcu,
                                   pixel *srcv, intptr_t i_srcv, int w, int h )
{
    for( int y=0; y<h; y++, dst+=i_dst, srcu+=i_srcu, srcv+=i_srcv )
        for( int x=0; x<w; x++ )
        {
            dst[2*x]   = srcu[x];
            dst[2*x+1] = srcv[x];
        }
}

void x264_plane_copy_deinterleave_c( pixel *dsta, intptr_t i_dsta, pixel *dstb, intptr_t i_dstb,
                                     pixel *src, intptr_t i_src, int w, int h )
{
    for( int y=0; y<h; y++, dsta+=i_dsta, dstb+=i_dstb, src+=i_src )
        for( int x=0; x<w; x++ )
        {
            dsta[x] = src[2*x];
            dstb[x] = src[2*x+1];
        }
}

static void plane_copy_deinterleave_rgb_c( pixel *dsta, intptr_t i_dsta,
                                           pixel *dstb, intptr_t i_dstb,
                                           pixel *dstc, intptr_t i_dstc,
                                           pixel *src, intptr_t i_src, int pw, int w, int h )
{
    for( int y=0; y<h; y++, dsta+=i_dsta, dstb+=i_dstb, dstc+=i_dstc, src+=i_src )
    {
        for( int x=0; x<w; x++ )
        {
            dsta[x] = src[x*pw];
            dstb[x] = src[x*pw+1];
            dstc[x] = src[x*pw+2];
        }
    }
}

#if WORDS_BIGENDIAN
static ALWAYS_INLINE uint32_t v210_endian_fix32( uint32_t x )
{
    return (x<<24) + ((x<<8)&0xff0000) + ((x>>8)&0xff00) + (x>>24);
}
#else
#define v210_endian_fix32(x) (x)
#endif
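
/* v210 stores three 10-bit components in the low 30 bits of each little-endian 32-bit
 * word, alternating chroma and luma (Cb Y Cr | Y Cb Y | ...), so each pair of words
 * deinterleaves into three luma samples and three interleaved Cb/Cr chroma samples. */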
static void plane_copy_deinterleave_v210_c( pixel *dsty, intptr_t i_dsty,
                                            pixel *dstc, intptr_t i_dstc,
                                            uint32_t *src, intptr_t i_src, int w, int h )
{
    for( int l = 0; l < h; l++ )
    {
        pixel *dsty0 = dsty;
        pixel *dstc0 = dstc;
        uint32_t *src0 = src;

        for( int n = 0; n < w; n += 3 )
        {
            uint32_t s = v210_endian_fix32( *src0++ );
            *dstc0++ = s & 0x03FF;
            *dsty0++ = (s >> 10) & 0x03FF;
            *dstc0++ = (s >> 20) & 0x03FF;
            s = v210_endian_fix32( *src0++ );
            *dsty0++ = s & 0x03FF;
            *dstc0++ = (s >> 10) & 0x03FF;
            *dsty0++ = (s >> 20) & 0x03FF;
        }

        dsty += i_dsty;
        dstc += i_dstc;
        src  += i_src;
    }
}

static void store_interleave_chroma( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height )
{
    for( int y=0; y<height; y++, dst+=i_dst, srcu+=FDEC_STRIDE, srcv+=FDEC_STRIDE )
        for( int x=0; x<8; x++ )
        {
            dst[2*x]   = srcu[x];
            dst[2*x+1] = srcv[x];
        }
}

static void load_deinterleave_chroma_fenc( pixel *dst, pixel *src, intptr_t i_src, int height )
{
    x264_plane_copy_deinterleave_c( dst, FENC_STRIDE, dst+FENC_STRIDE/2, FENC_STRIDE, src, i_src, 8, height );
}

static void load_deinterleave_chroma_fdec( pixel *dst, pixel *src, intptr_t i_src, int height )
{
    x264_plane_copy_deinterleave_c( dst, FDEC_STRIDE, dst+FDEC_STRIDE/2, FDEC_STRIDE, src, i_src, 8, height );
}

static void prefetch_fenc_null( pixel *pix_y,  intptr_t stride_y,
                                pixel *pix_uv, intptr_t stride_uv, int mb_x )
{}

static void prefetch_ref_null( pixel *pix, intptr_t stride, int parity )
{}

static void memzero_aligned( void * dst, size_t n )
{
    memset( dst, 0, n );
}
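
/* Incremental integral image, built one row per call: *_init4h/8h add a sliding 4- or
 * 8-wide horizontal sum of the current pixel row to the running column totals in the
 * row above (sum[x-stride]), and *_init4v/8v then difference rows 4 or 8 lines apart
 * to turn those column totals into complete 4x4 / 8x8 block sums. */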
static void integral_init4h( uint16_t *sum, pixel *pix, intptr_t stride )
{
    int v = pix[0]+pix[1]+pix[2]+pix[3];
    for( int x = 0; x < stride-4; x++ )
    {
        sum[x] = v + sum[x-stride];
        v += pix[x+4] - pix[x];
    }
}

static void integral_init8h( uint16_t *sum, pixel *pix, intptr_t stride )
{
    int v = pix[0]+pix[1]+pix[2]+pix[3]+pix[4]+pix[5]+pix[6]+pix[7];
    for( int x = 0; x < stride-8; x++ )
    {
        sum[x] = v + sum[x-stride];
        v += pix[x+8] - pix[x];
    }
}

static void integral_init4v( uint16_t *sum8, uint16_t *sum4, intptr_t stride )
{
    for( int x = 0; x < stride-8; x++ )
        sum4[x] = sum8[x+4*stride] - sum8[x];
    for( int x = 0; x < stride-8; x++ )
        sum8[x] = sum8[x+8*stride] + sum8[x+8*stride+4] - sum8[x] - sum8[x+4];
}

static void integral_init8v( uint16_t *sum8, intptr_t stride )
{
    for( int x = 0; x < stride-8; x++ )
        sum8[x] = sum8[x+8*stride] - sum8[x];
}
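
/* Build the four half-resolution planes used by the lookahead: one for each half-pel
 * phase of a 2x2 downsample, so lowres motion search can address half-pel positions
 * simply by switching planes. */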
void x264_frame_init_lowres( x264_t *h, x264_frame_t *frame )
{
    pixel *src = frame->plane[0];
    int i_stride = frame->i_stride[0];
    int i_height = frame->i_lines[0];
    int i_width  = frame->i_width[0];

    // duplicate last row and column so that their interpolation doesn't have to be special-cased
    for( int y = 0; y < i_height; y++ )
        src[i_width+y*i_stride] = src[i_width-1+y*i_stride];
    memcpy( src+i_stride*i_height, src+i_stride*(i_height-1), (i_width+1) * SIZEOF_PIXEL );
    h->mc.frame_init_lowres_core( src, frame->lowres[0], frame->lowres[1], frame->lowres[2], frame->lowres[3],
                                  i_stride, frame->i_stride_lowres, frame->i_width_lowres, frame->i_lines_lowres );
    x264_frame_expand_border_lowres( frame );

    memset( frame->i_cost_est, -1, sizeof(frame->i_cost_est) );

    for( int y = 0; y < h->param.i_bframe + 2; y++ )
        for( int x = 0; x < h->param.i_bframe + 2; x++ )
            frame->i_row_satds[y][x][0] = -1;

    for( int y = 0; y <= !!h->param.i_bframe; y++ )
        for( int x = 0; x <= h->param.i_bframe; x++ )
            frame->lowres_mvs[y][x][0][0] = 0x7FFF;
}

static void frame_init_lowres_core( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,
                                    intptr_t src_stride, intptr_t dst_stride, int width, int height )
{
    for( int y = 0; y < height; y++ )
    {
        pixel *src1 = src0+src_stride;
        pixel *src2 = src1+src_stride;
        for( int x = 0; x<width; x++ )
        {
            // slower than naive bilinear, but matches asm
#define FILTER(a,b,c,d) ((((a+b+1)>>1)+((c+d+1)>>1)+1)>>1)
            dst0[x] = FILTER(src0[2*x  ], src1[2*x  ], src0[2*x+1], src1[2*x+1]);
            dsth[x] = FILTER(src0[2*x+1], src1[2*x+1], src0[2*x+2], src1[2*x+2]);
            dstv[x] = FILTER(src1[2*x  ], src2[2*x  ], src1[2*x+1], src2[2*x+1]);
            dstc[x] = FILTER(src1[2*x+1], src2[2*x+1], src1[2*x+2], src2[2*x+2]);
#undef FILTER
        }
        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}

/* Estimate the total amount of influence on future quality that could be had if we
 * were to improve the reference samples used to inter predict any given macroblock. */
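/* propagate_amount = cost already propagated to this MB plus its intra cost weighted by
 * inverse qscale and the fps factor; the fraction passed on to its references is
 * (intra_cost - inter_cost) / intra_cost, i.e. the share of the block's information
 * that is predicted from elsewhere rather than coded fresh. */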
static void mbtree_propagate_cost( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                   uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len )
{
    float fps = *fps_factor;
    for( int i = 0; i < len; i++ )
    {
        int intra_cost = intra_costs[i];
        int inter_cost = X264_MIN(intra_costs[i], inter_costs[i] & LOWRES_COST_MASK);
        float propagate_intra  = intra_cost * inv_qscales[i];
        float propagate_amount = propagate_in[i] + propagate_intra*fps;
        float propagate_num    = intra_cost - inter_cost;
        float propagate_denom  = intra_cost;
        dst[i] = X264_MIN((int)(propagate_amount * propagate_num / propagate_denom + 0.5f), 32767);
    }
}
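
/* Scatter each macroblock's propagate amount back onto the reference frame.  The lowres
 * MVs have 32 units per macroblock, so x>>5 / y>>5 give the whole-MB offset and the low
 * 5 bits the fractional position, which is split bilinearly (weights sum to 32*32, hence
 * the +512 >> 10 renormalization) over the up-to-four MBs the prediction overlaps. */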
static void mbtree_propagate_list( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],
                                   int16_t *propagate_amount, uint16_t *lowres_costs,
                                   int bipred_weight, int mb_y, int len, int list )
{
    unsigned stride = h->mb.i_mb_stride;
    unsigned width = h->mb.i_mb_width;
    unsigned height = h->mb.i_mb_height;

    for( int i = 0; i < len; i++ )
    {
        int lists_used = lowres_costs[i]>>LOWRES_COST_SHIFT;

        if( !(lists_used & (1 << list)) )
            continue;

        int listamount = propagate_amount[i];
        /* Apply bipred weighting. */
        if( lists_used == 3 )
            listamount = (listamount * bipred_weight + 32) >> 6;

        /* Early termination for simple case of mv0. */
        if( !M32( mvs[i] ) )
        {
            MC_CLIP_ADD( ref_costs[mb_y*stride + i], listamount );
            continue;
        }

        int x = mvs[i][0];
        int y = mvs[i][1];
        unsigned mbx = (x>>5)+i;
        unsigned mby = (y>>5)+mb_y;
        unsigned idx0 = mbx + mby * stride;
        unsigned idx2 = idx0 + stride;
        x &= 31;
        y &= 31;
        int idx0weight = (32-y)*(32-x);
        int idx1weight = (32-y)*x;
        int idx2weight = y*(32-x);
        int idx3weight = y*x;
        idx0weight = (idx0weight * listamount + 512) >> 10;
        idx1weight = (idx1weight * listamount + 512) >> 10;
        idx2weight = (idx2weight * listamount + 512) >> 10;
        idx3weight = (idx3weight * listamount + 512) >> 10;

        if( mbx < width-1 && mby < height-1 )
        {
            MC_CLIP_ADD( ref_costs[idx0+0], idx0weight );
            MC_CLIP_ADD( ref_costs[idx0+1], idx1weight );
            MC_CLIP_ADD( ref_costs[idx2+0], idx2weight );
            MC_CLIP_ADD( ref_costs[idx2+1], idx3weight );
        }
        else
        {
            /* Note: this takes advantage of unsigned representation to
             * catch negative mbx/mby. */
            if( mby < height )
            {
                if( mbx < width )
                    MC_CLIP_ADD( ref_costs[idx0+0], idx0weight );
                if( mbx+1 < width )
                    MC_CLIP_ADD( ref_costs[idx0+1], idx1weight );
            }
            if( mby+1 < height )
            {
                if( mbx < width )
                    MC_CLIP_ADD( ref_costs[idx2+0], idx2weight );
                if( mbx+1 < width )
                    MC_CLIP_ADD( ref_costs[idx2+1], idx3weight );
            }
        }
    }
}

/* Conversion between float and Q8.8 fixed point (big-endian) for storage */
static void mbtree_fix8_pack( uint16_t *dst, float *src, int count )
{
    for( int i = 0; i < count; i++ )
        dst[i] = endian_fix16( (int16_t)(src[i] * 256.0f) );
}

static void mbtree_fix8_unpack( float *dst, uint16_t *src, int count )
{
    for( int i = 0; i < count; i++ )
        dst[i] = (int16_t)endian_fix16( src[i] ) * (1.0f/256.0f);
}

void x264_mc_init( uint32_t cpu, x264_mc_functions_t *pf, int cpu_independent )
{
    pf->mc_luma   = mc_luma;
    pf->get_ref   = get_ref;

    pf->mc_chroma = mc_chroma;

    pf->avg[PIXEL_16x16]= pixel_avg_16x16;
    pf->avg[PIXEL_16x8] = pixel_avg_16x8;
    pf->avg[PIXEL_8x16] = pixel_avg_8x16;
    pf->avg[PIXEL_8x8]  = pixel_avg_8x8;
    pf->avg[PIXEL_8x4]  = pixel_avg_8x4;
    pf->avg[PIXEL_4x16] = pixel_avg_4x16;
    pf->avg[PIXEL_4x8]  = pixel_avg_4x8;
    pf->avg[PIXEL_4x4]  = pixel_avg_4x4;
    pf->avg[PIXEL_4x2]  = pixel_avg_4x2;
    pf->avg[PIXEL_2x8]  = pixel_avg_2x8;
    pf->avg[PIXEL_2x4]  = pixel_avg_2x4;
    pf->avg[PIXEL_2x2]  = pixel_avg_2x2;

    pf->weight    = mc_weight_wtab;
    pf->offsetadd = mc_weight_wtab;
    pf->offsetsub = mc_weight_wtab;
    pf->weight_cache = weight_cache;

    pf->copy_16x16_unaligned = mc_copy_w16;
    pf->copy[PIXEL_16x16] = mc_copy_w16;
    pf->copy[PIXEL_8x8]   = mc_copy_w8;
    pf->copy[PIXEL_4x4]   = mc_copy_w4;

    pf->store_interleave_chroma       = store_interleave_chroma;
    pf->load_deinterleave_chroma_fenc = load_deinterleave_chroma_fenc;
    pf->load_deinterleave_chroma_fdec = load_deinterleave_chroma_fdec;

    pf->plane_copy = x264_plane_copy_c;
    pf->plane_copy_swap = x264_plane_copy_swap_c;
    pf->plane_copy_interleave = x264_plane_copy_interleave_c;

    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_c;
    pf->plane_copy_deinterleave_yuyv = x264_plane_copy_deinterleave_c;
    pf->plane_copy_deinterleave_rgb = plane_copy_deinterleave_rgb_c;
    pf->plane_copy_deinterleave_v210 = plane_copy_deinterleave_v210_c;

    pf->hpel_filter = hpel_filter;

    pf->prefetch_fenc_400 = prefetch_fenc_null;
    pf->prefetch_fenc_420 = prefetch_fenc_null;
    pf->prefetch_fenc_422 = prefetch_fenc_null;
    pf->prefetch_ref  = prefetch_ref_null;
    pf->memcpy_aligned = memcpy;
    pf->memzero_aligned = memzero_aligned;
    pf->frame_init_lowres_core = frame_init_lowres_core;

    pf->integral_init4h = integral_init4h;
    pf->integral_init8h = integral_init8h;
    pf->integral_init4v = integral_init4v;
    pf->integral_init8v = integral_init8v;

    pf->mbtree_propagate_cost = mbtree_propagate_cost;
    pf->mbtree_propagate_list = mbtree_propagate_list;
    pf->mbtree_fix8_pack      = mbtree_fix8_pack;
    pf->mbtree_fix8_unpack    = mbtree_fix8_unpack;

#if HAVE_MMX
    x264_mc_init_mmx( cpu, pf );
#endif
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_mc_init_altivec( pf );
#endif
#if HAVE_ARMV6
    x264_mc_init_arm( cpu, pf );
#endif
#if HAVE_AARCH64
    x264_mc_init_aarch64( cpu, pf );
#endif
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
        x264_mc_init_mips( cpu, pf );
#endif
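
    /* mbtree uses float math, so SIMD rounding can differ between CPUs; when
     * cpu-independent output is requested, keep the C versions so results do not
     * depend on the detected instruction set. */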
    if( cpu_independent )
    {
        pf->mbtree_propagate_cost = mbtree_propagate_cost;
        pf->mbtree_propagate_list = mbtree_propagate_list;
    }
}
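
/* Called once per finished macroblock row: runs the half-pel filter over the newly
 * reconstructed rows (field-based planes as well when interlaced) and extends the
 * integral image used by exhaustive motion search (esa/tesa).  The 8-pixel margins
 * account for pixels still subject to deblocking plus the 6-tap filter support. */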
void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
{
    const int b_interlaced = PARAM_INTERLACED;
    int start = mb_y*16 - 8; // buffer = 4 for deblock + 3 for 6tap, rounded to 8
    int height = (b_end ? frame->i_lines[0] + 16*PARAM_INTERLACED : (mb_y+b_interlaced)*16) + 8;

    if( mb_y & b_interlaced )
        return;

    for( int p = 0; p < (CHROMA444 ? 3 : 1); p++ )
    {
        int stride = frame->i_stride[p];
        const int width = frame->i_width[p];
        int offs = start*stride - 8; // buffer = 3 for 6tap, aligned to 8 for simd

        if( !b_interlaced || h->mb.b_adaptive_mbaff )
            h->mc.hpel_filter(
                frame->filtered[p][1] + offs,
                frame->filtered[p][2] + offs,
                frame->filtered[p][3] + offs,
                frame->plane[p] + offs,
                stride, width + 16, height - start,
                h->scratch_buffer );

        if( b_interlaced )
        {
            /* MC must happen between pixels in the same field. */
            stride = frame->i_stride[p] << 1;
            start = (mb_y*16 >> 1) - 8;
            int height_fld = ((b_end ? frame->i_lines[p] : mb_y*16) >> 1) + 8;
            offs = start*stride - 8;
            for( int i = 0; i < 2; i++, offs += frame->i_stride[p] )
            {
                h->mc.hpel_filter(
                    frame->filtered_fld[p][1] + offs,
                    frame->filtered_fld[p][2] + offs,
                    frame->filtered_fld[p][3] + offs,
                    frame->plane_fld[p] + offs,
                    stride, width + 16, height_fld - start,
                    h->scratch_buffer );
            }
        }
    }

    /* generate integral image:
     * frame->integral contains 2 planes. in the upper plane, each element is
     * the sum of an 8x8 pixel region with top-left corner on that point.
     * in the lower plane, 4x4 sums (needed only with --partitions p4x4). */

    if( frame->integral )
    {
        int stride = frame->i_stride[0];
        if( start < 0 )
        {
            memset( frame->integral - PADV * stride - PADH_ALIGN, 0, stride * sizeof(uint16_t) );
            start = -PADV;
        }
        if( b_end )
            height += PADV-9;
        for( int y = start; y < height; y++ )
        {
            pixel    *pix  = frame->plane[0] + y * stride - PADH_ALIGN;
            uint16_t *sum8 = frame->integral + (y+1) * stride - PADH_ALIGN;
            uint16_t *sum4;
            if( h->frames.b_have_sub8x8_esa )
            {
                h->mc.integral_init4h( sum8, pix, stride );
                sum8 -= 8*stride;
                sum4 = sum8 + stride * (frame->i_lines[0] + PADV*2);
                if( y >= 8-PADV )
                    h->mc.integral_init4v( sum8, sum4, stride );
            }
            else
            {
                h->mc.integral_init8h( sum8, pix, stride );
                if( y >= 8-PADV )
                    h->mc.integral_init8v( sum8-8*stride, stride );
            }
        }
    }
}