1 /*****************************************************************************
2  * mc.h: motion compensation
3  *****************************************************************************
4  * Copyright (C) 2004-2021 x264 project
5  *
6  * Authors: Loren Merritt <lorenm@u.washington.edu>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
21  *
22  * This program is also available under a commercial proprietary license.
23  * For more information, contact us at licensing@x264.com.
24  *****************************************************************************/
25 
26 #ifndef X264_MC_H
27 #define X264_MC_H
28 
29 #define MC_CLIP_ADD(s,x) (s) = X264_MIN((s)+(x),(1<<15)-1)
30 #define MC_CLIP_ADD2(s,x)\
31 do\
32 {\
33     MC_CLIP_ADD((s)[0], (x)[0]);\
34     MC_CLIP_ADD((s)[1], (x)[1]);\
35 } while( 0 )
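/* Illustration (literal expansion of the macro above): MC_CLIP_ADD( sum, cost )
 * becomes
 *     sum = X264_MIN( sum + cost, (1<<15)-1 );
 * i.e. costs accumulate with saturation at 0x7FFF, and MC_CLIP_ADD2 applies the
 * same saturating add to two adjacent elements at once. */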
36 
37 #define x264_mbtree_propagate_list_internal_neon x264_template(mbtree_propagate_list_internal_neon)
38 #define PROPAGATE_LIST(cpu)\
39 void x264_mbtree_propagate_list_internal_##cpu( int16_t (*mvs)[2], int16_t *propagate_amount,\
40                                                 uint16_t *lowres_costs, int16_t *output,\
41                                                 int bipred_weight, int mb_y, int len );\
42 \
43 static void mbtree_propagate_list_##cpu( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],\
44                                          int16_t *propagate_amount, uint16_t *lowres_costs,\
45                                          int bipred_weight, int mb_y, int len, int list )\
46 {\
47     int16_t *current = h->scratch_buffer2;\
48 \
49     x264_mbtree_propagate_list_internal_##cpu( mvs, propagate_amount, lowres_costs,\
50                                                current, bipred_weight, mb_y, len );\
51 \
52     unsigned stride = h->mb.i_mb_stride;\
53     unsigned width = h->mb.i_mb_width;\
54     unsigned height = h->mb.i_mb_height;\
55 \
56     for( int i = 0; i < len; current += 32 )\
57     {\
58         int end = X264_MIN( i+8, len );\
59         for( ; i < end; i++, current += 2 )\
60         {\
61             if( !(lowres_costs[i] & (1 << (list+LOWRES_COST_SHIFT))) )\
62                 continue;\
63 \
64             unsigned mbx = current[0];\
65             unsigned mby = current[1];\
66             unsigned idx0 = mbx + mby * stride;\
67             unsigned idx2 = idx0 + stride;\
68 \
69             /* Shortcut for the simple/common case of zero MV */\
70             if( !M32( mvs[i] ) )\
71             {\
72                 MC_CLIP_ADD( ref_costs[idx0], current[16] );\
73                 continue;\
74             }\
75 \
76             if( mbx < width-1 && mby < height-1 )\
77             {\
78                 MC_CLIP_ADD2( ref_costs+idx0, current+16 );\
79                 MC_CLIP_ADD2( ref_costs+idx2, current+32 );\
80             }\
81             else\
82             {\
83                 /* Note: this takes advantage of unsigned representation to\
84                  * catch negative mbx/mby. */\
85                 if( mby < height )\
86                 {\
87                     if( mbx < width )\
88                         MC_CLIP_ADD( ref_costs[idx0+0], current[16] );\
89                     if( mbx+1 < width )\
90                         MC_CLIP_ADD( ref_costs[idx0+1], current[17] );\
91                 }\
92                 if( mby+1 < height )\
93                 {\
94                     if( mbx < width )\
95                         MC_CLIP_ADD( ref_costs[idx2+0], current[32] );\
96                     if( mbx+1 < width )\
97                         MC_CLIP_ADD( ref_costs[idx2+1], current[33] );\
98                 }\
99             }\
100         }\
101     }\
102 }
103 
104 #define x264_plane_copy_c x264_template(plane_copy_c)
105 void x264_plane_copy_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
106 
107 #define PLANE_COPY(align, cpu)\
108 static void plane_copy_##cpu( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )\
109 {\
110     int c_w = (align) / SIZEOF_PIXEL - 1;\
111     if( w < 256 ) /* tiny resolutions don't want non-temporal hints. dunno the exact threshold. */\
112         x264_plane_copy_c( dst, i_dst, src, i_src, w, h );\
113     else if( !(w&c_w) )\
114         x264_plane_copy_core_##cpu( dst, i_dst, src, i_src, w, h );\
115     else\
116     {\
117         if( --h > 0 )\
118         {\
119             if( i_src > 0 )\
120             {\
121                 x264_plane_copy_core_##cpu( dst, i_dst, src, i_src, (w+c_w)&~c_w, h );\
122                 dst += i_dst * h;\
123                 src += i_src * h;\
124             }\
125             else\
126                 x264_plane_copy_core_##cpu( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h );\
127         }\
128         /* use plain memcpy on the last line (in memory order) to avoid overreading src. */\
129         memcpy( dst, src, w*SIZEOF_PIXEL );\
130     }\
131 }
132 
133 #define x264_plane_copy_swap_c x264_template(plane_copy_swap_c)
134 void x264_plane_copy_swap_c( pixel *, intptr_t, pixel *, intptr_t, int w, int h );
135 
136 #define PLANE_COPY_SWAP(align, cpu)\
137 static void plane_copy_swap_##cpu( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h )\
138 {\
139     int c_w = (align>>1) / SIZEOF_PIXEL - 1;\
140     if( !(w&c_w) )\
141         x264_plane_copy_swap_core_##cpu( dst, i_dst, src, i_src, w, h );\
142     else if( w > c_w )\
143     {\
144         if( --h > 0 )\
145         {\
146             if( i_src > 0 )\
147             {\
148                 x264_plane_copy_swap_core_##cpu( dst, i_dst, src, i_src, (w+c_w)&~c_w, h );\
149                 dst += i_dst * h;\
150                 src += i_src * h;\
151             }\
152             else\
153                 x264_plane_copy_swap_core_##cpu( dst+i_dst, i_dst, src+i_src, i_src, (w+c_w)&~c_w, h );\
154         }\
155         x264_plane_copy_swap_core_##cpu( dst, 0, src, 0, w&~c_w, 1 );\
156         for( int x = 2*(w&~c_w); x < 2*w; x += 2 )\
157         {\
158             dst[x]   = src[x+1];\
159             dst[x+1] = src[x];\
160         }\
161     }\
162     else\
163         x264_plane_copy_swap_c( dst, i_dst, src, i_src, w, h );\
164 }
165 
166 #define x264_plane_copy_deinterleave_c x264_template(plane_copy_deinterleave_c)
167 void x264_plane_copy_deinterleave_c( pixel *dsta, intptr_t i_dsta, pixel *dstb, intptr_t i_dstb,
168                                      pixel *src, intptr_t i_src, int w, int h );
169 
/* We can utilize existing plane_copy_deinterleave() functions for YUYV/UYVY
 * input with the additional constraint that we cannot overread src. */
172 #define PLANE_COPY_YUYV(align, cpu)\
173 static void plane_copy_deinterleave_yuyv_##cpu( pixel *dsta, intptr_t i_dsta, pixel *dstb, intptr_t i_dstb,\
174                                                 pixel *src, intptr_t i_src, int w, int h )\
175 {\
176     int c_w = (align>>1) / SIZEOF_PIXEL - 1;\
177     if( !(w&c_w) )\
178         x264_plane_copy_deinterleave_##cpu( dsta, i_dsta, dstb, i_dstb, src, i_src, w, h );\
179     else if( w > c_w )\
180     {\
181         if( --h > 0 )\
182         {\
183             if( i_src > 0 )\
184             {\
185                 x264_plane_copy_deinterleave_##cpu( dsta, i_dsta, dstb, i_dstb, src, i_src, w, h );\
186                 dsta += i_dsta * h;\
187                 dstb += i_dstb * h;\
188                 src  += i_src  * h;\
189             }\
190             else\
191                 x264_plane_copy_deinterleave_##cpu( dsta+i_dsta, i_dsta, dstb+i_dstb, i_dstb,\
192                                                     src+i_src, i_src, w, h );\
193         }\
194         x264_plane_copy_deinterleave_c( dsta, 0, dstb, 0, src, 0, w, 1 );\
195     }\
196     else\
197         x264_plane_copy_deinterleave_c( dsta, i_dsta, dstb, i_dstb, src, i_src, w, h );\
198 }
199 
200 #define x264_plane_copy_interleave_c x264_template(plane_copy_interleave_c)
201 void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
202                                    pixel *srcu, intptr_t i_srcu,
203                                    pixel *srcv, intptr_t i_srcv, int w, int h );
204 
205 #define PLANE_INTERLEAVE(cpu) \
206 static void plane_copy_interleave_##cpu( pixel *dst,  intptr_t i_dst,\
207                                          pixel *srcu, intptr_t i_srcu,\
208                                          pixel *srcv, intptr_t i_srcv, int w, int h )\
209 {\
210     int c_w = 16 / SIZEOF_PIXEL - 1;\
211     if( !(w&c_w) )\
212         x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
213     else if( w > c_w && (i_srcu ^ i_srcv) >= 0 ) /* only works correctly for strides with identical signs */\
214     {\
215         if( --h > 0 )\
216         {\
217             if( i_srcu > 0 )\
218             {\
219                 x264_plane_copy_interleave_core_##cpu( dst, i_dst, srcu, i_srcu, srcv, i_srcv, (w+c_w)&~c_w, h );\
220                 dst  += i_dst  * h;\
221                 srcu += i_srcu * h;\
222                 srcv += i_srcv * h;\
223             }\
224             else\
225                 x264_plane_copy_interleave_core_##cpu( dst+i_dst, i_dst, srcu+i_srcu, i_srcu, srcv+i_srcv, i_srcv, (w+c_w)&~c_w, h );\
226         }\
227         x264_plane_copy_interleave_c( dst, 0, srcu, 0, srcv, 0, w, 1 );\
228     }\
229     else\
230         x264_plane_copy_interleave_c( dst, i_dst, srcu, i_srcu, srcv, i_srcv, w, h );\
231 }
232 
233 struct x264_weight_t;
typedef void (* weight_fn_t)( pixel *, intptr_t, pixel *, intptr_t, const struct x264_weight_t *, int );
235 typedef struct x264_weight_t
236 {
237     /* aligning the first member is a gcc hack to force the struct to be
238      * 16 byte aligned, as well as force sizeof(struct) to be a multiple of 16 */
239     ALIGNED_16( int16_t cachea[8] );
240     int16_t cacheb[8];
241     int32_t i_denom;
242     int32_t i_scale;
243     int32_t i_offset;
244     weight_fn_t *weightfn;
245 } ALIGNED_16( x264_weight_t );
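/* As the comment above notes, forcing 16-byte alignment also rounds
 * sizeof(x264_weight_t) up to a multiple of 16, so arrays of weights stay
 * aligned.  In C11 terms the layout assumption amounts to (illustrative only,
 * not part of the build):
 *     _Static_assert( sizeof(x264_weight_t) % 16 == 0, "x264_weight_t padding" );
 */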
246 
247 #define x264_weight_none ((const x264_weight_t*)x264_zero)
248 
249 #define SET_WEIGHT( w, b, s, d, o )\
250 {\
251     (w).i_scale = (s);\
252     (w).i_denom = (d);\
253     (w).i_offset = (o);\
254     if( b )\
255         h->mc.weight_cache( h, &w );\
256     else\
257         w.weightfn = NULL;\
258 }
259 
/* Do the MC
 * XXX: Only width = 4, 8 or 16 are valid
 * width == 4  -> height == 4 or 8
 * width == 8  -> height == 4 or 8 or 16
 * width == 16 -> height == 8 or 16
 */
266 
267 typedef struct
268 {
269     void (*mc_luma)( pixel *dst, intptr_t i_dst, pixel **src, intptr_t i_src,
270                      int mvx, int mvy, int i_width, int i_height, const x264_weight_t *weight );
271 
272     /* may round up the dimensions if they're not a power of 2 */
273     pixel* (*get_ref)( pixel *dst, intptr_t *i_dst, pixel **src, intptr_t i_src,
274                        int mvx, int mvy, int i_width, int i_height, const x264_weight_t *weight );
275 
276     /* mc_chroma may write up to 2 bytes of garbage to the right of dst,
277      * so it must be run from left to right. */
278     void (*mc_chroma)( pixel *dstu, pixel *dstv, intptr_t i_dst, pixel *src, intptr_t i_src,
279                        int mvx, int mvy, int i_width, int i_height );
280 
281     void (*avg[12])( pixel *dst,  intptr_t dst_stride, pixel *src1, intptr_t src1_stride,
282                      pixel *src2, intptr_t src2_stride, int i_weight );
283 
284     /* only 16x16, 8x8, and 4x4 defined */
285     void (*copy[7])( pixel *dst, intptr_t dst_stride, pixel *src, intptr_t src_stride, int i_height );
286     void (*copy_16x16_unaligned)( pixel *dst, intptr_t dst_stride, pixel *src, intptr_t src_stride, int i_height );
287 
288     void (*store_interleave_chroma)( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height );
289     void (*load_deinterleave_chroma_fenc)( pixel *dst, pixel *src, intptr_t i_src, int height );
290     void (*load_deinterleave_chroma_fdec)( pixel *dst, pixel *src, intptr_t i_src, int height );
291 
292     void (*plane_copy)( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h );
293     void (*plane_copy_swap)( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int w, int h );
294     void (*plane_copy_interleave)( pixel *dst,  intptr_t i_dst, pixel *srcu, intptr_t i_srcu,
295                                    pixel *srcv, intptr_t i_srcv, int w, int h );
296     /* may write up to 15 pixels off the end of each plane */
297     void (*plane_copy_deinterleave)( pixel *dstu, intptr_t i_dstu, pixel *dstv, intptr_t i_dstv,
298                                      pixel *src,  intptr_t i_src, int w, int h );
299     void (*plane_copy_deinterleave_yuyv)( pixel *dsta, intptr_t i_dsta, pixel *dstb, intptr_t i_dstb,
300                                           pixel *src,  intptr_t i_src, int w, int h );
301     void (*plane_copy_deinterleave_rgb)( pixel *dsta, intptr_t i_dsta, pixel *dstb, intptr_t i_dstb,
302                                          pixel *dstc, intptr_t i_dstc, pixel *src,  intptr_t i_src, int pw, int w, int h );
303     void (*plane_copy_deinterleave_v210)( pixel *dsty, intptr_t i_dsty,
304                                           pixel *dstc, intptr_t i_dstc,
305                                           uint32_t *src, intptr_t i_src, int w, int h );
306     void (*hpel_filter)( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
307                          intptr_t i_stride, int i_width, int i_height, int16_t *buf );
308 
309     /* prefetch the next few macroblocks of fenc or fdec */
310     void (*prefetch_fenc)    ( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
311     void (*prefetch_fenc_400)( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
312     void (*prefetch_fenc_420)( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
313     void (*prefetch_fenc_422)( pixel *pix_y, intptr_t stride_y, pixel *pix_uv, intptr_t stride_uv, int mb_x );
314     /* prefetch the next few macroblocks of a hpel reference frame */
315     void (*prefetch_ref)( pixel *pix, intptr_t stride, int parity );
316 
317     void *(*memcpy_aligned)( void *dst, const void *src, size_t n );
318     void (*memzero_aligned)( void *dst, size_t n );
319 
320     /* successive elimination prefilter */
321     void (*integral_init4h)( uint16_t *sum, pixel *pix, intptr_t stride );
322     void (*integral_init8h)( uint16_t *sum, pixel *pix, intptr_t stride );
323     void (*integral_init4v)( uint16_t *sum8, uint16_t *sum4, intptr_t stride );
324     void (*integral_init8v)( uint16_t *sum8, intptr_t stride );
325 
326     void (*frame_init_lowres_core)( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,
327                                     intptr_t src_stride, intptr_t dst_stride, int width, int height );
328     weight_fn_t *weight;
329     weight_fn_t *offsetadd;
330     weight_fn_t *offsetsub;
331     void (*weight_cache)( x264_t *, x264_weight_t * );
332 
333     void (*mbtree_propagate_cost)( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
334                                    uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len );
335     void (*mbtree_propagate_list)( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],
336                                    int16_t *propagate_amount, uint16_t *lowres_costs,
337                                    int bipred_weight, int mb_y, int len, int list );
338     void (*mbtree_fix8_pack)( uint16_t *dst, float *src, int count );
339     void (*mbtree_fix8_unpack)( float *dst, uint16_t *src, int count );
340 } x264_mc_functions_t;
341 
342 #define x264_mc_init x264_template(mc_init)
343 void x264_mc_init( uint32_t cpu, x264_mc_functions_t *pf, int cpu_independent );
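/* x264_mc_init() fills *pf with the C implementations and then, according to
 * the cpu flag bits, overrides entries with platform-specific assembly.  When
 * cpu_independent is nonzero, helpers whose asm output may differ slightly
 * from C (e.g. the mbtree propagation functions) presumably stay on the C path
 * so that encoder decisions do not depend on the host CPU. */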
344 
345 #endif
346