1 /*****************************************************************************
2 * checkasm.c: assembly check tool
3 *****************************************************************************
4 * Copyright (C) 2003-2021 x264 project
5 *
6 * Authors: Loren Merritt <lorenm@u.washington.edu>
7 * Laurent Aimar <fenrir@via.ecp.fr>
8 * Fiona Glaser <fiona@x264.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
23 *
24 * This program is also available under a commercial proprietary license.
25 * For more information, contact us at licensing@x264.com.
26 *****************************************************************************/
27
28 #include <ctype.h>
29 #include "common/common.h"
30 #include "encoder/macroblock.h"
31
32 #ifdef _WIN32
33 #include <windows.h>
34 #endif
35
36 // GCC doesn't align stack variables on ARM, so use .bss
37 #if ARCH_ARM
38 #undef ALIGNED_16
39 #define ALIGNED_16( var ) DECLARE_ALIGNED( static var, 16 )
40 #endif
41
42 /* buf1, buf2: initialised to random data; the tested functions shouldn't write into them */
43 static uint8_t *buf1, *buf2;
44 /* buf3, buf4: used to store output */
45 static uint8_t *buf3, *buf4;
46 /* pbuf1, pbuf2: initialised to random pixel data; the tested functions shouldn't write into them. */
47 static pixel *pbuf1, *pbuf2;
48 /* pbuf3, pbuf4: point to buf3, buf4, just for type convenience */
49 static pixel *pbuf3, *pbuf4;
50
51 static int quiet = 0;
52
53 #define report( name ) { \
54 if( used_asm && !quiet ) \
55 fprintf( stderr, " - %-21s [%s]\n", name, ok ? "OK" : "FAILED" ); \
56 if( !ok ) ret = -1; \
57 }
58
59 #define BENCH_RUNS 2000 // tradeoff between accuracy and speed
60 #define MAX_FUNCS 1000 // just has to be big enough to hold all the existing functions
61 #define MAX_CPUS 30 // number of different combinations of cpu flags
62
63 // RAND_MAX is guaranteed to be at least 32767; to get 30 bits of random data we call rand() twice
64 #define rand30() (((rand() & 0x7fff) << 15) + (rand() & 0x7fff))
65
66 typedef struct
67 {
68 void *pointer; // just for detecting duplicates
69 uint32_t cpu;
70 uint64_t cycles;
71 uint32_t den;
72 } bench_t;
73
74 typedef struct
75 {
76 char *name;
77 bench_t vers[MAX_CPUS];
78 } bench_func_t;
79
80 static int do_bench = 0;
81 static int bench_pattern_len = 0;
82 static const char *bench_pattern = "";
83 static char func_name[100];
84 static bench_func_t benchs[MAX_FUNCS];
85
86 static const char *pixel_names[12] = { "16x16", "16x8", "8x16", "8x8", "8x4", "4x8", "4x4", "4x16", "4x2", "2x8", "2x4", "2x2" };
87 static const char *intra_predict_16x16_names[7] = { "v", "h", "dc", "p", "dcl", "dct", "dc8" };
88 static const char *intra_predict_8x8c_names[7] = { "dc", "h", "v", "p", "dcl", "dct", "dc8" };
89 static const char *intra_predict_4x4_names[12] = { "v", "h", "dc", "ddl", "ddr", "vr", "hd", "vl", "hu", "dcl", "dct", "dc8" };
90 static const char **intra_predict_8x8_names = intra_predict_4x4_names;
91 static const char **intra_predict_8x16c_names = intra_predict_8x8c_names;
92
93 #define set_func_name(...) snprintf( func_name, sizeof(func_name), __VA_ARGS__ )
94
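// Read a per-architecture cycle/timestamp counter: TSC on x86, the timebase register
// on PPC, the PMU cycle counter on ARMv7/AArch64 and the hardware cycle counter on
// MIPS. Only the low 32 bits are kept, which is enough for timing short benchmark runs.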
95 static inline uint32_t read_time(void)
96 {
97 uint32_t a = 0;
98 #if HAVE_X86_INLINE_ASM
99 asm volatile( "lfence \n"
100 "rdtsc \n"
101 : "=a"(a) :: "edx", "memory" );
102 #elif ARCH_PPC
103 asm volatile( "mftb %0" : "=r"(a) :: "memory" );
104 #elif HAVE_ARM_INLINE_ASM // ARMv7 only
105 asm volatile( "mrc p15, 0, %0, c9, c13, 0" : "=r"(a) :: "memory" );
106 #elif ARCH_AARCH64
107 uint64_t b = 0;
108 asm volatile( "mrs %0, pmccntr_el0" : "=r"(b) :: "memory" );
109 a = b;
110 #elif ARCH_MIPS
111 asm volatile( "rdhwr %0, $2" : "=r"(a) :: "memory" );
112 #endif
113 return a;
114 }
115
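/* Look up (or create) the bench_t slot for a given function name and cpu flag
 * combination, so results for different cpu versions of the same function can be
 * printed side by side by print_bench(). */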
116 static bench_t* get_bench( const char *name, uint32_t cpu )
117 {
118 int i, j;
119 for( i = 0; benchs[i].name && strcmp(name, benchs[i].name); i++ )
120 assert( i < MAX_FUNCS );
121 if( !benchs[i].name )
122 benchs[i].name = strdup( name );
123 if( !cpu )
124 return &benchs[i].vers[0];
125 for( j = 1; benchs[i].vers[j].cpu && benchs[i].vers[j].cpu != cpu; j++ )
126 assert( j < MAX_CPUS );
127 benchs[i].vers[j].cpu = cpu;
128 return &benchs[i].vers[j];
129 }
130
131 static int cmp_nop( const void *a, const void *b )
132 {
133 return *(uint16_t*)a - *(uint16_t*)b;
134 }
135
136 static int cmp_bench( const void *a, const void *b )
137 {
138 // asciibetical sort except preserving numbers
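    // e.g. "sad_8x8" sorts before "sad_16x16": when two digit runs differ in length,
    // the shorter (numerically smaller) one wins instead of comparing digits as characters.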
139 const char *sa = ((bench_func_t*)a)->name;
140 const char *sb = ((bench_func_t*)b)->name;
141 for( ;; sa++, sb++ )
142 {
143 if( !*sa && !*sb )
144 return 0;
145 if( isdigit( *sa ) && isdigit( *sb ) && isdigit( sa[1] ) != isdigit( sb[1] ) )
146 return isdigit( sa[1] ) - isdigit( sb[1] );
147 if( *sa != *sb )
148 return *sa - *sb;
149 }
150 }
151
152 static void print_bench(void)
153 {
154 uint16_t nops[10000];
155 int nfuncs, nop_time=0;
156
157 for( int i = 0; i < 10000; i++ )
158 {
159 uint32_t t = read_time();
160 nops[i] = read_time() - t;
161 }
162 qsort( nops, 10000, sizeof(uint16_t), cmp_nop );
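    // Trimmed mean of the middle 9000 samples; dividing by 900 rather than 9000 keeps
    // nop_time at 10x the per-measurement overhead, matching the 10*cycles/den scaling
    // used when printing each result below.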
163 for( int i = 500; i < 9500; i++ )
164 nop_time += nops[i];
165 nop_time /= 900;
166 printf( "nop: %d\n", nop_time );
167
168 for( nfuncs = 0; nfuncs < MAX_FUNCS && benchs[nfuncs].name; nfuncs++ );
169 qsort( benchs, nfuncs, sizeof(bench_func_t), cmp_bench );
170 for( int i = 0; i < nfuncs; i++ )
171 for( int j = 0; j < MAX_CPUS && (!j || benchs[i].vers[j].cpu); j++ )
172 {
173 int k;
174 bench_t *b = &benchs[i].vers[j];
175 if( !b->den )
176 continue;
177 for( k = 0; k < j && benchs[i].vers[k].pointer != b->pointer; k++ );
178 if( k < j )
179 continue;
180 printf( "%s_%s%s: %"PRId64"\n", benchs[i].name,
181 #if ARCH_X86 || ARCH_X86_64
182 b->cpu&X264_CPU_AVX512 ? "avx512" :
183 b->cpu&X264_CPU_AVX2 ? "avx2" :
184 b->cpu&X264_CPU_BMI2 ? "bmi2" :
185 b->cpu&X264_CPU_BMI1 ? "bmi1" :
186 b->cpu&X264_CPU_FMA3 ? "fma3" :
187 b->cpu&X264_CPU_FMA4 ? "fma4" :
188 b->cpu&X264_CPU_XOP ? "xop" :
189 b->cpu&X264_CPU_AVX ? "avx" :
190 b->cpu&X264_CPU_SSE42 ? "sse42" :
191 b->cpu&X264_CPU_SSE4 ? "sse4" :
192 b->cpu&X264_CPU_SSSE3 ? "ssse3" :
193 b->cpu&X264_CPU_SSE3 ? "sse3" :
194 b->cpu&X264_CPU_LZCNT ? "lzcnt" :
195 /* print sse2slow only if there's also a sse2fast version of the same func */
196 b->cpu&X264_CPU_SSE2_IS_SLOW && j<MAX_CPUS-1 && b[1].cpu&X264_CPU_SSE2_IS_FAST && !(b[1].cpu&X264_CPU_SSE3) ? "sse2slow" :
197 b->cpu&X264_CPU_SSE2 ? "sse2" :
198 b->cpu&X264_CPU_SSE ? "sse" :
199 b->cpu&X264_CPU_MMX ? "mmx" :
200 #elif ARCH_PPC
201 b->cpu&X264_CPU_ALTIVEC ? "altivec" :
202 #elif ARCH_ARM
203 b->cpu&X264_CPU_NEON ? "neon" :
204 b->cpu&X264_CPU_ARMV6 ? "armv6" :
205 #elif ARCH_AARCH64
206 b->cpu&X264_CPU_NEON ? "neon" :
207 b->cpu&X264_CPU_ARMV8 ? "armv8" :
208 #elif ARCH_MIPS
209 b->cpu&X264_CPU_MSA ? "msa" :
210 #endif
211 "c",
212 #if ARCH_X86 || ARCH_X86_64
213 b->cpu&X264_CPU_CACHELINE_32 ? "_c32" :
214 b->cpu&X264_CPU_SLOW_ATOM && b->cpu&X264_CPU_CACHELINE_64 ? "_c64_atom" :
215 b->cpu&X264_CPU_CACHELINE_64 ? "_c64" :
216 b->cpu&X264_CPU_SLOW_SHUFFLE ? "_slowshuffle" :
217 b->cpu&X264_CPU_LZCNT && b->cpu&X264_CPU_SSE3 && !(b->cpu&X264_CPU_BMI1) ? "_lzcnt" :
218 b->cpu&X264_CPU_SLOW_ATOM ? "_atom" :
219 #elif ARCH_ARM
220 b->cpu&X264_CPU_FAST_NEON_MRC ? "_fast_mrc" :
221 #endif
222 "",
223 (int64_t)(10*b->cycles/b->den - nop_time)/4 );
224 }
225 }
226
227 /* YMM and ZMM registers on x86 are turned off to save power when they haven't been
228 * used for some period of time. When they are used there will be a "warmup" period
229 * during which performance will be reduced and inconsistent, which is problematic when
230 * trying to benchmark individual functions. We can work around this by periodically
231 * issuing "dummy" instructions that use those registers to keep them powered on. */
232 static void (*simd_warmup_func)( void ) = NULL;
233 #define simd_warmup() do { if( simd_warmup_func ) simd_warmup_func(); } while( 0 )
234
235 #if HAVE_MMX
236 int x264_stack_pagealign( int (*func)(), int align );
237 void x264_checkasm_warmup_avx( void );
238 void x264_checkasm_warmup_avx512( void );
239
240 /* detect when callee-saved regs aren't saved
241 * needs an explicit asm check because it only sometimes crashes in normal use. */
242 intptr_t x264_checkasm_call( intptr_t (*func)(), int *ok, ... );
243 #else
244 #define x264_stack_pagealign( func, align ) func()
245 #endif
246
247 #if HAVE_AARCH64
248 intptr_t x264_checkasm_call( intptr_t (*func)(), int *ok, ... );
249 #endif
250
251 #if HAVE_ARMV6
252 intptr_t x264_checkasm_call_neon( intptr_t (*func)(), int *ok, ... );
253 intptr_t x264_checkasm_call_noneon( intptr_t (*func)(), int *ok, ... );
254 intptr_t (*x264_checkasm_call)( intptr_t (*func)(), int *ok, ... ) = x264_checkasm_call_noneon;
255 #endif
256
257 #define call_c1(func,...) func(__VA_ARGS__)
258
259 #if HAVE_MMX && ARCH_X86_64
260 /* Evil hack: detect incorrect assumptions that 32-bit ints are zero-extended to 64-bit.
261 * This is done by clobbering the stack with junk around the stack pointer and calling the
262 * assembly function through x264_checkasm_call with added dummy arguments which force all
263 * real arguments to be passed on the stack and not in registers. For 32-bit arguments the
264 * upper half of the 64-bit register location on the stack will now contain junk. Note that
265 * this is dependent on compiler behaviour and that interrupts etc. at the wrong time may
266 * overwrite the junk written to the stack so there's no guarantee that it will always
267 * detect all functions that assume zero-extension.
268 */
269 void x264_checkasm_stack_clobber( uint64_t clobber, ... );
270 #define call_a1(func,...) ({ \
271 uint64_t r = (rand() & 0xffff) * 0x0001000100010001ULL; \
272 x264_checkasm_stack_clobber( r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r ); /* max_args+6 */ \
273 simd_warmup(); \
274 x264_checkasm_call(( intptr_t(*)())func, &ok, 0, 0, 0, 0, __VA_ARGS__ ); })
275 #elif HAVE_AARCH64 && !defined(__APPLE__)
276 void x264_checkasm_stack_clobber( uint64_t clobber, ... );
277 #define call_a1(func,...) ({ \
278 uint64_t r = (rand() & 0xffff) * 0x0001000100010001ULL; \
279 x264_checkasm_stack_clobber( r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r,r ); /* max_args+8 */ \
280 x264_checkasm_call(( intptr_t(*)())func, &ok, 0, 0, 0, 0, 0, 0, __VA_ARGS__ ); })
281 #elif HAVE_MMX || HAVE_ARMV6
282 #define call_a1(func,...) x264_checkasm_call( (intptr_t(*)())func, &ok, __VA_ARGS__ )
283 #else
284 #define call_a1 call_c1
285 #endif
286
287 #if HAVE_ARMV6
288 #define call_a1_64(func,...) ((uint64_t (*)(intptr_t(*)(), int*, ...))x264_checkasm_call)( (intptr_t(*)())func, &ok, __VA_ARGS__ )
289 #else
290 #define call_a1_64 call_a1
291 #endif
292
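/* Benchmark harness: each sample times four back-to-back calls; the first iteration is
 * treated as warm-up and any sample more than 4x the running average is discarded
 * (presumably an interrupt or a context switch). The C reference (cpu == 0) only runs a
 * quarter of BENCH_RUNS since it just needs a rough baseline. */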
293 #define call_bench(func,cpu,...)\
294 if( do_bench && !strncmp(func_name, bench_pattern, bench_pattern_len) )\
295 {\
296 uint64_t tsum = 0;\
297 int tcount = 0;\
298 call_a1(func, __VA_ARGS__);\
299 for( int ti = 0; ti < (cpu?BENCH_RUNS:BENCH_RUNS/4); ti++ )\
300 {\
301 simd_warmup();\
302 uint32_t t = read_time();\
303 func(__VA_ARGS__);\
304 func(__VA_ARGS__);\
305 func(__VA_ARGS__);\
306 func(__VA_ARGS__);\
307 t = read_time() - t;\
308 if( (uint64_t)t*tcount <= tsum*4 && ti > 0 )\
309 {\
310 tsum += t;\
311 tcount++;\
312 }\
313 }\
314 bench_t *b = get_bench( func_name, cpu );\
315 b->cycles += tsum;\
316 b->den += tcount;\
317 b->pointer = func;\
318 }
319
320 /* for most functions, run benchmark and correctness test at the same time.
321 * for those that modify their inputs, run the above macros separately */
322 #define call_a(func,...) ({ call_a2(func,__VA_ARGS__); call_a1(func,__VA_ARGS__); })
323 #define call_c(func,...) ({ call_c2(func,__VA_ARGS__); call_c1(func,__VA_ARGS__); })
324 #define call_a2(func,...) ({ call_bench(func,cpu_new,__VA_ARGS__); })
325 #define call_c2(func,...) ({ call_bench(func,0,__VA_ARGS__); })
326 #define call_a64(func,...) ({ call_a2(func,__VA_ARGS__); call_a1_64(func,__VA_ARGS__); })
327
328
329 static int check_pixel( uint32_t cpu_ref, uint32_t cpu_new )
330 {
331 x264_pixel_function_t pixel_c;
332 x264_pixel_function_t pixel_ref;
333 x264_pixel_function_t pixel_asm;
334 x264_predict_t predict_4x4[12];
335 x264_predict8x8_t predict_8x8[12];
336 x264_predict_8x8_filter_t predict_8x8_filter;
337 ALIGNED_16( pixel edge[36] );
338 uint16_t cost_mv[32];
339 int ret = 0, ok, used_asm;
340
341 x264_pixel_init( 0, &pixel_c );
342 x264_pixel_init( cpu_ref, &pixel_ref );
343 x264_pixel_init( cpu_new, &pixel_asm );
344 x264_predict_4x4_init( 0, predict_4x4 );
345 x264_predict_8x8_init( 0, predict_8x8, &predict_8x8_filter );
346 predict_8x8_filter( pbuf2+40, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );
347
348 // maximize sum
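    // Fill pbuf3/pbuf4 with complementary full-scale values so that every pixel
    // difference equals PIXEL_MAX, maximizing the sums computed by SAD/SATD/etc.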
349 for( int i = 0; i < 256; i++ )
350 {
351 int z = i|(i>>4);
352 z ^= z>>2;
353 z ^= z>>1;
354 pbuf4[i] = -(z&1) & PIXEL_MAX;
355 pbuf3[i] = ~pbuf4[i] & PIXEL_MAX;
356 }
357 // random pattern made of maxed pixel differences, in case an intermediate value overflows
358 for( int i = 256; i < 0x1000; i++ )
359 {
360 pbuf4[i] = -(pbuf1[i&~0x88]&1) & PIXEL_MAX;
361 pbuf3[i] = ~(pbuf4[i]) & PIXEL_MAX;
362 }
363
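/* Compare the asm and C versions of every block-size variant: first over a range of
 * (mis)aligned source offsets and strides, then over the overflow-pattern buffers
 * prepared above to catch intermediate-precision bugs. */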
364 #define TEST_PIXEL( name, align ) \
365 ok = 1, used_asm = 0; \
366 for( int i = 0; i < ARRAY_ELEMS(pixel_c.name); i++ ) \
367 { \
368 int res_c, res_asm; \
369 if( pixel_asm.name[i] != pixel_ref.name[i] ) \
370 { \
371 set_func_name( "%s_%s", #name, pixel_names[i] ); \
372 used_asm = 1; \
373 for( int j = 0; j < 64; j++ ) \
374 { \
375 intptr_t stride1 = (j&31) == 31 ? 32 : FENC_STRIDE; \
376 res_c = call_c( pixel_c.name[i], pbuf1, stride1, pbuf2+j*!align, (intptr_t)64 ); \
377 res_asm = call_a( pixel_asm.name[i], pbuf1, stride1, pbuf2+j*!align, (intptr_t)64 ); \
378 if( res_c != res_asm ) \
379 { \
380 ok = 0; \
381 fprintf( stderr, #name "[%d]: %d != %d [FAILED]\n", i, res_c, res_asm ); \
382 break; \
383 } \
384 } \
385 for( int j = 0; j < 0x1000 && ok; j += 256 ) \
386 { \
387 res_c = pixel_c .name[i]( pbuf3+j, 16, pbuf4+j, 16 ); \
388 res_asm = pixel_asm.name[i]( pbuf3+j, 16, pbuf4+j, 16 ); \
389 if( res_c != res_asm ) \
390 { \
391 ok = 0; \
392 fprintf( stderr, #name "[%d]: overflow %d != %d\n", i, res_c, res_asm ); \
393 } \
394 } \
395 } \
396 } \
397 report( "pixel " #name " :" );
398
399 TEST_PIXEL( sad, 0 );
400 TEST_PIXEL( sad_aligned, 1 );
401 TEST_PIXEL( ssd, 1 );
402 TEST_PIXEL( satd, 0 );
403 TEST_PIXEL( sa8d, 1 );
404
405 ok = 1, used_asm = 0;
406 if( pixel_asm.sa8d_satd[PIXEL_16x16] != pixel_ref.sa8d_satd[PIXEL_16x16] )
407 {
408 set_func_name( "sa8d_satd_%s", pixel_names[PIXEL_16x16] );
409 used_asm = 1;
410 for( int j = 0; j < 64; j++ )
411 {
412 uint32_t cost8_c = pixel_c.sa8d[PIXEL_16x16]( pbuf1, 16, pbuf2, 64 );
413 uint32_t cost4_c = pixel_c.satd[PIXEL_16x16]( pbuf1, 16, pbuf2, 64 );
414 uint64_t res_a = call_a64( pixel_asm.sa8d_satd[PIXEL_16x16], pbuf1, (intptr_t)16, pbuf2, (intptr_t)64 );
415 uint32_t cost8_a = res_a;
416 uint32_t cost4_a = res_a >> 32;
417 if( cost8_a != cost8_c || cost4_a != cost4_c )
418 {
419 ok = 0;
420 fprintf( stderr, "sa8d_satd [%d]: (%d,%d) != (%d,%d) [FAILED]\n", PIXEL_16x16,
421 cost8_c, cost4_c, cost8_a, cost4_a );
422 break;
423 }
424 }
425 for( int j = 0; j < 0x1000 && ok; j += 256 )
426 {
427 uint32_t cost8_c = pixel_c.sa8d[PIXEL_16x16]( pbuf3+j, 16, pbuf4+j, 16 );
428 uint32_t cost4_c = pixel_c.satd[PIXEL_16x16]( pbuf3+j, 16, pbuf4+j, 16 );
429 uint64_t res_a = pixel_asm.sa8d_satd[PIXEL_16x16]( pbuf3+j, 16, pbuf4+j, 16 );
430 uint32_t cost8_a = res_a;
431 uint32_t cost4_a = res_a >> 32;
432 if( cost8_a != cost8_c || cost4_a != cost4_c )
433 {
434 ok = 0;
435 fprintf( stderr, "sa8d_satd [%d]: overflow (%d,%d) != (%d,%d) [FAILED]\n", PIXEL_16x16,
436 cost8_c, cost4_c, cost8_a, cost4_a );
437 }
438 }
439 }
440 report( "pixel sa8d_satd :" );
441
442 #define TEST_PIXEL_X( N ) \
443 ok = 1; used_asm = 0; \
444 for( int i = 0; i < 7; i++ ) \
445 { \
446 ALIGNED_16( int res_c[4] ) = {0}; \
447 ALIGNED_16( int res_asm[4] ) = {0}; \
448 if( pixel_asm.sad_x##N[i] && pixel_asm.sad_x##N[i] != pixel_ref.sad_x##N[i] ) \
449 { \
450 set_func_name( "sad_x%d_%s", N, pixel_names[i] ); \
451 used_asm = 1; \
452 for( int j = 0; j < 64; j++ ) \
453 { \
454 pixel *pix2 = pbuf2+j; \
455 res_c[0] = pixel_c.sad[i]( pbuf1, 16, pix2, 64 ); \
456 res_c[1] = pixel_c.sad[i]( pbuf1, 16, pix2+6, 64 ); \
457 res_c[2] = pixel_c.sad[i]( pbuf1, 16, pix2+1, 64 ); \
458 if( N == 4 ) \
459 { \
460 res_c[3] = pixel_c.sad[i]( pbuf1, 16, pix2+10, 64 ); \
461 call_a( pixel_asm.sad_x4[i], pbuf1, pix2, pix2+6, pix2+1, pix2+10, (intptr_t)64, res_asm ); \
462 } \
463 else \
464 call_a( pixel_asm.sad_x3[i], pbuf1, pix2, pix2+6, pix2+1, (intptr_t)64, res_asm ); \
465 if( memcmp(res_c, res_asm, N*sizeof(int)) ) \
466 { \
467 ok = 0; \
468 fprintf( stderr, "sad_x"#N"[%d]: %d,%d,%d,%d != %d,%d,%d,%d [FAILED]\n", \
469 i, res_c[0], res_c[1], res_c[2], res_c[3], \
470 res_asm[0], res_asm[1], res_asm[2], res_asm[3] ); \
471 } \
472 if( N == 4 ) \
473 call_c2( pixel_c.sad_x4[i], pbuf1, pix2, pix2+6, pix2+1, pix2+10, (intptr_t)64, res_asm ); \
474 else \
475 call_c2( pixel_c.sad_x3[i], pbuf1, pix2, pix2+6, pix2+1, (intptr_t)64, res_asm ); \
476 } \
477 } \
478 } \
479 report( "pixel sad_x"#N" :" );
480
481 TEST_PIXEL_X(3);
482 TEST_PIXEL_X(4);
483
484 #define TEST_PIXEL_VAR( i ) \
485 if( pixel_asm.var[i] != pixel_ref.var[i] ) \
486 { \
487 set_func_name( "%s_%s", "var", pixel_names[i] ); \
488 used_asm = 1; \
489 /* abi-check wrapper can't return uint64_t, so separate it from return value check */ \
490 call_c1( pixel_c.var[i], pbuf1, 16 ); \
491 call_a1( pixel_asm.var[i], pbuf1, (intptr_t)16 ); \
492 uint64_t res_c = pixel_c.var[i]( pbuf1, 16 ); \
493 uint64_t res_asm = pixel_asm.var[i]( pbuf1, 16 ); \
494 if( res_c != res_asm ) \
495 { \
496 ok = 0; \
497 fprintf( stderr, "var[%d]: %d %d != %d %d [FAILED]\n", i, (int)res_c, (int)(res_c>>32), (int)res_asm, (int)(res_asm>>32) ); \
498 } \
499 call_c2( pixel_c.var[i], pbuf1, (intptr_t)16 ); \
500 call_a2( pixel_asm.var[i], pbuf1, (intptr_t)16 ); \
501 }
502
503 ok = 1; used_asm = 0;
504 TEST_PIXEL_VAR( PIXEL_16x16 );
505 TEST_PIXEL_VAR( PIXEL_8x16 );
506 TEST_PIXEL_VAR( PIXEL_8x8 );
507 report( "pixel var :" );
508
509 #define TEST_PIXEL_VAR2( i ) \
510 if( pixel_asm.var2[i] != pixel_ref.var2[i] ) \
511 { \
512 int res_c, res_asm; \
513 ALIGNED_ARRAY_8( int, ssd_c, [2] ); \
514 ALIGNED_ARRAY_8( int, ssd_asm,[2] ); \
515 set_func_name( "%s_%s", "var2", pixel_names[i] ); \
516 used_asm = 1; \
517 res_c = call_c( pixel_c.var2[i], pbuf1, pbuf2, ssd_c ); \
518 res_asm = call_a( pixel_asm.var2[i], pbuf1, pbuf2, ssd_asm ); \
519 if( res_c != res_asm || memcmp( ssd_c, ssd_asm, 2*sizeof(int) ) ) \
520 { \
521 ok = 0; \
522 fprintf( stderr, "var2[%d]: {%d, %d, %d} != {%d, %d, %d} [FAILED]\n", i, res_c, ssd_c[0], ssd_c[1], res_asm, ssd_asm[0], ssd_asm[1] ); \
523 } \
524 }
525
526 ok = 1; used_asm = 0;
527 TEST_PIXEL_VAR2( PIXEL_8x16 );
528 TEST_PIXEL_VAR2( PIXEL_8x8 );
529 report( "pixel var2 :" );
530
531 ok = 1; used_asm = 0;
532 for( int i = 0; i < 4; i++ )
533 if( pixel_asm.hadamard_ac[i] != pixel_ref.hadamard_ac[i] )
534 {
535 set_func_name( "hadamard_ac_%s", pixel_names[i] );
536 used_asm = 1;
537 for( int j = 0; j < 32; j++ )
538 {
539 pixel *pix = (j&16 ? pbuf1 : pbuf3) + (j&15)*256;
540 call_c1( pixel_c.hadamard_ac[i], pbuf1, (intptr_t)16 );
541 call_a1( pixel_asm.hadamard_ac[i], pbuf1, (intptr_t)16 );
542 uint64_t rc = pixel_c.hadamard_ac[i]( pix, 16 );
543 uint64_t ra = pixel_asm.hadamard_ac[i]( pix, 16 );
544 if( rc != ra )
545 {
546 ok = 0;
547 fprintf( stderr, "hadamard_ac[%d]: %d,%d != %d,%d\n", i, (int)rc, (int)(rc>>32), (int)ra, (int)(ra>>32) );
548 break;
549 }
550 }
551 call_c2( pixel_c.hadamard_ac[i], pbuf1, (intptr_t)16 );
552 call_a2( pixel_asm.hadamard_ac[i], pbuf1, (intptr_t)16 );
553 }
554 report( "pixel hadamard_ac :" );
555
556 // maximize sum
557 for( int i = 0; i < 32; i++ )
558 for( int j = 0; j < 16; j++ )
559 pbuf4[16*i+j] = -((i+j)&1) & PIXEL_MAX;
560 ok = 1; used_asm = 0;
561 if( pixel_asm.vsad != pixel_ref.vsad )
562 {
563 for( int h = 2; h <= 32; h += 2 )
564 {
565 int res_c, res_asm;
566 set_func_name( "vsad" );
567 used_asm = 1;
568 for( int j = 0; j < 2 && ok; j++ )
569 {
570 pixel *p = j ? pbuf4 : pbuf1;
571 res_c = call_c( pixel_c.vsad, p, (intptr_t)16, h );
572 res_asm = call_a( pixel_asm.vsad, p, (intptr_t)16, h );
573 if( res_c != res_asm )
574 {
575 ok = 0;
576 fprintf( stderr, "vsad: height=%d, %d != %d\n", h, res_c, res_asm );
577 break;
578 }
579 }
580 }
581 }
582 report( "pixel vsad :" );
583
584 ok = 1; used_asm = 0;
585 if( pixel_asm.asd8 != pixel_ref.asd8 )
586 {
587 set_func_name( "asd8" );
588 used_asm = 1;
589 int res_c = call_c( pixel_c.asd8, pbuf1, (intptr_t)8, pbuf2, (intptr_t)8, 16 );
590 int res_a = call_a( pixel_asm.asd8, pbuf1, (intptr_t)8, pbuf2, (intptr_t)8, 16 );
591 if( res_c != res_a )
592 {
593 ok = 0;
594 fprintf( stderr, "asd: %d != %d\n", res_c, res_a );
595 }
596 }
597 report( "pixel asd :" );
598
599 #define TEST_INTRA_X3( name, i8x8, ... ) \
600 if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
601 { \
602 ALIGNED_16( int res_c[4] ); \
603 ALIGNED_16( int res_asm[4] ); \
604 set_func_name( #name ); \
605 used_asm = 1; \
606 call_c( pixel_c.name, pbuf1+48, i8x8 ? edge : pbuf3+48, res_c ); \
607 call_a( pixel_asm.name, pbuf1+48, i8x8 ? edge : pbuf3+48, res_asm ); \
608 if( memcmp(res_c, res_asm, 3 * sizeof(*res_c)) ) \
609 { \
610 ok = 0; \
611 fprintf( stderr, #name": %d,%d,%d != %d,%d,%d [FAILED]\n", \
612 res_c[0], res_c[1], res_c[2], \
613 res_asm[0], res_asm[1], res_asm[2] ); \
614 } \
615 }
616
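/* Exhaustive intra mode decision check: the C loop picks the cheapest of the 9
 * prediction modes (SATD/SAD cost plus a mode-bit penalty, with the winning mode
 * packed into the high bits), and the combined asm _x9 function must return the same
 * packed result and leave identical predicted pixels in its fdec buffer. */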
617 #define TEST_INTRA_X9( name, cmp ) \
618 if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
619 { \
620 set_func_name( #name ); \
621 used_asm = 1; \
622 ALIGNED_ARRAY_64( uint16_t, bitcosts,[17] ); \
623 for( int i=0; i<17; i++ ) \
624 bitcosts[i] = 9*(i!=8); \
625 memcpy( pbuf3, pbuf2, 20*FDEC_STRIDE*SIZEOF_PIXEL ); \
626 memcpy( pbuf4, pbuf2, 20*FDEC_STRIDE*SIZEOF_PIXEL ); \
627 for( int i=0; i<32; i++ ) \
628 { \
629 pixel *fenc = pbuf1+48+i*12; \
630 pixel *fdec1 = pbuf3+48+i*12; \
631 pixel *fdec2 = pbuf4+48+i*12; \
632 int pred_mode = i%9; \
633 int res_c = INT_MAX; \
634 for( int j=0; j<9; j++ ) \
635 { \
636 predict_4x4[j]( fdec1 ); \
637 int cost = pixel_c.cmp[PIXEL_4x4]( fenc, FENC_STRIDE, fdec1, FDEC_STRIDE ) + 9*(j!=pred_mode); \
638 if( cost < (uint16_t)res_c ) \
639 res_c = cost + (j<<16); \
640 } \
641 predict_4x4[res_c>>16]( fdec1 ); \
642 int res_a = call_a( pixel_asm.name, fenc, fdec2, bitcosts+8-pred_mode ); \
643 if( res_c != res_a ) \
644 { \
645 ok = 0; \
646 fprintf( stderr, #name": %d,%d != %d,%d [FAILED]\n", res_c>>16, res_c&0xffff, res_a>>16, res_a&0xffff ); \
647 break; \
648 } \
649 if( memcmp(fdec1, fdec2, 4*FDEC_STRIDE*SIZEOF_PIXEL) ) \
650 { \
651 ok = 0; \
652 fprintf( stderr, #name" [FAILED]\n" ); \
653 for( int j=0; j<16; j++ ) \
654 fprintf( stderr, "%02x ", fdec1[(j&3)+(j>>2)*FDEC_STRIDE] ); \
655 fprintf( stderr, "\n" ); \
656 for( int j=0; j<16; j++ ) \
657 fprintf( stderr, "%02x ", fdec2[(j&3)+(j>>2)*FDEC_STRIDE] ); \
658 fprintf( stderr, "\n" ); \
659 break; \
660 } \
661 } \
662 }
663
664 #define TEST_INTRA8_X9( name, cmp ) \
665 if( pixel_asm.name && pixel_asm.name != pixel_ref.name ) \
666 { \
667 set_func_name( #name ); \
668 used_asm = 1; \
669 ALIGNED_ARRAY_64( uint16_t, bitcosts,[17] ); \
670 ALIGNED_ARRAY_16( uint16_t, satds_c,[16] ); \
671 ALIGNED_ARRAY_16( uint16_t, satds_a,[16] ); \
672 memset( satds_c, 0, 16 * sizeof(*satds_c) ); \
673 memset( satds_a, 0, 16 * sizeof(*satds_a) ); \
674 for( int i=0; i<17; i++ ) \
675 bitcosts[i] = 9*(i!=8); \
676 for( int i=0; i<32; i++ ) \
677 { \
678 pixel *fenc = pbuf1+48+i*12; \
679 pixel *fdec1 = pbuf3+48+i*12; \
680 pixel *fdec2 = pbuf4+48+i*12; \
681 int pred_mode = i%9; \
682 int res_c = INT_MAX; \
683 predict_8x8_filter( fdec1, edge, ALL_NEIGHBORS, ALL_NEIGHBORS ); \
684 for( int j=0; j<9; j++ ) \
685 { \
686 predict_8x8[j]( fdec1, edge ); \
687 satds_c[j] = pixel_c.cmp[PIXEL_8x8]( fenc, FENC_STRIDE, fdec1, FDEC_STRIDE ) + 9*(j!=pred_mode); \
688 if( satds_c[j] < (uint16_t)res_c ) \
689 res_c = satds_c[j] + (j<<16); \
690 } \
691 predict_8x8[res_c>>16]( fdec1, edge ); \
692 int res_a = call_a( pixel_asm.name, fenc, fdec2, edge, bitcosts+8-pred_mode, satds_a ); \
693 if( res_c != res_a || memcmp(satds_c, satds_a, 16 * sizeof(*satds_c)) ) \
694 { \
695 ok = 0; \
696 fprintf( stderr, #name": %d,%d != %d,%d [FAILED]\n", res_c>>16, res_c&0xffff, res_a>>16, res_a&0xffff ); \
697 for( int j = 0; j < 9; j++ ) \
698 fprintf( stderr, "%5d ", satds_c[j]); \
699 fprintf( stderr, "\n" ); \
700 for( int j = 0; j < 9; j++ ) \
701 fprintf( stderr, "%5d ", satds_a[j]); \
702 fprintf( stderr, "\n" ); \
703 break; \
704 } \
705 for( int j=0; j<8; j++ ) \
706 if( memcmp(fdec1+j*FDEC_STRIDE, fdec2+j*FDEC_STRIDE, 8*SIZEOF_PIXEL) ) \
707 ok = 0; \
708 if( !ok ) \
709 { \
710 fprintf( stderr, #name" [FAILED]\n" ); \
711 for( int j=0; j<8; j++ ) \
712 { \
713 for( int k=0; k<8; k++ ) \
714 fprintf( stderr, "%02x ", fdec1[k+j*FDEC_STRIDE] ); \
715 fprintf( stderr, "\n" ); \
716 } \
717 fprintf( stderr, "\n" ); \
718 for( int j=0; j<8; j++ ) \
719 { \
720 for( int k=0; k<8; k++ ) \
721 fprintf( stderr, "%02x ", fdec2[k+j*FDEC_STRIDE] ); \
722 fprintf( stderr, "\n" ); \
723 } \
724 fprintf( stderr, "\n" ); \
725 break; \
726 } \
727 } \
728 }
729
730 memcpy( pbuf3, pbuf2, 20*FDEC_STRIDE*SIZEOF_PIXEL );
731 ok = 1; used_asm = 0;
732 TEST_INTRA_X3( intra_satd_x3_16x16, 0 );
733 TEST_INTRA_X3( intra_satd_x3_8x16c, 0 );
734 TEST_INTRA_X3( intra_satd_x3_8x8c, 0 );
735 TEST_INTRA_X3( intra_sa8d_x3_8x8, 1, edge );
736 TEST_INTRA_X3( intra_satd_x3_4x4, 0 );
737 report( "intra satd_x3 :" );
738 ok = 1; used_asm = 0;
739 TEST_INTRA_X3( intra_sad_x3_16x16, 0 );
740 TEST_INTRA_X3( intra_sad_x3_8x16c, 0 );
741 TEST_INTRA_X3( intra_sad_x3_8x8c, 0 );
742 TEST_INTRA_X3( intra_sad_x3_8x8, 1, edge );
743 TEST_INTRA_X3( intra_sad_x3_4x4, 0 );
744 report( "intra sad_x3 :" );
745 ok = 1; used_asm = 0;
746 TEST_INTRA_X9( intra_satd_x9_4x4, satd );
747 TEST_INTRA8_X9( intra_sa8d_x9_8x8, sa8d );
748 report( "intra satd_x9 :" );
749 ok = 1; used_asm = 0;
750 TEST_INTRA_X9( intra_sad_x9_4x4, sad );
751 TEST_INTRA8_X9( intra_sad_x9_8x8, sad );
752 report( "intra sad_x9 :" );
753
754 ok = 1; used_asm = 0;
755 if( pixel_asm.ssd_nv12_core != pixel_ref.ssd_nv12_core )
756 {
757 used_asm = 1;
758 set_func_name( "ssd_nv12" );
759 uint64_t res_u_c, res_v_c, res_u_a, res_v_a;
760 for( int w = 8; w <= 360; w += 8 )
761 {
762 pixel_c.ssd_nv12_core( pbuf1, 368, pbuf2, 368, w, 8, &res_u_c, &res_v_c );
763 pixel_asm.ssd_nv12_core( pbuf1, 368, pbuf2, 368, w, 8, &res_u_a, &res_v_a );
764 if( res_u_c != res_u_a || res_v_c != res_v_a )
765 {
766 ok = 0;
767 fprintf( stderr, "ssd_nv12: %"PRIu64",%"PRIu64" != %"PRIu64",%"PRIu64"\n",
768 res_u_c, res_v_c, res_u_a, res_v_a );
769 }
770 }
771 call_c( pixel_c.ssd_nv12_core, pbuf1, (intptr_t)368, pbuf2, (intptr_t)368, 360, 8, &res_u_c, &res_v_c );
772 call_a( pixel_asm.ssd_nv12_core, pbuf1, (intptr_t)368, pbuf2, (intptr_t)368, 360, 8, &res_u_a, &res_v_a );
773 }
774 report( "ssd_nv12 :" );
775
776 if( pixel_asm.ssim_4x4x2_core != pixel_ref.ssim_4x4x2_core ||
777 pixel_asm.ssim_end4 != pixel_ref.ssim_end4 )
778 {
779 int cnt;
780 float res_c, res_a;
781 ALIGNED_16( int sums[5][4] ) = {{0}};
782 used_asm = ok = 1;
783 x264_emms();
784 res_c = x264_pixel_ssim_wxh( &pixel_c, pbuf1+2, 32, pbuf2+2, 32, 32, 28, pbuf3, &cnt );
785 res_a = x264_pixel_ssim_wxh( &pixel_asm, pbuf1+2, 32, pbuf2+2, 32, 32, 28, pbuf3, &cnt );
786 if( fabs( res_c - res_a ) > 1e-5 )
787 {
788 ok = 0;
789 fprintf( stderr, "ssim: %.7f != %.7f [FAILED]\n", res_c, res_a );
790 }
791 set_func_name( "ssim_core" );
792 call_c( pixel_c.ssim_4x4x2_core, pbuf1+2, (intptr_t)32, pbuf2+2, (intptr_t)32, sums );
793 call_a( pixel_asm.ssim_4x4x2_core, pbuf1+2, (intptr_t)32, pbuf2+2, (intptr_t)32, sums );
794 set_func_name( "ssim_end" );
795 call_c2( pixel_c.ssim_end4, sums, sums, 4 );
796 call_a2( pixel_asm.ssim_end4, sums, sums, 4 );
797 /* check incorrect assumptions that 32-bit ints are zero-extended to 64-bit */
798 call_c1( pixel_c.ssim_end4, sums, sums, 3 );
799 call_a1( pixel_asm.ssim_end4, sums, sums, 3 );
800 report( "ssim :" );
801 }
802
803 ok = 1; used_asm = 0;
804 for( int i = 0; i < 32; i++ )
805 cost_mv[i] = rand30() & 0xffff;
806 for( int i = 0; i < 100 && ok; i++ )
807 if( pixel_asm.ads[i&3] != pixel_ref.ads[i&3] )
808 {
809 ALIGNED_16( uint16_t sums[72] );
810 ALIGNED_16( int dc[4] );
811 ALIGNED_16( int16_t mvs_a[48] );
812 ALIGNED_16( int16_t mvs_c[48] );
813 int mvn_a, mvn_c;
814 int thresh = (rand() % 257) * PIXEL_MAX + (rand30() & 0xffff);
815 set_func_name( "esa_ads_%s", pixel_names[i&3] );
816 if( i < 40 )
817 {
818 for( int j = 0; j < 72; j++ )
819 sums[j] = (rand() % 9) * 8 * PIXEL_MAX;
820 for( int j = 0; j < 4; j++ )
821 dc[j] = (rand() % 9) * 8 * PIXEL_MAX;
822 }
823 else
824 {
825 #if BIT_DEPTH + 6 > 15
826 for( int j = 0; j < 72; j++ )
827 sums[j] = rand30() & ((1 << (BIT_DEPTH + 6))-1);
828 for( int j = 0; j < 4; j++ )
829 dc[j] = rand30() & ((1 << (BIT_DEPTH + 6))-1);
830 #else
831 for( int j = 0; j < 72; j++ )
832 sums[j] = rand() & ((1 << (BIT_DEPTH + 6))-1);
833 for( int j = 0; j < 4; j++ )
834 dc[j] = rand() & ((1 << (BIT_DEPTH + 6))-1);
835 #endif
836 }
837 used_asm = 1;
838 mvn_c = call_c( pixel_c.ads[i&3], dc, sums, 32, cost_mv, mvs_c, 28, thresh );
839 mvn_a = call_a( pixel_asm.ads[i&3], dc, sums, 32, cost_mv, mvs_a, 28, thresh );
840 if( mvn_c != mvn_a || memcmp( mvs_c, mvs_a, mvn_c*sizeof(*mvs_c) ) )
841 {
842 ok = 0;
843 printf( "thresh: %d\n", thresh );
844 printf( "c%d: ", i&3 );
845 for( int j = 0; j < mvn_c; j++ )
846 printf( "%d ", mvs_c[j] );
847 printf( "\na%d: ", i&3 );
848 for( int j = 0; j < mvn_a; j++ )
849 printf( "%d ", mvs_a[j] );
850 printf( "\n\n" );
851 }
852 }
853 report( "esa ads:" );
854
855 return ret;
856 }
857
858 static int check_dct( uint32_t cpu_ref, uint32_t cpu_new )
859 {
860 x264_dct_function_t dct_c;
861 x264_dct_function_t dct_ref;
862 x264_dct_function_t dct_asm;
863 x264_quant_function_t qf;
864 int ret = 0, ok, used_asm, interlace = 0;
865 ALIGNED_ARRAY_64( dctcoef, dct1, [16],[16] );
866 ALIGNED_ARRAY_64( dctcoef, dct2, [16],[16] );
867 ALIGNED_ARRAY_64( dctcoef, dct4, [16],[16] );
868 ALIGNED_ARRAY_64( dctcoef, dct8, [4],[64] );
869 ALIGNED_16( dctcoef dctdc[2][8] );
870 x264_t h_buf;
871 x264_t *h = &h_buf;
872
873 x264_dct_init( 0, &dct_c );
874 x264_dct_init( cpu_ref, &dct_ref);
875 x264_dct_init( cpu_new, &dct_asm );
876
877 memset( h, 0, sizeof(*h) );
878 x264_param_default( &h->param );
879 h->sps->i_chroma_format_idc = 1;
880 h->chroma_qp_table = i_chroma_qp_table + 12;
881 h->param.analyse.i_luma_deadzone[0] = 0;
882 h->param.analyse.i_luma_deadzone[1] = 0;
883 h->param.analyse.b_transform_8x8 = 1;
884 for( int i = 0; i < 6; i++ )
885 h->sps->scaling_list[i] = x264_cqm_flat16;
886 x264_cqm_init( h );
887 x264_quant_init( h, 0, &qf );
888
889 /* overflow test cases */
890 for( int i = 0; i < 5; i++ )
891 {
892 pixel *enc = &pbuf3[16*i*FENC_STRIDE];
893 pixel *dec = &pbuf4[16*i*FDEC_STRIDE];
894
895 for( int j = 0; j < 16; j++ )
896 {
897 int cond_a = (i < 2) ? 1 : ((j&3) == 0 || (j&3) == (i-1));
898 int cond_b = (i == 0) ? 1 : !cond_a;
899 enc[0] = enc[1] = enc[4] = enc[5] = enc[8] = enc[9] = enc[12] = enc[13] = cond_a ? PIXEL_MAX : 0;
900 enc[2] = enc[3] = enc[6] = enc[7] = enc[10] = enc[11] = enc[14] = enc[15] = cond_b ? PIXEL_MAX : 0;
901
902 for( int k = 0; k < 4; k++ )
903 dec[k] = PIXEL_MAX - enc[k];
904
905 enc += FENC_STRIDE;
906 dec += FDEC_STRIDE;
907 }
908 }
909
910 #define TEST_DCT( name, t1, t2, size ) \
911 if( dct_asm.name != dct_ref.name ) \
912 { \
913 set_func_name( #name ); \
914 used_asm = 1; \
915 pixel *enc = pbuf3; \
916 pixel *dec = pbuf4; \
917 for( int j = 0; j < 5; j++) \
918 { \
919 call_c( dct_c.name, t1, &pbuf1[j*64], &pbuf2[j*64] ); \
920 call_a( dct_asm.name, t2, &pbuf1[j*64], &pbuf2[j*64] ); \
921 if( memcmp( t1, t2, size*sizeof(dctcoef) ) ) \
922 { \
923 ok = 0; \
924 fprintf( stderr, #name " [FAILED]\n" ); \
925 for( int k = 0; k < size; k++ )\
926 printf( "%d ", ((dctcoef*)t1)[k] );\
927 printf("\n");\
928 for( int k = 0; k < size; k++ )\
929 printf( "%d ", ((dctcoef*)t2)[k] );\
930 printf("\n");\
931 break; \
932 } \
933 call_c( dct_c.name, t1, enc, dec ); \
934 call_a( dct_asm.name, t2, enc, dec ); \
935 if( memcmp( t1, t2, size*sizeof(dctcoef) ) ) \
936 { \
937 ok = 0; \
938 fprintf( stderr, #name " [FAILED] (overflow)\n" ); \
939 break; \
940 } \
941 enc += 16*FENC_STRIDE; \
942 dec += 16*FDEC_STRIDE; \
943 } \
944 }
945 ok = 1; used_asm = 0;
946 TEST_DCT( sub4x4_dct, dct1[0], dct2[0], 16 );
947 TEST_DCT( sub8x8_dct, dct1, dct2, 16*4 );
948 TEST_DCT( sub8x8_dct_dc, dctdc[0], dctdc[1], 4 );
949 TEST_DCT( sub8x16_dct_dc, dctdc[0], dctdc[1], 8 );
950 TEST_DCT( sub16x16_dct, dct1, dct2, 16*16 );
951 report( "sub_dct4 :" );
952
953 ok = 1; used_asm = 0;
954 TEST_DCT( sub8x8_dct8, (void*)dct1[0], (void*)dct2[0], 64 );
955 TEST_DCT( sub16x16_dct8, (void*)dct1, (void*)dct2, 64*4 );
956 report( "sub_dct8 :" );
957 #undef TEST_DCT
958
959 // fdct and idct are denormalized by different factors, so quant/dequant
960 // is needed to force the coefs into the right range.
961 dct_c.sub16x16_dct( dct4, pbuf1, pbuf2 );
962 dct_c.sub16x16_dct8( dct8, pbuf1, pbuf2 );
963 for( int i = 0; i < 16; i++ )
964 {
965 qf.quant_4x4( dct4[i], h->quant4_mf[CQM_4IY][20], h->quant4_bias[CQM_4IY][20] );
966 qf.dequant_4x4( dct4[i], h->dequant4_mf[CQM_4IY], 20 );
967 }
968 for( int i = 0; i < 4; i++ )
969 {
970 qf.quant_8x8( dct8[i], h->quant8_mf[CQM_8IY][20], h->quant8_bias[CQM_8IY][20] );
971 qf.dequant_8x8( dct8[i], h->dequant8_mf[CQM_8IY], 20 );
972 }
973 x264_cqm_delete( h );
974
975 #define TEST_IDCT( name, src ) \
976 if( dct_asm.name != dct_ref.name ) \
977 { \
978 set_func_name( #name ); \
979 used_asm = 1; \
980 memcpy( pbuf3, pbuf1, 32*32 * SIZEOF_PIXEL ); \
981 memcpy( pbuf4, pbuf1, 32*32 * SIZEOF_PIXEL ); \
982 memcpy( dct1, src, 256 * sizeof(dctcoef) ); \
983 memcpy( dct2, src, 256 * sizeof(dctcoef) ); \
984 call_c1( dct_c.name, pbuf3, (void*)dct1 ); \
985 call_a1( dct_asm.name, pbuf4, (void*)dct2 ); \
986 if( memcmp( pbuf3, pbuf4, 32*32 * SIZEOF_PIXEL ) ) \
987 { \
988 ok = 0; \
989 fprintf( stderr, #name " [FAILED]\n" ); \
990 } \
991 call_c2( dct_c.name, pbuf3, (void*)dct1 ); \
992 call_a2( dct_asm.name, pbuf4, (void*)dct2 ); \
993 }
994 ok = 1; used_asm = 0;
995 TEST_IDCT( add4x4_idct, dct4 );
996 TEST_IDCT( add8x8_idct, dct4 );
997 TEST_IDCT( add8x8_idct_dc, dct4 );
998 TEST_IDCT( add16x16_idct, dct4 );
999 TEST_IDCT( add16x16_idct_dc, dct4 );
1000 report( "add_idct4 :" );
1001
1002 ok = 1; used_asm = 0;
1003 TEST_IDCT( add8x8_idct8, dct8 );
1004 TEST_IDCT( add16x16_idct8, dct8 );
1005 report( "add_idct8 :" );
1006 #undef TEST_IDCT
1007
1008 #define TEST_DCTDC( name )\
1009 ok = 1; used_asm = 0;\
1010 if( dct_asm.name != dct_ref.name )\
1011 {\
1012 set_func_name( #name );\
1013 used_asm = 1;\
1014 uint16_t *p = (uint16_t*)buf1;\
1015 for( int i = 0; i < 16 && ok; i++ )\
1016 {\
1017 for( int j = 0; j < 16; j++ )\
1018 dct1[0][j] = !i ? (j^j>>1^j>>2^j>>3)&1 ? PIXEL_MAX*16 : -PIXEL_MAX*16 /* max dc */\
1019 : i<8 ? (*p++)&1 ? PIXEL_MAX*16 : -PIXEL_MAX*16 /* max elements */\
1020 : ((*p++)&0x1fff)-0x1000; /* general case */\
1021 memcpy( dct2, dct1, 16 * sizeof(dctcoef) );\
1022 call_c1( dct_c.name, dct1[0] );\
1023 call_a1( dct_asm.name, dct2[0] );\
1024 if( memcmp( dct1, dct2, 16 * sizeof(dctcoef) ) )\
1025 ok = 0;\
1026 }\
1027 call_c2( dct_c.name, dct1[0] );\
1028 call_a2( dct_asm.name, dct2[0] );\
1029 }\
1030 report( #name " :" );
1031
1032 TEST_DCTDC( dct4x4dc );
1033 TEST_DCTDC( idct4x4dc );
1034 #undef TEST_DCTDC
1035
1036 #define TEST_DCTDC_CHROMA( name )\
1037 ok = 1; used_asm = 0;\
1038 if( dct_asm.name != dct_ref.name )\
1039 {\
1040 set_func_name( #name );\
1041 used_asm = 1;\
1042 uint16_t *p = (uint16_t*)buf1;\
1043 for( int i = 0; i < 16 && ok; i++ )\
1044 {\
1045 for( int j = 0; j < 8; j++ )\
1046 dct1[j][0] = !i ? (j^j>>1^j>>2)&1 ? PIXEL_MAX*16 : -PIXEL_MAX*16 /* max dc */\
1047 : i<8 ? (*p++)&1 ? PIXEL_MAX*16 : -PIXEL_MAX*16 /* max elements */\
1048 : ((*p++)&0x1fff)-0x1000; /* general case */\
1049 memcpy( dct2, dct1, 8*16 * sizeof(dctcoef) );\
1050 call_c1( dct_c.name, dctdc[0], dct1 );\
1051 call_a1( dct_asm.name, dctdc[1], dct2 );\
1052 if( memcmp( dctdc[0], dctdc[1], 8 * sizeof(dctcoef) ) || memcmp( dct1, dct2, 8*16 * sizeof(dctcoef) ) )\
1053 {\
1054 ok = 0;\
1055 fprintf( stderr, #name " [FAILED]\n" ); \
1056 }\
1057 }\
1058 call_c2( dct_c.name, dctdc[0], dct1 );\
1059 call_a2( dct_asm.name, dctdc[1], dct2 );\
1060 }\
1061 report( #name " :" );
1062
1063 TEST_DCTDC_CHROMA( dct2x4dc );
1064 #undef TEST_DCTDC_CHROMA
1065
1066 x264_zigzag_function_t zigzag_c[2];
1067 x264_zigzag_function_t zigzag_ref[2];
1068 x264_zigzag_function_t zigzag_asm[2];
1069
1070 ALIGNED_ARRAY_64( dctcoef, level1,[64] );
1071 ALIGNED_ARRAY_64( dctcoef, level2,[64] );
1072
1073 #define TEST_ZIGZAG_SCAN( name, t1, t2, dct, size ) \
1074 if( zigzag_asm[interlace].name != zigzag_ref[interlace].name ) \
1075 { \
1076 set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
1077 used_asm = 1; \
1078 for( int i = 0; i < size*size; i++ ) \
1079 dct[i] = i; \
1080 call_c( zigzag_c[interlace].name, t1, dct ); \
1081 call_a( zigzag_asm[interlace].name, t2, dct ); \
1082 if( memcmp( t1, t2, size*size*sizeof(dctcoef) ) ) \
1083 { \
1084 ok = 0; \
1085 for( int i = 0; i < 2; i++ ) \
1086 { \
1087 dctcoef *d = (dctcoef*)(i ? t2 : t1); \
1088 for( int j = 0; j < size; j++ ) \
1089 { \
1090 for( int k = 0; k < size; k++ ) \
1091 fprintf( stderr, "%2d ", d[k+j*8] ); \
1092 fprintf( stderr, "\n" ); \
1093 } \
1094 fprintf( stderr, "\n" ); \
1095 } \
1096 fprintf( stderr, #name " [FAILED]\n" ); \
1097 } \
1098 }
1099
1100 #define TEST_ZIGZAG_SUB( name, t1, t2, size ) \
1101 if( zigzag_asm[interlace].name != zigzag_ref[interlace].name ) \
1102 { \
1103 int nz_a, nz_c; \
1104 set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
1105 used_asm = 1; \
1106 memcpy( pbuf3, pbuf1, 16*FDEC_STRIDE * SIZEOF_PIXEL ); \
1107 memcpy( pbuf4, pbuf1, 16*FDEC_STRIDE * SIZEOF_PIXEL ); \
1108 nz_c = call_c1( zigzag_c[interlace].name, t1, pbuf2, pbuf3 ); \
1109 nz_a = call_a1( zigzag_asm[interlace].name, t2, pbuf2, pbuf4 ); \
1110 if( memcmp( t1, t2, size*sizeof(dctcoef) ) || memcmp( pbuf3, pbuf4, 16*FDEC_STRIDE*SIZEOF_PIXEL ) || nz_c != nz_a ) \
1111 { \
1112 ok = 0; \
1113 fprintf( stderr, #name " [FAILED]\n" ); \
1114 } \
1115 call_c2( zigzag_c[interlace].name, t1, pbuf2, pbuf3 ); \
1116 call_a2( zigzag_asm[interlace].name, t2, pbuf2, pbuf4 ); \
1117 }
1118
1119 #define TEST_ZIGZAG_SUBAC( name, t1, t2 ) \
1120 if( zigzag_asm[interlace].name != zigzag_ref[interlace].name ) \
1121 { \
1122 int nz_a, nz_c; \
1123 dctcoef dc_a, dc_c; \
1124 set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
1125 used_asm = 1; \
1126 for( int i = 0; i < 2; i++ ) \
1127 { \
1128 memcpy( pbuf3, pbuf2, 16*FDEC_STRIDE * SIZEOF_PIXEL ); \
1129 memcpy( pbuf4, pbuf2, 16*FDEC_STRIDE * SIZEOF_PIXEL ); \
1130 for( int j = 0; j < 4; j++ ) \
1131 { \
1132 memcpy( pbuf3 + j*FDEC_STRIDE, (i?pbuf1:pbuf2) + j*FENC_STRIDE, 4 * SIZEOF_PIXEL ); \
1133 memcpy( pbuf4 + j*FDEC_STRIDE, (i?pbuf1:pbuf2) + j*FENC_STRIDE, 4 * SIZEOF_PIXEL ); \
1134 } \
1135 nz_c = call_c1( zigzag_c[interlace].name, t1, pbuf2, pbuf3, &dc_c ); \
1136 nz_a = call_a1( zigzag_asm[interlace].name, t2, pbuf2, pbuf4, &dc_a ); \
1137 if( memcmp( t1+1, t2+1, 15*sizeof(dctcoef) ) || memcmp( pbuf3, pbuf4, 16*FDEC_STRIDE * SIZEOF_PIXEL ) || nz_c != nz_a || dc_c != dc_a ) \
1138 { \
1139 ok = 0; \
1140 fprintf( stderr, #name " [FAILED]\n" ); \
1141 break; \
1142 } \
1143 } \
1144 call_c2( zigzag_c[interlace].name, t1, pbuf2, pbuf3, &dc_c ); \
1145 call_a2( zigzag_asm[interlace].name, t2, pbuf2, pbuf4, &dc_a ); \
1146 }
1147
1148 #define TEST_INTERLEAVE( name, t1, t2, dct, size ) \
1149 if( zigzag_asm[interlace].name != zigzag_ref[interlace].name ) \
1150 { \
1151 for( int j = 0; j < 100; j++ ) \
1152 { \
1153 set_func_name( "zigzag_"#name"_%s", interlace?"field":"frame" ); \
1154 used_asm = 1; \
1155 memcpy(dct, buf1, size*sizeof(dctcoef)); \
1156 for( int i = 0; i < size; i++ ) \
1157 dct[i] = rand()&0x1F ? 0 : dct[i]; \
1158 memcpy(buf3, buf4, 10); \
1159 call_c( zigzag_c[interlace].name, t1, dct, buf3 ); \
1160 call_a( zigzag_asm[interlace].name, t2, dct, buf4 ); \
1161 if( memcmp( t1, t2, size*sizeof(dctcoef) ) || memcmp( buf3, buf4, 10 ) ) \
1162 { \
1163 ok = 0; printf("%d: %d %d %d %d\n%d %d %d %d\n\n",memcmp( t1, t2, size*sizeof(dctcoef) ),buf3[0], buf3[1], buf3[8], buf3[9], buf4[0], buf4[1], buf4[8], buf4[9]);break;\
1164 } \
1165 } \
1166 }
1167
1168 x264_zigzag_init( 0, &zigzag_c[0], &zigzag_c[1] );
1169 x264_zigzag_init( cpu_ref, &zigzag_ref[0], &zigzag_ref[1] );
1170 x264_zigzag_init( cpu_new, &zigzag_asm[0], &zigzag_asm[1] );
1171
1172 ok = 1; used_asm = 0;
1173 TEST_INTERLEAVE( interleave_8x8_cavlc, level1, level2, dct8[0], 64 );
1174 report( "zigzag_interleave :" );
1175
1176 for( interlace = 0; interlace <= 1; interlace++ )
1177 {
1178 ok = 1; used_asm = 0;
1179 TEST_ZIGZAG_SCAN( scan_8x8, level1, level2, dct8[0], 8 );
1180 TEST_ZIGZAG_SCAN( scan_4x4, level1, level2, dct1[0], 4 );
1181 TEST_ZIGZAG_SUB( sub_4x4, level1, level2, 16 );
1182 TEST_ZIGZAG_SUB( sub_8x8, level1, level2, 64 );
1183 TEST_ZIGZAG_SUBAC( sub_4x4ac, level1, level2 );
1184 report( interlace ? "zigzag_field :" : "zigzag_frame :" );
1185 }
1186 #undef TEST_ZIGZAG_SCAN
1187 #undef TEST_ZIGZAG_SUB
1188
1189 return ret;
1190 }
1191
1192 static int check_mc( uint32_t cpu_ref, uint32_t cpu_new )
1193 {
1194 x264_mc_functions_t mc_c;
1195 x264_mc_functions_t mc_ref;
1196 x264_mc_functions_t mc_a;
1197 x264_pixel_function_t pixf;
1198
1199 pixel *src = &(pbuf1)[2*64+2];
1200 pixel *src2[4] = { &(pbuf1)[3*64+2], &(pbuf1)[5*64+2],
1201 &(pbuf1)[7*64+2], &(pbuf1)[9*64+2] };
1202 pixel *dst1 = pbuf3;
1203 pixel *dst2 = pbuf4;
1204
1205 int ret = 0, ok, used_asm;
1206
1207 x264_mc_init( 0, &mc_c, 0 );
1208 x264_mc_init( cpu_ref, &mc_ref, 0 );
1209 x264_mc_init( cpu_new, &mc_a, 0 );
1210 x264_pixel_init( 0, &pixf );
1211
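/* For every block size and subpel offset, check mc_luma against the C version on a
 * poisoned destination buffer, and check that get_ref produces rows identical to the
 * C copy (get_ref may return a pointer into the source instead of copying, hence the
 * ref/ref_stride indirection). */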
1212 #define MC_TEST_LUMA( w, h ) \
1213 if( mc_a.mc_luma != mc_ref.mc_luma && !(w&(w-1)) && h<=16 ) \
1214 { \
1215 const x264_weight_t *weight = x264_weight_none; \
1216 set_func_name( "mc_luma_%dx%d", w, h ); \
1217 used_asm = 1; \
1218 for( int i = 0; i < 1024; i++ ) \
1219 pbuf3[i] = pbuf4[i] = 0xCD; \
1220 call_c( mc_c.mc_luma, dst1, (intptr_t)32, src2, (intptr_t)64, dx, dy, w, h, weight ); \
1221 call_a( mc_a.mc_luma, dst2, (intptr_t)32, src2, (intptr_t)64, dx, dy, w, h, weight ); \
1222 if( memcmp( pbuf3, pbuf4, 1024 * SIZEOF_PIXEL ) ) \
1223 { \
1224 fprintf( stderr, "mc_luma[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
1225 ok = 0; \
1226 } \
1227 } \
1228 if( mc_a.get_ref != mc_ref.get_ref ) \
1229 { \
1230 pixel *ref = dst2; \
1231 intptr_t ref_stride = 32; \
1232 int w_checked = ( ( SIZEOF_PIXEL == 2 && (w == 12 || w == 20)) ? w-2 : w ); \
1233 const x264_weight_t *weight = x264_weight_none; \
1234 set_func_name( "get_ref_%dx%d", w_checked, h ); \
1235 used_asm = 1; \
1236 for( int i = 0; i < 1024; i++ ) \
1237 pbuf3[i] = pbuf4[i] = 0xCD; \
1238 call_c( mc_c.mc_luma, dst1, (intptr_t)32, src2, (intptr_t)64, dx, dy, w, h, weight ); \
1239 ref = (pixel*)call_a( mc_a.get_ref, ref, &ref_stride, src2, (intptr_t)64, dx, dy, w, h, weight ); \
1240 for( int i = 0; i < h; i++ ) \
1241 if( memcmp( dst1+i*32, ref+i*ref_stride, w_checked * SIZEOF_PIXEL ) ) \
1242 { \
1243 fprintf( stderr, "get_ref[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w_checked, h ); \
1244 ok = 0; \
1245 break; \
1246 } \
1247 }
1248
1249 #define MC_TEST_CHROMA( w, h ) \
1250 if( mc_a.mc_chroma != mc_ref.mc_chroma ) \
1251 { \
1252 set_func_name( "mc_chroma_%dx%d", w, h ); \
1253 used_asm = 1; \
1254 for( int i = 0; i < 1024; i++ ) \
1255 pbuf3[i] = pbuf4[i] = 0xCD; \
1256 call_c( mc_c.mc_chroma, dst1, dst1+8, (intptr_t)16, src, (intptr_t)64, dx, dy, w, h ); \
1257 call_a( mc_a.mc_chroma, dst2, dst2+8, (intptr_t)16, src, (intptr_t)64, dx, dy, w, h ); \
1258 /* mc_chroma width=2 may write garbage to the right of dst. ignore that. */ \
1259 for( int j = 0; j < h; j++ ) \
1260 for( int i = w; i < 8; i++ ) \
1261 { \
1262 dst2[i+j*16+8] = dst1[i+j*16+8]; \
1263 dst2[i+j*16 ] = dst1[i+j*16 ]; \
1264 } \
1265 if( memcmp( pbuf3, pbuf4, 1024 * SIZEOF_PIXEL ) ) \
1266 { \
1267 fprintf( stderr, "mc_chroma[mv(%d,%d) %2dx%-2d] [FAILED]\n", dx, dy, w, h ); \
1268 ok = 0; \
1269 } \
1270 }
1271 ok = 1; used_asm = 0;
1272 for( int dy = -8; dy < 8; dy++ )
1273 for( int dx = -128; dx < 128; dx++ )
1274 {
1275 if( rand()&15 ) continue; // running all of them is too slow
1276 MC_TEST_LUMA( 20, 18 );
1277 MC_TEST_LUMA( 16, 16 );
1278 MC_TEST_LUMA( 16, 8 );
1279 MC_TEST_LUMA( 12, 10 );
1280 MC_TEST_LUMA( 8, 16 );
1281 MC_TEST_LUMA( 8, 8 );
1282 MC_TEST_LUMA( 8, 4 );
1283 MC_TEST_LUMA( 4, 8 );
1284 MC_TEST_LUMA( 4, 4 );
1285 }
1286 report( "mc luma :" );
1287
1288 ok = 1; used_asm = 0;
1289 for( int dy = -1; dy < 9; dy++ )
1290 for( int dx = -128; dx < 128; dx++ )
1291 {
1292 if( rand()&15 ) continue;
1293 MC_TEST_CHROMA( 8, 8 );
1294 MC_TEST_CHROMA( 8, 4 );
1295 MC_TEST_CHROMA( 4, 8 );
1296 MC_TEST_CHROMA( 4, 4 );
1297 MC_TEST_CHROMA( 4, 2 );
1298 MC_TEST_CHROMA( 2, 4 );
1299 MC_TEST_CHROMA( 2, 2 );
1300 }
1301 report( "mc chroma :" );
1302 #undef MC_TEST_LUMA
1303 #undef MC_TEST_CHROMA
1304
1305 #define MC_TEST_AVG( name, weight ) \
1306 { \
1307 for( int i = 0; i < 12; i++ ) \
1308 { \
1309 memcpy( pbuf3, pbuf1+320, 320 * SIZEOF_PIXEL ); \
1310 memcpy( pbuf4, pbuf1+320, 320 * SIZEOF_PIXEL ); \
1311 if( mc_a.name[i] != mc_ref.name[i] ) \
1312 { \
1313 set_func_name( "%s_%s", #name, pixel_names[i] ); \
1314 used_asm = 1; \
1315 call_c1( mc_c.name[i], pbuf3, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
1316 call_a1( mc_a.name[i], pbuf4, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
1317 if( memcmp( pbuf3, pbuf4, 320 * SIZEOF_PIXEL ) ) \
1318 { \
1319 ok = 0; \
1320 fprintf( stderr, #name "[%d]: [FAILED]\n", i ); \
1321 } \
1322 call_c2( mc_c.name[i], pbuf3, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
1323 call_a2( mc_a.name[i], pbuf4, (intptr_t)16, pbuf2+1, (intptr_t)16, pbuf1+18, (intptr_t)16, weight ); \
1324 } \
1325 } \
1326 }
1327
1328 ok = 1, used_asm = 0;
1329 for( int w = -63; w <= 127 && ok; w++ )
1330 MC_TEST_AVG( avg, w );
1331 report( "mc wpredb :" );
1332
1333 #define MC_TEST_WEIGHT( name, weight, aligned ) \
1334 int align_off = (aligned ? 0 : rand()%16); \
1335 for( int i = 1; i <= 5; i++ ) \
1336 { \
1337 ALIGNED_16( pixel buffC[640] ); \
1338 ALIGNED_16( pixel buffA[640] ); \
1339 int j = X264_MAX( i*4, 2 ); \
1340 memset( buffC, 0, 640 * SIZEOF_PIXEL ); \
1341 memset( buffA, 0, 640 * SIZEOF_PIXEL ); \
1342 x264_t ha; \
1343 ha.mc = mc_a; \
1344 /* w12 is the same as w16 in some cases */ \
1345 if( i == 3 && mc_a.name[i] == mc_a.name[i+1] ) \
1346 continue; \
1347 if( mc_a.name[i] != mc_ref.name[i] ) \
1348 { \
1349 set_func_name( "%s_w%d", #name, j ); \
1350 used_asm = 1; \
1351 call_c1( mc_c.weight[i], buffC, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
1352 mc_a.weight_cache(&ha, &weight); \
1353 call_a1( weight.weightfn[i], buffA, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
1354 for( int k = 0; k < 16; k++ ) \
1355 if( memcmp( &buffC[k*32], &buffA[k*32], j * SIZEOF_PIXEL ) ) \
1356 { \
1357 ok = 0; \
1358 fprintf( stderr, #name "[%d]: [FAILED] s:%d o:%d d%d\n", i, s, o, d ); \
1359 break; \
1360 } \
1361 /* omit unlikely high scales for benchmarking */ \
1362 if( (s << (8-d)) < 512 ) \
1363 { \
1364 call_c2( mc_c.weight[i], buffC, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
1365 call_a2( weight.weightfn[i], buffA, (intptr_t)32, pbuf2+align_off, (intptr_t)32, &weight, 16 ); \
1366 } \
1367 } \
1368 }
1369
1370 ok = 1; used_asm = 0;
1371
1372 int align_cnt = 0;
1373 for( int s = 0; s <= 127 && ok; s++ )
1374 {
1375 for( int o = -128; o <= 127 && ok; o++ )
1376 {
1377 if( rand() & 2047 ) continue;
1378 for( int d = 0; d <= 7 && ok; d++ )
1379 {
1380 if( s == 1<<d )
1381 continue;
1382 x264_weight_t weight = { .i_scale = s, .i_denom = d, .i_offset = o };
1383 MC_TEST_WEIGHT( weight, weight, (align_cnt++ % 4) );
1384 }
1385 }
1386
1387 }
1388 report( "mc weight :" );
1389
1390 ok = 1; used_asm = 0;
1391 for( int o = 0; o <= 127 && ok; o++ )
1392 {
1393 int s = 1, d = 0;
1394 if( rand() & 15 ) continue;
1395 x264_weight_t weight = { .i_scale = 1, .i_denom = 0, .i_offset = o };
1396 MC_TEST_WEIGHT( offsetadd, weight, (align_cnt++ % 4) );
1397 }
1398 report( "mc offsetadd :" );
1399 ok = 1; used_asm = 0;
1400 for( int o = -128; o < 0 && ok; o++ )
1401 {
1402 int s = 1, d = 0;
1403 if( rand() & 15 ) continue;
1404 x264_weight_t weight = { .i_scale = 1, .i_denom = 0, .i_offset = o };
1405 MC_TEST_WEIGHT( offsetsub, weight, (align_cnt++ % 4) );
1406 }
1407 report( "mc offsetsub :" );
1408
1409 memset( pbuf3, 0, 64*16 );
1410 memset( pbuf4, 0, 64*16 );
1411 ok = 1; used_asm = 0;
1412 for( int height = 8; height <= 16; height += 8 )
1413 {
1414 if( mc_a.store_interleave_chroma != mc_ref.store_interleave_chroma )
1415 {
1416 set_func_name( "store_interleave_chroma" );
1417 used_asm = 1;
1418 call_c( mc_c.store_interleave_chroma, pbuf3, (intptr_t)64, pbuf1, pbuf1+16, height );
1419 call_a( mc_a.store_interleave_chroma, pbuf4, (intptr_t)64, pbuf1, pbuf1+16, height );
1420 if( memcmp( pbuf3, pbuf4, 64*height ) )
1421 {
1422 ok = 0;
1423 fprintf( stderr, "store_interleave_chroma FAILED: h=%d\n", height );
1424 break;
1425 }
1426 }
1427 if( mc_a.load_deinterleave_chroma_fenc != mc_ref.load_deinterleave_chroma_fenc )
1428 {
1429 set_func_name( "load_deinterleave_chroma_fenc" );
1430 used_asm = 1;
1431 call_c( mc_c.load_deinterleave_chroma_fenc, pbuf3, pbuf1, (intptr_t)64, height );
1432 call_a( mc_a.load_deinterleave_chroma_fenc, pbuf4, pbuf1, (intptr_t)64, height );
1433 if( memcmp( pbuf3, pbuf4, FENC_STRIDE*height ) )
1434 {
1435 ok = 0;
1436 fprintf( stderr, "load_deinterleave_chroma_fenc FAILED: h=%d\n", height );
1437 break;
1438 }
1439 }
1440 if( mc_a.load_deinterleave_chroma_fdec != mc_ref.load_deinterleave_chroma_fdec )
1441 {
1442 set_func_name( "load_deinterleave_chroma_fdec" );
1443 used_asm = 1;
1444 call_c( mc_c.load_deinterleave_chroma_fdec, pbuf3, pbuf1, (intptr_t)64, height );
1445 call_a( mc_a.load_deinterleave_chroma_fdec, pbuf4, pbuf1, (intptr_t)64, height );
1446 if( memcmp( pbuf3, pbuf4, FDEC_STRIDE*height ) )
1447 {
1448 ok = 0;
1449 fprintf( stderr, "load_deinterleave_chroma_fdec FAILED: h=%d\n", height );
1450 break;
1451 }
1452 }
1453 }
1454 report( "store_interleave :" );
1455
1456 struct plane_spec {
1457 int w, h, src_stride;
1458 } plane_specs[] = { {2,2,2}, {8,6,8}, {20,31,24}, {32,8,40}, {256,10,272}, {504,7,505}, {528,6,528}, {256,10,-256}, {263,9,-264}, {1904,1,0} };
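    // width/height/src_stride triples covering tiny, unaligned, negative-stride and
    // zero-stride sources to exercise edge cases in the plane_copy functions.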
1459 ok = 1; used_asm = 0;
1460 if( mc_a.plane_copy != mc_ref.plane_copy )
1461 {
1462 set_func_name( "plane_copy" );
1463 used_asm = 1;
1464 for( int i = 0; i < ARRAY_ELEMS(plane_specs); i++ )
1465 {
1466 int w = plane_specs[i].w;
1467 int h = plane_specs[i].h;
1468 intptr_t src_stride = plane_specs[i].src_stride;
1469 intptr_t dst_stride = (w + 127) & ~63;
1470 assert( dst_stride * h <= 0x1000 );
1471 pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
1472 memset( pbuf3, 0, 0x1000*SIZEOF_PIXEL );
1473 memset( pbuf4, 0, 0x1000*SIZEOF_PIXEL );
1474 call_c( mc_c.plane_copy, pbuf3, dst_stride, src1, src_stride, w, h );
1475 call_a( mc_a.plane_copy, pbuf4, dst_stride, src1, src_stride, w, h );
1476 for( int y = 0; y < h; y++ )
1477 if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, w*SIZEOF_PIXEL ) )
1478 {
1479 ok = 0;
1480 fprintf( stderr, "plane_copy FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
1481 break;
1482 }
1483 }
1484 }
1485
1486 if( mc_a.plane_copy_swap != mc_ref.plane_copy_swap )
1487 {
1488 set_func_name( "plane_copy_swap" );
1489 used_asm = 1;
1490 for( int i = 0; i < ARRAY_ELEMS(plane_specs); i++ )
1491 {
1492 int w = (plane_specs[i].w + 1) >> 1;
1493 int h = plane_specs[i].h;
1494 intptr_t src_stride = plane_specs[i].src_stride;
1495 intptr_t dst_stride = (2*w + 127) & ~63;
1496 assert( dst_stride * h <= 0x1000 );
1497 pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
1498 memset( pbuf3, 0, 0x1000*SIZEOF_PIXEL );
1499 memset( pbuf4, 0, 0x1000*SIZEOF_PIXEL );
1500 call_c( mc_c.plane_copy_swap, pbuf3, dst_stride, src1, src_stride, w, h );
1501 call_a( mc_a.plane_copy_swap, pbuf4, dst_stride, src1, src_stride, w, h );
1502 for( int y = 0; y < h; y++ )
1503 if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, 2*w*SIZEOF_PIXEL ) )
1504 {
1505 ok = 0;
1506 fprintf( stderr, "plane_copy_swap FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
1507 break;
1508 }
1509 }
1510 }
1511
1512 if( mc_a.plane_copy_interleave != mc_ref.plane_copy_interleave )
1513 {
1514 set_func_name( "plane_copy_interleave" );
1515 used_asm = 1;
1516 for( int i = 0; i < ARRAY_ELEMS(plane_specs); i++ )
1517 {
1518 int w = (plane_specs[i].w + 1) >> 1;
1519 int h = plane_specs[i].h;
1520 intptr_t src_stride = (plane_specs[i].src_stride + 1) >> 1;
1521 intptr_t dst_stride = (2*w + 127) & ~63;
1522 assert( dst_stride * h <= 0x1000 );
1523 pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
1524 memset( pbuf3, 0, 0x1000*SIZEOF_PIXEL );
1525 memset( pbuf4, 0, 0x1000*SIZEOF_PIXEL );
1526 call_c( mc_c.plane_copy_interleave, pbuf3, dst_stride, src1, src_stride, src1+1024, src_stride+16, w, h );
1527 call_a( mc_a.plane_copy_interleave, pbuf4, dst_stride, src1, src_stride, src1+1024, src_stride+16, w, h );
1528 for( int y = 0; y < h; y++ )
1529 if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, 2*w*SIZEOF_PIXEL ) )
1530 {
1531 ok = 0;
1532 fprintf( stderr, "plane_copy_interleave FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
1533 break;
1534 }
1535 }
1536 }
1537
1538 if( mc_a.plane_copy_deinterleave != mc_ref.plane_copy_deinterleave )
1539 {
1540 set_func_name( "plane_copy_deinterleave" );
1541 used_asm = 1;
1542 for( int i = 0; i < ARRAY_ELEMS(plane_specs); i++ )
1543 {
1544 int w = (plane_specs[i].w + 1) >> 1;
1545 int h = plane_specs[i].h;
1546 intptr_t dst_stride = w;
1547 intptr_t src_stride = (2*w + 127) & ~63;
1548 intptr_t offv = (dst_stride*h + 63) & ~31;
1549 memset( pbuf3, 0, 0x1000 );
1550 memset( pbuf4, 0, 0x1000 );
1551 call_c( mc_c.plane_copy_deinterleave, pbuf3, dst_stride, pbuf3+offv, dst_stride, pbuf1, src_stride, w, h );
1552 call_a( mc_a.plane_copy_deinterleave, pbuf4, dst_stride, pbuf4+offv, dst_stride, pbuf1, src_stride, w, h );
1553 for( int y = 0; y < h; y++ )
1554 if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, w ) ||
1555 memcmp( pbuf3+y*dst_stride+offv, pbuf4+y*dst_stride+offv, w ) )
1556 {
1557 ok = 0;
1558 fprintf( stderr, "plane_copy_deinterleave FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
1559 break;
1560 }
1561 }
1562 }
1563
1564 if( mc_a.plane_copy_deinterleave_yuyv != mc_ref.plane_copy_deinterleave_yuyv )
1565 {
1566 set_func_name( "plane_copy_deinterleave_yuyv" );
1567 used_asm = 1;
1568 for( int i = 0; i < ARRAY_ELEMS(plane_specs); i++ )
1569 {
1570 int w = (plane_specs[i].w + 1) >> 1;
1571 int h = plane_specs[i].h;
1572 intptr_t dst_stride = ALIGN( w, 32/SIZEOF_PIXEL );
1573 intptr_t src_stride = (plane_specs[i].src_stride + 1) >> 1;
1574 intptr_t offv = dst_stride*h;
1575 pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
1576 memset( pbuf3, 0, 0x1000 );
1577 memset( pbuf4, 0, 0x1000 );
1578 /* Skip benchmarking since it's the same as plane_copy_deinterleave(), just verify correctness. */
1579 call_c1( mc_c.plane_copy_deinterleave_yuyv, pbuf3, dst_stride, pbuf3+offv, dst_stride, src1, src_stride, w, h );
1580 call_a1( mc_a.plane_copy_deinterleave_yuyv, pbuf4, dst_stride, pbuf4+offv, dst_stride, src1, src_stride, w, h );
1581 for( int y = 0; y < h; y++ )
1582 if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, w*SIZEOF_PIXEL ) ||
1583 memcmp( pbuf3+y*dst_stride+offv, pbuf4+y*dst_stride+offv, w*SIZEOF_PIXEL ) )
1584 {
1585 fprintf( stderr, "plane_copy_deinterleave_yuyv FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
1586 break;
1587 }
1588 }
1589 }
1590
1591 if( mc_a.plane_copy_deinterleave_rgb != mc_ref.plane_copy_deinterleave_rgb )
1592 {
1593 set_func_name( "plane_copy_deinterleave_rgb" );
1594 used_asm = 1;
1595 for( int i = 0; i < ARRAY_ELEMS(plane_specs); i++ )
1596 {
1597 int w = (plane_specs[i].w + 2) >> 2;
1598 int h = plane_specs[i].h;
1599 intptr_t src_stride = plane_specs[i].src_stride;
1600 intptr_t dst_stride = ALIGN( w, 16 );
1601 intptr_t offv = dst_stride*h + 16;
1602 pixel *src1 = pbuf1 + X264_MAX(0, -src_stride) * (h-1);
1603
1604 for( int pw = 3; pw <= 4; pw++ )
1605 {
1606 memset( pbuf3, 0, 0x1000 );
1607 memset( pbuf4, 0, 0x1000 );
1608 call_c( mc_c.plane_copy_deinterleave_rgb, pbuf3, dst_stride, pbuf3+offv, dst_stride, pbuf3+2*offv, dst_stride, src1, src_stride, pw, w, h );
1609 call_a( mc_a.plane_copy_deinterleave_rgb, pbuf4, dst_stride, pbuf4+offv, dst_stride, pbuf4+2*offv, dst_stride, src1, src_stride, pw, w, h );
1610 for( int y = 0; y < h; y++ )
1611 if( memcmp( pbuf3+y*dst_stride+0*offv, pbuf4+y*dst_stride+0*offv, w ) ||
1612 memcmp( pbuf3+y*dst_stride+1*offv, pbuf4+y*dst_stride+1*offv, w ) ||
1613 memcmp( pbuf3+y*dst_stride+2*offv, pbuf4+y*dst_stride+2*offv, w ) )
1614 {
1615 ok = 0;
1616 fprintf( stderr, "plane_copy_deinterleave_rgb FAILED: w=%d h=%d stride=%d pw=%d\n", w, h, (int)src_stride, pw );
1617 break;
1618 }
1619 }
1620 }
1621 }
1622 report( "plane_copy :" );
1623
1624 if( mc_a.plane_copy_deinterleave_v210 != mc_ref.plane_copy_deinterleave_v210 )
1625 {
1626 set_func_name( "plane_copy_deinterleave_v210" );
1627 ok = 1; used_asm = 1;
1628 for( int i = 0; i < ARRAY_ELEMS(plane_specs); i++ )
1629 {
1630 int w = (plane_specs[i].w + 1) >> 1;
1631 int h = plane_specs[i].h;
1632 intptr_t dst_stride = ALIGN( w, 32 );
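/* v210 rows come in 128-byte groups; the stride is rounded up to whole groups and expressed in 32-bit words since the packed input is read as uint32_t */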
1633 intptr_t src_stride = (w + 47) / 48 * 128 / (int)sizeof(uint32_t);
1634 intptr_t offv = dst_stride*h + 32;
1635 memset( pbuf3, 0, 0x1000 );
1636 memset( pbuf4, 0, 0x1000 );
1637 call_c( mc_c.plane_copy_deinterleave_v210, pbuf3, dst_stride, pbuf3+offv, dst_stride, (uint32_t *)buf1, src_stride, w, h );
1638 call_a( mc_a.plane_copy_deinterleave_v210, pbuf4, dst_stride, pbuf4+offv, dst_stride, (uint32_t *)buf1, src_stride, w, h );
1639 for( int y = 0; y < h; y++ )
1640 if( memcmp( pbuf3+y*dst_stride, pbuf4+y*dst_stride, w*sizeof(uint16_t) ) ||
1641 memcmp( pbuf3+y*dst_stride+offv, pbuf4+y*dst_stride+offv, w*sizeof(uint16_t) ) )
1642 {
1643 ok = 0;
1644 fprintf( stderr, "plane_copy_deinterleave_v210 FAILED: w=%d h=%d stride=%d\n", w, h, (int)src_stride );
1645 break;
1646 }
1647 }
1648 report( "v210 :" );
1649 }
1650
1651 if( mc_a.hpel_filter != mc_ref.hpel_filter )
1652 {
1653 pixel *srchpel = pbuf1+8+2*64;
1654 pixel *dstc[3] = { pbuf3+8, pbuf3+8+16*64, pbuf3+8+32*64 };
1655 pixel *dsta[3] = { pbuf4+8, pbuf4+8+16*64, pbuf4+8+32*64 };
1656 void *tmp = pbuf3+49*64;
1657 set_func_name( "hpel_filter" );
1658 ok = 1; used_asm = 1;
1659 memset( pbuf3, 0, 4096 * SIZEOF_PIXEL );
1660 memset( pbuf4, 0, 4096 * SIZEOF_PIXEL );
1661 call_c( mc_c.hpel_filter, dstc[0], dstc[1], dstc[2], srchpel, (intptr_t)64, 48, 10, tmp );
1662 call_a( mc_a.hpel_filter, dsta[0], dsta[1], dsta[2], srchpel, (intptr_t)64, 48, 10, tmp );
1663 for( int i = 0; i < 3; i++ )
1664 for( int j = 0; j < 10; j++ )
1665 //FIXME ideally the first pixels would match too, but they aren't actually used
1666 if( memcmp( dstc[i]+j*64+2, dsta[i]+j*64+2, 43 * SIZEOF_PIXEL ) )
1667 {
1668 ok = 0;
1669 fprintf( stderr, "hpel filter differs at plane %c line %d\n", "hvc"[i], j );
1670 for( int k = 0; k < 48; k++ )
1671 printf( "%02x%s", dstc[i][j*64+k], (k+1)&3 ? "" : " " );
1672 printf( "\n" );
1673 for( int k = 0; k < 48; k++ )
1674 printf( "%02x%s", dsta[i][j*64+k], (k+1)&3 ? "" : " " );
1675 printf( "\n" );
1676 break;
1677 }
1678 report( "hpel filter :" );
1679 }
1680
1681 if( mc_a.frame_init_lowres_core != mc_ref.frame_init_lowres_core )
1682 {
1683 pixel *dstc[4] = { pbuf3, pbuf3+1024, pbuf3+2048, pbuf3+3072 };
1684 pixel *dsta[4] = { pbuf4, pbuf4+1024, pbuf4+2048, pbuf4+3072 };
1685 set_func_name( "lowres_init" );
1686 ok = 1; used_asm = 1;
1687 for( int w = 96; w <= 96+24; w += 8 )
1688 {
1689 intptr_t stride = (w*2+31)&~31;
1690 intptr_t stride_lowres = (w+31)&~31;
1691 call_c( mc_c.frame_init_lowres_core, pbuf1, dstc[0], dstc[1], dstc[2], dstc[3], stride, stride_lowres, w, 8 );
1692 call_a( mc_a.frame_init_lowres_core, pbuf1, dsta[0], dsta[1], dsta[2], dsta[3], stride, stride_lowres, w, 8 );
1693 for( int i = 0; i < 8; i++ )
1694 {
1695 for( int j = 0; j < 4; j++ )
1696 if( memcmp( dstc[j]+i*stride_lowres, dsta[j]+i*stride_lowres, w * SIZEOF_PIXEL ) )
1697 {
1698 ok = 0;
1699 fprintf( stderr, "frame_init_lowres differs at plane %d line %d\n", j, i );
1700 for( int k = 0; k < w; k++ )
1701 printf( "%d ", dstc[j][k+i*stride_lowres] );
1702 printf( "\n" );
1703 for( int k = 0; k < w; k++ )
1704 printf( "%d ", dsta[j][k+i*stride_lowres] );
1705 printf( "\n" );
1706 break;
1707 }
1708 }
1709 }
1710 report( "lowres init :" );
1711 }
1712
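/* INTEGRAL_INIT: run the C and asm integral-image initialisers on identical copies of random data (buf3/buf4) and compare the rows of 16-bit sums they produce */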
1713 #define INTEGRAL_INIT( name, size, offset, cmp_len, ... )\
1714 if( mc_a.name != mc_ref.name )\
1715 {\
1716 intptr_t stride = 96;\
1717 set_func_name( #name );\
1718 used_asm = 1;\
1719 memcpy( buf3, buf1, size*2*stride );\
1720 memcpy( buf4, buf1, size*2*stride );\
1721 uint16_t *sum = (uint16_t*)buf3;\
1722 call_c1( mc_c.name, sum+offset, __VA_ARGS__ );\
1723 sum = (uint16_t*)buf4;\
1724 call_a1( mc_a.name, sum+offset, __VA_ARGS__ );\
1725 if( memcmp( buf3+2*offset, buf4+2*offset, cmp_len*2 )\
1726 || (size>9 && memcmp( buf3+18*stride, buf4+18*stride, (stride-8)*2 )))\
1727 ok = 0;\
1728 call_c2( mc_c.name, sum+offset, __VA_ARGS__ );\
1729 call_a2( mc_a.name, sum+offset, __VA_ARGS__ );\
1730 }
1731 ok = 1; used_asm = 0;
1732 INTEGRAL_INIT( integral_init4h, 2, stride, stride-4, pbuf2, stride );
1733 INTEGRAL_INIT( integral_init8h, 2, stride, stride-8, pbuf2, stride );
1734 INTEGRAL_INIT( integral_init4v, 14, 0, stride-8, sum+9*stride, stride );
1735 INTEGRAL_INIT( integral_init8v, 9, 0, stride-8, stride );
1736 report( "integral init :" );
1737
1738 ok = 1; used_asm = 0;
1739 if( mc_a.mbtree_propagate_cost != mc_ref.mbtree_propagate_cost )
1740 {
1741 used_asm = 1;
1742 x264_emms();
1743 for( int i = 0; i < 10; i++ )
1744 {
1745 float fps_factor = (rand30()&65535) / 65535.0f;
1746 set_func_name( "mbtree_propagate_cost" );
1747 int16_t *dsta = (int16_t*)buf3;
1748 int16_t *dstc = dsta+400;
1749 uint16_t *prop = (uint16_t*)buf1;
1750 uint16_t *intra = (uint16_t*)buf4;
1751 uint16_t *inter = intra+128;
1752 uint16_t *qscale = inter+128;
1753 uint16_t *rnd = (uint16_t*)buf2;
1754 x264_emms();
1755 for( int j = 0; j < 100; j++ )
1756 {
1757 intra[j] = *rnd++ & 0x7fff;
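/* keep intra costs nonzero: the propagate computation divides by the intra cost */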
1758 intra[j] += !intra[j];
1759 inter[j] = *rnd++ & 0x7fff;
1760 qscale[j] = *rnd++ & 0x7fff;
1761 }
1762 call_c( mc_c.mbtree_propagate_cost, dstc, prop, intra, inter, qscale, &fps_factor, 100 );
1763 call_a( mc_a.mbtree_propagate_cost, dsta, prop, intra, inter, qscale, &fps_factor, 100 );
1764 // I don't care about exact rounding, this is just how close the floating-point implementation happens to be
1765 x264_emms();
1766 for( int j = 0; j < 100 && ok; j++ )
1767 {
1768 ok &= abs( dstc[j]-dsta[j] ) <= 1 || fabs( (double)dstc[j]/dsta[j]-1 ) < 1e-4;
1769 if( !ok )
1770 fprintf( stderr, "mbtree_propagate_cost FAILED: %d !~= %d\n", dstc[j], dsta[j] );
1771 }
1772 }
1773 }
1774
1775 if( mc_a.mbtree_propagate_list != mc_ref.mbtree_propagate_list )
1776 {
1777 used_asm = 1;
1778 for( int i = 0; i < 8; i++ )
1779 {
1780 set_func_name( "mbtree_propagate_list" );
1781 x264_t h;
1782 int height = 4;
1783 int width = 128;
1784 int size = width*height;
1785 h.mb.i_mb_stride = width;
1786 h.mb.i_mb_width = width;
1787 h.mb.i_mb_height = height;
1788
1789 uint16_t *ref_costsc = (uint16_t*)buf3 + width;
1790 uint16_t *ref_costsa = (uint16_t*)buf4 + width;
1791 int16_t (*mvs)[2] = (int16_t(*)[2])(ref_costsc + width + size);
1792 int16_t *propagate_amount = (int16_t*)(mvs + width);
1793 uint16_t *lowres_costs = (uint16_t*)(propagate_amount + width);
1794 h.scratch_buffer2 = (uint8_t*)(ref_costsa + width + size);
1795 int bipred_weight = (rand()%63)+1;
1796 int mb_y = rand()&3;
1797 int list = i&1;
1798 for( int j = -width; j < size+width; j++ )
1799 ref_costsc[j] = ref_costsa[j] = rand()&32767;
1800 for( int j = 0; j < width; j++ )
1801 {
1802 static const uint8_t list_dist[2][8] = {{0,1,1,1,1,1,1,1},{1,1,3,3,3,3,3,2}};
1803 for( int k = 0; k < 2; k++ )
1804 mvs[j][k] = (rand()&127) - 64;
1805 propagate_amount[j] = rand()&32767;
1806 lowres_costs[j] = list_dist[list][rand()&7] << LOWRES_COST_SHIFT;
1807 }
1808
1809 call_c1( mc_c.mbtree_propagate_list, &h, ref_costsc, mvs, propagate_amount, lowres_costs, bipred_weight, mb_y, width, list );
1810 call_a1( mc_a.mbtree_propagate_list, &h, ref_costsa, mvs, propagate_amount, lowres_costs, bipred_weight, mb_y, width, list );
1811
1812 for( int j = -width; j < size+width && ok; j++ )
1813 {
1814 ok &= abs(ref_costsa[j] - ref_costsc[j]) <= 1;
1815 if( !ok )
1816 fprintf( stderr, "mbtree_propagate_list FAILED at %d: %d !~= %d\n", j, ref_costsc[j], ref_costsa[j] );
1817 }
1818
1819 call_c2( mc_c.mbtree_propagate_list, &h, ref_costsc, mvs, propagate_amount, lowres_costs, bipred_weight, mb_y, width, list );
1820 call_a2( mc_a.mbtree_propagate_list, &h, ref_costsa, mvs, propagate_amount, lowres_costs, bipred_weight, mb_y, width, list );
1821 }
1822 }
1823
1824 static const uint16_t mbtree_fix8_counts[] = { 5, 384, 392, 400, 415 };
1825
1826 if( mc_a.mbtree_fix8_pack != mc_ref.mbtree_fix8_pack )
1827 {
1828 set_func_name( "mbtree_fix8_pack" );
1829 used_asm = 1;
1830 float *fix8_src = (float*)(buf3 + 0x800);
1831 uint16_t *dstc = (uint16_t*)buf3;
1832 uint16_t *dsta = (uint16_t*)buf4;
1833 for( int i = 0; i < ARRAY_ELEMS(mbtree_fix8_counts); i++ )
1834 {
1835 int count = mbtree_fix8_counts[i];
1836
1837 for( int j = 0; j < count; j++ )
1838 fix8_src[j] = (int16_t)(rand()) / 256.0f;
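/* canary past the end of the output; checked below to ensure the pack routine writes exactly 'count' entries */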
1839 dsta[count] = 0xAAAA;
1840
1841 call_c( mc_c.mbtree_fix8_pack, dstc, fix8_src, count );
1842 call_a( mc_a.mbtree_fix8_pack, dsta, fix8_src, count );
1843
1844 if( memcmp( dsta, dstc, count * sizeof(uint16_t) ) || dsta[count] != 0xAAAA )
1845 {
1846 ok = 0;
1847 fprintf( stderr, "mbtree_fix8_pack FAILED\n" );
1848 break;
1849 }
1850 }
1851 }
1852
1853 if( mc_a.mbtree_fix8_unpack != mc_ref.mbtree_fix8_unpack )
1854 {
1855 set_func_name( "mbtree_fix8_unpack" );
1856 used_asm = 1;
1857 uint16_t *fix8_src = (uint16_t*)(buf3 + 0x800);
1858 float *dstc = (float*)buf3;
1859 float *dsta = (float*)buf4;
1860 for( int i = 0; i < ARRAY_ELEMS(mbtree_fix8_counts); i++ )
1861 {
1862 int count = mbtree_fix8_counts[i];
1863
1864 for( int j = 0; j < count; j++ )
1865 fix8_src[j] = rand();
1866 M32( &dsta[count] ) = 0xAAAAAAAA;
1867
1868 call_c( mc_c.mbtree_fix8_unpack, dstc, fix8_src, count );
1869 call_a( mc_a.mbtree_fix8_unpack, dsta, fix8_src, count );
1870
1871 if( memcmp( dsta, dstc, count * sizeof(float) ) || M32( &dsta[count] ) != 0xAAAAAAAA )
1872 {
1873 ok = 0;
1874 fprintf( stderr, "mbtree_fix8_unpack FAILED\n" );
1875 break;
1876 }
1877 }
1878 }
1879 report( "mbtree :" );
1880
1881 if( mc_a.memcpy_aligned != mc_ref.memcpy_aligned )
1882 {
1883 set_func_name( "memcpy_aligned" );
1884 ok = 1; used_asm = 1;
1885 for( size_t size = 16; size < 512; size += 16 )
1886 {
1887 for( size_t i = 0; i < size; i++ )
1888 buf1[i] = rand();
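/* 0xAA guard bytes immediately before and after the destination catch out-of-bounds writes */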
1889 memset( buf4-1, 0xAA, size + 2 );
1890 call_c( mc_c.memcpy_aligned, buf3, buf1, size );
1891 call_a( mc_a.memcpy_aligned, buf4, buf1, size );
1892 if( memcmp( buf3, buf4, size ) || buf4[-1] != 0xAA || buf4[size] != 0xAA )
1893 {
1894 ok = 0;
1895 fprintf( stderr, "memcpy_aligned FAILED: size=%d\n", (int)size );
1896 break;
1897 }
1898 }
1899 report( "memcpy aligned :" );
1900 }
1901
1902 if( mc_a.memzero_aligned != mc_ref.memzero_aligned )
1903 {
1904 set_func_name( "memzero_aligned" );
1905 ok = 1; used_asm = 1;
1906 for( size_t size = 128; size < 1024; size += 128 )
1907 {
1908 memset( buf4-1, 0xAA, size + 2 );
1909 call_c( mc_c.memzero_aligned, buf3, size );
1910 call_a( mc_a.memzero_aligned, buf4, size );
1911 if( memcmp( buf3, buf4, size ) || buf4[-1] != 0xAA || buf4[size] != 0xAA )
1912 {
1913 ok = 0;
1914 fprintf( stderr, "memzero_aligned FAILED: size=%d\n", (int)size );
1915 break;
1916 }
1917 }
1918 report( "memzero aligned :" );
1919 }
1920
1921 return ret;
1922 }
1923
1924 static int check_deblock( uint32_t cpu_ref, uint32_t cpu_new )
1925 {
1926 x264_deblock_function_t db_c;
1927 x264_deblock_function_t db_ref;
1928 x264_deblock_function_t db_a;
1929 int ret = 0, ok = 1, used_asm = 0;
1930 int alphas[36], betas[36];
1931 int8_t tcs[36][4];
1932
1933 x264_deblock_init( 0, &db_c, 0 );
1934 x264_deblock_init( cpu_ref, &db_ref, 0 );
1935 x264_deblock_init( cpu_new, &db_a, 0 );
1936
1937 /* not exactly the real values of a,b,tc but close enough */
1938 for( int i = 35, a = 255, c = 250; i >= 0; i-- )
1939 {
1940 alphas[i] = a << (BIT_DEPTH-8);
1941 betas[i] = (i+1)/2 << (BIT_DEPTH-8);
1942 tcs[i][0] = tcs[i][3] = (c+6)/10 << (BIT_DEPTH-8);
1943 tcs[i][1] = (c+7)/15 << (BIT_DEPTH-8);
1944 tcs[i][2] = (c+9)/20 << (BIT_DEPTH-8);
1945 a = a*9/10;
1946 c = c*9/10;
1947 }
1948
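/* TEST_DEBLOCK: run a filter over the 36 (alpha,beta,tc) sets above on two random pixel distributions; with align==0 the offset is varied to also cover unaligned horizontal-filter cases */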
1949 #define TEST_DEBLOCK( name, align, ... ) \
1950 for( int i = 0; i < 36; i++ ) \
1951 { \
1952 intptr_t off = 8*32 + (i&15)*4*!align; /* benchmark various alignments of h filter */ \
1953 for( int j = 0; j < 1024; j++ ) \
1954 /* two distributions of random values to exercise different failure modes */ \
1955 pbuf3[j] = rand() & (i&1 ? 0xf : PIXEL_MAX ); \
1956 memcpy( pbuf4, pbuf3, 1024 * SIZEOF_PIXEL ); \
1957 if( db_a.name != db_ref.name ) \
1958 { \
1959 set_func_name( #name ); \
1960 used_asm = 1; \
1961 call_c1( db_c.name, pbuf3+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
1962 call_a1( db_a.name, pbuf4+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
1963 if( memcmp( pbuf3, pbuf4, 1024 * SIZEOF_PIXEL ) ) \
1964 { \
1965 ok = 0; \
1966 fprintf( stderr, #name "(a=%d, b=%d): [FAILED]\n", alphas[i], betas[i] ); \
1967 break; \
1968 } \
1969 call_c2( db_c.name, pbuf3+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
1970 call_a2( db_a.name, pbuf4+off, (intptr_t)32, alphas[i], betas[i], ##__VA_ARGS__ ); \
1971 } \
1972 }
1973
1974 TEST_DEBLOCK( deblock_luma[0], 0, tcs[i] );
1975 TEST_DEBLOCK( deblock_luma[1], 1, tcs[i] );
1976 TEST_DEBLOCK( deblock_h_chroma_420, 0, tcs[i] );
1977 TEST_DEBLOCK( deblock_h_chroma_422, 0, tcs[i] );
1978 TEST_DEBLOCK( deblock_chroma_420_mbaff, 0, tcs[i] );
1979 TEST_DEBLOCK( deblock_chroma_422_mbaff, 0, tcs[i] );
1980 TEST_DEBLOCK( deblock_chroma[1], 1, tcs[i] );
1981 TEST_DEBLOCK( deblock_luma_intra[0], 0 );
1982 TEST_DEBLOCK( deblock_luma_intra[1], 1 );
1983 TEST_DEBLOCK( deblock_h_chroma_420_intra, 0 );
1984 TEST_DEBLOCK( deblock_h_chroma_422_intra, 0 );
1985 TEST_DEBLOCK( deblock_chroma_420_intra_mbaff, 0 );
1986 TEST_DEBLOCK( deblock_chroma_422_intra_mbaff, 0 );
1987 TEST_DEBLOCK( deblock_chroma_intra[1], 1 );
1988
1989 if( db_a.deblock_strength != db_ref.deblock_strength )
1990 {
1991 set_func_name( "deblock_strength" );
1992 used_asm = 1;
1993 for( int i = 0; i < 100; i++ )
1994 {
1995 ALIGNED_ARRAY_16( uint8_t, nnz_buf, [X264_SCAN8_SIZE+8] );
1996 uint8_t *nnz = &nnz_buf[8];
1997 ALIGNED_4( int8_t ref[2][X264_SCAN8_LUMA_SIZE] );
1998 ALIGNED_ARRAY_16( int16_t, mv, [2],[X264_SCAN8_LUMA_SIZE][2] );
1999 ALIGNED_ARRAY_32( uint8_t, bs, [2],[2][8][4] );
2000 memset( bs, 99, sizeof(uint8_t)*2*4*8*2 );
2001 for( int j = 0; j < X264_SCAN8_SIZE; j++ )
2002 nnz[j] = ((rand()&7) == 7) * rand() & 0xf;
2003 for( int j = 0; j < 2; j++ )
2004 for( int k = 0; k < X264_SCAN8_LUMA_SIZE; k++ )
2005 {
2006 ref[j][k] = ((rand()&3) != 3) ? 0 : (rand() & 31) - 2;
2007 for( int l = 0; l < 2; l++ )
2008 mv[j][k][l] = ((rand()&7) != 7) ? (rand()&7) - 3 : (rand()&16383) - 8192;
2009 }
2010 call_c( db_c.deblock_strength, nnz, ref, mv, bs[0], 2<<(i&1), ((i>>1)&1) );
2011 call_a( db_a.deblock_strength, nnz, ref, mv, bs[1], 2<<(i&1), ((i>>1)&1) );
2012 if( memcmp( bs[0], bs[1], sizeof(uint8_t)*2*4*8 ) )
2013 {
2014 ok = 0;
2015 fprintf( stderr, "deblock_strength: [FAILED]\n" );
2016 for( int j = 0; j < 2; j++ )
2017 {
2018 for( int k = 0; k < 2; k++ )
2019 for( int l = 0; l < 4; l++ )
2020 {
2021 for( int m = 0; m < 4; m++ )
2022 printf("%d ",bs[j][k][l][m]);
2023 printf("\n");
2024 }
2025 printf("\n");
2026 }
2027 break;
2028 }
2029 }
2030 }
2031
2032 report( "deblock :" );
2033
2034 return ret;
2035 }
2036
2037 static int check_quant( uint32_t cpu_ref, uint32_t cpu_new )
2038 {
2039 x264_quant_function_t qf_c;
2040 x264_quant_function_t qf_ref;
2041 x264_quant_function_t qf_a;
2042 ALIGNED_ARRAY_64( dctcoef, dct1,[64] );
2043 ALIGNED_ARRAY_64( dctcoef, dct2,[64] );
2044 ALIGNED_ARRAY_32( dctcoef, dct3,[8],[16] );
2045 ALIGNED_ARRAY_32( dctcoef, dct4,[8],[16] );
2046 ALIGNED_ARRAY_32( uint8_t, cqm_buf,[64] );
2047 int ret = 0, ok, used_asm;
2048 int oks[3] = {1,1,1}, used_asms[3] = {0,0,0};
2049 x264_t h_buf;
2050 x264_t *h = &h_buf;
2051 memset( h, 0, sizeof(*h) );
2052 h->sps->i_chroma_format_idc = 1;
2053 x264_param_default( &h->param );
2054 h->chroma_qp_table = i_chroma_qp_table + 12;
2055 h->param.analyse.b_transform_8x8 = 1;
2056
2057 for( int i_cqm = 0; i_cqm < 4; i_cqm++ )
2058 {
2059 if( i_cqm == 0 )
2060 {
2061 for( int i = 0; i < 6; i++ )
2062 h->sps->scaling_list[i] = x264_cqm_flat16;
2063 h->param.i_cqm_preset = h->sps->i_cqm_preset = X264_CQM_FLAT;
2064 }
2065 else if( i_cqm == 1 )
2066 {
2067 for( int i = 0; i < 6; i++ )
2068 h->sps->scaling_list[i] = x264_cqm_jvt[i];
2069 h->param.i_cqm_preset = h->sps->i_cqm_preset = X264_CQM_JVT;
2070 }
2071 else
2072 {
2073 int max_scale = BIT_DEPTH < 10 ? 255 : 228;
2074 if( i_cqm == 2 )
2075 for( int i = 0; i < 64; i++ )
2076 cqm_buf[i] = 10 + rand() % (max_scale - 9);
2077 else
2078 for( int i = 0; i < 64; i++ )
2079 cqm_buf[i] = 1;
2080 for( int i = 0; i < 6; i++ )
2081 h->sps->scaling_list[i] = cqm_buf;
2082 h->param.i_cqm_preset = h->sps->i_cqm_preset = X264_CQM_CUSTOM;
2083 }
2084
2085 h->param.rc.i_qp_min = 0;
2086 h->param.rc.i_qp_max = QP_MAX_SPEC;
2087 x264_cqm_init( h );
2088 x264_quant_init( h, 0, &qf_c );
2089 x264_quant_init( h, cpu_ref, &qf_ref );
2090 x264_quant_init( h, cpu_new, &qf_a );
2091
2092 #define INIT_QUANT8(j,max) \
2093 { \
2094 static const int scale1d[8] = {32,31,24,31,32,31,24,31}; \
2095 for( int i = 0; i < max; i++ ) \
2096 { \
2097 unsigned int scale = (255*scale1d[(i>>3)&7]*scale1d[i&7])/16; \
2098 dct1[i] = dct2[i] = (j>>(i>>6))&1 ? (rand()%(2*scale+1))-scale : 0; \
2099 } \
2100 }
2101
2102 #define INIT_QUANT4(j,max) \
2103 { \
2104 static const int scale1d[4] = {4,6,4,6}; \
2105 for( int i = 0; i < max; i++ ) \
2106 { \
2107 unsigned int scale = 255*scale1d[(i>>2)&3]*scale1d[i&3]; \
2108 dct1[i] = dct2[i] = (j>>(i>>4))&1 ? (rand()%(2*scale+1))-scale : 0; \
2109 } \
2110 }
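/* INIT_QUANT4/8: fill dct1/dct2 with coefficients bounded by roughly the largest magnitude each position can reach after the forward transform; the bits of j select which sub-blocks stay zero */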
2111
2112 #define TEST_QUANT_DC( name, cqm ) \
2113 if( qf_a.name != qf_ref.name ) \
2114 { \
2115 set_func_name( #name ); \
2116 used_asms[0] = 1; \
2117 for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
2118 { \
2119 for( int j = 0; j < 2; j++ ) \
2120 { \
2121 int result_c, result_a; \
2122 for( int i = 0; i < 16; i++ ) \
2123 dct1[i] = dct2[i] = j ? (rand() & 0x1fff) - 0xfff : 0; \
2124 result_c = call_c1( qf_c.name, dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
2125 result_a = call_a1( qf_a.name, dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
2126 if( memcmp( dct1, dct2, 16*sizeof(dctcoef) ) || result_c != result_a ) \
2127 { \
2128 oks[0] = 0; \
2129 fprintf( stderr, #name "(cqm=%d): [FAILED]\n", i_cqm ); \
2130 break; \
2131 } \
2132 call_c2( qf_c.name, dct1, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
2133 call_a2( qf_a.name, dct2, h->quant4_mf[CQM_4IY][qp][0], h->quant4_bias[CQM_4IY][qp][0] ); \
2134 } \
2135 } \
2136 }
2137
2138 #define TEST_QUANT( qname, block, type, w, maxj ) \
2139 if( qf_a.qname != qf_ref.qname ) \
2140 { \
2141 set_func_name( #qname ); \
2142 used_asms[0] = 1; \
2143 for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
2144 { \
2145 for( int j = 0; j < maxj; j++ ) \
2146 { \
2147 INIT_QUANT##type(j, w*w) \
2148 int result_c = call_c1( qf_c.qname, (void*)dct1, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
2149 int result_a = call_a1( qf_a.qname, (void*)dct2, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
2150 if( memcmp( dct1, dct2, w*w*sizeof(dctcoef) ) || result_c != result_a ) \
2151 { \
2152 oks[0] = 0; \
2153 fprintf( stderr, #qname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
2154 break; \
2155 } \
2156 call_c2( qf_c.qname, (void*)dct1, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
2157 call_a2( qf_a.qname, (void*)dct2, h->quant##type##_mf[block][qp], h->quant##type##_bias[block][qp] ); \
2158 } \
2159 } \
2160 }
2161
2162 TEST_QUANT( quant_8x8, CQM_8IY, 8, 8, 2 );
2163 TEST_QUANT( quant_8x8, CQM_8PY, 8, 8, 2 );
2164 TEST_QUANT( quant_4x4, CQM_4IY, 4, 4, 2 );
2165 TEST_QUANT( quant_4x4, CQM_4PY, 4, 4, 2 );
2166 TEST_QUANT( quant_4x4x4, CQM_4IY, 4, 8, 16 );
2167 TEST_QUANT( quant_4x4x4, CQM_4PY, 4, 8, 16 );
2168 TEST_QUANT_DC( quant_4x4_dc, **h->quant4_mf[CQM_4IY] );
2169 TEST_QUANT_DC( quant_2x2_dc, **h->quant4_mf[CQM_4IC] );
2170
2171 #define TEST_DEQUANT( qname, dqname, block, w ) \
2172 if( qf_a.dqname != qf_ref.dqname ) \
2173 { \
2174 set_func_name( "%s_%s", #dqname, i_cqm?"cqm":"flat" ); \
2175 used_asms[1] = 1; \
2176 for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
2177 { \
2178 INIT_QUANT##w(1, w*w) \
2179 qf_c.qname( dct1, h->quant##w##_mf[block][qp], h->quant##w##_bias[block][qp] ); \
2180 memcpy( dct2, dct1, w*w*sizeof(dctcoef) ); \
2181 call_c1( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
2182 call_a1( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
2183 if( memcmp( dct1, dct2, w*w*sizeof(dctcoef) ) ) \
2184 { \
2185 oks[1] = 0; \
2186 fprintf( stderr, #dqname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
2187 break; \
2188 } \
2189 call_c2( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
2190 call_a2( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
2191 } \
2192 }
2193
2194 TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8IY, 8 );
2195 TEST_DEQUANT( quant_8x8, dequant_8x8, CQM_8PY, 8 );
2196 TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4IY, 4 );
2197 TEST_DEQUANT( quant_4x4, dequant_4x4, CQM_4PY, 4 );
2198
2199 #define TEST_DEQUANT_DC( qname, dqname, block, w ) \
2200 if( qf_a.dqname != qf_ref.dqname ) \
2201 { \
2202 set_func_name( "%s_%s", #dqname, i_cqm?"cqm":"flat" ); \
2203 used_asms[1] = 1; \
2204 for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
2205 { \
2206 for( int i = 0; i < 16; i++ ) \
2207 dct1[i] = rand()%(PIXEL_MAX*16*2+1) - PIXEL_MAX*16; \
2208 qf_c.qname( dct1, h->quant##w##_mf[block][qp][0]>>1, h->quant##w##_bias[block][qp][0]>>1 ); \
2209 memcpy( dct2, dct1, w*w*sizeof(dctcoef) ); \
2210 call_c1( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
2211 call_a1( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
2212 if( memcmp( dct1, dct2, w*w*sizeof(dctcoef) ) ) \
2213 { \
2214 oks[1] = 0; \
2215 fprintf( stderr, #dqname "(qp=%d, cqm=%d, block="#block"): [FAILED]\n", qp, i_cqm ); \
2216 } \
2217 call_c2( qf_c.dqname, dct1, h->dequant##w##_mf[block], qp ); \
2218 call_a2( qf_a.dqname, dct2, h->dequant##w##_mf[block], qp ); \
2219 } \
2220 }
2221
2222 TEST_DEQUANT_DC( quant_4x4_dc, dequant_4x4_dc, CQM_4IY, 4 );
2223
2224 if( qf_a.idct_dequant_2x4_dc != qf_ref.idct_dequant_2x4_dc )
2225 {
2226 set_func_name( "idct_dequant_2x4_dc_%s", i_cqm?"cqm":"flat" );
2227 used_asms[1] = 1;
2228 for( int qp = h->chroma_qp_table[h->param.rc.i_qp_max]; qp >= h->chroma_qp_table[h->param.rc.i_qp_min]; qp-- )
2229 {
2230 for( int i = 0; i < 8; i++ )
2231 dct1[i] = rand()%(PIXEL_MAX*16*2+1) - PIXEL_MAX*16;
2232 qf_c.quant_2x2_dc( &dct1[0], h->quant4_mf[CQM_4IC][qp+3][0]>>1, h->quant4_bias[CQM_4IC][qp+3][0]>>1 );
2233 qf_c.quant_2x2_dc( &dct1[4], h->quant4_mf[CQM_4IC][qp+3][0]>>1, h->quant4_bias[CQM_4IC][qp+3][0]>>1 );
2234 call_c( qf_c.idct_dequant_2x4_dc, dct1, dct3, h->dequant4_mf[CQM_4IC], qp+3 );
2235 call_a( qf_a.idct_dequant_2x4_dc, dct1, dct4, h->dequant4_mf[CQM_4IC], qp+3 );
2236 for( int i = 0; i < 8; i++ )
2237 if( dct3[i][0] != dct4[i][0] )
2238 {
2239 oks[1] = 0;
2240 fprintf( stderr, "idct_dequant_2x4_dc (qp=%d, cqm=%d): [FAILED]\n", qp, i_cqm );
2241 break;
2242 }
2243 }
2244 }
2245
2246 if( qf_a.idct_dequant_2x4_dconly != qf_ref.idct_dequant_2x4_dconly )
2247 {
2248 set_func_name( "idct_dequant_2x4_dconly_%s", i_cqm?"cqm":"flat" );
2249 used_asms[1] = 1;
2250 for( int qp = h->chroma_qp_table[h->param.rc.i_qp_max]; qp >= h->chroma_qp_table[h->param.rc.i_qp_min]; qp-- )
2251 {
2252 for( int i = 0; i < 8; i++ )
2253 dct1[i] = rand()%(PIXEL_MAX*16*2+1) - PIXEL_MAX*16;
2254 qf_c.quant_2x2_dc( &dct1[0], h->quant4_mf[CQM_4IC][qp+3][0]>>1, h->quant4_bias[CQM_4IC][qp+3][0]>>1 );
2255 qf_c.quant_2x2_dc( &dct1[4], h->quant4_mf[CQM_4IC][qp+3][0]>>1, h->quant4_bias[CQM_4IC][qp+3][0]>>1 );
2256 memcpy( dct2, dct1, 8*sizeof(dctcoef) );
2257 call_c1( qf_c.idct_dequant_2x4_dconly, dct1, h->dequant4_mf[CQM_4IC], qp+3 );
2258 call_a1( qf_a.idct_dequant_2x4_dconly, dct2, h->dequant4_mf[CQM_4IC], qp+3 );
2259 if( memcmp( dct1, dct2, 8*sizeof(dctcoef) ) )
2260 {
2261 oks[1] = 0;
2262 fprintf( stderr, "idct_dequant_2x4_dconly (qp=%d, cqm=%d): [FAILED]\n", qp, i_cqm );
2263 break;
2264 }
2265 call_c2( qf_c.idct_dequant_2x4_dconly, dct1, h->dequant4_mf[CQM_4IC], qp+3 );
2266 call_a2( qf_a.idct_dequant_2x4_dconly, dct2, h->dequant4_mf[CQM_4IC], qp+3 );
2267 }
2268 }
2269
2270 #define TEST_OPTIMIZE_CHROMA_DC( optname, size ) \
2271 if( qf_a.optname != qf_ref.optname ) \
2272 { \
2273 set_func_name( #optname ); \
2274 used_asms[2] = 1; \
2275 for( int qp = h->param.rc.i_qp_max; qp >= h->param.rc.i_qp_min; qp-- ) \
2276 { \
2277 int qpdc = qp + (size == 8 ? 3 : 0); \
2278 int dmf = h->dequant4_mf[CQM_4IC][qpdc%6][0] << qpdc/6; \
2279 if( dmf > 32*64 ) \
2280 continue; \
2281 for( int i = 16;; i <<= 1 ) \
2282 { \
2283 int res_c, res_asm; \
2284 int max = X264_MIN( i, PIXEL_MAX*16 ); \
2285 for( int j = 0; j < size; j++ ) \
2286 dct1[j] = rand()%(max*2+1) - max; \
2287 for( int j = 0; j <= size; j += 4 ) \
2288 qf_c.quant_2x2_dc( &dct1[j], h->quant4_mf[CQM_4IC][qpdc][0]>>1, h->quant4_bias[CQM_4IC][qpdc][0]>>1 ); \
2289 memcpy( dct2, dct1, size*sizeof(dctcoef) ); \
2290 res_c = call_c1( qf_c.optname, dct1, dmf ); \
2291 res_asm = call_a1( qf_a.optname, dct2, dmf ); \
2292 if( res_c != res_asm || memcmp( dct1, dct2, size*sizeof(dctcoef) ) ) \
2293 { \
2294 oks[2] = 0; \
2295 fprintf( stderr, #optname "(qp=%d, res_c=%d, res_asm=%d): [FAILED]\n", qp, res_c, res_asm ); \
2296 } \
2297 call_c2( qf_c.optname, dct1, dmf ); \
2298 call_a2( qf_a.optname, dct2, dmf ); \
2299 if( i >= PIXEL_MAX*16 ) \
2300 break; \
2301 } \
2302 } \
2303 }
2304
2305 TEST_OPTIMIZE_CHROMA_DC( optimize_chroma_2x2_dc, 4 );
2306 TEST_OPTIMIZE_CHROMA_DC( optimize_chroma_2x4_dc, 8 );
2307
2308 x264_cqm_delete( h );
2309 }
2310
2311 ok = oks[0]; used_asm = used_asms[0];
2312 report( "quant :" );
2313
2314 ok = oks[1]; used_asm = used_asms[1];
2315 report( "dequant :" );
2316
2317 ok = oks[2]; used_asm = used_asms[2];
2318 report( "optimize chroma dc :" );
2319
2320 ok = 1; used_asm = 0;
2321 if( qf_a.denoise_dct != qf_ref.denoise_dct )
2322 {
2323 used_asm = 1;
2324 for( int size = 16; size <= 64; size += 48 )
2325 {
2326 set_func_name( "denoise_dct" );
2327 memcpy( dct1, buf1, size*sizeof(dctcoef) );
2328 memcpy( dct2, buf1, size*sizeof(dctcoef) );
2329 memcpy( buf3+256, buf3, 256 );
2330 call_c1( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (udctcoef*)buf2, size );
2331 call_a1( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (udctcoef*)buf2, size );
2332 if( memcmp( dct1, dct2, size*sizeof(dctcoef) ) || memcmp( buf3+4, buf3+256+4, (size-1)*sizeof(uint32_t) ) )
2333 ok = 0;
2334 call_c2( qf_c.denoise_dct, dct1, (uint32_t*)buf3, (udctcoef*)buf2, size );
2335 call_a2( qf_a.denoise_dct, dct2, (uint32_t*)(buf3+256), (udctcoef*)buf2, size );
2336 }
2337 }
2338 report( "denoise dct :" );
2339
2340 #define TEST_DECIMATE( decname, w, ac, thresh ) \
2341 if( qf_a.decname != qf_ref.decname ) \
2342 { \
2343 set_func_name( #decname ); \
2344 used_asm = 1; \
2345 for( int i = 0; i < 100; i++ ) \
2346 { \
2347 static const int distrib[16] = {1,1,1,1,1,1,1,1,1,1,1,1,2,3,4};\
2348 static const int zerorate_lut[4] = {3,7,15,31};\
2349 int zero_rate = zerorate_lut[i&3];\
2350 for( int idx = 0; idx < w*w; idx++ ) \
2351 { \
2352 int sign = (rand()&1) ? -1 : 1; \
2353 int abs_level = distrib[rand()&15]; \
2354 if( abs_level == 4 ) abs_level = rand()&0x3fff; \
2355 int zero = !(rand()&zero_rate); \
2356 dct1[idx] = zero * abs_level * sign; \
2357 } \
2358 if( ac ) \
2359 dct1[0] = 0; \
2360 int result_c = call_c( qf_c.decname, dct1 ); \
2361 int result_a = call_a( qf_a.decname, dct1 ); \
2362 if( X264_MIN(result_c,thresh) != X264_MIN(result_a,thresh) ) \
2363 { \
2364 ok = 0; \
2365 fprintf( stderr, #decname ": [FAILED]\n" ); \
2366 break; \
2367 } \
2368 } \
2369 }
2370
2371 ok = 1; used_asm = 0;
2372 TEST_DECIMATE( decimate_score64, 8, 0, 6 );
2373 TEST_DECIMATE( decimate_score16, 4, 0, 6 );
2374 TEST_DECIMATE( decimate_score15, 4, 1, 7 );
2375 report( "decimate_score :" );
2376
2377 #define TEST_LAST( last, lastname, size, ac ) \
2378 if( qf_a.last != qf_ref.last ) \
2379 { \
2380 set_func_name( #lastname ); \
2381 used_asm = 1; \
2382 for( int i = 0; i < 100; i++ ) \
2383 { \
2384 int nnz = 0; \
2385 int max = rand() & (size-1); \
2386 memset( dct1, 0, size*sizeof(dctcoef) ); \
2387 for( int idx = ac; idx < max; idx++ ) \
2388 nnz |= dct1[idx] = !(rand()&3) + (!(rand()&15))*rand(); \
2389 if( !nnz ) \
2390 dct1[ac] = 1; \
2391 int result_c = call_c( qf_c.last, dct1+ac ); \
2392 int result_a = call_a( qf_a.last, dct1+ac ); \
2393 if( result_c != result_a ) \
2394 { \
2395 ok = 0; \
2396 fprintf( stderr, #lastname ": [FAILED]\n" ); \
2397 break; \
2398 } \
2399 } \
2400 }
2401
2402 ok = 1; used_asm = 0;
2403 TEST_LAST( coeff_last4 , coeff_last4, 4, 0 );
2404 TEST_LAST( coeff_last8 , coeff_last8, 8, 0 );
2405 TEST_LAST( coeff_last[ DCT_LUMA_AC], coeff_last15, 16, 1 );
2406 TEST_LAST( coeff_last[ DCT_LUMA_4x4], coeff_last16, 16, 0 );
2407 TEST_LAST( coeff_last[ DCT_LUMA_8x8], coeff_last64, 64, 0 );
2408 report( "coeff_last :" );
2409
2410 #define TEST_LEVELRUN( lastname, name, size, ac ) \
2411 if( qf_a.lastname != qf_ref.lastname ) \
2412 { \
2413 set_func_name( #name ); \
2414 used_asm = 1; \
2415 for( int i = 0; i < 100; i++ ) \
2416 { \
2417 x264_run_level_t runlevel_c, runlevel_a; \
2418 int nnz = 0; \
2419 int max = rand() & (size-1); \
2420 memset( dct1, 0, size*sizeof(dctcoef) ); \
2421 memcpy( &runlevel_a, buf1+i, sizeof(x264_run_level_t) ); \
2422 memcpy( &runlevel_c, buf1+i, sizeof(x264_run_level_t) ); \
2423 for( int idx = ac; idx < max; idx++ ) \
2424 nnz |= dct1[idx] = !(rand()&3) + (!(rand()&15))*rand(); \
2425 if( !nnz ) \
2426 dct1[ac] = 1; \
2427 int result_c = call_c( qf_c.lastname, dct1+ac, &runlevel_c ); \
2428 int result_a = call_a( qf_a.lastname, dct1+ac, &runlevel_a ); \
2429 if( result_c != result_a || runlevel_c.last != runlevel_a.last || \
2430 runlevel_c.mask != runlevel_a.mask || \
2431 memcmp(runlevel_c.level, runlevel_a.level, sizeof(dctcoef)*result_c)) \
2432 { \
2433 ok = 0; \
2434 fprintf( stderr, #name ": [FAILED]\n" ); \
2435 break; \
2436 } \
2437 } \
2438 }
2439
2440 ok = 1; used_asm = 0;
2441 TEST_LEVELRUN( coeff_level_run4 , coeff_level_run4, 4, 0 );
2442 TEST_LEVELRUN( coeff_level_run8 , coeff_level_run8, 8, 0 );
2443 TEST_LEVELRUN( coeff_level_run[ DCT_LUMA_AC], coeff_level_run15, 16, 1 );
2444 TEST_LEVELRUN( coeff_level_run[ DCT_LUMA_4x4], coeff_level_run16, 16, 0 );
2445 report( "coeff_level_run :" );
2446
2447 return ret;
2448 }
2449
2450 static int check_intra( uint32_t cpu_ref, uint32_t cpu_new )
2451 {
2452 int ret = 0, ok = 1, used_asm = 0;
2453 ALIGNED_ARRAY_32( pixel, edge,[36] );
2454 ALIGNED_ARRAY_32( pixel, edge2,[36] );
2455 ALIGNED_ARRAY_32( pixel, fdec,[FDEC_STRIDE*20] );
2456 struct
2457 {
2458 x264_predict_t predict_16x16[4+3];
2459 x264_predict_t predict_8x8c[4+3];
2460 x264_predict_t predict_8x16c[4+3];
2461 x264_predict8x8_t predict_8x8[9+3];
2462 x264_predict_t predict_4x4[9+3];
2463 x264_predict_8x8_filter_t predict_8x8_filter;
2464 } ip_c, ip_ref, ip_a;
2465
2466 x264_predict_16x16_init( 0, ip_c.predict_16x16 );
2467 x264_predict_8x8c_init( 0, ip_c.predict_8x8c );
2468 x264_predict_8x16c_init( 0, ip_c.predict_8x16c );
2469 x264_predict_8x8_init( 0, ip_c.predict_8x8, &ip_c.predict_8x8_filter );
2470 x264_predict_4x4_init( 0, ip_c.predict_4x4 );
2471
2472 x264_predict_16x16_init( cpu_ref, ip_ref.predict_16x16 );
2473 x264_predict_8x8c_init( cpu_ref, ip_ref.predict_8x8c );
2474 x264_predict_8x16c_init( cpu_ref, ip_ref.predict_8x16c );
2475 x264_predict_8x8_init( cpu_ref, ip_ref.predict_8x8, &ip_ref.predict_8x8_filter );
2476 x264_predict_4x4_init( cpu_ref, ip_ref.predict_4x4 );
2477
2478 x264_predict_16x16_init( cpu_new, ip_a.predict_16x16 );
2479 x264_predict_8x8c_init( cpu_new, ip_a.predict_8x8c );
2480 x264_predict_8x16c_init( cpu_new, ip_a.predict_8x16c );
2481 x264_predict_8x8_init( cpu_new, ip_a.predict_8x8, &ip_a.predict_8x8_filter );
2482 x264_predict_4x4_init( cpu_new, ip_a.predict_4x4 );
2483
2484 memcpy( fdec, pbuf1, 32*20 * SIZEOF_PIXEL );
2485
2486 ip_c.predict_8x8_filter( fdec+48, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );
2487
2488 #define INTRA_TEST( name, dir, w, h, align, bench, ... )\
2489 if( ip_a.name[dir] != ip_ref.name[dir] )\
2490 {\
2491 set_func_name( "intra_%s_%s", #name, intra_##name##_names[dir] );\
2492 used_asm = 1;\
2493 memcpy( pbuf3, fdec, FDEC_STRIDE*20 * SIZEOF_PIXEL );\
2494 memcpy( pbuf4, fdec, FDEC_STRIDE*20 * SIZEOF_PIXEL );\
2495 for( int a = 0; a < (do_bench ? 64/SIZEOF_PIXEL : 1); a += align )\
2496 {\
2497 call_c##bench( ip_c.name[dir], pbuf3+48+a, ##__VA_ARGS__ );\
2498 call_a##bench( ip_a.name[dir], pbuf4+48+a, ##__VA_ARGS__ );\
2499 if( memcmp( pbuf3, pbuf4, FDEC_STRIDE*20 * SIZEOF_PIXEL ) )\
2500 {\
2501 fprintf( stderr, #name "[%d] : [FAILED]\n", dir );\
2502 ok = 0;\
2503 if( ip_c.name == (void *)ip_c.predict_8x8 )\
2504 {\
2505 for( int k = -1; k < 16; k++ )\
2506 printf( "%2x ", edge[16+k] );\
2507 printf( "\n" );\
2508 }\
2509 for( int j = 0; j < h; j++ )\
2510 {\
2511 if( ip_c.name == (void *)ip_c.predict_8x8 )\
2512 printf( "%2x ", edge[14-j] );\
2513 for( int k = 0; k < w; k++ )\
2514 printf( "%2x ", pbuf4[48+k+j*FDEC_STRIDE] );\
2515 printf( "\n" );\
2516 }\
2517 printf( "\n" );\
2518 for( int j = 0; j < h; j++ )\
2519 {\
2520 if( ip_c.name == (void *)ip_c.predict_8x8 )\
2521 printf( " " );\
2522 for( int k = 0; k < w; k++ )\
2523 printf( "%2x ", pbuf3[48+k+j*FDEC_STRIDE] );\
2524 printf( "\n" );\
2525 }\
2526 break;\
2527 }\
2528 }\
2529 }
2530
2531 for( int i = 0; i < 12; i++ )
2532 INTRA_TEST( predict_4x4, i, 4, 4, 4, );
2533 for( int i = 0; i < 7; i++ )
2534 INTRA_TEST( predict_8x8c, i, 8, 8, 16, );
2535 for( int i = 0; i < 7; i++ )
2536 INTRA_TEST( predict_8x16c, i, 8, 16, 16, );
2537 for( int i = 0; i < 7; i++ )
2538 INTRA_TEST( predict_16x16, i, 16, 16, 16, );
2539 for( int i = 0; i < 12; i++ )
2540 INTRA_TEST( predict_8x8, i, 8, 8, 8, , edge );
2541
2542 set_func_name("intra_predict_8x8_filter");
2543 if( ip_a.predict_8x8_filter != ip_ref.predict_8x8_filter )
2544 {
2545 used_asm = 1;
2546 for( int i = 0; i < 32; i++ )
2547 {
2548 if( !(i&7) || ((i&MB_TOPRIGHT) && !(i&MB_TOP)) )
2549 continue;
2550 int neighbor = (i&24)>>1;
2551 memset( edge, 0, 36*SIZEOF_PIXEL );
2552 memset( edge2, 0, 36*SIZEOF_PIXEL );
2553 call_c( ip_c.predict_8x8_filter, pbuf1+48, edge, neighbor, i&7 );
2554 call_a( ip_a.predict_8x8_filter, pbuf1+48, edge2, neighbor, i&7 );
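/* edge[15] holds the top-left sample, which is undefined when MB_TOPLEFT is absent, so mask it out of the comparison */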
2555 if( !(neighbor&MB_TOPLEFT) )
2556 edge[15] = edge2[15] = 0;
2557 if( memcmp( edge+7, edge2+7, (i&MB_TOPRIGHT ? 26 : i&MB_TOP ? 17 : 8) * SIZEOF_PIXEL ) )
2558 {
2559 fprintf( stderr, "predict_8x8_filter : [FAILED] %d %d\n", (i&24)>>1, i&7);
2560 ok = 0;
2561 }
2562 }
2563 }
2564
2565 #define EXTREMAL_PLANE( w, h ) \
2566 { \
2567 int max[7]; \
2568 for( int j = 0; j < 7; j++ ) \
2569 max[j] = test ? rand()&PIXEL_MAX : PIXEL_MAX; \
2570 fdec[48-1-FDEC_STRIDE] = (i&1)*max[0]; \
2571 for( int j = 0; j < w/2; j++ ) \
2572 fdec[48+j-FDEC_STRIDE] = (!!(i&2))*max[1]; \
2573 for( int j = w/2; j < w-1; j++ ) \
2574 fdec[48+j-FDEC_STRIDE] = (!!(i&4))*max[2]; \
2575 fdec[48+(w-1)-FDEC_STRIDE] = (!!(i&8))*max[3]; \
2576 for( int j = 0; j < h/2; j++ ) \
2577 fdec[48+j*FDEC_STRIDE-1] = (!!(i&16))*max[4]; \
2578 for( int j = h/2; j < h-1; j++ ) \
2579 fdec[48+j*FDEC_STRIDE-1] = (!!(i&32))*max[5]; \
2580 fdec[48+(h-1)*FDEC_STRIDE-1] = (!!(i&64))*max[6]; \
2581 }
2582 /* Extremal test case for planar prediction. */
2583 for( int test = 0; test < 100 && ok; test++ )
2584 for( int i = 0; i < 128 && ok; i++ )
2585 {
2586 EXTREMAL_PLANE( 8, 8 );
2587 INTRA_TEST( predict_8x8c, I_PRED_CHROMA_P, 8, 8, 64, 1 );
2588 EXTREMAL_PLANE( 8, 16 );
2589 INTRA_TEST( predict_8x16c, I_PRED_CHROMA_P, 8, 16, 64, 1 );
2590 EXTREMAL_PLANE( 16, 16 );
2591 INTRA_TEST( predict_16x16, I_PRED_16x16_P, 16, 16, 64, 1 );
2592 }
2593 report( "intra pred :" );
2594 return ret;
2595 }
2596
2597 #define DECL_CABAC(cpu) \
2598 static void run_cabac_decision_##cpu( x264_t *h, uint8_t *dst )\
2599 {\
2600 x264_cabac_t cb;\
2601 x264_cabac_context_init( h, &cb, SLICE_TYPE_P, 26, 0 );\
2602 x264_cabac_encode_init( &cb, dst, dst+0xff0 );\
2603 for( int i = 0; i < 0x1000; i++ )\
2604 x264_cabac_encode_decision_##cpu( &cb, buf1[i]>>1, buf1[i]&1 );\
2605 }\
2606 static void run_cabac_bypass_##cpu( x264_t *h, uint8_t *dst )\
2607 {\
2608 x264_cabac_t cb;\
2609 x264_cabac_context_init( h, &cb, SLICE_TYPE_P, 26, 0 );\
2610 x264_cabac_encode_init( &cb, dst, dst+0xff0 );\
2611 for( int i = 0; i < 0x1000; i++ )\
2612 x264_cabac_encode_bypass_##cpu( &cb, buf1[i]&1 );\
2613 }\
2614 static void run_cabac_terminal_##cpu( x264_t *h, uint8_t *dst )\
2615 {\
2616 x264_cabac_t cb;\
2617 x264_cabac_context_init( h, &cb, SLICE_TYPE_P, 26, 0 );\
2618 x264_cabac_encode_init( &cb, dst, dst+0xff0 );\
2619 for( int i = 0; i < 0x1000; i++ )\
2620 x264_cabac_encode_terminal_##cpu( &cb );\
2621 }
2622 DECL_CABAC(c)
2623 #if HAVE_MMX
2624 DECL_CABAC(asm)
2625 #elif HAVE_AARCH64
2626 DECL_CABAC(asm)
2627 #else
2628 #define run_cabac_decision_asm run_cabac_decision_c
2629 #define run_cabac_bypass_asm run_cabac_bypass_c
2630 #define run_cabac_terminal_asm run_cabac_terminal_c
2631 #endif
2632
2633 extern const uint8_t x264_count_cat_m1[14];
2634
2635 static int check_cabac( uint32_t cpu_ref, uint32_t cpu_new )
2636 {
2637 int ret = 0, ok = 1, used_asm = 0;
2638 x264_t h;
2639 h.sps->i_chroma_format_idc = 3;
2640
2641 x264_bitstream_function_t bs_ref;
2642 x264_bitstream_function_t bs_a;
2643 x264_bitstream_init( cpu_ref, &bs_ref );
2644 x264_bitstream_init( cpu_new, &bs_a );
2645 x264_quant_init( &h, cpu_new, &h.quantf );
2646 h.quantf.coeff_last[DCT_CHROMA_DC] = h.quantf.coeff_last4;
2647
2648 /* Reset cabac state to avoid buffer overruns in do_bench() with large BENCH_RUNS values. */
2649 #define GET_CB( i ) (\
2650 x264_cabac_encode_init( &cb[i], bitstream[i], bitstream[i]+0xfff0 ),\
2651 cb[i].f8_bits_encoded = 0, &cb[i] )
2652
2653 #define CABAC_RESIDUAL(name, start, end, rd)\
2654 {\
2655 if( bs_a.name##_internal && (bs_a.name##_internal != bs_ref.name##_internal || (cpu_new&X264_CPU_SSE2_IS_SLOW)) )\
2656 {\
2657 used_asm = 1;\
2658 set_func_name( #name );\
2659 for( int i = 0; i < 2; i++ )\
2660 {\
2661 for( intptr_t ctx_block_cat = start; ctx_block_cat <= end; ctx_block_cat++ )\
2662 {\
2663 for( int j = 0; j < 256; j++ )\
2664 {\
2665 ALIGNED_ARRAY_64( dctcoef, dct, [2],[64] );\
2666 uint8_t bitstream[2][1<<16];\
2667 static const uint8_t ctx_ac[14] = {0,1,0,0,1,0,0,1,0,0,0,1,0,0};\
2668 int ac = ctx_ac[ctx_block_cat];\
2669 int nz = 0;\
2670 while( !nz )\
2671 {\
2672 for( int k = 0; k <= x264_count_cat_m1[ctx_block_cat]; k++ )\
2673 {\
2674 /* Very rough distribution that covers possible inputs */\
2675 int rnd = rand();\
2676 int coef = !(rnd&3);\
2677 coef += !(rnd& 15) * (rand()&0x0006);\
2678 coef += !(rnd& 63) * (rand()&0x0008);\
2679 coef += !(rnd& 255) * (rand()&0x00F0);\
2680 coef += !(rnd&1023) * (rand()&0x7F00);\
2681 nz |= dct[0][ac+k] = dct[1][ac+k] = coef * ((rand()&1) ? 1 : -1);\
2682 }\
2683 }\
2684 h.mb.b_interlaced = i;\
2685 x264_cabac_t cb[2];\
2686 x264_cabac_context_init( &h, &cb[0], SLICE_TYPE_P, 26, 0 );\
2687 x264_cabac_context_init( &h, &cb[1], SLICE_TYPE_P, 26, 0 );\
2688 if( !rd ) memcpy( bitstream[1], bitstream[0], 0x400 );\
2689 call_c1( x264_##name##_c, &h, GET_CB( 0 ), ctx_block_cat, dct[0]+ac );\
2690 call_a1( bs_a.name##_internal, dct[1]+ac, i, ctx_block_cat, GET_CB( 1 ) );\
2691 ok = cb[0].f8_bits_encoded == cb[1].f8_bits_encoded && !memcmp(cb[0].state, cb[1].state, 1024);\
2692 if( !rd ) ok |= !memcmp( bitstream[1], bitstream[0], 0x400 ) && !memcmp( &cb[1], &cb[0], offsetof(x264_cabac_t, p_start) );\
2693 if( !ok )\
2694 {\
2695 fprintf( stderr, #name " : [FAILED] ctx_block_cat %d", (int)ctx_block_cat );\
2696 if( rd && cb[0].f8_bits_encoded != cb[1].f8_bits_encoded )\
2697 fprintf( stderr, " (%d != %d)", cb[0].f8_bits_encoded, cb[1].f8_bits_encoded );\
2698 fprintf( stderr, "\n");\
2699 goto name##fail;\
2700 }\
2701 if( (j&15) == 0 )\
2702 {\
2703 call_c2( x264_##name##_c, &h, GET_CB( 0 ), ctx_block_cat, dct[0]+ac );\
2704 call_a2( bs_a.name##_internal, dct[1]+ac, i, ctx_block_cat, GET_CB( 1 ) );\
2705 }\
2706 }\
2707 }\
2708 }\
2709 }\
2710 }\
2711 name##fail:
2712
2713 CABAC_RESIDUAL( cabac_block_residual, 0, DCT_LUMA_8x8, 0 )
2714 report( "cabac residual:" );
2715
2716 ok = 1; used_asm = 0;
2717 CABAC_RESIDUAL( cabac_block_residual_rd, 0, DCT_LUMA_8x8-1, 1 )
2718 CABAC_RESIDUAL( cabac_block_residual_8x8_rd, DCT_LUMA_8x8, DCT_LUMA_8x8, 1 )
2719 report( "cabac residual rd:" );
2720
2721 if( cpu_ref || run_cabac_decision_c == run_cabac_decision_asm )
2722 return ret;
2723 ok = 1; used_asm = 0;
2724 x264_cabac_init( &h );
2725
2726 set_func_name( "cabac_encode_decision" );
2727 memcpy( buf4, buf3, 0x1000 );
2728 call_c( run_cabac_decision_c, &h, buf3 );
2729 call_a( run_cabac_decision_asm, &h, buf4 );
2730 ok = !memcmp( buf3, buf4, 0x1000 );
2731 report( "cabac decision:" );
2732
2733 set_func_name( "cabac_encode_bypass" );
2734 memcpy( buf4, buf3, 0x1000 );
2735 call_c( run_cabac_bypass_c, &h, buf3 );
2736 call_a( run_cabac_bypass_asm, &h, buf4 );
2737 ok = !memcmp( buf3, buf4, 0x1000 );
2738 report( "cabac bypass:" );
2739
2740 set_func_name( "cabac_encode_terminal" );
2741 memcpy( buf4, buf3, 0x1000 );
2742 call_c( run_cabac_terminal_c, &h, buf3 );
2743 call_a( run_cabac_terminal_asm, &h, buf4 );
2744 ok = !memcmp( buf3, buf4, 0x1000 );
2745 report( "cabac terminal:" );
2746
2747 return ret;
2748 }
2749
2750 static int check_bitstream( uint32_t cpu_ref, uint32_t cpu_new )
2751 {
2752 x264_bitstream_function_t bs_c;
2753 x264_bitstream_function_t bs_ref;
2754 x264_bitstream_function_t bs_a;
2755
2756 int ret = 0, ok = 1, used_asm = 0;
2757
2758 x264_bitstream_init( 0, &bs_c );
2759 x264_bitstream_init( cpu_ref, &bs_ref );
2760 x264_bitstream_init( cpu_new, &bs_a );
2761 if( bs_a.nal_escape != bs_ref.nal_escape )
2762 {
2763 int size = 0x4000;
2764 uint8_t *input = malloc(size+100);
2765 uint8_t *output1 = malloc(size*2);
2766 uint8_t *output2 = malloc(size*2);
2767 used_asm = 1;
2768 set_func_name( "nal_escape" );
2769 for( int i = 0; i < 100; i++ )
2770 {
2771 /* Test corner-case sizes */
2772 int test_size = i < 10 ? i+1 : rand() & 0x3fff;
2773 /* Test 8 different probability distributions of zeros */
2774 for( int j = 0; j < test_size+32; j++ )
2775 input[j] = (rand()&((1 << ((i&7)+1)) - 1)) * rand();
2776 uint8_t *end_c = (uint8_t*)call_c1( bs_c.nal_escape, output1, input, input+test_size );
2777 uint8_t *end_a = (uint8_t*)call_a1( bs_a.nal_escape, output2, input, input+test_size );
2778 int size_c = end_c-output1;
2779 int size_a = end_a-output2;
2780 if( size_c != size_a || memcmp( output1, output2, size_c ) )
2781 {
2782 fprintf( stderr, "nal_escape : [FAILED] %d %d\n", size_c, size_a );
2783 ok = 0;
2784 break;
2785 }
2786 }
2787 for( int j = 0; j < size+32; j++ )
2788 input[j] = rand();
2789 call_c2( bs_c.nal_escape, output1, input, input+size );
2790 call_a2( bs_a.nal_escape, output2, input, input+size );
2791 free(input);
2792 free(output1);
2793 free(output2);
2794 }
2795 report( "nal escape:" );
2796
2797 return ret;
2798 }
2799
2800 static int check_all_funcs( uint32_t cpu_ref, uint32_t cpu_new )
2801 {
2802 return check_pixel( cpu_ref, cpu_new )
2803 + check_dct( cpu_ref, cpu_new )
2804 + check_mc( cpu_ref, cpu_new )
2805 + check_intra( cpu_ref, cpu_new )
2806 + check_deblock( cpu_ref, cpu_new )
2807 + check_quant( cpu_ref, cpu_new )
2808 + check_cabac( cpu_ref, cpu_new )
2809 + check_bitstream( cpu_ref, cpu_new );
2810 }
2811
2812 static int add_flags( uint32_t *cpu_ref, uint32_t *cpu_new, uint32_t flags, const char *name )
2813 {
2814 *cpu_ref = *cpu_new;
2815 *cpu_new |= flags;
2816 #if STACK_ALIGNMENT < 16
2817 *cpu_new |= X264_CPU_STACK_MOD4;
2818 #endif
2819 if( *cpu_new & X264_CPU_SSE2_IS_FAST )
2820 *cpu_new &= ~X264_CPU_SSE2_IS_SLOW;
2821 if( !quiet )
2822 fprintf( stderr, "x264: %s\n", name );
2823 return check_all_funcs( *cpu_ref, *cpu_new );
2824 }
2825
2826 static int check_all_flags( void )
2827 {
2828 int ret = 0;
2829 uint32_t cpu0 = 0, cpu1 = 0;
2830 uint32_t cpu_detect = x264_cpu_detect();
2831 #if HAVE_MMX
2832 if( cpu_detect & X264_CPU_AVX512 )
2833 simd_warmup_func = x264_checkasm_warmup_avx512;
2834 else if( cpu_detect & X264_CPU_AVX )
2835 simd_warmup_func = x264_checkasm_warmup_avx;
2836 #endif
2837 simd_warmup();
2838
2839 #if ARCH_X86 || ARCH_X86_64
2840 if( cpu_detect & X264_CPU_MMX2 )
2841 {
2842 ret |= add_flags( &cpu0, &cpu1, X264_CPU_MMX | X264_CPU_MMX2, "MMX" );
2843 ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "MMX Cache64" );
2844 cpu1 &= ~X264_CPU_CACHELINE_64;
2845 #if ARCH_X86
2846 ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_32, "MMX Cache32" );
2847 cpu1 &= ~X264_CPU_CACHELINE_32;
2848 #endif
2849 }
2850 if( cpu_detect & X264_CPU_SSE )
2851 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE, "SSE" );
2852 if( cpu_detect & X264_CPU_SSE2 )
2853 {
2854 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2 | X264_CPU_SSE2_IS_SLOW, "SSE2Slow" );
2855 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2_IS_FAST, "SSE2Fast" );
2856 ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSE2Fast Cache64" );
2857 cpu1 &= ~X264_CPU_CACHELINE_64;
2858 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_SHUFFLE, "SSE2 SlowShuffle" );
2859 cpu1 &= ~X264_CPU_SLOW_SHUFFLE;
2860 }
2861 if( cpu_detect & X264_CPU_LZCNT )
2862 {
2863 ret |= add_flags( &cpu0, &cpu1, X264_CPU_LZCNT, "LZCNT" );
2864 cpu1 &= ~X264_CPU_LZCNT;
2865 }
2866 if( cpu_detect & X264_CPU_SSE3 )
2867 {
2868 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE3 | X264_CPU_CACHELINE_64, "SSE3" );
2869 cpu1 &= ~X264_CPU_CACHELINE_64;
2870 }
2871 if( cpu_detect & X264_CPU_SSSE3 )
2872 {
2873 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSSE3, "SSSE3" );
2874 ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64" );
2875 cpu1 &= ~X264_CPU_CACHELINE_64;
2876 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_SHUFFLE, "SSSE3 SlowShuffle" );
2877 cpu1 &= ~X264_CPU_SLOW_SHUFFLE;
2878 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SLOW_ATOM, "SSSE3 SlowAtom" );
2879 ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64 SlowAtom" );
2880 cpu1 &= ~X264_CPU_CACHELINE_64;
2881 cpu1 &= ~X264_CPU_SLOW_ATOM;
2882 if( cpu_detect & X264_CPU_LZCNT )
2883 {
2884 ret |= add_flags( &cpu0, &cpu1, X264_CPU_LZCNT, "SSSE3 LZCNT" );
2885 cpu1 &= ~X264_CPU_LZCNT;
2886 }
2887 }
2888 if( cpu_detect & X264_CPU_SSE4 )
2889 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE4, "SSE4" );
2890 if( cpu_detect & X264_CPU_SSE42 )
2891 ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE42, "SSE4.2" );
2892 if( cpu_detect & X264_CPU_AVX )
2893 ret |= add_flags( &cpu0, &cpu1, X264_CPU_AVX, "AVX" );
2894 if( cpu_detect & X264_CPU_XOP )
2895 ret |= add_flags( &cpu0, &cpu1, X264_CPU_XOP, "XOP" );
2896 if( cpu_detect & X264_CPU_FMA4 )
2897 {
2898 ret |= add_flags( &cpu0, &cpu1, X264_CPU_FMA4, "FMA4" );
2899 cpu1 &= ~X264_CPU_FMA4;
2900 }
2901 if( cpu_detect & X264_CPU_FMA3 )
2902 ret |= add_flags( &cpu0, &cpu1, X264_CPU_FMA3, "FMA3" );
2903 if( cpu_detect & X264_CPU_BMI1 )
2904 ret |= add_flags( &cpu0, &cpu1, X264_CPU_BMI1, "BMI1" );
2905 if( cpu_detect & X264_CPU_BMI2 )
2906 ret |= add_flags( &cpu0, &cpu1, X264_CPU_BMI2, "BMI2" );
2907 if( cpu_detect & X264_CPU_AVX2 )
2908 ret |= add_flags( &cpu0, &cpu1, X264_CPU_AVX2, "AVX2" );
2909 if( cpu_detect & X264_CPU_AVX512 )
2910 ret |= add_flags( &cpu0, &cpu1, X264_CPU_AVX512, "AVX512" );
2911 #elif ARCH_PPC
2912 if( cpu_detect & X264_CPU_ALTIVEC )
2913 {
2914 fprintf( stderr, "x264: ALTIVEC against C\n" );
2915 ret = check_all_funcs( 0, X264_CPU_ALTIVEC );
2916 }
2917 #elif ARCH_ARM
2918 if( cpu_detect & X264_CPU_NEON )
2919 x264_checkasm_call = x264_checkasm_call_neon;
2920 if( cpu_detect & X264_CPU_ARMV6 )
2921 ret |= add_flags( &cpu0, &cpu1, X264_CPU_ARMV6, "ARMv6" );
2922 if( cpu_detect & X264_CPU_NEON )
2923 ret |= add_flags( &cpu0, &cpu1, X264_CPU_NEON, "NEON" );
2924 if( cpu_detect & X264_CPU_FAST_NEON_MRC )
2925 ret |= add_flags( &cpu0, &cpu1, X264_CPU_FAST_NEON_MRC, "Fast NEON MRC" );
2926 #elif ARCH_AARCH64
2927 if( cpu_detect & X264_CPU_ARMV8 )
2928 ret |= add_flags( &cpu0, &cpu1, X264_CPU_ARMV8, "ARMv8" );
2929 if( cpu_detect & X264_CPU_NEON )
2930 ret |= add_flags( &cpu0, &cpu1, X264_CPU_NEON, "NEON" );
2931 #elif ARCH_MIPS
2932 if( cpu_detect & X264_CPU_MSA )
2933 ret |= add_flags( &cpu0, &cpu1, X264_CPU_MSA, "MSA" );
2934 #endif
2935 return ret;
2936 }
2937
2938 REALIGN_STACK int main( int argc, char **argv )
2939 {
2940 #ifdef _WIN32
2941 /* Disable the Windows Error Reporting dialog */
2942 SetErrorMode( SEM_NOGPFAULTERRORBOX );
2943 #endif
2944
2945 if( argc > 1 && !strncmp( argv[1], "--bench", 7 ) )
2946 {
2947 #if !ARCH_X86 && !ARCH_X86_64 && !ARCH_PPC && !ARCH_ARM && !ARCH_AARCH64 && !ARCH_MIPS
2948 fprintf( stderr, "no --bench for your cpu until you port rdtsc\n" );
2949 return 1;
2950 #endif
2951 do_bench = 1;
2952 if( argv[1][7] == '=' )
2953 {
2954 bench_pattern = argv[1]+8;
2955 bench_pattern_len = strlen(bench_pattern);
2956 }
2957 argc--;
2958 argv++;
2959 }
2960
2961 unsigned int seed = ( argc > 1 ) ? atoi(argv[1]) : x264_mdate();
2962 fprintf( stderr, "x264: using random seed %u\n", seed );
2963 srand( seed );
2964
2965 buf1 = x264_malloc( 0x1e00 + 0x2000*SIZEOF_PIXEL );
2966 pbuf1 = x264_malloc( 0x1e00*SIZEOF_PIXEL );
2967 if( !buf1 || !pbuf1 )
2968 {
2969 fprintf( stderr, "malloc failed, unable to initiate tests!\n" );
2970 return -1;
2971 }
2972 #define INIT_POINTER_OFFSETS\
2973 buf2 = buf1 + 0xf00;\
2974 buf3 = buf2 + 0xf00;\
2975 buf4 = buf3 + 0x1000*SIZEOF_PIXEL;\
2976 pbuf2 = pbuf1 + 0xf00;\
2977 pbuf3 = (pixel*)buf3;\
2978 pbuf4 = (pixel*)buf4;
2979 INIT_POINTER_OFFSETS;
2980 for( int i = 0; i < 0x1e00; i++ )
2981 {
2982 buf1[i] = rand() & 0xFF;
2983 pbuf1[i] = rand() & PIXEL_MAX;
2984 }
2985 memset( buf1+0x1e00, 0, 0x2000*SIZEOF_PIXEL );
2986
2987 if( x264_stack_pagealign( check_all_flags, 0 ) )
2988 {
2989 fprintf( stderr, "x264: at least one test has failed. Go and fix that Right Now!\n" );
2990 return -1;
2991 }
2992 fprintf( stderr, "x264: All tests passed Yeah :)\n" );
2993 if( do_bench )
2994 print_bench();
2995 return 0;
2996 }
2997