1 /*
2 * Copyright © 2018, VideoLAN and dav1d authors
3 * Copyright © 2018, Two Orioles, LLC
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice, this
10 * list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #ifndef DAV1D_TESTS_CHECKASM_CHECKASM_H
29 #define DAV1D_TESTS_CHECKASM_CHECKASM_H
30
31 #include "config.h"
32
33 #include <stdint.h>
34 #include <stdlib.h>
35
36 #if ARCH_X86_64 && defined(_WIN32)
37 /* setjmp/longjmp on 64-bit Windows will try to use SEH to unwind the stack,
38 * which doesn't work for assembly functions without unwind information. */
39 #include <windows.h>
40 #define checkasm_context CONTEXT
41 #define checkasm_save_context() RtlCaptureContext(&checkasm_context_buf)
42 #define checkasm_load_context() RtlRestoreContext(&checkasm_context_buf, NULL)
43 #else
44 #include <setjmp.h>
45 #define checkasm_context jmp_buf
46 #define checkasm_save_context() setjmp(checkasm_context_buf)
47 #define checkasm_load_context() longjmp(checkasm_context_buf, 1)
48 #endif
49
50 #include "include/common/attributes.h"
51 #include "include/common/bitdepth.h"
52 #include "include/common/intops.h"
53
/* Pseudo-random number generator used by all tests; aliased as rnd(). */
int xor128_rand(void);
#define rnd xor128_rand

/* Expands to declarations of both the 8 bpc and 16 bpc variants of a
 * per-bitdepth check entry point. */
#define decl_check_bitfns(name) \
name##_8bpc(void); \
name##_16bpc(void)

/* Per-module test entry points. */
void checkasm_check_msac(void);
void checkasm_check_refmvs(void);
decl_check_bitfns(void checkasm_check_cdef);
decl_check_bitfns(void checkasm_check_filmgrain);
decl_check_bitfns(void checkasm_check_ipred);
decl_check_bitfns(void checkasm_check_itx);
decl_check_bitfns(void checkasm_check_loopfilter);
decl_check_bitfns(void checkasm_check_looprestoration);
decl_check_bitfns(void checkasm_check_mc);

/* Core checkasm runner API used by the macros below. */
void *checkasm_check_func(void *func, const char *name, ...);
int checkasm_bench_func(void);
int checkasm_fail_func(const char *msg, ...);
void checkasm_update_bench(int iterations, uint64_t cycles);
void checkasm_report(const char *name, ...);
void checkasm_set_signal_handler_state(int enabled);
/* Context captured by checkasm_save_context() and restored by
 * checkasm_load_context() (see the macros near the top of this header). */
extern checkasm_context checkasm_context_buf;
78
/* float compare utilities */
int float_near_ulp(float a, float b, unsigned max_ulp);
int float_near_abs_eps(float a, float b, float eps);
int float_near_abs_eps_ulp(float a, float b, float eps, unsigned max_ulp);
int float_near_ulp_array(const float *a, const float *b, unsigned max_ulp,
                         int len);
int float_near_abs_eps_array(const float *a, const float *b, float eps,
                             int len);
int float_near_abs_eps_array_ulp(const float *a, const float *b, float eps,
                                 unsigned max_ulp, int len);

#define BENCH_RUNS (1 << 12) /* Trade-off between accuracy and speed */

/* Decide whether or not the specified function needs to be tested.
 * Also records the new implementation in func_new and the reference
 * implementation (returned by checkasm_check_func) in func_ref. */
#define check_func(func, ...)\
    (func_ref = checkasm_check_func((func_new = func), __VA_ARGS__))

/* Declare the function prototype. The first argument is the return value,
 * the remaining arguments are the function parameters. Naming parameters
 * is optional. Also captures the current execution context so a crash
 * inside a tested function can unwind back here (via
 * checkasm_load_context). */
#define declare_func(ret, ...)\
    declare_new(ret, __VA_ARGS__)\
    void *func_ref, *func_new;\
    typedef ret func_type(__VA_ARGS__);\
    checkasm_save_context()

/* Indicate that the current test has failed */
#define fail() checkasm_fail_func("%s:%d", __FILE__, __LINE__)

/* Print the test outcome */
#define report checkasm_report

/* Call the reference function, with the crash handler armed around the
 * call so signals are attributed to the tested code. */
#define call_ref(...)\
    (checkasm_set_signal_handler_state(1),\
     ((func_type *)func_ref)(__VA_ARGS__));\
    checkasm_set_signal_handler_state(0)
116
117 #if HAVE_ASM
118 #if ARCH_X86
119 #if defined(_MSC_VER) && !defined(__clang__)
120 #include <intrin.h>
121 #define readtime() (_mm_lfence(), __rdtsc())
122 #else
/* Return the x86 time stamp counter as a 64-bit value. The lfence
 * serializes preceding loads so rdtsc isn't executed early. */
static inline uint64_t readtime(void) {
    uint32_t lo, hi; /* rdtsc returns the counter split across eax/edx */
    __asm__ __volatile__("lfence\nrdtsc" : "=a"(lo), "=d"(hi));
    uint64_t t = (uint64_t)hi << 32;
    return t | lo;
}
128 #define readtime readtime
129 #endif
130 #elif (ARCH_AARCH64 || ARCH_ARM) && defined(__APPLE__)
131 #include <mach/mach_time.h>
132 #define readtime() mach_absolute_time()
133 #elif ARCH_AARCH64
134 #ifdef _MSC_VER
135 #include <windows.h>
136 #define readtime() (_InstructionSynchronizationBarrier(), ReadTimeStampCounter())
137 #else
/* Return the AArch64 PMU cycle counter (pmccntr_el0) for benchmarking. */
static inline uint64_t readtime(void) {
    uint64_t cycle_counter;
    /* This requires enabling user mode access to the cycle counter (which
     * can only be done from kernel space).
     * This could also read cntvct_el0 instead of pmccntr_el0; that register
     * might also be readable (depending on kernel version), but it has much
     * worse precision (it's a fixed 50 MHz timer). */
    /* The isb serializes the pipeline so the counter read isn't reordered
     * ahead of preceding instructions. */
    __asm__ __volatile__("isb\nmrs %0, pmccntr_el0"
                         : "=r"(cycle_counter)
                         :: "memory");
    return cycle_counter;
}
150 #define readtime readtime
151 #endif
152 #elif ARCH_ARM && !defined(_MSC_VER) && __ARM_ARCH >= 7
/* Return the ARMv7 PMU cycle counter (PMCCNTR, read via cp15 c9/c13) for
 * benchmarking. Note the counter is only 32 bits wide on ARMv7. */
static inline uint64_t readtime(void) {
    uint32_t cycle_counter;
    /* This requires enabling user mode access to the cycle counter (which
     * can only be done from kernel space). */
    /* The isb serializes the pipeline so the read isn't reordered. */
    __asm__ __volatile__("isb\nmrc p15, 0, %0, c9, c13, 0"
                         : "=r"(cycle_counter)
                         :: "memory");
    return cycle_counter;
}
162 #define readtime readtime
163 #elif ARCH_PPC64LE
/* Return the 64-bit PowerPC time base, read as two 32-bit halves.
 * SPR 268 is the time base lower half (TBL), SPR 269 the upper (TBU). */
static inline uint64_t readtime(void) {
    uint32_t tbu, tbl, temp;

    /* Read TBU, then TBL, then TBU again, and retry if the two TBU reads
     * differ: this guarantees the combined 64-bit value is consistent even
     * if TBL wrapped between the reads. */
    __asm__ __volatile__(
        "1:\n"
        "mfspr %2,269\n"
        "mfspr %0,268\n"
        "mfspr %1,269\n"
        "cmpw %2,%1\n"
        "bne 1b\n"
        : "=r"(tbl), "=r"(tbu), "=r"(temp)
        :
        : "cc");

    return (((uint64_t)tbu) << 32) | (uint64_t)tbl;
}
180 #define readtime readtime
181 #endif
182
/* Verifies that clobbered callee-saved registers
 * are properly saved and restored */
void checkasm_checked_call(void *func, ...);

#if ARCH_X86_64
/* Evil hack: detect incorrect assumptions that 32-bit ints are zero-extended
 * to 64-bit. This is done by clobbering the stack with junk around the stack
 * pointer and calling the assembly function through checked_call() with added
 * dummy arguments which forces all real arguments to be passed on the stack
 * and not in registers. For 32-bit arguments the upper half of the 64-bit
 * register locations on the stack will now contain junk which will cause
 * misbehaving functions to either produce incorrect output or segfault. Note
 * that even though this works extremely well in practice, it's technically
 * not guaranteed and false negatives is theoretically possible, but there
 * can never be any false positives. */
void checkasm_stack_clobber(uint64_t clobber, ...);
/* YMM and ZMM registers on x86 are turned off to save power when they haven't
 * been used for some period of time. When they are used there will be a
 * "warmup" period during which performance will be reduced and inconsistent
 * which is problematic when trying to benchmark individual functions. We can
 * work around this by periodically issuing "dummy" instructions that uses
 * those registers to keep them powered on. */
void checkasm_simd_warmup(void);
/* Five leading dummy ints (after the function pointer) fill the integer
 * argument registers so the real arguments spill to the stack. */
#define declare_new(ret, ...)\
    ret (*checked_call)(void *, int, int, int, int, int, __VA_ARGS__,\
                        int, int, int, int, int, int, int, int,\
                        int, int, int, int, int, int, int) =\
        (void *)checkasm_checked_call;
/* Junk pattern written around the stack pointer by checkasm_stack_clobber. */
#define CLOB (UINT64_C(0xdeadbeefdeadbeef))
/* Trailing dummy arguments consumed by the checked_call thunk; the counts
 * differ because the Win64 and SysV ABIs pass a different number of integer
 * arguments in registers. */
#ifdef _WIN32
#define STACKARGS 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0
#else
#define STACKARGS 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0
#endif
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     checkasm_simd_warmup(),\
     checkasm_stack_clobber(CLOB, CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
                            CLOB, CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
                            CLOB, CLOB, CLOB, CLOB, CLOB, CLOB, CLOB),\
     checked_call(func_new, 0, 0, 0, 0, 0, __VA_ARGS__, STACKARGS));\
    checkasm_set_signal_handler_state(0)
#elif ARCH_X86_32
/* On x86-32 all arguments are already on the stack; the trailing dummy ints
 * are consumed by the checked_call thunk. */
#define declare_new(ret, ...)\
    ret (*checked_call)(void *, __VA_ARGS__, int, int, int, int, int, int,\
                        int, int, int, int, int, int, int, int, int) =\
        (void *)checkasm_checked_call;
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     checked_call(func_new, __VA_ARGS__, 15, 14, 13, 12,\
                  11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1));\
    checkasm_set_signal_handler_state(0)
#elif ARCH_ARM
/* Use a dummy argument, to offset the real parameters by 2, not only 1.
 * This makes sure that potential 8-byte-alignment of parameters is kept
 * the same even when the extra parameters have been removed. */
void checkasm_checked_call_vfp(void *func, int dummy, ...);
#define declare_new(ret, ...)\
    ret (*checked_call)(void *, int dummy, __VA_ARGS__,\
                        int, int, int, int, int, int, int, int,\
                        int, int, int, int, int, int, int) =\
        (void *)checkasm_checked_call_vfp;
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     checked_call(func_new, 0, __VA_ARGS__, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0));\
    checkasm_set_signal_handler_state(0)
#elif ARCH_AARCH64 && !defined(__APPLE__)
void checkasm_stack_clobber(uint64_t clobber, ...);
/* Seven leading dummy ints fill the integer argument registers so the real
 * arguments spill to the stack (see the x86-64 comment above for the
 * rationale behind this zero-extension check). */
#define declare_new(ret, ...)\
    ret (*checked_call)(void *, int, int, int, int, int, int, int,\
                        __VA_ARGS__, int, int, int, int, int, int, int, int,\
                        int, int, int, int, int, int, int) =\
        (void *)checkasm_checked_call;
#define CLOB (UINT64_C(0xdeadbeefdeadbeef))
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     checkasm_stack_clobber(CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
                            CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
                            CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
                            CLOB, CLOB, CLOB, CLOB, CLOB),\
     checked_call(func_new, 0, 0, 0, 0, 0, 0, 0, __VA_ARGS__,\
                  7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0));\
    checkasm_set_signal_handler_state(0)
#else
/* Other architectures: call the new implementation directly, with only the
 * crash handler armed around the call. */
#define declare_new(ret, ...)
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     ((func_type *)func_new)(__VA_ARGS__));\
    checkasm_set_signal_handler_state(0)
#endif
#else /* HAVE_ASM */
#define declare_new(ret, ...)
/* Call the function */
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     ((func_type *)func_new)(__VA_ARGS__));\
    checkasm_set_signal_handler_state(0)
#endif /* HAVE_ASM */
281
/* Benchmark the function */
#ifdef readtime
#define bench_new(...)\
    do {\
        if (checkasm_bench_func()) {\
            func_type *tfunc = func_new;\
            checkasm_set_signal_handler_state(1);\
            uint64_t tsum = 0;\
            int tcount = 0;\
            for (int ti = 0; ti < BENCH_RUNS; ti++) {\
                uint64_t t = readtime();\
                /* Four calls per sample to amortize the timer overhead. */\
                tfunc(__VA_ARGS__);\
                tfunc(__VA_ARGS__);\
                tfunc(__VA_ARGS__);\
                tfunc(__VA_ARGS__);\
                t = readtime() - t;\
                /* Outlier rejection: only accumulate samples that are at
                 * most 4x the running mean, and always skip the first
                 * (cold) iteration. */\
                if (t*tcount <= tsum*4 && ti > 0) {\
                    tsum += t;\
                    tcount++;\
                }\
            }\
            checkasm_set_signal_handler_state(0);\
            checkasm_update_bench(tcount, tsum);\
        } else {\
            /* Not benchmarking this function; still run it once. */\
            call_new(__VA_ARGS__);\
        }\
    } while (0)
#else
/* No cycle counter available on this platform: benchmarking is a no-op. */
#define bench_new(...) do {} while (0)
#endif
312
313
/* Round x up to the next multiple of a (a must be a power of two). */
#define ROUND_UP(x,a) (((x)+((a)-1)) & ~((a)-1))
/* Declare a 64-byte-aligned pixel buffer name##_buf with a stride rounded
 * up to 64 pixels plus 64 pixels of extra width, 32 extra rows and 64
 * trailing pixels, and a pointer `name` offset 16 rows + 64 pixels into
 * it — presumably so out-of-bounds writes by tested functions land in
 * padding that the padded checks below can inspect (TODO confirm against
 * the checkasm_check_* implementations). */
#define PIXEL_RECT(name, w, h) \
    ALIGN_STK_64(pixel, name##_buf, ((h)+32)*(ROUND_UP(w,64)+64) + 64,); \
    ptrdiff_t name##_stride = sizeof(pixel)*(ROUND_UP(w,64)+64); \
    (void)name##_stride; \
    pixel *name = name##_buf + (ROUND_UP(w,64)+64)*16 + 64

/* Fill the whole backing buffer (padding included) with the 0x99 byte
 * pattern. */
#define CLEAR_PIXEL_RECT(name) \
    memset(name##_buf, 0x99, sizeof(name##_buf)) \

/* Declares the per-type buffer-comparison helper checkasm_check_<type>. */
#define DECL_CHECKASM_CHECK_FUNC(type) \
int checkasm_check_##type(const char *const file, const int line, \
                          const type *const buf1, const ptrdiff_t stride1, \
                          const type *const buf2, const ptrdiff_t stride2, \
                          const int w, const int h, const char *const name, \
                          const int align_w, const int align_h, \
                          const int padding)

DECL_CHECKASM_CHECK_FUNC(int8_t);
DECL_CHECKASM_CHECK_FUNC(int16_t);
DECL_CHECKASM_CHECK_FUNC(int32_t);
DECL_CHECKASM_CHECK_FUNC(uint8_t);
DECL_CHECKASM_CHECK_FUNC(uint16_t);
DECL_CHECKASM_CHECK_FUNC(uint32_t);

#define CONCAT(a,b) a ## b

/* checkasm_check2 forwards all arguments; checkasm_check additionally
 * defaults the trailing align_w, align_h and padding parameters to 0. */
#define checkasm_check2(prefix, ...) CONCAT(checkasm_check_, prefix)(__FILE__, __LINE__, __VA_ARGS__)
#define checkasm_check(prefix, ...) checkasm_check2(prefix, __VA_ARGS__, 0, 0, 0)

#ifdef BITDEPTH
/* Bitdepth-templated wrappers: PIXEL_TYPE/COEF_TYPE are provided by
 * bitdepth.h. The padded variants pass non-zero alignment/padding so the
 * surrounding guard area is checked as well. */
#define checkasm_check_pixel(...) checkasm_check(PIXEL_TYPE, __VA_ARGS__)
#define checkasm_check_pixel_padded(...) checkasm_check2(PIXEL_TYPE, __VA_ARGS__, 1, 1, 8)
#define checkasm_check_pixel_padded_align(...) checkasm_check2(PIXEL_TYPE, __VA_ARGS__, 8)
#define checkasm_check_coef(...) checkasm_check(COEF_TYPE, __VA_ARGS__)
#endif
350
351 #endif /* DAV1D_TESTS_CHECKASM_CHECKASM_H */
352