1 /***********************************************************************
2 * Copyright (c) 2013, 2014 Pieter Wuille *
3 * Distributed under the MIT software license, see the accompanying *
4 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
5 ***********************************************************************/
6
7 #ifndef SECP256K1_UTIL_H
8 #define SECP256K1_UTIL_H
9
10 #if defined HAVE_CONFIG_H
11 #include "libsecp256k1-config.h"
12 #endif
13
14 #include <stdlib.h>
15 #include <stdint.h>
16 #include <stdio.h>
17 #include <limits.h>
18
/* A callback: a function pointer `fn` plus an opaque `data` pointer that is
 * forwarded to `fn` on every invocation (see
 * rustsecp256k1_v0_4_1_callback_call). */
typedef struct {
    void (*fn)(const char *text, void* data);
    const void* data;
} rustsecp256k1_v0_4_1_callback;
23
rustsecp256k1_v0_4_1_callback_call(const rustsecp256k1_v0_4_1_callback * const cb,const char * const text)24 static SECP256K1_INLINE void rustsecp256k1_v0_4_1_callback_call(const rustsecp256k1_v0_4_1_callback * const cb, const char * const text) {
25 cb->fn(text, (void*)cb->data);
26 }
27
#ifdef DETERMINISTIC
/* Print only the message (no file/line) so test output is reproducible
 * across builds and source revisions, then abort.
 * Note: the do { } while(0) wrapper deliberately has no trailing semicolon
 * so the macro can be used like a statement, e.g. in an if/else. */
#define TEST_FAILURE(msg) do { \
    fprintf(stderr, "%s\n", msg); \
    abort(); \
} while(0)
#else
/* Print the message together with the failing file and line, then abort. */
#define TEST_FAILURE(msg) do { \
    fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg); \
    abort(); \
} while(0)
#endif
39
/* EXPECT(x,c) evaluates to x while hinting GCC-compatible compilers that x
 * is expected to equal c (branch-prediction hint). Plain passthrough on
 * other compilers. */
#if SECP256K1_GNUC_PREREQ(3, 0)
#define EXPECT(x,c) __builtin_expect((x),(c))
#else
#define EXPECT(x,c) (x)
#endif
45
/* CHECK(cond) aborts via TEST_FAILURE when cond is false. In DETERMINISTIC
 * builds the stringified condition is omitted from the message so that
 * output does not vary with source text. */
#ifdef DETERMINISTIC
#define CHECK(cond) do { \
    if (EXPECT(!(cond), 0)) { \
        TEST_FAILURE("test condition failed"); \
    } \
} while(0)
#else
#define CHECK(cond) do { \
    if (EXPECT(!(cond), 0)) { \
        TEST_FAILURE("test condition failed: " #cond); \
    } \
} while(0)
#endif
59
/* Like assert(), but when VERIFY is defined, and side-effect safe. */
/* VERIFY_CHECK(cond): checked only in VERIFY builds; in normal builds cond
 *   is still evaluated (as (void)(cond)) so side effects behave identically.
 *   COVERAGE builds compile both out entirely so they do not skew coverage.
 * VERIFY_SETUP(stmt): a statement executed only in VERIFY builds (e.g. to
 *   prepare data that later VERIFY_CHECKs inspect). */
#if defined(COVERAGE)
#define VERIFY_CHECK(check)
#define VERIFY_SETUP(stmt)
#elif defined(VERIFY)
#define VERIFY_CHECK CHECK
#define VERIFY_SETUP(stmt) do { stmt; } while(0)
#else
#define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
#define VERIFY_SETUP(stmt)
#endif
71
/* Define `VG_UNDEF` and `VG_CHECK` when VALGRIND is defined */
/* VG_UNDEF(x,y) marks the y bytes at x as uninitialized for memcheck;
 * VG_CHECK(x,y) asserts the y bytes at x are fully defined. Both are no-ops
 * unless built with VALGRIND. The outer guard lets a build system provide
 * its own definitions. */
#if !defined(VG_CHECK)
# if defined(VALGRIND)
# include <valgrind/memcheck.h>
# define VG_UNDEF(x,y) VALGRIND_MAKE_MEM_UNDEFINED((x),(y))
# define VG_CHECK(x,y) VALGRIND_CHECK_MEM_IS_DEFINED((x),(y))
# else
# define VG_UNDEF(x,y)
# define VG_CHECK(x,y)
# endif
#endif

/* Like `VG_CHECK` but on VERIFY only */
#if defined(VERIFY)
#define VG_CHECK_VERIFY(x,y) VG_CHECK((x), (y))
#else
#define VG_CHECK_VERIFY(x,y)
#endif
90
/* ALIGNMENT is the alignment, in bytes, used by manual_alloc below. Prefer
 * the compiler-provided maximum fundamental alignment when available. */
#if defined(__BIGGEST_ALIGNMENT__)
#define ALIGNMENT __BIGGEST_ALIGNMENT__
#else
/* Using 16 bytes alignment because common architectures never have alignment
 * requirements above 8 for any of the types we care about. In addition we
 * leave some room because currently we don't care about a few bytes. */
#define ALIGNMENT 16
#endif

/* Round size up to the next multiple of ALIGNMENT. */
#define ROUND_TO_ALIGN(size) ((((size) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT)
101
102 /* Assume there is a contiguous memory object with bounds [base, base + max_size)
103 * of which the memory range [base, *prealloc_ptr) is already allocated for usage,
104 * where *prealloc_ptr is an aligned pointer. In that setting, this functions
105 * reserves the subobject [*prealloc_ptr, *prealloc_ptr + alloc_size) of
106 * alloc_size bytes by increasing *prealloc_ptr accordingly, taking into account
107 * alignment requirements.
108 *
109 * The function returns an aligned pointer to the newly allocated subobject.
110 *
111 * This is useful for manual memory management: if we're simply given a block
112 * [base, base + max_size), the caller can use this function to allocate memory
113 * in this block and keep track of the current allocation state with *prealloc_ptr.
114 *
115 * It is VERIFY_CHECKed that there is enough space left in the memory object and
116 * *prealloc_ptr is aligned relative to base.
117 */
manual_alloc(void ** prealloc_ptr,size_t alloc_size,void * base,size_t max_size)118 static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_size, void* base, size_t max_size) {
119 size_t aligned_alloc_size = ROUND_TO_ALIGN(alloc_size);
120 void* ret;
121 VERIFY_CHECK(prealloc_ptr != NULL);
122 VERIFY_CHECK(*prealloc_ptr != NULL);
123 VERIFY_CHECK(base != NULL);
124 VERIFY_CHECK((unsigned char*)*prealloc_ptr >= (unsigned char*)base);
125 VERIFY_CHECK(((unsigned char*)*prealloc_ptr - (unsigned char*)base) % ALIGNMENT == 0);
126 VERIFY_CHECK((unsigned char*)*prealloc_ptr - (unsigned char*)base + aligned_alloc_size <= max_size);
127 ret = *prealloc_ptr;
128 *prealloc_ptr = (unsigned char*)*prealloc_ptr + aligned_alloc_size;
129 return ret;
130 }
131
/* Macro for restrict, when available and not in a VERIFY build. */
/* VERIFY builds disable restrict entirely (defined empty) so that the extra
 * checking code cannot trip over aliasing assumptions. Otherwise, use the
 * C99 keyword when the compiler is in C99+ mode, or a vendor spelling
 * (__restrict__ for GCC >= 3, __restrict for MSVC >= 2005) in C89 mode. */
#if defined(SECP256K1_BUILD) && defined(VERIFY)
# define SECP256K1_RESTRICT
#else
# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) )
#  if SECP256K1_GNUC_PREREQ(3,0)
#   define SECP256K1_RESTRICT __restrict__
#  elif (defined(_MSC_VER) && _MSC_VER >= 1400)
#   define SECP256K1_RESTRICT __restrict
#  else
#   define SECP256K1_RESTRICT
#  endif
# else
#  define SECP256K1_RESTRICT restrict
# endif
#endif
148
/* printf length-modifier strings for 64-bit integers. Windows C runtimes
 * historically use "I64d"/"I64u" in place of the C99 "lld"/"llu". */
#if defined(_WIN32)
# define I64FORMAT "I64d"
# define I64uFORMAT "I64u"
#else
# define I64FORMAT "lld"
# define I64uFORMAT "llu"
#endif

/* __extension__ silences pedantic warnings about non-standard constructs
 * (e.g. the __int128 typedefs below) on GCC-compatible compilers. */
#if defined(__GNUC__)
# define SECP256K1_GNUC_EXT __extension__
#else
# define SECP256K1_GNUC_EXT
#endif
162
/* If SECP256K1_{LITTLE,BIG}_ENDIAN is not explicitly provided, infer from various other system macros. */
#if !defined(SECP256K1_LITTLE_ENDIAN) && !defined(SECP256K1_BIG_ENDIAN)
/* Inspired by https://github.com/rofl0r/endianness.h/blob/9853923246b065a3b52d2c43835f3819a62c7199/endianness.h#L52L73 */
# if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
     defined(_X86_) || defined(__x86_64__) || defined(__i386__) || \
     defined(__i486__) || defined(__i586__) || defined(__i686__) || \
     defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) || \
     defined(__ARMEL__) || defined(__AARCH64EL__) || \
     (defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__ == 1) || \
     (defined(_LITTLE_ENDIAN) && _LITTLE_ENDIAN == 1) || \
     defined(_M_IX86) || defined(_M_AMD64) || defined(_M_ARM) /* MSVC */
#  define SECP256K1_LITTLE_ENDIAN
# endif
# if (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
     defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) || \
     defined(__MICROBLAZEEB__) || defined(__ARMEB__) || defined(__AARCH64EB__) || \
     (defined(__BIG_ENDIAN__) && __BIG_ENDIAN__ == 1) || \
     (defined(_BIG_ENDIAN) && _BIG_ENDIAN == 1)
#  define SECP256K1_BIG_ENDIAN
# endif
#endif
/* Exactly one of the two must end up defined; the == comparison catches both
 * "neither defined" and "both defined". */
#if defined(SECP256K1_LITTLE_ENDIAN) == defined(SECP256K1_BIG_ENDIAN)
# error Please make sure that either SECP256K1_LITTLE_ENDIAN or SECP256K1_BIG_ENDIAN is set, see src/util.h.
#endif
187
188 /* Zero memory if flag == 1. Flag must be 0 or 1. Constant time. */
rustsecp256k1_v0_4_1_memczero(void * s,size_t len,int flag)189 static SECP256K1_INLINE void rustsecp256k1_v0_4_1_memczero(void *s, size_t len, int flag) {
190 unsigned char *p = (unsigned char *)s;
191 /* Access flag with a volatile-qualified lvalue.
192 This prevents clang from figuring out (after inlining) that flag can
193 take only be 0 or 1, which leads to variable time code. */
194 volatile int vflag = flag;
195 unsigned char mask = -(unsigned char) vflag;
196 while (len) {
197 *p &= ~mask;
198 p++;
199 len--;
200 }
201 }
202
203 /** Semantics like memcmp. Variable-time.
204 *
205 * We use this to avoid possible compiler bugs with memcmp, e.g.
206 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95189
207 */
rustsecp256k1_v0_4_1_memcmp_var(const void * s1,const void * s2,size_t n)208 static SECP256K1_INLINE int rustsecp256k1_v0_4_1_memcmp_var(const void *s1, const void *s2, size_t n) {
209 const unsigned char *p1 = s1, *p2 = s2;
210 size_t i;
211
212 for (i = 0; i < n; i++) {
213 int diff = p1[i] - p2[i];
214 if (diff != 0) {
215 return diff;
216 }
217 }
218 return 0;
219 }
220
221 /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/
rustsecp256k1_v0_4_1_int_cmov(int * r,const int * a,int flag)222 static SECP256K1_INLINE void rustsecp256k1_v0_4_1_int_cmov(int *r, const int *a, int flag) {
223 unsigned int mask0, mask1, r_masked, a_masked;
224 /* Access flag with a volatile-qualified lvalue.
225 This prevents clang from figuring out (after inlining) that flag can
226 take only be 0 or 1, which leads to variable time code. */
227 volatile int vflag = flag;
228
229 /* Casting a negative int to unsigned and back to int is implementation defined behavior */
230 VERIFY_CHECK(*r >= 0 && *a >= 0);
231
232 mask0 = (unsigned int)vflag + ~0u;
233 mask1 = ~mask0;
234 r_masked = ((unsigned int)*r & mask0);
235 a_masked = ((unsigned int)*a & mask1);
236
237 *r = (int)(r_masked | a_masked);
238 }
239
/* If USE_FORCE_WIDEMUL_{INT128,INT64} is set, use that wide multiplication implementation.
 * Otherwise use the presence of __SIZEOF_INT128__ to decide.
 */
#if defined(USE_FORCE_WIDEMUL_INT128)
# define SECP256K1_WIDEMUL_INT128 1
#elif defined(USE_FORCE_WIDEMUL_INT64)
# define SECP256K1_WIDEMUL_INT64 1
#elif defined(UINT128_MAX) || defined(__SIZEOF_INT128__)
# define SECP256K1_WIDEMUL_INT128 1
#else
# define SECP256K1_WIDEMUL_INT64 1
#endif
#if defined(SECP256K1_WIDEMUL_INT128)
/* Provide uint128_t/int128_t and their limit macros via the GCC/Clang
 * __int128 extension when the platform does not supply them natively. */
# if !defined(UINT128_MAX) && defined(__SIZEOF_INT128__)
SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t;
SECP256K1_GNUC_EXT typedef __int128 int128_t;
#define UINT128_MAX ((uint128_t)(-1))
#define INT128_MAX ((int128_t)(UINT128_MAX >> 1))
#define INT128_MIN (-INT128_MAX - 1)
/* No (U)INT128_C macros because compilers providing __int128 do not support 128-bit literals. */
# endif
#endif
262
/* Fallback so __has_builtin can be used unconditionally below; compilers
 * without it then report every builtin as unavailable. */
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
266
267 /* Determine the number of trailing zero bits in a (non-zero) 32-bit x.
268 * This function is only intended to be used as fallback for
269 * rustsecp256k1_v0_4_1_ctz32_var, but permits it to be tested separately. */
rustsecp256k1_v0_4_1_ctz32_var_debruijn(uint32_t x)270 static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz32_var_debruijn(uint32_t x) {
271 static const uint8_t debruijn[32] = {
272 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A,
273 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B,
274 0x1E, 0x11, 0x08, 0x0E, 0x1D, 0x0D, 0x1C, 0x1B
275 };
276 return debruijn[((x & -x) * 0x04D7651F) >> 27];
277 }
278
279 /* Determine the number of trailing zero bits in a (non-zero) 64-bit x.
280 * This function is only intended to be used as fallback for
281 * rustsecp256k1_v0_4_1_ctz64_var, but permits it to be tested separately. */
rustsecp256k1_v0_4_1_ctz64_var_debruijn(uint64_t x)282 static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz64_var_debruijn(uint64_t x) {
283 static const uint8_t debruijn[64] = {
284 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
285 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
286 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
287 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
288 };
289 return debruijn[((x & -x) * 0x022FDD63CC95386D) >> 58];
290 }
291
292 /* Determine the number of trailing zero bits in a (non-zero) 32-bit x. */
rustsecp256k1_v0_4_1_ctz32_var(uint32_t x)293 static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz32_var(uint32_t x) {
294 VERIFY_CHECK(x != 0);
295 #if (__has_builtin(__builtin_ctz) || SECP256K1_GNUC_PREREQ(3,4))
296 /* If the unsigned type is sufficient to represent the largest uint32_t, consider __builtin_ctz. */
297 if (((unsigned)UINT32_MAX) == UINT32_MAX) {
298 return __builtin_ctz(x);
299 }
300 #endif
301 #if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4))
302 /* Otherwise consider __builtin_ctzl (the unsigned long type is always at least 32 bits). */
303 return __builtin_ctzl(x);
304 #else
305 /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */
306 return rustsecp256k1_v0_4_1_ctz32_var_debruijn(x);
307 #endif
308 }
309
310 /* Determine the number of trailing zero bits in a (non-zero) 64-bit x. */
rustsecp256k1_v0_4_1_ctz64_var(uint64_t x)311 static SECP256K1_INLINE int rustsecp256k1_v0_4_1_ctz64_var(uint64_t x) {
312 VERIFY_CHECK(x != 0);
313 #if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4))
314 /* If the unsigned long type is sufficient to represent the largest uint64_t, consider __builtin_ctzl. */
315 if (((unsigned long)UINT64_MAX) == UINT64_MAX) {
316 return __builtin_ctzl(x);
317 }
318 #endif
319 #if (__has_builtin(__builtin_ctzll) || SECP256K1_GNUC_PREREQ(3,4))
320 /* Otherwise consider __builtin_ctzll (the unsigned long long type is always at least 64 bits). */
321 return __builtin_ctzll(x);
322 #else
323 /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */
324 return rustsecp256k1_v0_4_1_ctz64_var_debruijn(x);
325 #endif
326 }
327
328 #endif /* SECP256K1_UTIL_H */
329