1 /*
2 * Configuration for math routines.
3 *
4 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5 * See https://llvm.org/LICENSE.txt for license information.
6 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 */
8
9 #ifndef _MATH_CONFIG_H
10 #define _MATH_CONFIG_H
11
12 #include <math.h>
13 #include <stdint.h>
14
#ifndef WANT_ROUNDING
/* If defined to 1, return correct results for special cases in non-nearest
   rounding modes (logf (1.0f) returns 0.0f with FE_DOWNWARD rather than -0.0f).
   This may be set to 0 if there is no fenv support or if math functions only
   get called in round to nearest mode.  */
# define WANT_ROUNDING 1
#endif
#ifndef WANT_ERRNO
/* If defined to 1, set errno in math functions according to ISO C.  Many math
   libraries do not set errno, so this is 0 by default.  It may need to be
   set to 1 if math.h has (math_errhandling & MATH_ERRNO) != 0.  */
# define WANT_ERRNO 0
#endif
#ifndef WANT_ERRNO_UFLOW
/* Set errno to ERANGE if result underflows to 0 (in all rounding modes).
   Only meaningful when both WANT_ROUNDING and WANT_ERRNO are enabled.  */
# define WANT_ERRNO_UFLOW (WANT_ROUNDING && WANT_ERRNO)
#endif

/* Compiler can inline round as a single instruction.  */
#ifndef HAVE_FAST_ROUND
# if __aarch64__
#  define HAVE_FAST_ROUND 1
# else
#  define HAVE_FAST_ROUND 0
# endif
#endif

/* Compiler can inline lround, but not (long)round(x).  Requires GCC >= 4.8
   and -fno-math-errno (indicated by __NO_MATH_ERRNO__) so lround can be
   emitted as a single instruction without an errno-setting call.  */
#ifndef HAVE_FAST_LROUND
# if __aarch64__ && (100*__GNUC__ + __GNUC_MINOR__) >= 408 && __NO_MATH_ERRNO__
#  define HAVE_FAST_LROUND 1
# else
#  define HAVE_FAST_LROUND 0
# endif
#endif

/* Compiler can inline fma as a single instruction.  */
#ifndef HAVE_FAST_FMA
# if defined FP_FAST_FMA || __aarch64__
#  define HAVE_FAST_FMA 1
# else
#  define HAVE_FAST_FMA 0
# endif
#endif
59
/* Provide *_finite symbols and some of the glibc hidden symbols
   so libmathlib can be used with binaries compiled against glibc
   to interpose math functions with both static and dynamic linking.  */
#ifndef USE_GLIBC_ABI
# if __GNUC__
#  define USE_GLIBC_ABI 1
# else
#  define USE_GLIBC_ABI 0
# endif
#endif
70
/* Optionally used extensions.  */
#ifdef __GNUC__
# define HIDDEN __attribute__ ((__visibility__ ("hidden")))
# define NOINLINE __attribute__ ((noinline))
# define UNUSED __attribute__ ((unused))
/* Branch-prediction hints.  Both normalize the condition with !!(x) so the
   result is always 0 or 1, and parenthesize the argument for macro hygiene
   (the original unlikely passed x through unnormalized, inconsistent with
   likely above).  */
# define likely(x) __builtin_expect (!!(x), 1)
# define unlikely(x) __builtin_expect (!!(x), 0)
# if __GNUC__ >= 9
#  define attribute_copy(f) __attribute__ ((copy (f)))
# else
#  define attribute_copy(f)
# endif
/* Define a as an alias of f with the same linkage/attributes.  */
# define strong_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f))) attribute_copy (f);
/* Like strong_alias, but the alias has hidden visibility.  */
# define hidden_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f), visibility ("hidden"))) \
  attribute_copy (f);
#else
# define HIDDEN
# define NOINLINE
# define UNUSED
# define likely(x) (x)
# define unlikely(x) (x)
#endif
95
#if HAVE_FAST_ROUND
/* When set, the roundtoint and converttoint functions are provided with
   the semantics documented below.  */
# define TOINT_INTRINSICS 1
100
101 /* Round x to nearest int in all rounding modes, ties have to be rounded
102 consistently with converttoint so the results match. If the result
103 would be outside of [-2^31, 2^31-1] then the semantics is unspecified. */
104 static inline double_t
roundtoint(double_t x)105 roundtoint (double_t x)
106 {
107 return round (x);
108 }
109
110 /* Convert x to nearest int in all rounding modes, ties have to be rounded
111 consistently with roundtoint. If the result is not representable in an
112 int32_t then the semantics is unspecified. */
113 static inline int32_t
converttoint(double_t x)114 converttoint (double_t x)
115 {
116 # if HAVE_FAST_LROUND
117 return lround (x);
118 # else
119 return (long) round (x);
120 # endif
121 }
122 #endif
123
/* Reinterpret the bits of a float as a uint32_t.  A union is used so the
   type pun does not violate strict aliasing.  */
static inline uint32_t
asuint (float f)
{
  union
  {
    float f;
    uint32_t i;
  } rep;
  rep.f = f;
  return rep.i;
}
134
/* Reinterpret a uint32_t bit pattern as a float (inverse of asuint).  */
static inline float
asfloat (uint32_t i)
{
  union
  {
    uint32_t i;
    float f;
  } rep;
  rep.i = i;
  return rep.f;
}
145
/* Reinterpret the bits of a double as a uint64_t without violating
   strict aliasing.  */
static inline uint64_t
asuint64 (double f)
{
  union
  {
    double f;
    uint64_t i;
  } rep;
  rep.f = f;
  return rep.i;
}
156
/* Reinterpret a uint64_t bit pattern as a double (inverse of asuint64).  */
static inline double
asdouble (uint64_t i)
{
  union
  {
    uint64_t i;
    double f;
  } rep;
  rep.i = i;
  return rep.f;
}
167
/* If 1, NaNs follow the IEEE-754-2008 convention: quiet bit set means
   quiet, clear means signaling.  If 0, the legacy (inverted) convention
   is assumed.  */
#ifndef IEEE_754_2008_SNAN
# define IEEE_754_2008_SNAN 1
#endif
171 static inline int
issignalingf_inline(float x)172 issignalingf_inline (float x)
173 {
174 uint32_t ix = asuint (x);
175 if (!IEEE_754_2008_SNAN)
176 return (ix & 0x7fc00000) == 0x7fc00000;
177 return 2 * (ix ^ 0x00400000) > 2u * 0x7fc00000;
178 }
179
180 static inline int
issignaling_inline(double x)181 issignaling_inline (double x)
182 {
183 uint64_t ix = asuint64 (x);
184 if (!IEEE_754_2008_SNAN)
185 return (ix & 0x7ff8000000000000) == 0x7ff8000000000000;
186 return 2 * (ix ^ 0x0008000000000000) > 2 * 0x7ff8000000000000ULL;
187 }
188
189 #if __aarch64__ && __GNUC__
/* Prevent the optimization of a floating-point expression.  The empty asm
   with a "+w" (FP/SIMD register) read-write operand makes x opaque to the
   optimizer without emitting any instructions.  */
static inline float
opt_barrier_float (float x)
{
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
/* Double-precision variant of opt_barrier_float.  */
static inline double
opt_barrier_double (double x)
{
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
/* Force the evaluation of a floating-point expression for its side-effect
   (e.g. raising FP exception flags — the usual reason callers need this).
   The asm consumes x in a register, so the computation producing x cannot
   be dead-code eliminated; no instructions are emitted.  */
static inline void
force_eval_float (float x)
{
  __asm__ __volatile__ ("" : "+w" (x));
}
static inline void
force_eval_double (double x)
{
  __asm__ __volatile__ ("" : "+w" (x));
}
214 #else
/* Generic fallback: a round-trip through a volatile object hides the
   value from the optimizer.  */
static inline float
opt_barrier_float (float x)
{
  volatile float barrier = x;
  return barrier;
}
/* Generic fallback: a round-trip through a volatile object hides the
   value from the optimizer.  */
static inline double
opt_barrier_double (double x)
{
  volatile double barrier = x;
  return barrier;
}
227 static inline void
force_eval_float(float x)228 force_eval_float (float x)
229 {
230 volatile float y UNUSED = x;
231 }
232 static inline void
force_eval_double(double x)233 force_eval_double (double x)
234 {
235 volatile double y UNUSED = x;
236 }
237 #endif
238
239 /* Evaluate an expression as the specified type, normally a type
240 cast should be enough, but compilers implement non-standard
241 excess-precision handling, so when FLT_EVAL_METHOD != 0 then
242 these functions may need to be customized. */
/* Evaluate x as exactly float precision; the assignment narrows any
   excess precision (identity when FLT_EVAL_METHOD == 0).  */
static inline float
eval_as_float (float x)
{
  float result = x;
  return result;
}
/* Evaluate x as exactly double precision; the assignment narrows any
   excess precision (identity when FLT_EVAL_METHOD == 0).  */
static inline double
eval_as_double (double x)
{
  double result = x;
  return result;
}
253
/* Error handling tail calls for special cases, with a sign argument.
   The sign of the return value is set if the argument is non-zero.
   These are defined in other translation units of this library.  */

/* The result overflows.  */
HIDDEN float __math_oflowf (uint32_t);
/* The result underflows to 0 in nearest rounding mode.  */
HIDDEN float __math_uflowf (uint32_t);
/* The result underflows to 0 in some directed rounding mode only.  */
HIDDEN float __math_may_uflowf (uint32_t);
/* Division by zero.  */
HIDDEN float __math_divzerof (uint32_t);
/* The result overflows.  */
HIDDEN double __math_oflow (uint32_t);
/* The result underflows to 0 in nearest rounding mode.  */
HIDDEN double __math_uflow (uint32_t);
/* The result underflows to 0 in some directed rounding mode only.  */
HIDDEN double __math_may_uflow (uint32_t);
/* Division by zero.  */
HIDDEN double __math_divzero (uint32_t);

/* Error handling using input checking.  */

/* Invalid input unless it is a quiet NaN.  */
HIDDEN float __math_invalidf (float);
/* Invalid input unless it is a quiet NaN.  */
HIDDEN double __math_invalid (double);

/* Error handling using output checking, only for errno setting.  */

/* Check if the result overflowed to infinity.  */
HIDDEN double __math_check_oflow (double);
/* Check if the result underflowed to 0.  */
HIDDEN double __math_check_uflow (double);
287
288 /* Check if the result overflowed to infinity. */
289 static inline double
check_oflow(double x)290 check_oflow (double x)
291 {
292 return WANT_ERRNO ? __math_check_oflow (x) : x;
293 }
294
295 /* Check if the result underflowed to 0. */
296 static inline double
check_uflow(double x)297 check_uflow (double x)
298 {
299 return WANT_ERRNO ? __math_check_uflow (x) : x;
300 }
301
302
/* Shared between expf, exp2f and powf.  Table/coefficient values are
   defined in the corresponding data translation units of this library.  */
#define EXP2F_TABLE_BITS 5
#define EXP2F_POLY_ORDER 3
extern const struct exp2f_data
{
  /* Lookup table with 2^EXP2F_TABLE_BITS entries of double bit patterns.  */
  uint64_t tab[1 << EXP2F_TABLE_BITS];
  double shift_scaled;
  double poly[EXP2F_POLY_ORDER];
  double shift;
  double invln2_scaled;
  double poly_scaled[EXP2F_POLY_ORDER];
} __exp2f_data HIDDEN;

#define LOGF_TABLE_BITS 4
#define LOGF_POLY_ORDER 4
extern const struct logf_data
{
  struct
  {
    /* Presumably 1/c and log(c) pairs per table interval — see logf data
       file for the exact generation.  */
    double invc, logc;
  } tab[1 << LOGF_TABLE_BITS];
  double ln2;
  double poly[LOGF_POLY_ORDER - 1]; /* First order coefficient is 1.  */
} __logf_data HIDDEN;

#define LOG2F_TABLE_BITS 4
#define LOG2F_POLY_ORDER 4
extern const struct log2f_data
{
  struct
  {
    double invc, logc;
  } tab[1 << LOG2F_TABLE_BITS];
  double poly[LOG2F_POLY_ORDER];
} __log2f_data HIDDEN;

#define POWF_LOG2_TABLE_BITS 4
#define POWF_LOG2_POLY_ORDER 5
/* With TOINT_INTRINSICS the log2 result is pre-scaled by 2^POWF_SCALE_BITS
   so exp2 can reuse its table indexing; otherwise no scaling.  */
#if TOINT_INTRINSICS
# define POWF_SCALE_BITS EXP2F_TABLE_BITS
#else
# define POWF_SCALE_BITS 0
#endif
#define POWF_SCALE ((double) (1 << POWF_SCALE_BITS))
extern const struct powf_log2_data
{
  struct
  {
    double invc, logc;
  } tab[1 << POWF_LOG2_TABLE_BITS];
  double poly[POWF_LOG2_POLY_ORDER];
} __powf_log2_data HIDDEN;
355
356
#define EXP_TABLE_BITS 7
#define EXP_POLY_ORDER 5
/* Use polynomial that is optimized for a wider input range.  This may be
   needed for good precision in non-nearest rounding and !TOINT_INTRINSICS.  */
#define EXP_POLY_WIDE 0
/* Use close to nearest rounding toint when !TOINT_INTRINSICS.  This may be
   needed for good precision in non-nearest rounding and !EXP_POLY_WIDE.  */
#define EXP_USE_TOINT_NARROW 0
#define EXP2_POLY_ORDER 5
#define EXP2_POLY_WIDE 0
/* Shared data for exp and exp2; values defined in the exp data file.  */
extern const struct exp_data
{
  double invln2N;
  double shift;
  /* negln2hiN/negln2loN look like a hi/lo split of -ln2/N for extra
     precision — confirm against the data file.  */
  double negln2hiN;
  double negln2loN;
  double poly[4]; /* Last four coefficients.  */
  double exp2_shift;
  double exp2_poly[EXP2_POLY_ORDER];
  /* Two 64-bit words per table entry.  */
  uint64_t tab[2*(1 << EXP_TABLE_BITS)];
} __exp_data HIDDEN;

#define LOG_TABLE_BITS 7
#define LOG_POLY_ORDER 6
#define LOG_POLY1_ORDER 12
extern const struct log_data
{
  /* hi/lo split of ln2 for extended-precision accumulation.  */
  double ln2hi;
  double ln2lo;
  double poly[LOG_POLY_ORDER - 1]; /* First coefficient is 1.  */
  double poly1[LOG_POLY1_ORDER - 1];
  struct {double invc, logc;} tab[1 << LOG_TABLE_BITS];
#if !HAVE_FAST_FMA
  /* Extra hi/lo table used only when a fused multiply-add is not
     available in hardware.  */
  struct {double chi, clo;} tab2[1 << LOG_TABLE_BITS];
#endif
} __log_data HIDDEN;

#define LOG2_TABLE_BITS 6
#define LOG2_POLY_ORDER 7
#define LOG2_POLY1_ORDER 11
extern const struct log2_data
{
  /* hi/lo split of 1/ln2.  */
  double invln2hi;
  double invln2lo;
  double poly[LOG2_POLY_ORDER - 1];
  double poly1[LOG2_POLY1_ORDER - 1];
  struct {double invc, logc;} tab[1 << LOG2_TABLE_BITS];
#if !HAVE_FAST_FMA
  struct {double chi, clo;} tab2[1 << LOG2_TABLE_BITS];
#endif
} __log2_data HIDDEN;

#define POW_LOG_TABLE_BITS 7
#define POW_LOG_POLY_ORDER 8
extern const struct pow_log_data
{
  double ln2hi;
  double ln2lo;
  double poly[POW_LOG_POLY_ORDER - 1]; /* First coefficient is 1.  */
  /* Note: the pad field is unused, but allows slightly faster indexing.  */
  struct {double invc, pad, logc, logctail;} tab[1 << POW_LOG_TABLE_BITS];
} __pow_log_data HIDDEN;
419
420 #endif
421