/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunPro, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */

/*
 * from: @(#)fdlibm.h 5.1 93/09/24
 */

#ifndef _MATH_PRIVATE_H_
#define	_MATH_PRIVATE_H_

#include <sys/types.h>
#include <machine/endian.h>

/*
 * The original fdlibm code used statements like:
 *	n0 = ((*(int*)&one)>>29)^1;		* index of high word *
 *	ix0 = *(n0+(int*)&x);			* high word of x *
 *	ix1 = *((1-n0)+(int*)&x);		* low word of x *
 * to dig two 32 bit words out of the 64 bit IEEE floating point
 * value.  That is non-ANSI, and, moreover, the gcc instruction
 * scheduler gets it wrong.  We instead use the following macros.
 * Unlike the original code, we determine the endianness at compile
 * time, not at run time; I don't see much benefit to selecting
 * endianness at run time.
 */

/*
 * A union which permits us to convert between a double and two 32 bit
 * ints.
 */

#ifdef __arm__
#if defined(__VFP_FP__) || defined(__ARM_EABI__)
#define	IEEE_WORD_ORDER	BYTE_ORDER
#else
#define	IEEE_WORD_ORDER	BIG_ENDIAN
#endif
#else /* __arm__ */
#define	IEEE_WORD_ORDER	BYTE_ORDER
#endif

/* A union which permits us to convert between a long double and
   four 32 bit ints.  */

#if IEEE_WORD_ORDER == BIG_ENDIAN

typedef union
{
  long double value;
  struct {
    u_int32_t mswhi;
    u_int32_t mswlo;
    u_int32_t lswhi;
    u_int32_t lswlo;
  } parts32;
  struct {
    u_int64_t msw;
    u_int64_t lsw;
  } parts64;
} ieee_quad_shape_type;

#endif

#if IEEE_WORD_ORDER == LITTLE_ENDIAN

typedef union
{
  long double value;
  struct {
    u_int32_t lswlo;
    u_int32_t lswhi;
    u_int32_t mswlo;
    u_int32_t mswhi;
  } parts32;
  struct {
    u_int64_t lsw;
    u_int64_t msw;
  } parts64;
} ieee_quad_shape_type;

#endif

#if IEEE_WORD_ORDER == BIG_ENDIAN

typedef union
{
  double value;
  struct
  {
    u_int32_t msw;
    u_int32_t lsw;
  } parts;
  struct
  {
    u_int64_t w;
  } xparts;
} ieee_double_shape_type;

#endif

#if IEEE_WORD_ORDER == LITTLE_ENDIAN

typedef union
{
  double value;
  struct
  {
    u_int32_t lsw;
    u_int32_t msw;
  } parts;
  struct
  {
    u_int64_t w;
  } xparts;
} ieee_double_shape_type;

#endif

/* Get two 32 bit ints from a double.  */

#define EXTRACT_WORDS(ix0,ix1,d)				\
do {								\
  ieee_double_shape_type ew_u;					\
  ew_u.value = (d);						\
  (ix0) = ew_u.parts.msw;					\
  (ix1) = ew_u.parts.lsw;					\
} while (0)

/* Get a 64-bit int from a double. */
#define EXTRACT_WORD64(ix,d)					\
do {								\
  ieee_double_shape_type ew_u;					\
  ew_u.value = (d);						\
  (ix) = ew_u.xparts.w;						\
} while (0)

/* Get the more significant 32 bit int from a double.  */

#define GET_HIGH_WORD(i,d)					\
do {								\
  ieee_double_shape_type gh_u;					\
  gh_u.value = (d);						\
  (i) = gh_u.parts.msw;						\
} while (0)

/* Get the less significant 32 bit int from a double.  */

#define GET_LOW_WORD(i,d)					\
do {								\
  ieee_double_shape_type gl_u;					\
  gl_u.value = (d);						\
  (i) = gl_u.parts.lsw;						\
} while (0)

/* Set a double from two 32 bit ints.  */

#define INSERT_WORDS(d,ix0,ix1)					\
do {								\
  ieee_double_shape_type iw_u;					\
  iw_u.parts.msw = (ix0);					\
  iw_u.parts.lsw = (ix1);					\
  (d) = iw_u.value;						\
} while (0)

/* Set a double from a 64-bit int. */
#define INSERT_WORD64(d,ix)					\
do {								\
  ieee_double_shape_type iw_u;					\
  iw_u.xparts.w = (ix);						\
  (d) = iw_u.value;						\
} while (0)

/* Set the more significant 32 bits of a double from an int.  */

#define SET_HIGH_WORD(d,v)					\
do {								\
  ieee_double_shape_type sh_u;					\
  sh_u.value = (d);						\
  sh_u.parts.msw = (v);						\
  (d) = sh_u.value;						\
} while (0)

/* Set the less significant 32 bits of a double from an int.  */

#define SET_LOW_WORD(d,v)					\
do {								\
  ieee_double_shape_type sl_u;					\
  sl_u.value = (d);						\
  sl_u.parts.lsw = (v);						\
  (d) = sl_u.value;						\
} while (0)
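
/*
 * Example (illustrative only; not used by the library): digging the two
 * 32 bit words out of a double and flipping its sign bit.  The values
 * shown assume IEEE 754 binary64:
 *
 *	double x = 2.0;
 *	u_int32_t hi, lo;
 *
 *	EXTRACT_WORDS(hi, lo, x);		now hi == 0x40000000, lo == 0
 *	SET_HIGH_WORD(x, hi | 0x80000000);	now x == -2.0
 */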

/*
 * A union which permits us to convert between a float and a 32 bit
 * int.
 */

typedef union
{
  float value;
  /* FIXME: Assumes 32 bit int.  */
  unsigned int word;
} ieee_float_shape_type;

/* Get a 32 bit int from a float.  */

#define GET_FLOAT_WORD(i,d)					\
do {								\
  ieee_float_shape_type gf_u;					\
  gf_u.value = (d);						\
  (i) = gf_u.word;						\
} while (0)

/* Set a float from a 32 bit int.  */

#define SET_FLOAT_WORD(d,i)					\
do {								\
  ieee_float_shape_type sf_u;					\
  sf_u.word = (i);						\
  (d) = sf_u.value;						\
} while (0)
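
/*
 * Example (illustrative only): the usual fdlibm idiom for exponent tests
 * via the float word, here a crude |x| < 1 check:
 *
 *	float x = 0.5F;
 *	int32_t ix;
 *
 *	GET_FLOAT_WORD(ix, x);			now ix == 0x3f000000
 *	if ((ix & 0x7fffffff) < 0x3f800000)	0x3f800000 is 1.0F, so |x| < 1
 *		...
 */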

/*
 * Get expsign and mantissa as 16 bit and 64 bit ints from an 80 bit long
 * double.
 */

#define	EXTRACT_LDBL80_WORDS(ix0,ix1,d)				\
do {								\
  union IEEEl2bits ew_u;					\
  ew_u.e = (d);							\
  (ix0) = ew_u.xbits.expsign;					\
  (ix1) = ew_u.xbits.man;					\
} while (0)

/*
 * Get expsign and mantissa as one 16 bit and two 64 bit ints from a 128 bit
 * long double.
 */

#define	EXTRACT_LDBL128_WORDS(ix0,ix1,ix2,d)			\
do {								\
  union IEEEl2bits ew_u;					\
  ew_u.e = (d);							\
  (ix0) = ew_u.xbits.expsign;					\
  (ix1) = ew_u.xbits.manh;					\
  (ix2) = ew_u.xbits.manl;					\
} while (0)

/* Get expsign as a 16 bit int from a long double.  */

#define	GET_LDBL_EXPSIGN(i,d)					\
do {								\
  union IEEEl2bits ge_u;					\
  ge_u.e = (d);							\
  (i) = ge_u.xbits.expsign;					\
} while (0)

/*
 * Set an 80 bit long double from a 16 bit int expsign and a 64 bit int
 * mantissa.
 */

#define	INSERT_LDBL80_WORDS(d,ix0,ix1)				\
do {								\
  union IEEEl2bits iw_u;					\
  iw_u.xbits.expsign = (ix0);					\
  iw_u.xbits.man = (ix1);					\
  (d) = iw_u.e;							\
} while (0)

/*
 * Set a 128 bit long double from a 16 bit int expsign and two 64 bit ints
 * comprising the mantissa.
 */

#define	INSERT_LDBL128_WORDS(d,ix0,ix1,ix2)			\
do {								\
  union IEEEl2bits iw_u;					\
  iw_u.xbits.expsign = (ix0);					\
  iw_u.xbits.manh = (ix1);					\
  iw_u.xbits.manl = (ix2);					\
  (d) = iw_u.e;							\
} while (0)

/* Set expsign of a long double from a 16 bit int.  */

#define	SET_LDBL_EXPSIGN(d,v)					\
do {								\
  union IEEEl2bits se_u;					\
  se_u.e = (d);							\
  se_u.xbits.expsign = (v);					\
  (d) = se_u.e;							\
} while (0)

#ifdef __i386__
/* Long double constants are broken on i386. */
#define	LD80C(m, ex, v) {						\
	.xbits.man = __CONCAT(m, ULL),					\
	.xbits.expsign = (0x3fff + (ex)) | ((v) < 0 ? 0x8000 : 0),	\
}
#else
/* The above works on non-i386 too, but we use this to check v. */
#define	LD80C(m, ex, v)	{ .e = (v), }
#endif

#ifdef FLT_EVAL_METHOD
/*
 * Attempt to get strict C99 semantics for assignment with non-C99 compilers.
 */
#if FLT_EVAL_METHOD == 0 || __GNUC__ == 0
#define	STRICT_ASSIGN(type, lval, rval)	((lval) = (rval))
#else
#define	STRICT_ASSIGN(type, lval, rval) do {	\
	volatile type __lval;			\
						\
	if (sizeof(type) >= sizeof(long double))	\
		(lval) = (rval);		\
	else {					\
		__lval = (rval);		\
		(lval) = __lval;		\
	}					\
} while (0)
#endif
#endif /* FLT_EVAL_METHOD */
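
/*
 * Example (illustrative only): on i386 with x87 extra precision, a plain
 * "y = x + 0x1p-30F" may leave y in a register with 64 bit precision, so
 * a later test can see an unrounded value.  Forcing the store gives the
 * C99-required float result:
 *
 *	float x = 1.0F, y;
 *
 *	STRICT_ASSIGN(float, y, x + 0x1p-30F);	now y == 1.0F exactly,
 *						since 0x1p-30 < 0.5 ulp of 1
 */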

/* Support switching the mode to FP_PE if necessary. */
#if defined(__i386__) && !defined(NO_FPSETPREC)
#define	ENTERI() ENTERIT(long double)
#define	ENTERIT(returntype)			\
	returntype __retval;			\
	fp_prec_t __oprec;			\
						\
	if ((__oprec = fpgetprec()) != FP_PE)	\
		fpsetprec(FP_PE)
#define	RETURNI(x) do {				\
	__retval = (x);				\
	if (__oprec != FP_PE)			\
		fpsetprec(__oprec);		\
	RETURNF(__retval);			\
} while (0)
#define	ENTERV()				\
	fp_prec_t __oprec;			\
						\
	if ((__oprec = fpgetprec()) != FP_PE)	\
		fpsetprec(FP_PE)
#define	RETURNV() do {				\
	if (__oprec != FP_PE)			\
		fpsetprec(__oprec);		\
	return;					\
} while (0)
#else
#define	ENTERI()
#define	ENTERIT(x)
#define	RETURNI(x)	RETURNF(x)
#define	ENTERV()
#define	RETURNV()	return
#endif

/* Default return statement if hack*_t() is not used. */
#define	RETURNF(v)	return (v)

/*
 * 2sum gives the same result as 2sumF without requiring |a| >= |b| or
 * a == 0, but is slower.
 */
#define	_2sum(a, b) do {	\
	__typeof(a) __s, __w;	\
				\
	__w = (a) + (b);	\
	__s = __w - (a);	\
	(b) = ((a) - (__w - __s)) + ((b) - __s); \
	(a) = __w;		\
} while (0)
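
/*
 * Example (illustrative only): splitting a sum into its rounded value
 * and rounding error, in double precision:
 *
 *	double a = 1.0, b = 0x1p-60;
 *
 *	_2sum(a, b);	now a == 1.0 (the rounded sum) and b == 0x1p-60
 *			(the rounding error), so a + b is the exact sum
 */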

/*
 * 2sumF algorithm.
 *
 * "Normalize" the terms in the infinite-precision expression a + b for
 * the sum of 2 floating point values so that b is as small as possible
 * relative to 'a'.  (The resulting 'a' is the value of the expression in
 * the same precision as 'a' and the resulting b is the rounding error.)
 * |a| must be >= |b| or 0, b's type must be no larger than 'a's type, and
 * exponent overflow or underflow must not occur.  This uses a Theorem of
 * Dekker (1971).  See Knuth (1981) 4.2.2 Theorem C.  The name "TwoSum"
 * is apparently due to Shewchuk (1997).
 *
 * For this to always work, assignment of a + b to 'a' must not retain any
 * extra precision in a + b.  This is required by C standards but broken
 * in many compilers.  The brokenness cannot be worked around using
 * STRICT_ASSIGN() like we do elsewhere, since the efficiency of this
 * algorithm would be destroyed by non-null strict assignments.  (The
 * compilers are correct to be broken -- the efficiency of all floating
 * point calculations would be destroyed similarly if they forced the
 * conversions.)
 *
 * Fortunately, a case that works well can usually be arranged by building
 * any extra precision into the type of 'a' -- 'a' should have type float_t,
 * double_t or long double.  b's type should be no larger than 'a's type.
 * Callers should use these types with scopes as large as possible, to
 * reduce their own extra-precision and efficiency problems.  In
 * particular, they shouldn't convert back and forth just to call here.
 */
#ifdef DEBUG
#define	_2sumF(a, b) do {				\
	__typeof(a) __w;				\
	volatile __typeof(a) __ia, __ib, __r, __vw;	\
							\
	__ia = (a);					\
	__ib = (b);					\
	assert(__ia == 0 || fabsl(__ia) >= fabsl(__ib));	\
							\
	__w = (a) + (b);				\
	(b) = ((a) - __w) + (b);			\
	(a) = __w;					\
							\
	/* The next 2 assertions are weak if (a) is already long double. */ \
	assert((long double)__ia + __ib == (long double)(a) + (b));	\
	__vw = __ia + __ib;				\
	__r = __ia - __vw;				\
	__r += __ib;					\
	assert(__vw == (a) && __r == (b));		\
} while (0)
#else /* !DEBUG */
#define	_2sumF(a, b) do {	\
	__typeof(a) __w;	\
				\
	__w = (a) + (b);	\
	(b) = ((a) - __w) + (b); \
	(a) = __w;		\
} while (0)
#endif /* DEBUG */
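
/*
 * Example (illustrative only): _2sumF is the cheap variant for callers
 * that already know |a| >= |b|:
 *
 *	double a = 0x1p53, b = 1.5;
 *
 *	_2sumF(a, b);	now a == 0x1p53 + 2.0 (the rounded sum) and
 *			b == -0.5 (the exact rounding error)
 */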

/*
 * Set x += c, where x is represented in extra precision as a + b.
 * x must be sufficiently normalized and sufficiently larger than c,
 * and the result is then sufficiently normalized.
 *
 * The details of ordering are that |a| must be >= |c| (so that (a, c)
 * can be normalized without extra work to swap 'a' with c).  The details of
 * the normalization are that b must be small relative to the normalized 'a'.
 * Normalization of (a, c) makes the normalized c tiny relative to the
 * normalized a, so b remains small relative to 'a' in the result.  However,
 * b need not ever be tiny relative to 'a'.  For example, b might be about
 * 2**20 times smaller than 'a' to give about 20 extra bits of precision.
 * That is usually enough, and adding c (which by normalization is about
 * 2**53 times smaller than a) cannot change b significantly.  However,
 * cancellation of 'a' with c in normalization of (a, c) may reduce 'a'
 * significantly relative to b.  The caller must ensure that significant
 * cancellation doesn't occur, either by having c of the same sign as 'a',
 * or by having |c| a few percent smaller than |a|.  Pre-normalization of
 * (a, b) may help.
 *
 * This is a variant of an algorithm of Kahan (see Knuth (1981) 4.2.2
 * exercise 19).  We gain considerable efficiency by requiring the terms to
 * be sufficiently normalized and sufficiently increasing.
 */
#define	_3sumF(a, b, c) do {	\
	__typeof(a) __tmp;	\
				\
	__tmp = (c);		\
	_2sumF(__tmp, (a));	\
	(b) += (a);		\
	(a) = __tmp;		\
} while (0)
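
/*
 * Example (illustrative sketch only): the typical accumulation shape,
 * with x kept in extra precision as hi + lo and a new term t satisfying
 * the ordering requirement |hi| >= |t|:
 *
 *	double hi, lo, t;
 *	...
 *	_3sumF(hi, lo, t);	hi + lo now represents the updated sum,
 *				with the low-order bits folded into lo
 */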

/*
 * Common routine to process the arguments to nan(), nanf(), and nanl().
 */
void _scan_nan(uint32_t *__words, int __num_words, const char *__s);

/*
 * Mix 0, 1 or 2 NaNs.  First add 0 to each arg.  This normally just turns
 * signaling NaNs into quiet NaNs by setting a quiet bit.  We do this
 * because we want to never return a signaling NaN, and also because we
 * don't want the quiet bit to affect the result.  Then mix the converted
 * args using the specified operation.
 *
 * When one arg is NaN, the result is typically that arg quieted.  When both
 * args are NaNs, the result is typically the quietening of the arg whose
 * mantissa is largest after quietening.  When neither arg is NaN, the
 * result may be NaN because it is indeterminate, or finite for subsequent
 * construction of a NaN as the indeterminate 0.0L/0.0L.
 *
 * Technical complications: the result in bits after rounding to the final
 * precision might depend on the runtime precision and/or on compiler
 * optimizations, especially when different register sets are used for
 * different precisions.  Try to make the result not depend on at least the
 * runtime precision by always doing the main mixing step in long double
 * precision.  Try to reduce dependencies on optimizations by adding the
 * 0's in different precisions (unless everything is in long double
 * precision).
 */
#define	nan_mix(x, y)		(nan_mix_op((x), (y), +))
#define	nan_mix_op(x, y, op)	(((x) + 0.0L) op ((y) + 0))
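
/*
 * Example (illustrative only): a two-arg function can use nan_mix() to
 * return a quiet NaN that depends on both args:
 *
 *	if (isnan(x) || isnan(y))
 *		return (nan_mix(x, y));
 */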

#ifdef _COMPLEX_H

/*
 * C99 specifies that complex numbers have the same representation as
 * an array of two elements, where the first element is the real part
 * and the second element is the imaginary part.
 */
typedef union {
	float complex f;
	float a[2];
} float_complex;
typedef union {
	double complex f;
	double a[2];
} double_complex;
typedef union {
	long double complex f;
	long double a[2];
} long_double_complex;
#define	REALPART(z)	((z).a[0])
#define	IMAGPART(z)	((z).a[1])

528  * Inline functions that can be used to construct complex values.
529  *
530  * The C99 standard intends x+I*y to be used for this, but x+I*y is
531  * currently unusable in general since gcc introduces many overflow,
532  * underflow, sign and efficiency bugs by rewriting I*y as
533  * (0.0+I)*(y+0.0*I) and laboriously computing the full complex product.
534  * In particular, I*Inf is corrupted to NaN+I*Inf, and I*-0 is corrupted
535  * to -0.0+I*0.0.
536  *
537  * The C11 standard introduced the macros CMPLX(), CMPLXF() and CMPLXL()
538  * to construct complex values.  Compilers that conform to the C99
539  * standard require the following functions to avoid the above issues.
540  */

#ifndef CMPLXF
static __inline float complex
CMPLXF(float x, float y)
{
	float_complex z;

	REALPART(z) = x;
	IMAGPART(z) = y;
	return (z.f);
}
#endif

#ifndef CMPLX
static __inline double complex
CMPLX(double x, double y)
{
	double_complex z;

	REALPART(z) = x;
	IMAGPART(z) = y;
	return (z.f);
}
#endif

#ifndef CMPLXL
static __inline long double complex
CMPLXL(long double x, long double y)
{
	long_double_complex z;

	REALPART(z) = x;
	IMAGPART(z) = y;
	return (z.f);
}
#endif
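
/*
 * Example (illustrative only): constructing a value with an infinite
 * imaginary part.  With gcc's x + I*y this could be corrupted to
 * NaN + I*Inf, but CMPLX() preserves both parts:
 *
 *	double complex z = CMPLX(0.0, INFINITY);	creal(z) == 0.0,
 *							cimag(z) == Inf
 */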

#endif /* _COMPLEX_H */

/*
 * The rnint() family rounds to the nearest integer for a restricted
 * range of args (up to about 2**MANT_DIG).  We assume that the current
 * rounding mode is FE_TONEAREST so that this can be done efficiently.
 * Extra precision causes more problems in practice, and we only centralize
 * this here to reduce those problems, and have not solved the efficiency
 * problems.  The exp2() family uses a more delicate version of this that
 * requires extracting bits from the intermediate value, so it is not
 * centralized here and should copy any solution of the efficiency problems.
 */

static inline double
rnint(__double_t x)
{
	/*
	 * This casts to double to kill any extra precision.  This depends
	 * on the cast being applied to a double_t to avoid compiler bugs
	 * (this is a cleaner version of STRICT_ASSIGN()).  This is
	 * inefficient if there actually is extra precision, but is hard
	 * to improve on.  We use double_t in the API to minimise conversions
	 * for just calling here.  Note that we cannot easily change the
	 * magic number to the one that works directly with double_t, since
	 * the rounding precision is variable at runtime on x86 so the
	 * magic number would need to be variable.  Assuming that the
	 * rounding precision is always the default is too fragile.  This
	 * and many other complications will move when the default is
	 * changed to FP_PE.
	 */
	return ((double)(x + 0x1.8p52) - 0x1.8p52);
}
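
/*
 * Example (illustrative only): how the magic number works.  For
 * x = 2.7, x + 0x1.8p52 is rounded (FE_TONEAREST) to the 1-ulp grid
 * near 2**52, giving 0x1.8p52 + 3; subtracting 0x1.8p52 leaves 3.0:
 *
 *	rnint(2.7) == 3.0
 *	rnint(2.5) == 2.0	(ties round to even)
 */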

static inline float
rnintf(__float_t x)
{
	/*
	 * As for rnint(), except we could just call that to handle the
	 * extra precision case, usually without losing efficiency.
	 */
	return ((float)(x + 0x1.8p23F) - 0x1.8p23F);
}

#ifdef LDBL_MANT_DIG
/*
 * The complications for extra precision are smaller for rnintl() since it
 * can safely assume that the rounding precision has been increased from
 * its default to FP_PE on x86.  We don't exploit that here to get small
 * optimizations from limiting the range to double.  We just need it for
 * the magic number to work with long doubles.  ld128 callers should use
 * rnint() instead of this if possible.  ld80 callers should prefer
 * rnintl() since for amd64 this avoids swapping the register set, while
 * for i386 it makes no difference (assuming FP_PE), and for other arches
 * it makes little difference.
 */
static inline long double
rnintl(long double x)
{
	return (x + __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2 -
	    __CONCAT(0x1.8p, LDBL_MANT_DIG) / 2);
}
#endif /* LDBL_MANT_DIG */

/*
 * irint() and i64rint() give the same result as casting to their integer
 * return type provided their arg is a floating point integer.  They can
 * sometimes be more efficient because no rounding is required.
 */
#if defined(__amd64__) || defined(__i386__)
#define	irint(x)						\
    (sizeof(x) == sizeof(float) &&				\
    sizeof(__float_t) == sizeof(long double) ? irintf(x) :	\
    sizeof(x) == sizeof(double) &&				\
    sizeof(__double_t) == sizeof(long double) ? irintd(x) :	\
    sizeof(x) == sizeof(long double) ? irintl(x) : (int)(x))
#else
#define	irint(x)	((int)(x))
#endif

#define	i64rint(x)	((int64_t)(x))	/* only needed for ld128 so not opt. */

#if defined(__i386__)
static __inline int
irintf(float x)
{
	int n;

	__asm("fistl %0" : "=m" (n) : "t" (x));
	return (n);
}

static __inline int
irintd(double x)
{
	int n;

	__asm("fistl %0" : "=m" (n) : "t" (x));
	return (n);
}
#endif

#if defined(__amd64__) || defined(__i386__)
static __inline int
irintl(long double x)
{
	int n;

	__asm("fistl %0" : "=m" (n) : "t" (x));
	return (n);
}
#endif

/*
 * The following are fast floor macros for 0 <= |x| < 0x1p(N-1), where
 * N is the precision of the type of x.  These macros are used in the
 * half-cycle trigonometric functions (e.g., sinpi(x)).
 */
#define	FFLOORF(x, j0, ix) do {			\
	(j0) = (((ix) >> 23) & 0xff) - 0x7f;	\
	(ix) &= ~(0x007fffff >> (j0));		\
	SET_FLOAT_WORD((x), (ix));		\
} while (0)
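
/*
 * Example (illustrative only): FFLOORF computing floorf(2.5F) by
 * clearing the fraction bits below the binary point:
 *
 *	float x = 2.5F;
 *	int32_t ix, j0;
 *
 *	GET_FLOAT_WORD(ix, x);	now ix == 0x40200000
 *	FFLOORF(x, j0, ix);	now j0 == 1 and x == 2.0F
 */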

#define	FFLOOR(x, j0, ix, lx) do {				\
	(j0) = (((ix) >> 20) & 0x7ff) - 0x3ff;			\
	if ((j0) < 20) {					\
		(ix) &= ~(0x000fffff >> (j0));			\
		(lx) = 0;					\
	} else {						\
		(lx) &= ~((uint32_t)0xffffffff >> ((j0) - 20));	\
	}							\
	INSERT_WORDS((x), (ix), (lx));				\
} while (0)

#define	FFLOORL80(x, j0, ix, lx) do {			\
	(j0) = (ix) - 0x3fff + 1;			\
	if ((j0) < 32) {				\
		(lx) = ((lx) >> 32) << 32;		\
		(lx) &= ~((((lx) << 32)-1) >> (j0));	\
	} else {					\
		uint64_t _m;				\
		_m = (uint64_t)-1 >> (j0);		\
		if ((lx) & _m) (lx) &= ~_m;		\
	}						\
	INSERT_LDBL80_WORDS((x), (ix), (lx));		\
} while (0)

#define FFLOORL128(x, ai, ar) do {			\
	union IEEEl2bits u;				\
	uint64_t m;					\
	int e;						\
	u.e = (x);					\
	e = u.bits.exp - 16383;				\
	if (e < 48) {					\
		m = ((1llu << 49) - 1) >> (e + 1);	\
		u.bits.manh &= ~m;			\
		u.bits.manl = 0;			\
	} else {					\
		m = (uint64_t)-1 >> (e - 48);		\
		u.bits.manl &= ~m;			\
	}						\
	(ai) = u.e;					\
	(ar) = (x) - (ai);				\
} while (0)

#ifdef DEBUG
#if defined(__amd64__) || defined(__i386__)
#define	breakpoint()	asm("int $3")
#else
#include <signal.h>

#define	breakpoint()	raise(SIGTRAP)
#endif
#endif

#ifdef STRUCT_RETURN
#define	RETURNSP(rp) do {		\
	if (!(rp)->lo_set)		\
		RETURNF((rp)->hi);	\
	RETURNF((rp)->hi + (rp)->lo);	\
} while (0)
#define	RETURNSPI(rp) do {		\
	if (!(rp)->lo_set)		\
		RETURNI((rp)->hi);	\
	RETURNI((rp)->hi + (rp)->lo);	\
} while (0)
#endif

#define	SUM2P(x, y) ({			\
	const __typeof (x) __x = (x);	\
	const __typeof (y) __y = (y);	\
	__x + __y;			\
})

/* fdlibm kernel function */
int	__kernel_rem_pio2(double*,double*,int,int,int);

/* double precision kernel functions */
#ifndef INLINE_REM_PIO2
int	__ieee754_rem_pio2(double,double*);
#endif
double	__kernel_sin(double,double,int);
double	__kernel_cos(double,double);
double	__kernel_tan(double,double,int);
double	__ldexp_exp(double,int);
#ifdef _COMPLEX_H
double complex __ldexp_cexp(double complex,int);
#endif

/* float precision kernel functions */
#ifndef INLINE_REM_PIO2F
int	__ieee754_rem_pio2f(float,double*);
#endif
#ifndef INLINE_KERNEL_SINDF
float	__kernel_sindf(double);
#endif
#ifndef INLINE_KERNEL_COSDF
float	__kernel_cosdf(double);
#endif
#ifndef INLINE_KERNEL_TANDF
float	__kernel_tandf(double,int);
#endif
float	__ldexp_expf(float,int);
#ifdef _COMPLEX_H
float complex __ldexp_cexpf(float complex,int);
#endif

/* long double precision kernel functions */
long double __kernel_sinl(long double, long double, int);
long double __kernel_cosl(long double, long double);
long double __kernel_tanl(long double, long double, int);

#endif /* !_MATH_PRIVATE_H_ */