1 //===-- lib/fp_lib.h - Floating-point utilities -------------------*- C -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a configuration header for soft-float routines in compiler-rt.
10 // This file does not provide any part of the compiler-rt interface, but defines
11 // many useful constants and utility routines that are used in the
12 // implementation of the soft-float routines in compiler-rt.
13 //
14 // Assumes that float, double and long double correspond to the IEEE-754
15 // binary32, binary64 and binary 128 types, respectively, and that integer
16 // endianness matches floating point endianness on the target platform.
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #ifndef FP_LIB_HEADER
21 #define FP_LIB_HEADER
22 
23 #include "int_lib.h"
24 #include "int_math.h"
25 #include <limits.h>
26 #include <stdbool.h>
27 #include <stdint.h>
28 
29 // x86_64 FreeBSD prior v9.3 define fixed-width types incorrectly in
30 // 32-bit mode.
31 #if defined(__FreeBSD__) && defined(__i386__)
32 #include <sys/param.h>
33 #if __FreeBSD_version < 903000 // v9.3
34 #define uint64_t unsigned long long
35 #define int64_t long long
36 #undef UINT64_C
37 #define UINT64_C(c) (c##ULL)
38 #endif
39 #endif
40 
#if defined SINGLE_PRECISION

// IEEE-754 binary32 configuration: 32-bit representation, 16-bit half words,
// and a 64-bit double-width type for wide products.
typedef uint16_t half_rep_t;
typedef uint32_t rep_t;
typedef uint64_t twice_rep_t;
typedef int32_t srep_t;
typedef float fp_t;
#define HALF_REP_C UINT16_C
#define REP_C UINT32_C
// Number of explicitly stored significand (mantissa) bits in binary32.
#define significandBits 23
51 
rep_clz(rep_t a)52 static __inline int rep_clz(rep_t a) { return clzsi(a); }
53 
54 // 32x32 --> 64 bit multiply
wideMultiply(rep_t a,rep_t b,rep_t * hi,rep_t * lo)55 static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
56   const uint64_t product = (uint64_t)a * b;
57   *hi = product >> 32;
58   *lo = product;
59 }
// Soft-float single-precision addition, implemented elsewhere in compiler-rt.
COMPILER_RT_ABI fp_t __addsf3(fp_t a, fp_t b);

#elif defined DOUBLE_PRECISION

// IEEE-754 binary64 configuration: 64-bit representation split into 32-bit
// half words by the wide multiply below.
typedef uint32_t half_rep_t;
typedef uint64_t rep_t;
typedef int64_t srep_t;
typedef double fp_t;
#define HALF_REP_C UINT32_C
#define REP_C UINT64_C
// Number of explicitly stored significand (mantissa) bits in binary64.
#define significandBits 52
71 
// Count leading zero bits of a (nonzero) 64-bit representation.
static __inline int rep_clz(rep_t a) {
#if defined __LP64__
  return __builtin_clzl(a);
#else
  // No 64-bit clz is assumed to be available; dispatch on whichever 32-bit
  // half contains the leading set bit.
  const rep_t highHalf = a >> 32;
  if (highHalf != 0)
    return clzsi(highHalf);
  return 32 + clzsi(a & REP_C(0xffffffff));
#endif
}
82 
83 #define loWord(a) (a & 0xffffffffU)
84 #define hiWord(a) (a >> 32)
85 
86 // 64x64 -> 128 wide multiply for platforms that don't have such an operation;
87 // many 64-bit platforms have this operation, but they tend to have hardware
88 // floating-point, so we don't bother with a special case for them here.
wideMultiply(rep_t a,rep_t b,rep_t * hi,rep_t * lo)89 static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
90   // Each of the component 32x32 -> 64 products
91   const uint64_t plolo = loWord(a) * loWord(b);
92   const uint64_t plohi = loWord(a) * hiWord(b);
93   const uint64_t philo = hiWord(a) * loWord(b);
94   const uint64_t phihi = hiWord(a) * hiWord(b);
95   // Sum terms that contribute to lo in a way that allows us to get the carry
96   const uint64_t r0 = loWord(plolo);
97   const uint64_t r1 = hiWord(plolo) + loWord(plohi) + loWord(philo);
98   *lo = r0 + (r1 << 32);
99   // Sum terms contributing to hi with the carry from lo
100   *hi = hiWord(plohi) + hiWord(philo) + hiWord(r1) + phihi;
101 }
102 #undef loWord
103 #undef hiWord
104 
105 COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b);
106 
#elif defined QUAD_PRECISION
// The 128-bit path requires both an IEEE binary128 long double (113-bit
// significand including the implicit bit) and compiler __int128 support.
#if __LDBL_MANT_DIG__ == 113 && defined(__SIZEOF_INT128__)
#define CRT_LDBL_128BIT
typedef uint64_t half_rep_t;
typedef __uint128_t rep_t;
typedef __int128_t srep_t;
typedef long double fp_t;
#define HALF_REP_C UINT64_C
#define REP_C (__uint128_t)
// Note: Since there is no explicit way to tell the compiler that a constant
// is a 128-bit integer, we let the constant be cast to a 128-bit integer.
#define significandBits 112
119 
// Count leading zero bits of a (nonzero) 128-bit representation.
static __inline int rep_clz(rep_t a) {
  // Split into the numerically high and low 64-bit halves with shifts; this
  // is equivalent to the endianness-aware union overlay (the file assumes
  // integer endianness matches floating-point endianness) and works the same
  // on either byte order.
  const uint64_t high = (uint64_t)(a >> 64);
  const uint64_t low = (uint64_t)a;

  // __builtin_clzll requires a nonzero argument, so count within whichever
  // half holds the leading set bit.
  if (high != 0)
    return __builtin_clzll(high);
  return 64 + __builtin_clzll(low);
}
146 
// Masks and accessors for the four 32-bit "digits" of a 128-bit value.
// Word_1 is the most significant digit, Word_4 the least significant.
#define Word_LoMask UINT64_C(0x00000000ffffffff)
#define Word_HiMask UINT64_C(0xffffffff00000000)
#define Word_FullMask UINT64_C(0xffffffffffffffff)
#define Word_1(a) (uint64_t)((a >> 96) & Word_LoMask)
#define Word_2(a) (uint64_t)((a >> 64) & Word_LoMask)
#define Word_3(a) (uint64_t)((a >> 32) & Word_LoMask)
#define Word_4(a) (uint64_t)(a & Word_LoMask)

// 128x128 -> 256 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
//
// Schoolbook multiplication on 32-bit digits: productIJ = Word_I(a) * Word_J(b)
// are the sixteen 32x32 -> 64 partial products, summed below by the digit
// position of the result they contribute to.
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {

  const uint64_t product11 = Word_1(a) * Word_1(b);
  const uint64_t product12 = Word_1(a) * Word_2(b);
  const uint64_t product13 = Word_1(a) * Word_3(b);
  const uint64_t product14 = Word_1(a) * Word_4(b);
  const uint64_t product21 = Word_2(a) * Word_1(b);
  const uint64_t product22 = Word_2(a) * Word_2(b);
  const uint64_t product23 = Word_2(a) * Word_3(b);
  const uint64_t product24 = Word_2(a) * Word_4(b);
  const uint64_t product31 = Word_3(a) * Word_1(b);
  const uint64_t product32 = Word_3(a) * Word_2(b);
  const uint64_t product33 = Word_3(a) * Word_3(b);
  const uint64_t product34 = Word_3(a) * Word_4(b);
  const uint64_t product41 = Word_4(a) * Word_1(b);
  const uint64_t product42 = Word_4(a) * Word_2(b);
  const uint64_t product43 = Word_4(a) * Word_3(b);
  const uint64_t product44 = Word_4(a) * Word_4(b);

  // sumN collects the partial products whose low digits align at 32-bit
  // position N of the 256-bit result (sum0 is the least significant column).
  // Each sum is accumulated in 128 bits so the column totals cannot overflow.
  const __uint128_t sum0 = (__uint128_t)product44;
  const __uint128_t sum1 = (__uint128_t)product34 + (__uint128_t)product43;
  const __uint128_t sum2 =
      (__uint128_t)product24 + (__uint128_t)product33 + (__uint128_t)product42;
  const __uint128_t sum3 = (__uint128_t)product14 + (__uint128_t)product23 +
                           (__uint128_t)product32 + (__uint128_t)product41;
  const __uint128_t sum4 =
      (__uint128_t)product13 + (__uint128_t)product22 + (__uint128_t)product31;
  const __uint128_t sum5 = (__uint128_t)product12 + (__uint128_t)product21;
  const __uint128_t sum6 = (__uint128_t)product11;

  // Fold the column sums into the low (r0, r1) and high halves of the result,
  // propagating carries between the 128-bit pieces.
  const __uint128_t r0 = (sum0 & Word_FullMask) + ((sum1 & Word_LoMask) << 32);
  const __uint128_t r1 = (sum0 >> 64) + ((sum1 >> 32) & Word_FullMask) +
                         (sum2 & Word_FullMask) + ((sum3 << 32) & Word_HiMask);

  *lo = r0 + (r1 << 64);
  *hi = (r1 >> 64) + (sum1 >> 96) + (sum2 >> 64) + (sum3 >> 32) + sum4 +
        (sum5 << 32) + (sum6 << 64);
}
#undef Word_1
#undef Word_2
#undef Word_3
#undef Word_4
#undef Word_HiMask
#undef Word_LoMask
#undef Word_FullMask
#endif // __LDBL_MANT_DIG__ == 113 && __SIZEOF_INT128__
#else
#error SINGLE_PRECISION, DOUBLE_PRECISION or QUAD_PRECISION must be defined.
#endif
207 
#if defined(SINGLE_PRECISION) || defined(DOUBLE_PRECISION) ||                  \
    defined(CRT_LDBL_128BIT)
// Derived constants for the IEEE-754 encoding selected above.
#define typeWidth (sizeof(rep_t) * CHAR_BIT)
#define exponentBits (typeWidth - significandBits - 1)
#define maxExponent ((1 << exponentBits) - 1)
#define exponentBias (maxExponent >> 1)

// Bit masks carving the representation into sign, exponent, and significand
// fields, plus the representations of a few special values.
#define implicitBit (REP_C(1) << significandBits)
#define significandMask (implicitBit - 1U)
#define signBit (REP_C(1) << (significandBits + exponentBits))
#define absMask (signBit - 1U)
#define exponentMask (absMask ^ significandMask)
#define oneRep ((rep_t)exponentBias << significandBits)
#define infRep exponentMask
#define quietBit (implicitBit >> 1)
#define qnanRep (exponentMask | quietBit)
224 
toRep(fp_t x)225 static __inline rep_t toRep(fp_t x) {
226   const union {
227     fp_t f;
228     rep_t i;
229   } rep = {.f = x};
230   return rep.i;
231 }
232 
fromRep(rep_t x)233 static __inline fp_t fromRep(rep_t x) {
234   const union {
235     fp_t f;
236     rep_t i;
237   } rep = {.i = x};
238   return rep.f;
239 }
240 
// Shift a (nonzero) subnormal significand left until its leading set bit
// lands in the implicit-bit position, and return the corresponding exponent
// adjustment (1 - shift, which is <= 0 for genuine subnormals).
static __inline int normalize(rep_t *significand) {
  const int shift = rep_clz(*significand) - rep_clz(implicitBit);
  *significand = *significand << shift;
  return 1 - shift;
}
246 
// Left-shift the double-wide quantity {*hi, *lo} by count bits.
// count must be in [0, typeWidth). The count == 0 case is handled explicitly
// because `*lo >> (typeWidth - 0)` would shift by the full width of the type,
// which is undefined behavior in C (C11 6.5.7p3).
static __inline void wideLeftShift(rep_t *hi, rep_t *lo, int count) {
  if (count == 0)
    return;
  *hi = *hi << count | *lo >> (typeWidth - count);
  *lo = *lo << count;
}
251 
// Right-shift the double-wide quantity {*hi, *lo} by count bits, ORing any
// bits shifted out into the least-significant bit of the result (the
// "sticky" bit) so that later rounding can still observe them.
//
// The boundary counts 0 and typeWidth are handled as separate cases: the
// general formulas would shift a value by its full type width, which is
// undefined behavior in C (C11 6.5.7p3). Behavior for all other counts is
// unchanged from the straightforward three-way split.
static __inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo,
                                              unsigned int count) {
  if (count == 0) {
    // Nothing shifts out, so there is no sticky bit and no change.
  } else if (count < typeWidth) {
    const bool sticky = (*lo << (typeWidth - count)) != 0;
    *lo = *hi << (typeWidth - count) | *lo >> count | sticky;
    *hi = *hi >> count;
  } else if (count == typeWidth) {
    // The entire low word shifts out; it contributes only to the sticky bit.
    const bool sticky = *lo != 0;
    *lo = *hi | sticky;
    *hi = 0;
  } else if (count < 2 * typeWidth) {
    const bool sticky = (*hi << (2 * typeWidth - count) | *lo) != 0;
    *lo = *hi >> (count - typeWidth) | sticky;
    *hi = 0;
  } else {
    // Everything shifts out; the result is purely the sticky bit.
    const bool sticky = (*hi | *lo) != 0;
    *lo = sticky;
    *hi = 0;
  }
}
268 
// Implements logb methods (logb, logbf, logbl) for IEEE-754. This avoids
// pulling in a libm dependency from compiler-rt, but is not meant to replace
// it (i.e. code calling logb() should get the one from libm, not this), hence
// the __compiler_rt prefix.
//
// Returns the unbiased exponent of x as an fp_t value.
static __inline fp_t __compiler_rt_logbX(fp_t x) {
  rep_t rep = toRep(x);
  int exp = (rep & exponentMask) >> significandBits;

  // Abnormal cases:
  // 1) +/- inf returns +inf; NaN returns NaN
  // 2) 0.0 returns -inf
  if (exp == maxExponent) {
    // (x != x) is true exactly for NaN, so this branch covers NaN and +inf.
    if (((rep & signBit) == 0) || (x != x)) {
      return x; // NaN or +inf: return x
    } else {
      return -x; // -inf: return -x
    }
  } else if (x == 0.0) {
    // 0.0: return -inf
    return fromRep(infRep | signBit);
  }

  if (exp != 0) {
    // Normal number
    return exp - exponentBias; // Unbias exponent
  } else {
    // Subnormal number; normalize and repeat.
    // Drop the sign bit first so normalize() sees only the magnitude bits.
    rep &= absMask;
    const int shift = 1 - normalize(&rep);
    exp = (rep & exponentMask) >> significandBits;
    return exp - exponentBias - shift; // Unbias exponent
  }
}
302 
// Avoid using scalbn from libm. Unlike libc/libm scalbn, this function never
// sets errno on underflow/overflow.
//
// Computes x * 2**y (scalbn semantics) for finite nonzero x; zeros, NaNs,
// and infinities are returned unchanged.
static __inline fp_t __compiler_rt_scalbnX(fp_t x, int y) {
  const rep_t rep = toRep(x);
  int exp = (rep & exponentMask) >> significandBits;

  if (x == 0.0 || exp == maxExponent)
    return x; // +/- 0.0, NaN, or inf: return x

  // Normalize subnormal input.
  rep_t sig = rep & significandMask;
  if (exp == 0) {
    exp += normalize(&sig);
    sig &= ~implicitBit; // clear the implicit bit again
  }

  // Checked add avoids signed-overflow UB when y is near INT_MAX/INT_MIN.
  if (__builtin_sadd_overflow(exp, y, &exp)) {
    // Saturate the exponent, which will guarantee an underflow/overflow below.
    exp = (y >= 0) ? INT_MAX : INT_MIN;
  }

  // Return this value: [+/-] 1.sig * 2 ** (exp - exponentBias).
  const rep_t sign = rep & signBit;
  if (exp >= maxExponent) {
    // Overflow, which could produce infinity or the largest-magnitude value,
    // depending on the rounding mode.
    return fromRep(sign | ((rep_t)(maxExponent - 1) << significandBits)) * 2.0f;
  } else if (exp <= 0) {
    // Subnormal or underflow. Use floating-point multiply to handle truncation
    // correctly.
    fp_t tmp = fromRep(sign | (REP_C(1) << significandBits) | sig);
    exp += exponentBias - 1;
    if (exp < 1)
      exp = 1;
    tmp *= fromRep((rep_t)exp << significandBits);
    return tmp;
  } else
    return fromRep(sign | ((rep_t)exp << significandBits) | sig);
}
342 
343 // Avoid using fmax from libm.
__compiler_rt_fmaxX(fp_t x,fp_t y)344 static __inline fp_t __compiler_rt_fmaxX(fp_t x, fp_t y) {
345   // If either argument is NaN, return the other argument. If both are NaN,
346   // arbitrarily return the second one. Otherwise, if both arguments are +/-0,
347   // arbitrarily return the first one.
348   return (crt_isnan(x) || x < y) ? y : x;
349 }
350 
351 #endif
352 
#if defined(SINGLE_PRECISION)

// logbf without a libm dependency; thin wrapper over the generic
// __compiler_rt_logbX above.
static __inline fp_t __compiler_rt_logbf(fp_t x) {
  return __compiler_rt_logbX(x);
}
// scalbnf without a libm dependency; wrapper over __compiler_rt_scalbnX.
static __inline fp_t __compiler_rt_scalbnf(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
// fmaxf without a libm dependency.
static __inline fp_t __compiler_rt_fmaxf(fp_t x, fp_t y) {
#if defined(__aarch64__)
  // Use __builtin_fmaxf which turns into an fmaxnm instruction on AArch64.
  return __builtin_fmaxf(x, y);
#else
  // __builtin_fmaxf frequently turns into a libm call, so inline the function.
  return __compiler_rt_fmaxX(x, y);
#endif
}

370 
#elif defined(DOUBLE_PRECISION)

// logb without a libm dependency; thin wrapper over the generic
// __compiler_rt_logbX above.
static __inline fp_t __compiler_rt_logb(fp_t x) {
  return __compiler_rt_logbX(x);
}
// scalbn without a libm dependency; wrapper over __compiler_rt_scalbnX.
static __inline fp_t __compiler_rt_scalbn(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
// fmax without a libm dependency.
static __inline fp_t __compiler_rt_fmax(fp_t x, fp_t y) {
#if defined(__aarch64__)
  // Use __builtin_fmax which turns into an fmaxnm instruction on AArch64.
  return __builtin_fmax(x, y);
#else
  // __builtin_fmax frequently turns into a libm call, so inline the function.
  return __compiler_rt_fmaxX(x, y);
#endif
}

388 
#elif defined(QUAD_PRECISION)

#if defined(CRT_LDBL_128BIT)
// long double is IEEE binary128 here, so the generic implementations above
// apply directly.
static __inline fp_t __compiler_rt_logbl(fp_t x) {
  return __compiler_rt_logbX(x);
}
static __inline fp_t __compiler_rt_scalbnl(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline fp_t __compiler_rt_fmaxl(fp_t x, fp_t y) {
  return __compiler_rt_fmaxX(x, y);
}
#else
// The generic implementation only works for ieee754 floating point. For other
// floating point types, continue to rely on the libm implementation for now.
static __inline long double __compiler_rt_logbl(long double x) {
  return crt_logbl(x);
}
static __inline long double __compiler_rt_scalbnl(long double x, int y) {
  return crt_scalbnl(x, y);
}
static __inline long double __compiler_rt_fmaxl(long double x, long double y) {
  return crt_fmaxl(x, y);
}
#endif // CRT_LDBL_128BIT

#endif // *_PRECISION
416 
417 #endif // FP_LIB_HEADER
418