#ifndef AL_NUMERIC_H
#define AL_NUMERIC_H

#include <cstddef>
#include <cstdint>
#ifdef HAVE_INTRIN_H
#include <intrin.h>
#endif
#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#endif

#include "opthelpers.h"


inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept { return static_cast<int64_t>(n); }
inline constexpr uint64_t operator "" _u64(unsigned long long int n) noexcept { return static_cast<uint64_t>(n); }
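
/* Illustrative usage (sketch): these literal suffixes give integer constants
 * an explicit 64-bit type without a cast. For example, 0x10000000000000_i64 is
 * an int64_t constant (as used by double2int below), and 1_u64<<63 is a
 * uint64_t with only the top bit set.
 */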


constexpr inline float minf(float a, float b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline float maxf(float a, float b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline float clampf(float val, float min, float max) noexcept
{ return minf(max, maxf(min, val)); }

constexpr inline double mind(double a, double b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline double maxd(double a, double b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline double clampd(double val, double min, double max) noexcept
{ return mind(max, maxd(min, val)); }

constexpr inline unsigned int minu(unsigned int a, unsigned int b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline unsigned int maxu(unsigned int a, unsigned int b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline unsigned int clampu(unsigned int val, unsigned int min, unsigned int max) noexcept
{ return minu(max, maxu(min, val)); }

constexpr inline int mini(int a, int b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline int maxi(int a, int b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline int clampi(int val, int min, int max) noexcept
{ return mini(max, maxi(min, val)); }

constexpr inline int64_t mini64(int64_t a, int64_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline int64_t maxi64(int64_t a, int64_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline int64_t clampi64(int64_t val, int64_t min, int64_t max) noexcept
{ return mini64(max, maxi64(min, val)); }

constexpr inline uint64_t minu64(uint64_t a, uint64_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline uint64_t maxu64(uint64_t a, uint64_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline uint64_t clampu64(uint64_t val, uint64_t min, uint64_t max) noexcept
{ return minu64(max, maxu64(min, val)); }

constexpr inline size_t minz(size_t a, size_t b) noexcept
{ return ((a > b) ? b : a); }
constexpr inline size_t maxz(size_t a, size_t b) noexcept
{ return ((a > b) ? a : b); }
constexpr inline size_t clampz(size_t val, size_t min, size_t max) noexcept
{ return minz(max, maxz(min, val)); }
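
/* A couple of compile-time sanity checks for the clamp helpers (illustrative
 * only): values below the range clamp to min, values above clamp to max.
 */
static_assert(clampf(1.5f, 0.0f, 1.0f) == 1.0f, "clampf should clamp to the upper bound");
static_assert(clampi(-3, 0, 255) == 0, "clampi should clamp to the lower bound");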


constexpr inline float lerp(float val1, float val2, float mu) noexcept
{ return val1 + (val2-val1)*mu; }
constexpr inline float cubic(float val1, float val2, float val3, float val4, float mu) noexcept
{
    const float mu2{mu*mu}, mu3{mu2*mu};
    const float a0{-0.5f*mu3 +       mu2 + -0.5f*mu};
    const float a1{ 1.5f*mu3 + -2.5f*mu2            + 1.0f};
    const float a2{-1.5f*mu3 +  2.0f*mu2 +  0.5f*mu};
    const float a3{ 0.5f*mu3 + -0.5f*mu2};
    return val1*a0 + val2*a1 + val3*a2 + val4*a3;
}
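
/* Illustrative spot-checks: lerp blends linearly between its endpoints, and
 * the cubic (Catmull-Rom-style) interpolator reproduces the second point at
 * mu=0 and the third at mu=1.
 */
static_assert(lerp(0.0f, 10.0f, 0.25f) == 2.5f, "lerp(0,10,0.25) should be 2.5");
static_assert(cubic(1.0f, 2.0f, 3.0f, 4.0f, 0.0f) == 2.0f, "cubic at mu=0 should return val2");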


/** Find the next power of 2 at or above the given value (a power-of-2 input is returned unchanged; 0 yields 1). */
inline uint32_t NextPowerOf2(uint32_t value) noexcept
{
    if(value > 0)
    {
        value--;
        value |= value>>1;
        value |= value>>2;
        value |= value>>4;
        value |= value>>8;
        value |= value>>16;
    }
    return value+1;
}
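
/* For example: NextPowerOf2(1000) smears the set bits downward (999 -> 1023)
 * and adds 1 to give 1024, while NextPowerOf2(1024) returns 1024 as-is.
 */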

/** Round up a value to the next multiple of r. */
inline size_t RoundUp(size_t value, size_t r) noexcept
{
    value += r-1;
    return value - (value%r);
}
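
/* For example: RoundUp(10, 4) gives 12 and RoundUp(12, 4) stays 12; the value
 * is bumped by r-1 and the remainder modulo r is then subtracted off.
 */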


/**
 * Fast float-to-int conversion. No particular rounding mode is assumed; the
 * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
 * change it on its own threads. On some systems, a truncating conversion may
 * always be the fastest method.
 */
inline int fastf2i(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvt_ss2si(_mm_set_ss(f));

#elif defined(_MSC_VER) && defined(_M_IX86_FP)

    int i;
    __asm fld f
    __asm fistp i
    return i;

#elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))

    int i;
#ifdef __SSE_MATH__
    __asm__("cvtss2si %1, %0" : "=r"(i) : "x"(f));
#else
    __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
#endif
    return i;

#else

    return static_cast<int>(f);
#endif
}
inline unsigned int fastf2u(float f) noexcept
{ return static_cast<unsigned int>(fastf2i(f)); }
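
/* Illustrative consequence of the above: with the default round-to-nearest
 * mode, fastf2i(2.5f) typically gives 2 (ties to even) and fastf2i(3.5f) gives
 * 4, while the plain-cast fallback path truncates both toward zero. Don't
 * depend on a particular rounding behavior from these.
 */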

/** Converts float-to-int using standard behavior (truncation). */
inline int float2int(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvtt_ss2si(_mm_set_ss(f));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
        && !defined(__SSE_MATH__))
    int sign, shift, mant;
    union {
        float f;
        int i;
    } conv;

    conv.f = f;
    sign = (conv.i>>31) | 1;
    shift = ((conv.i>>23)&0xff) - (127+23);

    /* Over/underflow */
    if UNLIKELY(shift >= 31 || shift < -23)
        return 0;

    mant = (conv.i&0x7fffff) | 0x800000;
    if LIKELY(shift < 0)
        return (mant >> -shift) * sign;
    return (mant << shift) * sign;

#else

    return static_cast<int>(f);
#endif
}
inline unsigned int float2uint(float f) noexcept
{ return static_cast<unsigned int>(float2int(f)); }
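
/* For example: float2int(2.9f) gives 2 and float2int(-2.9f) gives -2, matching
 * a plain static_cast<int>. The bit-twiddling path presumably exists to avoid
 * the cost of switching the x87 rounding mode on non-SSE builds.
 */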

/** Converts double-to-int using standard behavior (truncation). */
inline int double2int(double d) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    return _mm_cvttsd_si32(_mm_set_sd(d));

#elif (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP < 2) \
    || ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
        && !defined(__SSE2_MATH__))
    int sign, shift;
    int64_t mant;
    union {
        double d;
        int64_t i64;
    } conv;

    conv.d = d;
    sign = (conv.i64 >> 63) | 1;
    shift = ((conv.i64 >> 52) & 0x7ff) - (1023 + 52);

    /* Over/underflow */
    if UNLIKELY(shift >= 63 || shift < -52)
        return 0;

    mant = (conv.i64 & 0xfffffffffffff_i64) | 0x10000000000000_i64;
    if LIKELY(shift < 0)
        return (int)(mant >> -shift) * sign;
    return (int)(mant << shift) * sign;

#else

    return static_cast<int>(d);
#endif
}
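
/* For example: double2int(2.75) gives 2 and double2int(-2.75) gives -2. As
 * with float2int, the result is only meaningful for values representable in an
 * int.
 */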


/**
 * Rounds a float to the nearest integral value, according to the current
 * rounding mode. This is essentially an inlined version of rintf, although it
 * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
 */
inline float fast_roundf(float f) noexcept
{
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) \
    && !defined(__SSE_MATH__)

    float out;
    __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));
    return out;

#elif (defined(__GNUC__) || defined(__clang__)) && defined(__aarch64__)

    float out;
    __asm__ volatile("frintx %s0, %s1" : "=w"(out) : "w"(f));
    return out;

#else

    /* Integral limit, where sub-integral precision is not available for
     * floats.
     */
    static const float ilim[2]{
         8388608.0f /*  0x1.0p+23 */,
        -8388608.0f /* -0x1.0p+23 */
    };
    unsigned int sign, expo;
    union {
        float f;
        unsigned int i;
    } conv;

    conv.f = f;
    sign = (conv.i>>31)&0x01;
    expo = (conv.i>>23)&0xff;

    if UNLIKELY(expo >= 150/*+23*/)
    {
        /* An exponent (base-2) of 23 or higher is incapable of sub-integral
         * precision, so it's already an integral value. We don't need to worry
         * about infinity or NaN here.
         */
        return f;
    }
    /* Adding the integral limit to the value (with a matching sign) forces a
     * result that has no sub-integral precision, and is consequently forced to
     * round to an integral value. Removing the integral limit then restores
     * the initial value rounded to the integral. The compiler should not
     * optimize this out because of non-associative rules on floating-point
     * math (as long as you don't use -fassociative-math,
     * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case this
     * may break).
     */
    f += ilim[sign];
    return f - ilim[sign];
#endif
}
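
/* Illustrative: with the default round-to-nearest mode, fast_roundf(2.5f)
 * gives 2.0f and fast_roundf(3.5f) gives 4.0f (ties to even), and
 * fast_roundf(-0.25f) may give +0.0f rather than -0.0f, per the note above.
 */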

#endif /* AL_NUMERIC_H */