// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Pedro Gonnet (pedro.gonnet@gmail.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_
#define THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_

namespace Eigen {

namespace internal {
// Disable the code for older versions of gcc that don't support many of the
// required AVX512 intrinsics.
#if EIGEN_GNUC_AT_LEAST(5, 3)

#define _EIGEN_DECLARE_CONST_Packet16f(NAME, X) \
  const Packet16f p16f_##NAME = pset1<Packet16f>(X)

#define _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(NAME, X) \
  const Packet16f p16f_##NAME = _mm512_castsi512_ps(pset1<Packet16i>(X))

#define _EIGEN_DECLARE_CONST_Packet8d(NAME, X) \
  const Packet8d p8d_##NAME = pset1<Packet8d>(X)

#define _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(NAME, X) \
  const Packet8d p8d_##NAME = _mm512_castsi512_pd(_mm512_set1_epi64(X))


// Natural logarithm
// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant
// C = log(2) and m is in the range [sqrt(1/2),sqrt(2)). In this range, the
// logarithm can be easily approximated by a polynomial centered on m = 1 for
// stability.
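//
// For reference, the same range reduction can be written in scalar form as a
// rough sketch (illustration only; the helper name below is hypothetical and
// not part of Eigen's API):
//
//   float plog_scalar_sketch(float x) {
//     int e;
//     float m = std::frexp(x, &e);            // x = m * 2^e with m in [0.5, 1)
//     if (m < 0.707106781186547524f) {        // m < sqrt(1/2)
//       m += m;                               // move m into [sqrt(1/2), sqrt(2))
//       --e;
//     }
//     float t = m - 1.0f;                     // center on 0 for the polynomial
//     // The vectorized code below approximates log(1+t) by
//     // t - t^2/2 + t^3*P(t) with a degree-8 polynomial P.
//     return std::log1p(t) + float(e) * 0.693147180559945f;  // log(m) + e*log(2)
//   }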
#if defined(EIGEN_VECTORIZE_AVX512DQ)
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
plog<Packet16f>(const Packet16f& _x) {
  Packet16f x = _x;
  _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f);
  _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f);
  _EIGEN_DECLARE_CONST_Packet16f(126f, 126.0f);

  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inv_mant_mask, ~0x7f800000);

  // The smallest positive normalized (non-denormal) float.
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(min_norm_pos, 0x00800000);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(minus_inf, 0xff800000);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(pos_inf, 0x7f800000);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);

  // Polynomial coefficients.
  _EIGEN_DECLARE_CONST_Packet16f(cephes_SQRTHF, 0.707106781186547524f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p0, 7.0376836292E-2f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p1, -1.1514610310E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p2, 1.1676998740E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p3, -1.2420140846E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p4, +1.4249322787E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p5, -1.6668057665E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p6, +2.0000714765E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p7, -2.4999993993E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p8, +3.3333331174E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q1, -2.12194440e-4f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q2, 0.693359375f);

  // invalid_mask is set to true when x is negative or NaN (the comparison is
  // unordered, so NaN inputs compare true as well).
  __mmask16 invalid_mask = _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_NGE_UQ);
  __mmask16 iszero_mask = _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_EQ_OQ);

  // Truncate input values to the minimum positive normal.
  x = pmax(x, p16f_min_norm_pos);

  // Extract the shifted exponents.
  Packet16f emm0 = _mm512_cvtepi32_ps(_mm512_srli_epi32(_mm512_castps_si512(x), 23));
  Packet16f e = _mm512_sub_ps(emm0, p16f_126f);

  // Set the exponents to -1, i.e. map x into the range [0.5,1).
  x = _mm512_and_ps(x, p16f_inv_mant_mask);
  x = _mm512_or_ps(x, p16f_half);

  // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
  // and shift by -1. The values are then centered around 0, which improves
  // the stability of the polynomial evaluation.
  //   if( x < SQRTHF ) {
  //     e -= 1;
  //     x = x + x - 1.0;
  //   } else { x = x - 1.0; }
  __mmask16 mask = _mm512_cmp_ps_mask(x, p16f_cephes_SQRTHF, _CMP_LT_OQ);
  Packet16f tmp = _mm512_mask_blend_ps(mask, _mm512_setzero_ps(), x);
  x = psub(x, p16f_1);
  e = psub(e, _mm512_mask_blend_ps(mask, _mm512_setzero_ps(), p16f_1));
  x = padd(x, tmp);

  Packet16f x2 = pmul(x, x);
  Packet16f x3 = pmul(x2, x);

  // Evaluate the polynomial approximant of degree 8 in three parts, probably
  // to improve instruction-level parallelism.
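  // Concretely, with P(x) = p0*x^8 + p1*x^7 + ... + p8, the code below computes
  //   y  = (p0*x + p1)*x + p2
  //   y1 = (p3*x + p4)*x + p5
  //   y2 = (p6*x + p7)*x + p8
  // and recombines them as P(x) = (y*x^3 + y1)*x^3 + y2, finally multiplying
  // by x^3 so that y = x^3 * P(x).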
  Packet16f y, y1, y2;
  y = pmadd(p16f_cephes_log_p0, x, p16f_cephes_log_p1);
  y1 = pmadd(p16f_cephes_log_p3, x, p16f_cephes_log_p4);
  y2 = pmadd(p16f_cephes_log_p6, x, p16f_cephes_log_p7);
  y = pmadd(y, x, p16f_cephes_log_p2);
  y1 = pmadd(y1, x, p16f_cephes_log_p5);
  y2 = pmadd(y2, x, p16f_cephes_log_p8);
  y = pmadd(y, x3, y1);
  y = pmadd(y, x3, y2);
  y = pmul(y, x3);

  // Add e*log(2) (split into q1 + q2 for accuracy) and the -x^2/2 correction
  // term back to the result of the interpolation.
  y1 = pmul(e, p16f_cephes_log_q1);
  tmp = pmul(x2, p16f_half);
  y = padd(y, y1);
  x = psub(x, tmp);
  y2 = pmul(e, p16f_cephes_log_q2);
  x = padd(x, y);
  x = padd(x, y2);

  __mmask16 pos_inf_mask = _mm512_cmp_ps_mask(_x, p16f_pos_inf, _CMP_EQ_OQ);
  // Filter out invalid inputs, i.e.:
  //  - negative args will be NaN,
  //  - 0 will be -INF,
  //  - +INF will be +INF.
  return _mm512_mask_blend_ps(iszero_mask,
            _mm512_mask_blend_ps(invalid_mask,
              _mm512_mask_blend_ps(pos_inf_mask, x, p16f_pos_inf),
              p16f_nan),
            p16f_minus_inf);
}

#endif

// Exponential function. Works by writing "x = m*log(2) + r" where
// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
// "exp(x) = 2^m*exp(r)", where "r" is in the range [-log(2)/2, log(2)/2].
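//
// A rough scalar sketch of this reduction (illustration only; the helper name
// is hypothetical and not part of Eigen's API):
//
//   float pexp_scalar_sketch(float x) {
//     float m = std::floor(x * 1.44269504088896341f + 0.5f);  // x/log(2), rounded
//     float r = x - m * 0.693147180559945f;   // remainder in [-log(2)/2, log(2)/2]
//     // The vectorized code approximates exp(r) with a low-order polynomial in r;
//     // std::ldexp corresponds to the 2^m scaling built from the exponent bits below.
//     return std::ldexp(std::exp(r), static_cast<int>(m));
//   }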
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
pexp<Packet16f>(const Packet16f& _x) {
  _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f);
  _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f);
  _EIGEN_DECLARE_CONST_Packet16f(127, 127.0f);

  _EIGEN_DECLARE_CONST_Packet16f(exp_hi, 88.3762626647950f);
  _EIGEN_DECLARE_CONST_Packet16f(exp_lo, -88.3762626647949f);

  _EIGEN_DECLARE_CONST_Packet16f(cephes_LOG2EF, 1.44269504088896341f);

  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p0, 1.9875691500E-4f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p1, 1.3981999507E-3f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p2, 8.3334519073E-3f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p3, 4.1665795894E-2f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p4, 1.6666665459E-1f);
  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p5, 5.0000001201E-1f);

  // Clamp x.
  Packet16f x = pmax(pmin(_x, p16f_exp_hi), p16f_exp_lo);

  // Express exp(x) as exp(m*ln(2) + r), start by extracting
  // m = floor(x/ln(2) + 0.5).
  Packet16f m = _mm512_floor_ps(pmadd(x, p16f_cephes_LOG2EF, p16f_half));

  // Get r = x - m*ln(2). Note that we can do this without losing more than one
  // ulp of precision due to the FMA instruction.
  _EIGEN_DECLARE_CONST_Packet16f(nln2, -0.6931471805599453f);
  Packet16f r = _mm512_fmadd_ps(m, p16f_nln2, x);
  Packet16f r2 = pmul(r, r);

  // TODO(gonnet): Split into odd/even polynomials and try to exploit
  //               instruction-level parallelism.
  Packet16f y = p16f_cephes_exp_p0;
  y = pmadd(y, r, p16f_cephes_exp_p1);
  y = pmadd(y, r, p16f_cephes_exp_p2);
  y = pmadd(y, r, p16f_cephes_exp_p3);
  y = pmadd(y, r, p16f_cephes_exp_p4);
  y = pmadd(y, r, p16f_cephes_exp_p5);
  y = pmadd(y, r2, r);
  y = padd(y, p16f_1);

  // Build emm0 = 2^m by shifting the biased exponent (m + 127) into the
  // exponent field of an IEEE754 single-precision float.
  Packet16i emm0 = _mm512_cvttps_epi32(padd(m, p16f_127));
  emm0 = _mm512_slli_epi32(emm0, 23);

  // Return 2^m * exp(r).
  return pmax(pmul(y, _mm512_castsi512_ps(emm0)), _x);
}

/*template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
pexp<Packet8d>(const Packet8d& _x) {
  Packet8d x = _x;

  _EIGEN_DECLARE_CONST_Packet8d(1, 1.0);
  _EIGEN_DECLARE_CONST_Packet8d(2, 2.0);

  _EIGEN_DECLARE_CONST_Packet8d(exp_hi, 709.437);
  _EIGEN_DECLARE_CONST_Packet8d(exp_lo, -709.436139303);

  _EIGEN_DECLARE_CONST_Packet8d(cephes_LOG2EF, 1.4426950408889634073599);

  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p0, 1.26177193074810590878e-4);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p1, 3.02994407707441961300e-2);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p2, 9.99999999999999999910e-1);

  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q0, 3.00198505138664455042e-6);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q1, 2.52448340349684104192e-3);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q2, 2.27265548208155028766e-1);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q3, 2.00000000000000000009e0);

  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C1, 0.693145751953125);
  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C2, 1.42860682030941723212e-6);

  // clamp x
  x = pmax(pmin(x, p8d_exp_hi), p8d_exp_lo);

  // Express exp(x) as exp(g + n*log(2)).
  const Packet8d n =
      _mm512_mul_round_pd(p8d_cephes_LOG2EF, x, _MM_FROUND_TO_NEAREST_INT);

  // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
  // n*log(2) out in two steps, i.e. n*C1 + n*C2 with C1 + C2 = log(2), to get
  // the last digits right.
  const Packet8d nC1 = pmul(n, p8d_cephes_exp_C1);
  const Packet8d nC2 = pmul(n, p8d_cephes_exp_C2);
  x = psub(x, nC1);
  x = psub(x, nC2);

  const Packet8d x2 = pmul(x, x);

  // Evaluate the numerator polynomial of the rational interpolant.
  Packet8d px = p8d_cephes_exp_p0;
  px = pmadd(px, x2, p8d_cephes_exp_p1);
  px = pmadd(px, x2, p8d_cephes_exp_p2);
  px = pmul(px, x);

  // Evaluate the denominator polynomial of the rational interpolant.
  Packet8d qx = p8d_cephes_exp_q0;
  qx = pmadd(qx, x2, p8d_cephes_exp_q1);
  qx = pmadd(qx, x2, p8d_cephes_exp_q2);
  qx = pmadd(qx, x2, p8d_cephes_exp_q3);

  // I don't really get this bit, copied from the SSE2 routines, so...
  // TODO(gonnet): Figure out what is going on here, perhaps find a better
  // rational interpolant?
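  // One plausible reading: with R = x*P(x^2)/Q(x^2) ~ tanh(x/2), the identity
  //   exp(x) = (1 + tanh(x/2)) / (1 - tanh(x/2))
  // gives exp(x) ~ (Q + x*P) / (Q - x*P) = 1 + 2*x*P / (Q - x*P),
  // which is what the next two statements compute.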
  x = _mm512_div_pd(px, psub(qx, px));
  x = pmadd(p8d_2, x, p8d_1);

  // Build e=2^n.
  const Packet8d e = _mm512_castsi512_pd(_mm512_slli_epi64(
      _mm512_add_epi64(_mm512_cvtpd_epi64(n), _mm512_set1_epi64(1023)), 52));

  // Construct the result 2^n * exp(g) = e * x. The max is used to catch
  // non-finite values in the input.
  return pmax(pmul(x, e), _x);
  }*/

// Functions for sqrt.
// The EIGEN_FAST_MATH version uses the _mm512_rsqrt14_ps approximation and one
// step of Newton's method (two steps for the double-precision version), at a
// cost of 1-2 bits of precision as opposed to the exact solution. The main
// advantage of this approach is not just speed, but also the fact that it can
// be inlined and pipelined with other computations, further reducing its
// effective latency.
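//
// The iteration used below is the standard Newton step for f(y) = 1/y^2 - x:
//   y_{n+1} = y_n * (1.5 - 0.5 * x * y_n^2),
// which roughly doubles the number of correct bits per step. Since
// _mm512_rsqrt14_ps/_pd provide about 14 correct bits, one step suffices for
// floats (24-bit mantissa) and two steps for doubles (53-bit mantissa), and
// sqrt(x) is then obtained as x * rsqrt(x).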
#if EIGEN_FAST_MATH
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
psqrt<Packet16f>(const Packet16f& _x) {
  Packet16f neg_half = pmul(_x, pset1<Packet16f>(-.5f));
  __mmask16 denormal_mask = _mm512_kand(
      _mm512_cmp_ps_mask(_x, pset1<Packet16f>((std::numeric_limits<float>::min)()),
                         _CMP_LT_OQ),
      _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_GE_OQ));

  Packet16f x = _mm512_rsqrt14_ps(_x);

  // Do a single step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet16f>(1.5f)));

  // Flush results for denormals to zero.
  return _mm512_mask_blend_ps(denormal_mask, pmul(_x, x), _mm512_setzero_ps());
}

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
psqrt<Packet8d>(const Packet8d& _x) {
  Packet8d neg_half = pmul(_x, pset1<Packet8d>(-.5));
  __mmask16 denormal_mask = _mm512_kand(
      _mm512_cmp_pd_mask(_x, pset1<Packet8d>((std::numeric_limits<double>::min)()),
                         _CMP_LT_OQ),
      _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_GE_OQ));

  Packet8d x = _mm512_rsqrt14_pd(_x);

  // Do a first step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet8d>(1.5)));

  // Do a second step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet8d>(1.5)));

  // Flush results for denormals to zero.
  return _mm512_mask_blend_pd(denormal_mask, pmul(_x, x), _mm512_setzero_pd());
}
#else
template <>
EIGEN_STRONG_INLINE Packet16f psqrt<Packet16f>(const Packet16f& x) {
  return _mm512_sqrt_ps(x);
}
template <>
EIGEN_STRONG_INLINE Packet8d psqrt<Packet8d>(const Packet8d& x) {
  return _mm512_sqrt_pd(x);
}
#endif

// Functions for rsqrt.
// Almost identical to the sqrt routine, just leave out the last multiplication
// and fill in NaN/Inf where needed. Note that this function only exists as an
// iterative version for doubles since there is no instruction for directly
// computing the reciprocal square root in AVX-512.
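//
// In the fast path below, inputs smaller than the smallest positive normal
// (i.e. zeros, denormals and negative values) are excluded from the Newton
// refinement and patched up at the end: zeros and denormals produce +inf,
// while strictly negative inputs produce NaN.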
#if EIGEN_FAST_MATH
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
prsqrt<Packet16f>(const Packet16f& _x) {
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inf, 0x7f800000);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);
  _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);
  _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);
  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000);

  Packet16f neg_half = pmul(_x, p16f_minus_half);

  // Select only the inverse sqrt of positive normal inputs (denormals are
  // flushed to zero and cause infs as well).
  __mmask16 le_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_LT_OQ);
  Packet16f x = _mm512_mask_blend_ps(le_zero_mask, _mm512_rsqrt14_ps(_x), _mm512_setzero_ps());

  // Fill in NaNs and Infs for the negative/zero entries.
  __mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ);
  Packet16f infs_and_nans = _mm512_mask_blend_ps(
      neg_mask, _mm512_mask_blend_ps(le_zero_mask, _mm512_setzero_ps(), p16f_inf), p16f_nan);

  // Do a single step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));

  // Insert NaNs and Infs in all the right places.
  return _mm512_mask_blend_ps(le_zero_mask, x, infs_and_nans);
}

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
prsqrt<Packet8d>(const Packet8d& _x) {
  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(inf, 0x7ff0000000000000LL);
  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(nan, 0x7ff1000000000000LL);
  _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);
  _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);
  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL);

  Packet8d neg_half = pmul(_x, p8d_minus_half);

  // Select only the inverse sqrt of positive normal inputs (denormals are
  // flushed to zero and cause infs as well).
  __mmask8 le_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_LT_OQ);
  Packet8d x = _mm512_mask_blend_pd(le_zero_mask, _mm512_rsqrt14_pd(_x), _mm512_setzero_pd());

  // Fill in NaNs and Infs for the negative/zero entries.
  __mmask8 neg_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LT_OQ);
  Packet8d infs_and_nans = _mm512_mask_blend_pd(
      neg_mask, _mm512_mask_blend_pd(le_zero_mask, _mm512_setzero_pd(), p8d_inf), p8d_nan);

  // Do a first step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));

  // Do a second step of Newton's iteration.
  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));

  // Insert NaNs and Infs in all the right places.
  return _mm512_mask_blend_pd(le_zero_mask, x, infs_and_nans);
}
#elif defined(EIGEN_VECTORIZE_AVX512ER)
template <>
EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {
  return _mm512_rsqrt28_ps(x);
}
#endif
#endif

}  // end namespace internal

}  // end namespace Eigen

#endif  // THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_