/*
 * Single-precision vector log function - inline version
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"

struct v_logf_data
{
  float32x4_t poly[7];
  float32x4_t ln2;
  uint32x4_t off, mantissa_mask;
};

#define V_LOGF_CONSTANTS                                                      \
  {                                                                           \
    .poly                                                                     \
	= { V4 (-0x1.3e737cp-3f), V4 (0x1.5a9aa2p-3f),  V4 (-0x1.4f9934p-3f), \
	    V4 (0x1.961348p-3f),  V4 (-0x1.00187cp-2f), V4 (0x1.555d7cp-2f),  \
	    V4 (-0x1.ffffc8p-2f) },                                           \
	.ln2 = V4 (0x1.62e43p-1f), .off = V4 (0x3f2aaaab),                    \
	.mantissa_mask = V4 (0x007fffff)                                      \
  }
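
/* Typical usage (sketch, names illustrative): define one table with
   V_LOGF_CONSTANTS and pass its address to v_logf_inline, e.g.
     static const struct v_logf_data logf_data = V_LOGF_CONSTANTS;
     float32x4_t y = v_logf_inline (x, &logf_data);  */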

#define P(i) d->poly[7 - i]
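/* P (i) selects poly[7 - i], so coefficients are stored highest-degree
   first: P (1) = poly[6] multiplies r^2 and P (7) = poly[0] multiplies
   r^8.  */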

static inline float32x4_t
v_logf_inline (float32x4_t x, const struct v_logf_data *d)
{
  float32x4_t n, p, q, r, r2, y;
  uint32x4_t u;

  u = vreinterpretq_u32_f32 (x);

  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
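  /* off is the bit pattern of a float close to 2/3.  Subtracting it lets the
     exponent bits of u (sign-extended below) supply n, while the mantissa
     bits reconstruct 1+r in [2/3, 4/3).  */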
  u = vsubq_u32 (u, d->off);
  n = vcvtq_f32_s32 (
      vshrq_n_s32 (vreinterpretq_s32_u32 (u), 23)); /* signextend.  */
  u = vandq_u32 (u, d->mantissa_mask);
  u = vaddq_u32 (u, d->off);
  r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));

  /* y = log(1+r) + n*ln2.  */
  r2 = vmulq_f32 (r, r);
  /* n*ln2 + r + r2*(P1 + r*P2 + r2*(P3 + r*P4 + r2*(P5 + r*P6 + r2*P7))).  */
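  /* Pairwise evaluation: three independent FMAs form (P5 + r*P6),
     (P3 + r*P4) and (P1 + r*P2), which are then folded together with r2 to
     shorten the dependency chain.  */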
  p = vfmaq_f32 (P (5), P (6), r);
  q = vfmaq_f32 (P (3), P (4), r);
  y = vfmaq_f32 (P (1), P (2), r);
  p = vfmaq_f32 (p, P (7), r2);
  q = vfmaq_f32 (q, p, r2);
  y = vfmaq_f32 (y, q, r2);
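  /* Reconstruct the result: p = n*ln2 + r, then add the polynomial tail
     y*r2.  */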
  p = vfmaq_f32 (r, d->ln2, n);

  return vfmaq_f32 (p, y, r2);
}

#undef P