1 /*
2 Copyright (C) 2014 Fredrik Johansson
3
4 This file is part of Arb.
5
6 Arb is free software: you can redistribute it and/or modify it under
7 the terms of the GNU Lesser General Public License (LGPL) as published
8 by the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version. See <http://www.gnu.org/licenses/>.
10 */
11
12 #include "arb.h"
13
/* Allocate __n limbs from the FLINT scratch (TMP) allocator. */
#define TMP_ALLOC_LIMBS(__n) TMP_ALLOC((__n) * sizeof(mp_limb_t))
/* Default magnitude limit (bound on the input exponent) used before
   declaring overflow/underflow: at least 128, growing with precision. */
#define MAGLIM(prec) FLINT_MAX(128, 2 * (prec))
16
17 void
arb_exp_arf_huge(arb_t z,const arf_t x,slong mag,slong prec,int minus_one)18 arb_exp_arf_huge(arb_t z, const arf_t x, slong mag, slong prec, int minus_one)
19 {
20 arb_t ln2, t, u;
21 fmpz_t q;
22 slong wp;
23
24 arb_init(ln2);
25 arb_init(t);
26 arb_init(u);
27 fmpz_init(q);
28
29 wp = prec + mag + 10;
30
31 arb_const_log2(ln2, wp);
32 arb_set_arf(t, x);
33 arb_div(u, t, ln2, wp);
34 arf_get_fmpz(q, arb_midref(u), ARF_RND_DOWN);
35 arb_submul_fmpz(t, ln2, q, wp);
36
37 arb_exp(z, t, prec);
38 arb_mul_2exp_fmpz(z, z, q);
39
40 if (minus_one)
41 arb_sub_ui(z, z, 1, prec);
42
43 arb_clear(ln2);
44 arb_clear(t);
45 arb_clear(u);
46 fmpz_clear(q);
47 }
48
49 /* |x| >= 2^expbound */
50 static void
arb_exp_arf_overflow(arb_t z,slong expbound,int negative,int minus_one,slong prec)51 arb_exp_arf_overflow(arb_t z, slong expbound, int negative, int minus_one, slong prec)
52 {
53 if (!negative)
54 {
55 arf_zero(arb_midref(z));
56 mag_inf(arb_radref(z));
57 }
58 else
59 {
60 /* x <= -2^expbound ==> 0 < exp(x) <= 2^(-2^expbound) */
61 fmpz_t t;
62 fmpz_init(t);
63
64 fmpz_set_si(t, -1);
65 fmpz_mul_2exp(t, t, expbound);
66
67 arf_one(arb_midref(z));
68 mag_one(arb_radref(z));
69 arb_mul_2exp_fmpz(z, z, t);
70
71 if (minus_one)
72 arb_sub_ui(z, z, 1, prec);
73
74 fmpz_clear(t);
75 }
76 }
77
78 static void
arb_exp_arf_fallback(arb_t z,const arf_t x,slong mag,slong prec,int minus_one)79 arb_exp_arf_fallback(arb_t z, const arf_t x, slong mag, slong prec, int minus_one)
80 {
81 /* reduce by log(2) if needed, but avoid computing log(2) unnecessarily at
82 extremely high precision */
83 if (mag > 64 || (mag > 8 && prec < 1000000))
84 arb_exp_arf_huge(z, x, mag, prec, minus_one);
85 else if (prec < 19000)
86 arb_exp_arf_rs_generic(z, x, prec, minus_one);
87 else
88 arb_exp_arf_bb(z, x, prec, minus_one);
89 }
90
/* Compute exp(x) (exp(x)-1 when minus_one is set) for an exact arf_t input,
   writing a rigorous ball to z. Inputs with exponent above maglim are
   treated as overflow/underflow. The main path reduces modulo log(2),
   then by lookup tables, and evaluates a short Taylor series in fixed-point
   limb arithmetic, tracking all errors in ulps of the working precision. */
void
arb_exp_arf(arb_t z, const arf_t x, slong prec, int minus_one, slong maglim)
{
    if (arf_is_special(x))
    {
        /* x is 0, +/-inf, or NaN: handle each case exactly */
        if (minus_one)
        {
            if (arf_is_zero(x))
                arb_zero(z);
            else if (arf_is_pos_inf(x))
                arb_pos_inf(z);
            else if (arf_is_neg_inf(x))
                arb_set_si(z, -1);
            else
                arb_indeterminate(z);
        }
        else
        {
            if (arf_is_zero(x))
                arb_one(z);
            else if (arf_is_pos_inf(x))
                arb_pos_inf(z);
            else if (arf_is_neg_inf(x))
                arb_zero(z);
            else
                arb_indeterminate(z);
        }
    }
    else if (COEFF_IS_MPZ(ARF_EXP(x)))
    {
        /* bignum exponent: |x| is astronomically large or small */
        if (fmpz_sgn(ARF_EXPREF(x)) > 0)
        {
            /* huge input */
            arb_exp_arf_overflow(z, maglim, ARF_SGNBIT(x), minus_one, prec);
        }
        else
        {
            /* tiny input: |exp(x) - (1 + x)| <= |x^2| */
            fmpz_t t;
            int inexact;
            fmpz_init(t);
            /* t = 2*exp(x's exponent), so 2^t bounds x^2 */
            fmpz_mul_2exp(t, ARF_EXPREF(x), 1);
            inexact = arf_add_ui(arb_midref(z), x, minus_one ? 0 : 1, prec, ARB_RND);
            mag_one(arb_radref(z));
            mag_mul_2exp_fmpz(arb_radref(z), arb_radref(z), t);
            if (inexact)
                arf_mag_add_ulp(arb_radref(z), arb_radref(z), arb_midref(z), prec);
            fmpz_clear(t);
        }
    }
    else
    {
        slong exp, wp, wn, N, r, wprounded, finaln;
        fmpz_t n;
        mp_ptr tmp, w, t, u, finalvalue;
        mp_limb_t p1, q1bits, p2, q2bits, error, error2;
        int negative, inexact;
        TMP_INIT;

        exp = ARF_EXP(x);
        negative = ARF_SGNBIT(x);

        /* handle tiny input */
        /* |exp(x) - 1| <= 2|x| */
        if (!minus_one && exp < -prec - 4)
        {
            arf_one(arb_midref(z));
            mag_set_ui_2exp_si(arb_radref(z), 1, exp + 1);
            return;
        }
        /* |exp(x) - (1 + x)| <= |x^2| */
        else if (exp < (minus_one ? -prec - 4 : -(prec / 2) - 4))
        {
            inexact = arf_add_ui(arb_midref(z), x, minus_one ? 0 : 1, prec, ARB_RND);
            mag_set_ui_2exp_si(arb_radref(z), 1, 2 * exp);
            if (inexact)
                arf_mag_add_ulp(arb_radref(z), arb_radref(z), arb_midref(z), prec);
            return;
        }

        /* handle huge input */
        if (exp > maglim)
        {
            arb_exp_arf_overflow(z, maglim, negative, minus_one, prec);
            return;
        }

        /* Absolute working precision (NOT rounded to a limb multiple) */
        wp = prec + 8;
        /* for expm1 with small x, extra bits are needed since the leading
           1 cancels in exp(x) - 1 */
        if (minus_one && exp <= 0)
            wp += (-exp);
        /* Number of limbs */
        wn = (wp + FLINT_BITS - 1) / FLINT_BITS;
        /* Precision rounded to a number of bits */
        wprounded = FLINT_BITS * wn;
        /* Don't be close to the boundary (to allow adding the
           Taylor series truncation error without overflow) */
        wp = FLINT_MAX(wp, wprounded - (FLINT_BITS - 4));

        /* Too high precision to use table -- use generic algorithm */
        if (wp > ARB_EXP_TAB2_PREC)
        {
            arb_exp_arf_fallback(z, x, exp, prec, minus_one);
            return;
        }

        TMP_START;

        /* one contiguous allocation, partitioned below */
        tmp = TMP_ALLOC_LIMBS(4 * wn + 3);
        w = tmp;        /* requires wn+1 limbs */
        t = w + wn + 1; /* requires wn+1 limbs */
        u = t + wn + 1; /* requires 2wn+1 limbs */

        /* reduce modulo log(2): x = n*log(2) + w, with w in fixed point */
        fmpz_init(n);
        if (_arb_get_mpn_fixed_mod_log2(w, n, &error, x, wn) == 0)
        {
            /* may run out of precision for log(2) */
            arb_exp_arf_fallback(z, x, exp, prec, minus_one);
            fmpz_clear(n);
            TMP_END;
            return;
        }

        /* err(w) translates to a propagated error bounded by
           err(w) * exp'(x) < err(w) * exp(1) < err(w) * 3 */
        error *= 3;

        /* Table-based argument reduction (1 or 2 steps): peel off the top
           bits of w as table indices p1 (and p2), leaving a smaller w */
        if (wp <= ARB_EXP_TAB1_PREC)
        {
            q1bits = ARB_EXP_TAB1_BITS;
            q2bits = 0;

            p1 = w[wn-1] >> (FLINT_BITS - q1bits);
            w[wn-1] -= (p1 << (FLINT_BITS - q1bits));
            p2 = 0;
        }
        else
        {
            q1bits = ARB_EXP_TAB21_BITS;
            q2bits = ARB_EXP_TAB21_BITS + ARB_EXP_TAB22_BITS;

            p1 = w[wn-1] >> (FLINT_BITS - q1bits);
            w[wn-1] -= (p1 << (FLINT_BITS - q1bits));
            p2 = w[wn-1] >> (FLINT_BITS - q2bits);
            w[wn-1] -= (p2 << (FLINT_BITS - q2bits));
        }

        /* |w| <= 2^-r */
        r = _arb_mpn_leading_zeros(w, wn);

        /* Choose number of terms N such that Taylor series truncation
           error is <= 2^-wp */
        N = _arb_exp_taylor_bound(-r, wp);

        if (N < 60)
        {
            /* Evaluate Taylor series */
            _arb_exp_taylor_rs(t, &error2, w, wn, N);
            /* Taylor series evaluation error */
            error += error2;
            /* Taylor series truncation error */
            error += UWORD(1) << (wprounded-wp);
        }
        else /* Compute cosh(a) from sinh(a) using a square root. */
        {
            /* the summation for sinh is actually done to (2N-1)! */
            N = (N + 1) / 2;

            /* Evaluate Taylor series for sinh */
            _arb_sin_cos_taylor_rs(t, u, &error2, w, wn, N, 1, 0);
            error += error2;
            error += UWORD(1) << (wprounded-wp);

            /* 1 + sinh^2, with wn + 1 limbs */
            mpn_sqr(u, t, wn);
            u[2 * wn] = 1;

            /* cosh, with wn + 1 limbs */
            mpn_sqrtrem(w, u, u, 2 * wn + 1);

            /* exp = sinh + cosh */
            t[wn] = w[wn] + mpn_add_n(t, t, w, wn);

            /* Error for cosh */
            /* When converting sinh to cosh, the error for cosh must be
               smaller than the error for sinh; but we also get 1 ulp
               extra error from the square root. */
            error2 = error + 1;

            /* Error for sinh + cosh */
            error += error2;
        }

        /* Undo the table-based reduction by multiplying with the tabulated
           exp values; the log(2) reduction is undone later via 2^n */
        if (wp <= ARB_EXP_TAB1_PREC)
        {
            if (p1 == 0)
            {
                finalvalue = t;
                finaln = wn + 1;
            }
            else
            {
                /* Divide by 2 to get |t| <= 1 (todo: check this?) */
                mpn_rshift(t, t, wn + 1, 1);
                error = (error >> 1) + 2;

                mpn_mul_n(u, t, arb_exp_tab1[p1] + ARB_EXP_TAB1_LIMBS - wn, wn);

                /* (t + err1 * ulp) * (u + err2 * ulp) + 1ulp = t*u +
                   (err1*u + err2*t + t*u*ulp + 1) * ulp
                   note |u| <= 1, |t| <= 1 */
                error += 4;
                finalvalue = u + wn;

                finaln = wn;

                /* we have effectively divided by 2^2 -- todo use inline function */
                fmpz_add_ui(n, n, 2);
            }
        }
        else
        {
            if (p1 == 0 && p2 == 0)
            {
                finalvalue = t;
                finaln = wn + 1;
            }
            else
            {
                /* Divide by 2 to get |t| <= 1 (todo: check this?) */
                mpn_rshift(t, t, wn + 1, 1);
                error = (error >> 1) + 2;

                /* combine the two table factors into one product first */
                mpn_mul_n(u, arb_exp_tab21[p1] + ARB_EXP_TAB2_LIMBS - wn,
                    arb_exp_tab22[p2] + ARB_EXP_TAB2_LIMBS - wn, wn);

                /* error of w <= 4 ulp */
                flint_mpn_copyi(w, u + wn, wn); /* todo: avoid with better alloc */

                mpn_mul_n(u, t, w, wn);

                /* (t + err1 * ulp) * (w + 4 * ulp) + 1ulp = t*u +
                   (err1*w + 4*t + t*w*ulp + 1) * ulp
                   note |w| <= 1, |t| <= 1 */
                error += 6;

                finalvalue = u + wn;
                finaln = wn;

                /* we have effectively divided by 2^3 -- todo use inline function */
                fmpz_add_ui(n, n, 3);
            }
        }

        /* The accumulated arithmetic error */
        mag_set_ui_2exp_si(arb_radref(z), error, -wprounded);

        /* Set the midpoint */
        if (!minus_one)
        {
            inexact = _arf_set_mpn_fixed(arb_midref(z), finalvalue, finaln, wn, 0, prec, ARB_RND);
            if (inexact)
                arf_mag_add_ulp(arb_radref(z), arb_radref(z), arb_midref(z), prec);
        }
        else
        {
            /* keep full precision here; the cancellation in the final
               subtraction below would otherwise lose accuracy */
            _arf_set_mpn_fixed(arb_midref(z), finalvalue, finaln, wn, 0, finaln * FLINT_BITS, ARB_RND);
        }

        /* undo the log(2) reduction: multiply by 2^n */
        arb_mul_2exp_fmpz(z, z, n);

        if (minus_one)
            arb_sub_ui(z, z, 1, prec);

        TMP_END;
        fmpz_clear(n);
    }
}
371
/* todo: min prec by MAG_BITS everywhere? */
/* Compute an enclosure of exp(x) for a wide (low-accuracy) ball x by
   bounding exp at the endpoints with mag arithmetic: lower bounds use
   the *_lower variants, upper bounds the ordinary (upward-rounded) ones. */
void
arb_exp_wide(arb_t res, const arb_t x, slong prec, slong maglim)
{
    mag_t t, u;

    mag_init(t);
    mag_init(u);

    /* fast mag-only path when both midpoint and radius are below 2^20 */
    if (arf_cmpabs_2exp_si(arb_midref(x), 20) < 0
        && mag_cmp_2exp_si(arb_radref(x), 20) < 0)
    {
        if (arf_is_zero(arb_midref(x)))
        {
            /* x = [0 +/- r]: exp(x) is in [exp(-r), exp(r)] */
            if (mag_cmp_2exp_si(arb_radref(x), -10) < 0)
            {
                /* r tiny: 1 +/- (exp(r)-1) is a tight enclosure */
                mag_expm1(arb_radref(res), arb_radref(x));
                arf_one(arb_midref(res));
            }
            else
            {
                mag_expinv_lower(t, arb_radref(x));
                mag_exp(u, arb_radref(x));
                arb_set_interval_mag(res, t, u, prec);
            }
        }
        else if (arb_contains_zero(x))
        {
            /* endpoints straddle 0: t = r - |m| (distance of the far
               endpoint past zero), u = |m| + r (the near endpoint) */
            arf_get_mag_lower(t, arb_midref(x));
            mag_sub(t, arb_radref(x), t);

            arf_get_mag(u, arb_midref(x));
            mag_add(u, arb_radref(x), u);

            if (arf_sgn(arb_midref(x)) > 0)
            {
                /* lower endpoint negative, upper positive */
                mag_expinv_lower(t, t);
                mag_exp(u, u);
                arb_set_interval_mag(res, t, u, prec);
            }
            else
            {
                /* lower endpoint is the more negative one */
                mag_expinv_lower(u, u);
                mag_exp(t, t);
                arb_set_interval_mag(res, u, t, prec);
            }
        }
        else if (arf_sgn(arb_midref(x)) < 0)
        {
            /* x entirely negative: exp(x) in [exp(-|x|_max), exp(-|x|_min)] */
            arb_get_mag(t, x);
            arb_get_mag_lower(u, x);
            mag_expinv_lower(t, t);
            mag_expinv(u, u);
            arb_set_interval_mag(res, t, u, prec);
        }
        else
        {
            /* x entirely positive: exp(x) in [exp(|x|_min), exp(|x|_max)] */
            arb_get_mag_lower(t, x);
            arb_get_mag(u, x);
            mag_exp_lower(t, t);
            mag_exp(u, u);
            arb_set_interval_mag(res, t, u, prec);
        }
    }
    else
    {
        /* use arb_exp_arf for accurate argument reduction */
        arf_t q;
        arf_init(q);
        /* q = upper endpoint mid(x) + rad(x), rounded up */
        arf_set_mag(q, arb_radref(x));
        arf_add(q, q, arb_midref(x), MAG_BITS, ARF_RND_CEIL);
        arb_exp_arf(res, q, FLINT_MIN(prec, MAG_BITS), 0, maglim);
        /* convert to a zero-centered ball [0 +/- exp(q)] */
        arb_get_mag(arb_radref(res), res);
        arf_zero(arb_midref(res));
        arf_clear(q);
    }

    mag_clear(t);
    mag_clear(u);
}
452
/* Compute a rigorous enclosure of exp(x) for a ball x. Dispatches on the
   special cases (exact input, infinite radius, special midpoint) and
   otherwise either evaluates at the endpoints (wide input) or propagates
   the radius via exp(a+b) - exp(a) = exp(a) * (exp(b) - 1). */
void arb_exp(arb_t res, const arb_t x, slong prec)
{
    slong maglim = MAGLIM(prec);

    if (mag_is_zero(arb_radref(x)))
    {
        /* exact point input */
        arb_exp_arf(res, arb_midref(x), prec, 0, maglim);
    }
    else if (mag_is_inf(arb_radref(x)))
    {
        if (arf_is_nan(arb_midref(x)))
            arb_indeterminate(res);
        else
            arb_zero_pm_inf(res);
    }
    else if (arf_is_special(arb_midref(x)))
    {
        if (arf_is_zero(arb_midref(x)))
            arb_exp_wide(res, x, prec, maglim);
        else if (arf_is_nan(arb_midref(x)))
            arb_indeterminate(res);
        else /* infinity +/- finite */
            arb_exp_arf(res, arb_midref(x), prec, 0, 1);
    }
    else /* both finite, non-special */
    {
        slong acc, mexp, rexp;

        mexp = ARF_EXP(arb_midref(x));
        rexp = MAG_EXP(arb_radref(x));

        /* Clamp bignum exponents to the slong range. Use the accessor
           matching each field's type (MAG_EXPREF for the radius,
           ARF_EXPREF for the midpoint); the previous cross-use of the
           macros only worked because both structs happen to store the
           exponent in the same slot. */
        if (COEFF_IS_MPZ(rexp))
            rexp = (fmpz_sgn(MAG_EXPREF(arb_radref(x))) < 0) ? COEFF_MIN : COEFF_MAX;
        if (COEFF_IS_MPZ(mexp))
            mexp = (fmpz_sgn(ARF_EXPREF(arb_midref(x))) < 0) ? COEFF_MIN : COEFF_MAX;

        /* |x| tiny: exp(x) = 1 +/- (exp(|x|) - 1) */
        if (mexp < -prec && rexp < -prec)
        {
            arb_get_mag(arb_radref(res), x);
            mag_expm1(arb_radref(res), arb_radref(res));
            arf_one(arb_midref(res));
            return;
        }

        /* effective input accuracy, capped to [0, prec]; working at much
           higher precision than this would be wasted effort */
        acc = -rexp;
        acc = FLINT_MAX(acc, 0);
        acc = FLINT_MIN(acc, prec);
        prec = FLINT_MIN(prec, acc + MAG_BITS);
        prec = FLINT_MAX(prec, 2);

        if (acc < 20 && (rexp >= 0 || mexp <= 10))
        {
            /* may evaluate at endpoints */
            arb_exp_wide(res, x, prec, maglim);
        }
        else
        {
            /* exp(a+b) - exp(a) = exp(a) * (exp(b)-1) */
            mag_t t, u;

            mag_init_set(t, arb_radref(x));
            mag_init(u);

            arb_exp_arf(res, arb_midref(x), prec, 0, maglim);
            mag_expm1(t, t);
            arb_get_mag(u, res);
            mag_addmul(arb_radref(res), t, u);

            mag_clear(t);
            mag_clear(u);
        }
    }
}
526
527 void
arb_expm1(arb_t res,const arb_t x,slong prec)528 arb_expm1(arb_t res, const arb_t x, slong prec)
529 {
530 slong maglim = MAGLIM(prec);
531
532 if (mag_is_zero(arb_radref(x)))
533 {
534 arb_exp_arf(res, arb_midref(x), prec, 1, maglim);
535 }
536 else if (mag_is_inf(arb_radref(x)))
537 {
538 if (arf_is_nan(arb_midref(x)))
539 arb_indeterminate(res);
540 else
541 arb_zero_pm_inf(res);
542 }
543 else if (arf_is_special(arb_midref(x)))
544 {
545 if (arf_is_zero(arb_midref(x)))
546 {
547 if (mag_cmp_2exp_si(arb_radref(x), -10) < 0)
548 {
549 mag_expm1(arb_radref(res), arb_radref(x));
550 arf_zero(arb_midref(res));
551 }
552 else
553 {
554 arb_exp_wide(res, x, prec, maglim);
555 arb_sub_ui(res, res, 1, prec);
556 }
557 }
558 else if (arf_is_nan(arb_midref(x)))
559 arb_indeterminate(res);
560 else /* infinity +/- finite */
561 arb_exp_arf(res, arb_midref(x), prec, 1, 1);
562 }
563 else /* both finite, non-special */
564 {
565 if (arf_cmpabs_2exp_si(arb_midref(x), 3) < 0 &&
566 mag_cmp_2exp_si(arb_radref(x), -3) < 0)
567 {
568 mag_t t, u, one;
569 slong acc, mexp, rexp;
570
571 mexp = ARF_EXP(arb_midref(x));
572 rexp = MAG_EXP(arb_radref(x));
573
574 if (COEFF_IS_MPZ(rexp))
575 rexp = (fmpz_sgn(ARF_EXPREF(arb_radref(x))) < 0) ? COEFF_MIN : COEFF_MAX;
576 if (COEFF_IS_MPZ(mexp))
577 mexp = (fmpz_sgn(MAG_EXPREF(arb_midref(x))) < 0) ? COEFF_MIN : COEFF_MAX;
578
579 acc = FLINT_MIN(mexp, 0) - rexp;
580 acc = FLINT_MAX(acc, 0);
581 acc = FLINT_MIN(acc, prec);
582 prec = FLINT_MIN(prec, acc + MAG_BITS);
583 prec = FLINT_MAX(prec, 2);
584
585 /* [exp(a+b) - 1] - [exp(a) - 1] = exp(a) * (exp(b)-1) */
586 mag_init_set(t, arb_radref(x));
587 mag_init(u);
588 mag_init(one);
589 mag_one(one);
590
591 if (arf_sgn(arb_midref(x)) >= 0)
592 {
593 arb_exp_arf(res, arb_midref(x), prec, 1, maglim);
594 arb_get_mag(u, res);
595 mag_add(u, u, one);
596 }
597 else
598 {
599 arb_exp_arf(res, arb_midref(x), prec, 1, maglim);
600 arb_get_mag_lower(u, res);
601 mag_sub(u, one, u);
602 }
603
604 mag_expm1(t, t);
605 mag_addmul(arb_radref(res), t, u);
606
607 mag_clear(t);
608 mag_clear(u);
609 mag_clear(one);
610 }
611 else
612 {
613 arb_exp(res, x, prec);
614 arb_sub_ui(res, res, 1, prec);
615 }
616 }
617 }
618
/* Compute enclosures of exp(x) (res) and exp(-x) (res2) simultaneously,
   sharing the work of one exponential plus one inversion in the generic
   case. res and res2 must be distinct. */
void arb_exp_invexp(arb_t res, arb_t res2, const arb_t x, slong prec)
{
    slong maglim = MAGLIM(prec);

    if (arf_is_special(arb_midref(x)) || mag_is_special(arb_radref(x)))
    {
        /* [c +/- 0] */
        if (arf_is_finite(arb_midref(x)) && mag_is_zero(arb_radref(x)))
        {
            arb_exp_arf(res, arb_midref(x), prec, 0, maglim);
            arb_inv(res2, res, prec);
        } /* [nan +/- ?] */
        else if (arf_is_nan(arb_midref(x)))
        {
            arb_indeterminate(res);
            arb_indeterminate(res2);
        } /* [c +/- inf] */
        else if (mag_is_inf(arb_radref(x)))
        {
            arb_zero_pm_inf(res);
            arb_zero_pm_inf(res2);
        } /* [+inf +/- c] */
        else if (arf_is_pos_inf(arb_midref(x)))
        {
            arb_pos_inf(res);
            arb_zero(res2);
        } /* [-inf +/- c] */
        else if (arf_is_neg_inf(arb_midref(x)))
        {
            arb_zero(res);
            arb_pos_inf(res2);
        }
        else
        {
            /* zero midpoint with nonzero radius: evaluate both wide */
            arb_t t;
            arb_init(t);
            arb_neg(t, x);
            arb_exp_wide(res, x, prec, maglim);
            arb_exp_wide(res2, t, prec, maglim);
            arb_clear(t);
        }
    }
    else /* both finite, non-special */
    {
        slong acc, mexp, rexp;

        mexp = ARF_EXP(arb_midref(x));
        rexp = MAG_EXP(arb_radref(x));

        /* Clamp bignum exponents to the slong range. Use the accessor
           matching each field's type (MAG_EXPREF for the radius,
           ARF_EXPREF for the midpoint); the previous cross-use of the
           macros only worked because both structs happen to store the
           exponent in the same slot. */
        if (COEFF_IS_MPZ(rexp))
            rexp = (fmpz_sgn(MAG_EXPREF(arb_radref(x))) < 0) ? COEFF_MIN : COEFF_MAX;
        if (COEFF_IS_MPZ(mexp))
            mexp = (fmpz_sgn(ARF_EXPREF(arb_midref(x))) < 0) ? COEFF_MIN : COEFF_MAX;

        /* |x| tiny: exp(+/-x) = 1 +/- (exp(|x|) - 1) */
        if (mexp < -prec && rexp < -prec)
        {
            arb_get_mag(arb_radref(res), x);
            mag_expm1(arb_radref(res), arb_radref(res));
            arf_one(arb_midref(res));
            arb_set(res2, res);
            return;
        }

        /* effective input accuracy, capped to [0, prec] */
        acc = -rexp;
        acc = FLINT_MAX(acc, 0);
        acc = FLINT_MIN(acc, prec);
        prec = FLINT_MIN(prec, acc + MAG_BITS);
        prec = FLINT_MAX(prec, 2);

        if (acc < 20 && (rexp >= 0 || mexp <= 10))
        {
            /* may evaluate at endpoints */
            arb_t t;
            arb_init(t);
            arb_neg(t, x);
            arb_exp_wide(res, x, prec, maglim);
            arb_exp_wide(res2, t, prec, maglim);
            arb_clear(t);
        }
        else
        {
            /* exp(a+b) - exp(a) = exp(a) * (exp(b)-1); the same radius
               propagation applies to both exp(x) and exp(-x) */
            mag_t t, u;

            mag_init_set(t, arb_radref(x));
            mag_init(u);

            arb_exp_arf(res, arb_midref(x), prec, 0, maglim);
            arb_inv(res2, res, prec);

            mag_expm1(t, t);

            arb_get_mag(u, res);
            mag_addmul(arb_radref(res), t, u);
            arb_get_mag(u, res2);
            mag_addmul(arb_radref(res2), t, u);

            mag_clear(t);
            mag_clear(u);
        }
    }
}
721