1 /*
2 Copyright (C) 2012 Fredrik Johansson
3
4 This file is part of Arb.
5
6 Arb is free software: you can redistribute it and/or modify it under
7 the terms of the GNU Lesser General Public License (LGPL) as published
8 by the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version. See <http://www.gnu.org/licenses/>.
10 */
11
12 #ifndef FMPR_H
13 #define FMPR_H
14
15 #if defined(__MINGW64__) || defined(_MSC_VER)
16 #include "stdint.h"
17 #endif
18 #include <stdio.h>
19 #include <limits.h>
20 #include <gmp.h>
21 #include <mpfr.h>
22 #include "flint/flint.h"
23 #include "flint/fmpz.h"
24 #include "flint/fmpq.h"
25 #if __FLINT_RELEASE < 20600
26 #include "flint/config.h"
27 #else
28 #include "flint/flint-config.h"
29 #endif
30 #include "fmpz_extras.h"
31
32 #ifndef flint_abort
33 #if __FLINT_RELEASE <= 20502
34 #define flint_abort abort
35 #endif
36 #endif
37
38 #define TLS_PREFIX FLINT_TLS_PREFIX
39
40 #if defined(_MSC_VER) && defined(ARB_BUILD_DLL)
41 #define ARB_DLL __declspec(dllexport)
42 #else
43 #define ARB_DLL FLINT_DLL
44 #endif
45
46 #define fmpr_rnd_t int
47 #define FMPR_RND_DOWN 0
48 #define FMPR_RND_UP 1
49 #define FMPR_RND_FLOOR 2
50 #define FMPR_RND_CEIL 3
51 #define FMPR_RND_NEAR 4
52
53 #ifdef __cplusplus
54 extern "C" {
55 #endif
56
57 /* currently defined in the arb module, but global to the library */
58 double arb_test_multiplier(void);
59
60 static __inline__ int
rounds_up(fmpr_rnd_t rnd,int negative)61 rounds_up(fmpr_rnd_t rnd, int negative)
62 {
63 if (rnd == FMPR_RND_DOWN) return 0;
64 if (rnd == FMPR_RND_UP) return 1;
65 if (rnd == FMPR_RND_FLOOR) return negative;
66 return !negative;
67 }
68
rnd_to_mpfr(fmpr_rnd_t rnd)69 static __inline__ mpfr_rnd_t rnd_to_mpfr(fmpr_rnd_t rnd)
70 {
71 if (rnd == FMPR_RND_DOWN) return MPFR_RNDZ;
72 else if (rnd == FMPR_RND_UP) return MPFR_RNDA;
73 else if (rnd == FMPR_RND_FLOOR) return MPFR_RNDD;
74 else if (rnd == FMPR_RND_CEIL) return MPFR_RNDU;
75 else return MPFR_RNDN;
76 }
77
/*
    A floating-point number man * 2^exp with arbitrary-precision integer
    mantissa and exponent. A zero mantissa encodes the special values
    0, +inf, -inf and NaN, distinguished by the exponent field (see the
    FMPR_EXP_* constants below).
*/
typedef struct
{
    fmpz man;   /* mantissa; zero for special values */
    fmpz exp;   /* exponent (or special-value tag when man == 0) */
}
fmpr_struct;

typedef fmpr_struct fmpr_t[1];            /* value type, passed by reference */
typedef fmpr_struct * fmpr_ptr;           /* mutable vector pointer */
typedef const fmpr_struct * fmpr_srcptr;  /* read-only vector pointer */

/* Accessors for the two components, usable as fmpz_t arguments. */
#define fmpr_manref(x) (&(x)->man)
#define fmpr_expref(x) (&(x)->exp)
91
92
93 static __inline__ void
fmpr_init(fmpr_t x)94 fmpr_init(fmpr_t x)
95 {
96 fmpz_init(fmpr_manref(x));
97 fmpz_init(fmpr_expref(x));
98 }
99
100 static __inline__ void
fmpr_clear(fmpr_t x)101 fmpr_clear(fmpr_t x)
102 {
103 fmpz_clear(fmpr_manref(x));
104 fmpz_clear(fmpr_expref(x));
105 }
106
107
108 /* The return value encodes an error bound, or FMPR_RESULT_EXACT if exact */
109 #define FMPR_RESULT_EXACT WORD_MAX
110
111 /* Allow 'infinite' precision for operations where a result can be computed exactly */
112 #define FMPR_PREC_EXACT WORD_MAX
113
114 #define FMPR_PREC_ADD(prec,extra) ((prec) == FMPR_PREC_EXACT ? FMPR_PREC_EXACT : (prec) + (extra))
115
116 /* ------------------------------------------------------------------------ */
117 /* Special values */
118
119 /*
120 Special values (0, +inf, -inf, NaN) are encoded using a zero mantissa.
121 Zero is encoded with a zero exponent.
122 */
123
124 #define FMPR_EXP_POS_INF WORD(1)
125 #define FMPR_EXP_NEG_INF WORD(2)
126 #define FMPR_EXP_NAN WORD(3)
127
static __inline__ void fmpr_zero(fmpr_t x) {
    /* Canonical zero: both components cleared. */
    fmpz_zero(fmpr_expref(x));
    fmpz_zero(fmpr_manref(x));
}
132
static __inline__ int fmpr_is_zero(const fmpr_t x) {
    /* Zero is the special value whose exponent tag is also zero. */
    if (!fmpz_is_zero(fmpr_manref(x)))
        return 0;
    return fmpz_is_zero(fmpr_expref(x));
}
136
static __inline__ int fmpr_is_special(const fmpr_t x) {
    /* Special values (0, +/-inf, NaN) all carry a zero mantissa. */
    return fmpz_is_zero(fmpr_manref(x)) ? 1 : 0;
}
140
static __inline__ int fmpr_is_normal(const fmpr_t x) {
    /* A normal value has a nonzero mantissa. */
    return fmpz_is_zero(fmpr_manref(x)) ? 0 : 1;
}
144
fmpr_is_inf(const fmpr_t x)145 static __inline__ int fmpr_is_inf(const fmpr_t x) {
146 return fmpr_is_special(x) && (*fmpr_expref(x) == FMPR_EXP_POS_INF ||
147 *fmpr_expref(x) == FMPR_EXP_NEG_INF);
148 }
149
fmpr_is_pos_inf(const fmpr_t x)150 static __inline__ int fmpr_is_pos_inf(const fmpr_t x) {
151 return fmpr_is_special(x) && (*fmpr_expref(x) == FMPR_EXP_POS_INF);
152 }
153
fmpr_is_neg_inf(const fmpr_t x)154 static __inline__ int fmpr_is_neg_inf(const fmpr_t x) {
155 return fmpr_is_special(x) && (*fmpr_expref(x) == FMPR_EXP_NEG_INF);
156 }
157
fmpr_is_nan(const fmpr_t x)158 static __inline__ int fmpr_is_nan(const fmpr_t x) {
159 return fmpr_is_special(x) && (*fmpr_expref(x) == FMPR_EXP_NAN);
160 }
161
static __inline__ int fmpr_is_finite(const fmpr_t x) {
    /* Finite = an ordinary value or zero; excludes inf and NaN. */
    if (!fmpr_is_special(x))
        return 1;
    return fmpr_is_zero(x);
}
165
static __inline__ void fmpr_pos_inf(fmpr_t x) {
    /* Encode +inf: exponent tag FMPR_EXP_POS_INF, zero mantissa. */
    fmpz_set_si(fmpr_expref(x), FMPR_EXP_POS_INF);
    fmpz_zero(fmpr_manref(x));
}
170
static __inline__ void fmpr_neg_inf(fmpr_t x) {
    /* Encode -inf: exponent tag FMPR_EXP_NEG_INF, zero mantissa. */
    fmpz_set_si(fmpr_expref(x), FMPR_EXP_NEG_INF);
    fmpz_zero(fmpr_manref(x));
}
175
static __inline__ void fmpr_nan(fmpr_t x) {
    /* Encode NaN: exponent tag FMPR_EXP_NAN, zero mantissa. */
    fmpz_set_si(fmpr_expref(x), FMPR_EXP_NAN);
    fmpz_zero(fmpr_manref(x));
}
180
181
182
183 static __inline__ int
fmpr_is_one(const fmpr_t x)184 fmpr_is_one(const fmpr_t x)
185 {
186 return fmpz_is_one(fmpr_manref(x)) && fmpz_is_zero(fmpr_expref(x));
187 }
188
189 static __inline__ void
fmpr_one(fmpr_t x)190 fmpr_one(fmpr_t x)
191 {
192 fmpz_one(fmpr_manref(x));
193 fmpz_zero(fmpr_expref(x));
194 }
195
196 /* ------------------------------------------------------------------------ */
197
198 slong _fmpr_normalise_naive(fmpz_t man, fmpz_t exp, slong prec, fmpr_rnd_t rnd);
199
200 static __inline__ void
fmpr_set(fmpr_t y,const fmpr_t x)201 fmpr_set(fmpr_t y, const fmpr_t x)
202 {
203 if (y != x)
204 {
205 fmpz_set(fmpr_manref(y), fmpr_manref(x));
206 fmpz_set(fmpr_expref(y), fmpr_expref(x));
207 }
208 }
209
210 static __inline__ void
fmpr_swap(fmpr_t x,fmpr_t y)211 fmpr_swap(fmpr_t x, fmpr_t y)
212 {
213 fmpz_swap(fmpr_manref(x), fmpr_manref(y));
214 fmpz_swap(fmpr_expref(x), fmpr_expref(y));
215 }
216
217 slong _fmpr_set_round(fmpz_t rman, fmpz_t rexp,
218 const fmpz_t man, const fmpz_t exp, slong prec, fmpr_rnd_t rnd);
219
220 static __inline__ slong
_fmpr_normalise(fmpz_t man,fmpz_t exp,slong prec,fmpr_rnd_t rnd)221 _fmpr_normalise(fmpz_t man, fmpz_t exp, slong prec, fmpr_rnd_t rnd)
222 {
223 if (fmpz_is_zero(man))
224 {
225 fmpz_zero(man);
226 fmpz_zero(exp);
227 return FMPR_RESULT_EXACT;
228 }
229 else
230 {
231 return _fmpr_set_round(man, exp, man, exp, prec, rnd);
232 }
233 }
234
235 static __inline__ slong
fmpr_set_round_naive(fmpr_t y,const fmpr_t x,slong prec,fmpr_rnd_t rnd)236 fmpr_set_round_naive(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd)
237 {
238 fmpr_set(y, x);
239 if (fmpr_is_special(y))
240 return FMPR_RESULT_EXACT;
241 else
242 return _fmpr_normalise(fmpr_manref(y), fmpr_expref(y), prec, rnd);
243 }
244
245 static __inline__ slong
fmpr_set_round(fmpr_t y,const fmpr_t x,slong prec,fmpr_rnd_t rnd)246 fmpr_set_round(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd)
247 {
248 if (fmpr_is_special(x))
249 {
250 fmpr_set(y, x);
251 return FMPR_RESULT_EXACT;
252 }
253 else
254 {
255 return _fmpr_set_round(fmpr_manref(y), fmpr_expref(y),
256 fmpr_manref(x), fmpr_expref(x), prec, rnd);
257 }
258 }
259
260 static __inline__ slong
fmpr_set_round_fmpz_2exp(fmpr_t y,const fmpz_t x,const fmpz_t exp,slong prec,fmpr_rnd_t rnd)261 fmpr_set_round_fmpz_2exp(fmpr_t y, const fmpz_t x, const fmpz_t exp, slong prec, fmpr_rnd_t rnd)
262 {
263 if (fmpz_is_zero(x))
264 {
265 fmpr_zero(y);
266 return FMPR_RESULT_EXACT;
267 }
268 else
269 {
270 return _fmpr_set_round(fmpr_manref(y), fmpr_expref(y), x, exp, prec, rnd);
271 }
272 }
273
274 static __inline__ slong
fmpr_set_round_fmpz(fmpr_t y,const fmpz_t x,slong prec,fmpr_rnd_t rnd)275 fmpr_set_round_fmpz(fmpr_t y, const fmpz_t x, slong prec, fmpr_rnd_t rnd)
276 {
277 if (fmpz_is_zero(x))
278 {
279 fmpr_zero(y);
280 return FMPR_RESULT_EXACT;
281 }
282 else
283 {
284 slong ret;
285 fmpz_t exp;
286 fmpz_init(exp);
287 ret = _fmpr_set_round(fmpr_manref(y), fmpr_expref(y), x, exp, prec, rnd);
288 fmpz_clear(exp);
289 return ret;
290 }
291 }
292
293 slong _fmpr_set_round_mpn(slong * shift, fmpz_t man, mp_srcptr x, mp_size_t xn, int negative, slong prec, fmpr_rnd_t rnd);
294
295 slong fmpr_set_round_ui_2exp_fmpz(fmpr_t z,
296 mp_limb_t lo, const fmpz_t exp, int negative,
297 slong prec, fmpr_rnd_t rnd);
298
299 slong fmpr_set_round_uiui_2exp_fmpz(fmpr_t z,
300 mp_limb_t hi, mp_limb_t lo, const fmpz_t exp, int negative,
301 slong prec, fmpr_rnd_t rnd);
302
303 void fmpr_ulp(fmpr_t u, const fmpr_t x, slong prec);
304
305 int fmpr_check_ulp(const fmpr_t result, slong r, slong prec);
306
307 static __inline__ int
fmpr_equal(const fmpr_t x,const fmpr_t y)308 fmpr_equal(const fmpr_t x, const fmpr_t y)
309 {
310 return fmpz_equal(fmpr_expref(x), fmpr_expref(y)) &&
311 fmpz_equal(fmpr_manref(x), fmpr_manref(y));
312 }
313
314 static __inline__ int
fmpr_sgn(const fmpr_t x)315 fmpr_sgn(const fmpr_t x)
316 {
317 if (fmpr_is_special(x))
318 {
319 if (fmpr_is_zero(x))
320 return 0;
321 if (fmpr_is_pos_inf(x))
322 return 1;
323 if (fmpr_is_neg_inf(x))
324 return -1;
325 return 0;
326 }
327
328 return fmpz_sgn(fmpr_manref(x));
329 }
330
331 int fmpr_cmp(const fmpr_t x, const fmpr_t y);
332
333 int fmpr_cmpabs(const fmpr_t x, const fmpr_t y);
334
335 int fmpr_cmpabs_ui(const fmpr_t x, ulong y);
336
337 void fmpr_randtest(fmpr_t x, flint_rand_t state, slong bits, slong exp_bits);
338
339 void fmpr_randtest_not_zero(fmpr_t x, flint_rand_t state, slong bits, slong exp_bits);
340
341 void fmpr_randtest_special(fmpr_t x, flint_rand_t state, slong bits, slong exp_bits);
342
343 int fmpr_get_mpfr(mpfr_t x, const fmpr_t y, mpfr_rnd_t rnd);
344
345 void fmpr_set_mpfr(fmpr_t x, const mpfr_t y);
346
347 double fmpr_get_d(const fmpr_t x, fmpr_rnd_t rnd);
348
349 void fmpr_set_d(fmpr_t x, double v);
350
/* Set x to the unsigned integer c, normalising so the mantissa is odd. */
static __inline__ void fmpr_set_ui(fmpr_t x, ulong c)
{
    int zeros;

    if (c == 0)
    {
        fmpr_zero(x);
        return;
    }

    /* Move the trailing zero bits of c into the exponent. */
    count_trailing_zeros(zeros, c);
    fmpz_set_ui(fmpr_manref(x), c >> zeros);
    fmpz_set_ui(fmpr_expref(x), zeros);
}
365
/* Set x to the signed integer c, normalising so the mantissa is odd. */
static __inline__ void fmpr_set_si(fmpr_t x, slong c)
{
    if (c == 0)
    {
        fmpr_zero(x);
    }
    else
    {
        int b;
        /* NOTE(review): count_trailing_zeros and c >> b operate on a
           signed value here; for negative c this relies on two's
           complement arithmetic right shift (exact here because the low
           b bits are zero) — confirm against supported platforms. */
        count_trailing_zeros(b, c);
        fmpz_set_si(fmpr_manref(x), c >> b);
        fmpz_set_ui(fmpr_expref(x), b);
    }
}
380
381 static __inline__ void
fmpr_set_fmpz(fmpr_t x,const fmpz_t c)382 fmpr_set_fmpz(fmpr_t x, const fmpz_t c)
383 {
384 if (fmpz_is_zero(c))
385 {
386 fmpr_zero(x);
387 }
388 else
389 {
390 slong v = fmpz_val2(c);
391
392 fmpz_tdiv_q_2exp(fmpr_manref(x), c, v);
393 fmpz_set_ui(fmpr_expref(x), v);
394 }
395 }
396
397 /* Arithmetic */
398
399
400 slong _fmpr_add_eps(fmpr_t z, const fmpr_t x, int sign, slong prec, fmpr_rnd_t rnd);
401
402 slong _fmpr_add_mpn(fmpr_t z,
403 mp_srcptr xman, mp_size_t xn, int xsign, const fmpz_t xexp,
404 mp_srcptr yman, mp_size_t yn, int ysign, const fmpz_t yexp,
405 slong shift, slong prec, fmpr_rnd_t rnd);
406
407 slong _fmpr_add_1x1(fmpr_t z,
408 mp_limb_t x, int xsign, const fmpz_t xexp,
409 mp_limb_t y, int ysign, const fmpz_t yexp,
410 slong shift, slong prec, slong rnd);
411
412 slong fmpr_add_naive(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
413
414 slong _fmpr_mul_mpn(fmpr_t z,
415 mp_srcptr xman, mp_size_t xn, const fmpz_t xexp,
416 mp_srcptr yman, mp_size_t yn, const fmpz_t yexp,
417 int negative, slong prec, fmpr_rnd_t rnd);
418
419 slong _fmpr_mul_1x1(fmpr_t z,
420 mp_limb_t u, const fmpz_t xexp, mp_limb_t v, const fmpz_t yexp,
421 int negative, slong prec, fmpr_rnd_t rnd);
422
423 slong fmpr_mul_naive(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
424 slong fmpr_mul(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
425 slong fmpr_mul_ui(fmpr_t z, const fmpr_t x, ulong y, slong prec, fmpr_rnd_t rnd);
426 slong fmpr_mul_si(fmpr_t z, const fmpr_t x, slong y, slong prec, fmpr_rnd_t rnd);
427 slong fmpr_mul_fmpz(fmpr_t z, const fmpr_t x, const fmpz_t y, slong prec, fmpr_rnd_t rnd);
428
429 slong fmpr_add(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
430 slong fmpr_add_ui(fmpr_t z, const fmpr_t x, ulong y, slong prec, fmpr_rnd_t rnd);
431 slong fmpr_add_si(fmpr_t z, const fmpr_t x, slong y, slong prec, fmpr_rnd_t rnd);
432 slong fmpr_add_fmpz(fmpr_t z, const fmpr_t x, const fmpz_t y, slong prec, fmpr_rnd_t rnd);
433
434 slong fmpr_sub(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
435 slong fmpr_sub_ui(fmpr_t z, const fmpr_t x, ulong y, slong prec, fmpr_rnd_t rnd);
436 slong fmpr_sub_si(fmpr_t z, const fmpr_t x, slong y, slong prec, fmpr_rnd_t rnd);
437 slong fmpr_sub_fmpz(fmpr_t z, const fmpr_t x, const fmpz_t y, slong prec, fmpr_rnd_t rnd);
438
439 slong fmpr_div(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
440 slong fmpr_div_ui(fmpr_t z, const fmpr_t x, ulong y, slong prec, fmpr_rnd_t rnd);
441 slong fmpr_ui_div(fmpr_t z, ulong x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
442 slong fmpr_div_si(fmpr_t z, const fmpr_t x, slong y, slong prec, fmpr_rnd_t rnd);
443 slong fmpr_si_div(fmpr_t z, slong x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
444 slong fmpr_div_fmpz(fmpr_t z, const fmpr_t x, const fmpz_t y, slong prec, fmpr_rnd_t rnd);
445 slong fmpr_fmpz_div(fmpr_t z, const fmpz_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
446 slong fmpr_fmpz_div_fmpz(fmpr_t z, const fmpz_t x, const fmpz_t y, slong prec, fmpr_rnd_t rnd);
447
448 slong fmpr_addmul(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
449 slong fmpr_addmul_ui(fmpr_t z, const fmpr_t x, ulong y, slong prec, fmpr_rnd_t rnd);
450 slong fmpr_addmul_si(fmpr_t z, const fmpr_t x, slong y, slong prec, fmpr_rnd_t rnd);
451 slong fmpr_addmul_fmpz(fmpr_t z, const fmpr_t x, const fmpz_t y, slong prec, fmpr_rnd_t rnd);
452
453 slong fmpr_submul(fmpr_t z, const fmpr_t x, const fmpr_t y, slong prec, fmpr_rnd_t rnd);
454 slong fmpr_submul_ui(fmpr_t z, const fmpr_t x, ulong y, slong prec, fmpr_rnd_t rnd);
455 slong fmpr_submul_si(fmpr_t z, const fmpr_t x, slong y, slong prec, fmpr_rnd_t rnd);
456 slong fmpr_submul_fmpz(fmpr_t z, const fmpr_t x, const fmpz_t y, slong prec, fmpr_rnd_t rnd);
457
458 slong fmpr_sqrt(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd);
459 slong fmpr_sqrt_ui(fmpr_t z, ulong x, slong prec, fmpr_rnd_t rnd);
460 slong fmpr_sqrt_fmpz(fmpr_t z, const fmpz_t x, slong prec, fmpr_rnd_t rnd);
461
462 slong fmpr_rsqrt(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd);
463
464 slong fmpr_root(fmpr_t y, const fmpr_t x, ulong k, slong prec, fmpr_rnd_t rnd);
465
466 slong fmpr_log(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd);
467 slong fmpr_log1p(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd);
468
469 slong fmpr_exp(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd);
470 slong fmpr_expm1(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd);
471
472 static __inline__ void
fmpr_neg(fmpr_t y,const fmpr_t x)473 fmpr_neg(fmpr_t y, const fmpr_t x)
474 {
475 if (fmpr_is_special(x))
476 {
477 if (fmpr_is_pos_inf(x))
478 fmpr_neg_inf(y);
479 else if (fmpr_is_neg_inf(x))
480 fmpr_pos_inf(y);
481 else
482 fmpr_set(y, x);
483 }
484 else
485 {
486 fmpz_neg(fmpr_manref(y), fmpr_manref(x));
487 fmpz_set(fmpr_expref(y), fmpr_expref(x));
488 }
489 }
490
491 static __inline__ slong
fmpr_neg_round(fmpr_t y,const fmpr_t x,slong prec,fmpr_rnd_t rnd)492 fmpr_neg_round(fmpr_t y, const fmpr_t x, slong prec, fmpr_rnd_t rnd)
493 {
494 fmpr_neg(y, x);
495 if (fmpr_is_special(y))
496 return FMPR_RESULT_EXACT;
497 else
498 return _fmpr_normalise(fmpr_manref(y), fmpr_expref(y), prec, rnd);
499 }
500
501 static __inline__ void
fmpr_abs(fmpr_t y,const fmpr_t x)502 fmpr_abs(fmpr_t y, const fmpr_t x)
503 {
504 if (fmpr_sgn(x) < 0)
505 fmpr_neg(y, x);
506 else
507 fmpr_set(y, x);
508 }
509
/*
    Convert the return value rret of an fmpr arithmetic function into an
    explicit error bound: err = 1 * 2^(exp(result) - rret), or zero when
    the operation was exact.
*/
static __inline__ void
fmpr_set_error_result(fmpr_t err, const fmpr_t result, slong rret)
{
    if (rret == FMPR_RESULT_EXACT)
    {
        fmpr_zero(err);
    }
    /*
    XXX: normally, a special value should not result from inexact arithmetic
    else if (fmpr_is_special(result))
    {
        fmpr_pos_inf(err);
    }
    */
    else
    {
        /* err = 2^(exp - rret): mantissa 1, exponent shifted down by the
           reported number of correct bits. */
        /* TODO: inline this */
        fmpz_sub_ui(fmpr_expref(err), fmpr_expref(result), rret);
        fmpz_one(fmpr_manref(err));
    }
}
532
533 static __inline__ void
fmpr_add_error_result(fmpr_t err,const fmpr_t err_in,const fmpr_t result,slong rret,slong prec,fmpr_rnd_t rnd)534 fmpr_add_error_result(fmpr_t err, const fmpr_t err_in,
535 const fmpr_t result, slong rret, slong prec, fmpr_rnd_t rnd)
536 {
537 if (rret == FMPR_RESULT_EXACT)
538 {
539 fmpr_set(err, err_in);
540 }
541 else
542 {
543 fmpr_t t;
544 fmpr_init(t);
545 fmpr_set_error_result(t, result, rret);
546 fmpr_add(err, err_in, t, prec, rnd);
547 fmpr_clear(t);
548 }
549 }
550
551 void fmpr_print(const fmpr_t x);
552
553 void fmpr_printd(const fmpr_t x, slong digits);
554
555
556 static __inline__ void
fmpr_mul_2exp_si(fmpr_t y,const fmpr_t x,slong e)557 fmpr_mul_2exp_si(fmpr_t y, const fmpr_t x, slong e)
558 {
559 if (e == 0 || fmpr_is_special(x))
560 {
561 fmpr_set(y, x);
562 }
563 else
564 {
565 fmpz_set(fmpr_manref(y), fmpr_manref(x));
566 fmpz_add_si_inline(fmpr_expref(y), fmpr_expref(x), e);
567 }
568 }
569
570 static __inline__ void
fmpr_mul_2exp_fmpz(fmpr_t y,const fmpr_t x,const fmpz_t e)571 fmpr_mul_2exp_fmpz(fmpr_t y, const fmpr_t x, const fmpz_t e)
572 {
573 if (e == 0 || fmpr_is_special(x))
574 {
575 fmpr_set(y, x);
576 }
577 else
578 {
579 fmpz_set(fmpr_manref(y), fmpr_manref(x));
580 fmpz_add(fmpr_expref(y), fmpr_expref(x), e);
581 }
582 }
583
584 void fmpr_get_fmpq(fmpq_t y, const fmpr_t x);
585
586 slong fmpr_set_fmpq(fmpr_t x, const fmpq_t y, slong prec, fmpr_rnd_t rnd);
587
588 void fmpr_get_fmpz(fmpz_t z, const fmpr_t x, fmpr_rnd_t rnd);
589
590 slong fmpr_get_si(const fmpr_t x, fmpr_rnd_t rnd);
591
592 void fmpr_set_fmpz_2exp(fmpr_t x, const fmpz_t man, const fmpz_t exp);
593
594 void fmpr_get_fmpz_2exp(fmpz_t man, fmpz_t exp, const fmpr_t x);
595
596 int fmpr_get_fmpz_fixed_fmpz(fmpz_t y, const fmpr_t x, const fmpz_t e);
597
598 int fmpr_get_fmpz_fixed_si(fmpz_t y, const fmpr_t x, slong e);
599
/* Set x = man * 2^exp (signed mantissa), normalised. The order matters:
   fmpr_set_si first strips trailing zero bits into the exponent, then
   the extra shift is added on top. */
static __inline__ void
fmpr_set_si_2exp_si(fmpr_t x, slong man, slong exp)
{
    fmpr_set_si(x, man);
    fmpr_mul_2exp_si(x, x, exp);
}
606
/* Set x = man * 2^exp (unsigned mantissa), normalised. The order matters:
   fmpr_set_ui first strips trailing zero bits into the exponent, then
   the extra shift is added on top. */
static __inline__ void
fmpr_set_ui_2exp_si(fmpr_t x, ulong man, slong exp)
{
    fmpr_set_ui(x, man);
    fmpr_mul_2exp_si(x, x, exp);
}
613
614 int fmpr_cmp_2exp_si(const fmpr_t x, slong e);
615
616 int fmpr_cmpabs_2exp_si(const fmpr_t x, slong e);
617
618 static __inline__ void
fmpr_min(fmpr_t z,const fmpr_t a,const fmpr_t b)619 fmpr_min(fmpr_t z, const fmpr_t a, const fmpr_t b)
620 {
621 if (fmpr_cmp(a, b) <= 0)
622 fmpr_set(z, a);
623 else
624 fmpr_set(z, b);
625 }
626
627 static __inline__ void
fmpr_max(fmpr_t z,const fmpr_t a,const fmpr_t b)628 fmpr_max(fmpr_t z, const fmpr_t a, const fmpr_t b)
629 {
630 if (fmpr_cmp(a, b) > 0)
631 fmpr_set(z, a);
632 else
633 fmpr_set(z, b);
634 }
635
636 static __inline__ slong
fmpr_bits(const fmpr_t x)637 fmpr_bits(const fmpr_t x)
638 {
639 if (fmpr_is_special(x))
640 return 0;
641 else
642 return fmpz_bits(fmpr_manref(x));
643 }
644
645 static __inline__ int
fmpr_is_int(const fmpr_t x)646 fmpr_is_int(const fmpr_t x)
647 {
648 if (fmpr_is_special(x))
649 return fmpr_is_zero(x);
650 else
651 return fmpz_sgn(fmpr_expref(x)) >= 0;
652 }
653
654 static __inline__ int
fmpr_is_int_2exp_si(const fmpr_t x,slong e)655 fmpr_is_int_2exp_si(const fmpr_t x, slong e)
656 {
657 if (fmpr_is_special(x))
658 return fmpr_is_zero(x);
659 else
660 return fmpz_cmp_si(fmpr_expref(x), e) >= 0;
661 }
662
/*
    Evaluate y = func(x) by round-tripping through MPFR at precision prec,
    setting r to the usual fmpr error-bound code (FMPR_RESULT_EXACT when
    MPFR reports an exact result, otherwise prec minus the result's
    mantissa bit count). Aborts on MPFR overflow or underflow.
    NOTE(review): the expansion ends in "while (0);" with a trailing
    semicolon, so it must not be used as the unbraced body of an if/else;
    it also widens the global MPFR exponent range without restoring it.
*/
#define CALL_MPFR_FUNC(r, func, y, x, prec, rnd) \
    do { \
        mpfr_t __t, __u; \
        mpfr_rnd_t __rnd; \
        __rnd = rnd_to_mpfr(rnd); \
        mpfr_init2(__t, 2 + fmpz_bits(fmpr_manref(x))); \
        mpfr_init2(__u, FLINT_MAX(2, prec)); \
        mpfr_set_emin(MPFR_EMIN_MIN); \
        mpfr_set_emax(MPFR_EMAX_MAX); \
        fmpr_get_mpfr(__t, x, MPFR_RNDD); \
        r = func(__u, __t, __rnd); \
        if (mpfr_overflow_p() || mpfr_underflow_p()) \
        { \
            flint_printf("exception: mpfr overflow\n"); \
            flint_abort(); \
        } \
        fmpr_set_mpfr(y, __u); \
        if (r == 0) \
            r = FMPR_RESULT_EXACT; \
        else \
            r = prec - fmpz_bits(fmpr_manref(y)); \
        mpfr_clear(__t); \
        mpfr_clear(__u); \
    } while (0);
687
688
/*
    Like CALL_MPFR_FUNC, but for MPFR functions taking an extra integer
    argument k: evaluates y = func(x, k). Same return-value convention
    and same trailing-semicolon / exponent-range caveats.
*/
#define CALL_MPFR_FUNC_K(r, func, y, x, k, prec, rnd) \
    do { \
        mpfr_t __t, __u; \
        mpfr_rnd_t __rnd; \
        __rnd = rnd_to_mpfr(rnd); \
        mpfr_init2(__t, 2 + fmpz_bits(fmpr_manref(x))); \
        mpfr_init2(__u, FLINT_MAX(2, prec)); \
        mpfr_set_emin(MPFR_EMIN_MIN); \
        mpfr_set_emax(MPFR_EMAX_MAX); \
        fmpr_get_mpfr(__t, x, MPFR_RNDD); \
        r = func(__u, __t, k, __rnd); \
        if (mpfr_overflow_p() || mpfr_underflow_p()) \
        { \
            flint_printf("exception: mpfr overflow\n"); \
            flint_abort(); \
        } \
        fmpr_set_mpfr(y, __u); \
        if (r == 0) \
            r = FMPR_RESULT_EXACT; \
        else \
            r = prec - fmpz_bits(fmpr_manref(y)); \
        mpfr_clear(__t); \
        mpfr_clear(__u); \
    } while (0);
713
/*
    Like CALL_MPFR_FUNC, but for MPFR functions producing two outputs
    from one input (e.g. sin_cos): evaluates (y1, y2) = func(x), setting
    r1 and r2 to prec minus the respective mantissa bit counts.
    NOTE(review): unlike the one-output macros, the func return value is
    discarded, so exact results are NOT reported as FMPR_RESULT_EXACT
    here. Same trailing-semicolon / exponent-range caveats apply.
*/
#define CALL_MPFR_FUNC_2X1(r1, r2, func, y1, y2, x, prec, rnd) \
    do { \
        mpfr_t __t, __u, __v; \
        mpfr_rnd_t __rnd; \
        __rnd = rnd_to_mpfr(rnd); \
        mpfr_init2(__t, 2 + fmpz_bits(fmpr_manref(x))); \
        mpfr_init2(__u, FLINT_MAX(2, prec)); \
        mpfr_init2(__v, FLINT_MAX(2, prec)); \
        mpfr_set_emin(MPFR_EMIN_MIN); \
        mpfr_set_emax(MPFR_EMAX_MAX); \
        fmpr_get_mpfr(__t, x, MPFR_RNDD); \
        func(__u, __v, __t, __rnd); \
        if (mpfr_overflow_p() || mpfr_underflow_p()) \
        { \
            flint_printf("exception: mpfr overflow\n"); \
            flint_abort(); \
        } \
        fmpr_set_mpfr(y1, __u); \
        r1 = prec - fmpz_bits(fmpr_manref(y1)); \
        fmpr_set_mpfr(y2, __v); \
        r2 = prec - fmpz_bits(fmpr_manref(y2)); \
        mpfr_clear(__t); \
        mpfr_clear(__u); \
        mpfr_clear(__v); \
    } while (0);
739
740 void fmpr_pow_sloppy_fmpz(fmpr_t y, const fmpr_t b, const fmpz_t e,
741 slong prec, fmpr_rnd_t rnd);
742
743 void fmpr_pow_sloppy_ui(fmpr_t y, const fmpr_t b, ulong e,
744 slong prec, fmpr_rnd_t rnd);
745
746 void fmpr_pow_sloppy_si(fmpr_t y, const fmpr_t b, slong e,
747 slong prec, fmpr_rnd_t rnd);
748
749 /* vector functions */
750
751 static __inline__ fmpr_ptr
_fmpr_vec_init(slong n)752 _fmpr_vec_init(slong n)
753 {
754 slong i;
755 fmpr_ptr v = (fmpr_ptr) flint_malloc(sizeof(fmpr_struct) * n);
756
757 for (i = 0; i < n; i++)
758 fmpr_init(v + i);
759
760 return v;
761 }
762
763 static __inline__ void
_fmpr_vec_clear(fmpr_ptr v,slong n)764 _fmpr_vec_clear(fmpr_ptr v, slong n)
765 {
766 slong i;
767 for (i = 0; i < n; i++)
768 fmpr_clear(v + i);
769 flint_free(v);
770 }
771
772
773 #ifdef __cplusplus
774 }
775 #endif
776
777 #endif
778
779