/* mpn_invertappr and helper functions.  Compute I such that
   floor((B^{2n}-1)/U) - 1 <= I + B^n <= floor((B^{2n}-1)/U).

   Contributed to the GNU project by Marco Bodrato.

   The algorithm used here was inspired by ApproximateReciprocal from "Modern
   Computer Arithmetic", by Richard P. Brent and Paul Zimmermann.  Special
   thanks to Paul Zimmermann for his very valuable suggestions on all the
   theoretical aspects during the work on this code.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GMP RELEASE.

Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

/* FIXME: Remove NULL and TMP_* as soon as all the callers properly
   allocate and pass the scratch to the function. */
#include <stdlib.h>		/* for NULL */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

/* FIXME: The iterative version splits the operand into two slightly unbalanced
   parts; using log_2 (or counting the bits) underestimates the maximum
   number of iterations.  */

/* This is intended for constant THRESHOLDs only, where the compiler
   can completely fold the result.  */
#define LOG2C(n) \
 (((n) >=    0x1) + ((n) >=    0x2) + ((n) >=    0x4) + ((n) >=    0x8) + \
  ((n) >=   0x10) + ((n) >=   0x20) + ((n) >=   0x40) + ((n) >=   0x80) + \
  ((n) >=  0x100) + ((n) >=  0x200) + ((n) >=  0x400) + ((n) >=  0x800) + \
  ((n) >= 0x1000) + ((n) >= 0x2000) + ((n) >= 0x4000) + ((n) >= 0x8000))
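/* For instance, LOG2C(1) == 1, LOG2C(7) == 3 and LOG2C(8) == 4: for a
   positive n (below 2^16) it evaluates to floor(log2(n)) + 1, i.e. the bit
   length of n, which is all that is needed for the THRESHOLD constants it
   is applied to.  */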

#if TUNE_PROGRAM_BUILD
#define NPOWS \
 ((sizeof(mp_size_t) > 6 ? 48 : 8*sizeof(mp_size_t)))
#define MAYBE_dcpi1_divappr   1
#else
#define NPOWS \
 ((sizeof(mp_size_t) > 6 ? 48 : 8*sizeof(mp_size_t)) - LOG2C (INV_NEWTON_THRESHOLD))
#define MAYBE_dcpi1_divappr \
  (INV_NEWTON_THRESHOLD < DC_DIVAPPR_Q_THRESHOLD)
#if (INV_NEWTON_THRESHOLD > INV_MULMOD_BNM1_THRESHOLD) && \
    (INV_APPR_THRESHOLD > INV_MULMOD_BNM1_THRESHOLD)
#undef  INV_MULMOD_BNM1_THRESHOLD
#define INV_MULMOD_BNM1_THRESHOLD 0 /* always when Newton */
#endif
#endif

/* All three functions mpn{,_bc,_ni}_invertappr (ip, dp, n, scratch) take
   the strictly normalised value {dp,n} (i.e., the most significant bit must
   be set) as input, and compute {ip,n}: the approximate reciprocal of {dp,n}.

   Let e = mpn*_invertappr (ip, dp, n, scratch) be the returned value; the
   following conditions are satisfied by the output:
     0 <= e <= 1;
     {dp,n}*(B^n+{ip,n}) < B^{2n} <= {dp,n}*(B^n+{ip,n}+1+e).
   I.e., e=0 means that the result {ip,n} equals the one given by mpn_invert;
	 e=1 means that the result _may_ be one less than expected.

   The _bc version returns e=1 most of the time.
   The _ni version should return e=0 most of the time; only about 1% of
   possible random inputs should give e=1.

   When the strict result is needed, i.e., e=0 in the relation above:
     {dp,n}*(B^n+{ip,n}) < B^{2n} <= {dp,n}*(B^n+{ip,n}+1);
   the function mpn_invert (ip, dp, n, scratch) should be used instead.  */

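/* An illustrative example (decimal analogue, not GMP limbs): take B = 10,
   n = 1 and the normalised divisor {dp,1} = {7} (7 >= B/2).  Then
   floor((B^{2n}-1)/{dp,n}) = floor(99/7) = 14, so I + B^n may be 13 or 14,
   i.e. {ip,1} is 3 or 4.  The strict (mpn_invert) value is 4, because
   7*(10+4) = 98 < 100 <= 7*(10+5) = 105; a return value e=1 only warns that
   the computed {ip,n} may be the smaller of the two candidates.  */
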
/* Maximum scratch needed by this branch (at tp): 3*n + 2 */
static mp_limb_t
mpn_bc_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n, mp_ptr tp)
{
  mp_ptr xp;

  ASSERT (n > 0);
  ASSERT (dp[n-1] & GMP_NUMB_HIGHBIT);
  ASSERT (! MPN_OVERLAP_P (ip, n, dp, n));
  ASSERT (! MPN_OVERLAP_P (ip, n, tp, mpn_invertappr_itch(n)));
  ASSERT (! MPN_OVERLAP_P (dp, n, tp, mpn_invertappr_itch(n)));

  /* Compute a base value of n limbs. */
  if (n == 1)
    invert_limb (*ip, *dp);
  else {
    mp_size_t i;
    xp = tp + n + 2;				/* 2 * n limbs */

    for (i = n - 1; i >= 0; i--)
      xp[i] = GMP_NUMB_MAX;
    mpn_com (xp + n, dp, n);

    /* Now xp contains B^{2n} - {dp,n}*B^n - 1 */

    /* FIXME: if mpn_*pi1_divappr_q handles n==2, use it! */
    if (n == 2) {
      mpn_divrem_2 (ip, 0, xp, 4, dp);
    } else {
      gmp_pi1_t inv;
      invert_pi1 (inv, dp[n-1], dp[n-2]);
      if (! MAYBE_dcpi1_divappr
	  || BELOW_THRESHOLD (n, DC_DIVAPPR_Q_THRESHOLD))
	mpn_sbpi1_divappr_q (ip, xp, 2 * n, dp, n, inv.inv32);
      else
	mpn_dcpi1_divappr_q (ip, xp, 2 * n, dp, n, &inv);
      MPN_DECR_U(ip, n, 1);
      return 1;
    }
  }
  return 0;
}

/* mpn_ni_invertappr: computes the approximate reciprocal using Newton's
   iterations (at least one).

   Inspired by Algorithm "ApproximateReciprocal", published in "Modern Computer
   Arithmetic" by Richard P. Brent and Paul Zimmermann, algorithm 3.5, page 121
   in version 0.4 of the book.

   Some adaptations were introduced, to allow the product mod B^m-1 and to
   return the value e.

   USE_MUL_N = 1 (default) introduces a correction in such a way that "the
   value of B^{n+h}-T computed at step 8 cannot exceed B^n-1" (the book reads
   "2B^n-1").  This correction should not require modifying the proof.

   We use a wrapped product modulo B^m-1.  NOTE: is there any normalisation
   problem for the [0] class?  It shouldn't: we compute 2*|A*X_h - B^{n+h}| <
   B^m-1.  We may get [0] if and only if we get A*X_h = B^{n+h}.  This can
   happen only if A = B^{n}/2, but this implies X_h = B^{h}*2-1, i.e., A*X_h =
   B^{n+h} - A, and then we get into the "negative" branch, where X_h is not
   incremented (because A < B^n).

   FIXME: the scratch needed by mulmod_bnm1 does not currently fit in the
   scratch area; it is allocated separately.  */

#define USE_MUL_N 1

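/* In real-number terms, each pass of the main loop below is one Newton step
   for the reciprocal, x_{k+1} = x_k + x_k*(1 - d*x_k), carried out in fixed
   point: the current approximation 1.{ip,rn} is multiplied by 0.{dp,n} at
   the next precision n, the residual is reduced to the "positive" or
   "negative" class, and the correction x_j*u_j extends the inverse from rn
   to n limbs.  (This paragraph is only a summary of the algorithm described
   above.)  */
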
mp_limb_t
mpn_ni_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n, mp_ptr scratch)
{
  mp_limb_t cy;
  mp_ptr xp;
  mp_size_t rn, mn;
  mp_size_t sizes[NPOWS], *sizp;
  mp_ptr tp;
  TMP_DECL;
#define rp scratch

  ASSERT (n > 2);
  ASSERT (dp[n-1] & GMP_NUMB_HIGHBIT);
  ASSERT (! MPN_OVERLAP_P (ip, n, dp, n));
  ASSERT (! MPN_OVERLAP_P (ip, n, scratch, mpn_invertappr_itch(n)));
  ASSERT (! MPN_OVERLAP_P (dp, n, scratch, mpn_invertappr_itch(n)));

  /* Compute the computation precisions from highest to lowest, leaving the
     base case size in 'rn'.  */
  sizp = sizes;
  rn = n;
  do {
    *sizp = rn;
    rn = ((rn) >> 1) + 1;
    sizp ++;
  } while (ABOVE_THRESHOLD (rn, INV_NEWTON_THRESHOLD));
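  /* For instance (illustrative numbers only; the real INV_NEWTON_THRESHOLD
     is platform-dependent): with n = 1000 and a threshold of 170 the loop
     above stores sizes[] = {1000, 501, 251} and leaves rn = 126, so the
     Newton steps below refine a 126-limb inverse to 251, 501 and finally
     1000 limbs.  */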

  /* We seek the inverse of 0.{dp,n}; we compute it as 1.{ip,n}.  */
  dp += n;
  ip += n;

  /* Compute a base value of rn limbs. */
  mpn_bc_invertappr (ip - rn, dp - rn, rn, scratch);

  TMP_MARK;

  if (ABOVE_THRESHOLD (n, INV_MULMOD_BNM1_THRESHOLD))
    {
      mn = mpn_mulmod_bnm1_next_size (n + 1);
      tp = TMP_ALLOC_LIMBS (mpn_mulmod_bnm1_itch (mn, n, (n >> 1) + 1));
    }
  /* Use Newton's iterations to get the desired precision.  */

  /* define rp scratch; 2rn + 1 limbs <= 2(n>>1 + 1) + 1 <= n + 3  limbs */
  /* Maximum scratch needed by this branch <= 3*n + 2 */
  xp = scratch + n + 3;				/*  n + rn limbs */
  while (1) {
    mp_limb_t method;

    n = *--sizp;
    /*
      v    n  v
      +----+--+
      ^ rn ^
    */

    /* Compute i_j * d.  */
    if (BELOW_THRESHOLD (n, INV_MULMOD_BNM1_THRESHOLD)
	|| ((mn = mpn_mulmod_bnm1_next_size (n + 1)) > (n + rn))) {
      /* FIXME: We only need {xp,n+1}.  */
      mpn_mul (xp, dp - n, n, ip - rn, rn);
      mpn_add_n (xp + rn, xp + rn, dp - n, n - rn + 1);
      method = 1; /* Remember we used (truncated) product */
      /* We computed cy.{xp,rn+n} <- 1.{ip,rn} * 0.{dp,n} */
    } else { /* Use B^n-1 wraparound */
      mpn_mulmod_bnm1 (xp, mn, dp - n, n, ip - rn, rn, tp);
      /* We computed {xp,mn} <- {ip,rn} * {dp,n} mod (B^mn-1) */
      /* We know that 2*|ip*dp + dp*B^rn - B^{rn+n}| < B^mn-1 */
      /* Add dp*B^rn mod (B^mn-1) */
      ASSERT (n >= mn - rn);
      xp[mn] = 1 + mpn_add_n (xp + rn, xp + rn, dp - n, mn - rn);
      cy = mpn_add_n (xp, xp, dp - (n - (mn - rn)), n - (mn - rn));
      MPN_INCR_U (xp + n - (mn - rn), mn + 1 - n + (mn - rn), cy);
      ASSERT (n + rn >=  mn);
      /* Subtract B^{rn+n} */
      MPN_DECR_U (xp + rn + n - mn, 2*mn + 1 - rn - n, 1);
      if (xp[mn])
	MPN_INCR_U (xp, mn, xp[mn] - 1);
      else
	MPN_DECR_U (xp, mn, 1);
      method = 0; /* Remember we are working mod B^m-1 */
    }

    if (xp[n] < 2) { /* "positive" residue class */
      cy = 1;
      while (xp[n] || mpn_cmp (xp, dp - n, n) > 0) {
	xp[n] -= mpn_sub_n (xp, xp, dp - n, n);
	cy ++;
      }
      MPN_DECR_U(ip - rn, rn, cy);
      ASSERT (cy <= 4); /* at most 3 iterations of the while loop above */
      ASSERT_NOCARRY (mpn_sub_n (xp, dp - n, xp, n));
      ASSERT (xp[n] == 0);
    } else { /* "negative" residue class */
      mpn_com (xp, xp, n + 1);
      MPN_INCR_U(xp, n + 1, method);
      ASSERT (xp[n] <= 1);
#if USE_MUL_N
      if (xp[n]) {
	MPN_INCR_U(ip - rn, rn, 1);
	ASSERT_CARRY (mpn_sub_n (xp, xp, dp - n, n));
      }
#endif
    }

    /* Compute x_j * u_j.  FIXME: We need {rp+rn,rn}, mulhi? */
#if USE_MUL_N
    mpn_mul_n (rp, xp + n - rn, ip - rn, rn);
#else
    rp[2*rn] = 0;
    mpn_mul (rp, xp + n - rn, rn + xp[n], ip - rn, rn);
#endif
    /* We need _only_ the carry from the next addition.  */
    /* Anyway 2rn-n <= 2... we don't need to optimise.  */
    cy = mpn_add_n (rp + rn, rp + rn, xp + n - rn, 2*rn - n);
    cy = mpn_add_nc (ip - n, rp + 3*rn - n, xp + rn, n - rn, cy);
    MPN_INCR_U (ip - rn, rn, cy + (1-USE_MUL_N)*(rp[2*rn] + xp[n]));
    if (sizp == sizes) { /* Get out of the cycle */
      /* Check for possible carry propagation from below. */
      cy = rp[3*rn - n - 1] > GMP_NUMB_MAX - 7; /* Be conservative. */
/*    cy = mpn_add_1 (rp + rn, rp + rn, 2*rn - n, 4); */
      break;
    }
    rn = n;
  }
  TMP_FREE;

  return cy;
#undef rp
}

mp_limb_t
mpn_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n, mp_ptr scratch)
{
  mp_limb_t res;
  TMP_DECL;

  TMP_MARK;

  if (scratch == NULL)
    scratch = TMP_ALLOC_LIMBS (mpn_invertappr_itch (n));

  ASSERT (n > 0);
  ASSERT (dp[n-1] & GMP_NUMB_HIGHBIT);
  ASSERT (! MPN_OVERLAP_P (ip, n, dp, n));
  ASSERT (! MPN_OVERLAP_P (ip, n, scratch, mpn_invertappr_itch(n)));
  ASSERT (! MPN_OVERLAP_P (dp, n, scratch, mpn_invertappr_itch(n)));

  if (BELOW_THRESHOLD (n, INV_NEWTON_THRESHOLD))
    res = mpn_bc_invertappr (ip, dp, n, scratch);
  else
    res = mpn_ni_invertappr (ip, dp, n, scratch);

  TMP_FREE;
  return res;
}

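/* A minimal usage sketch (illustrative only, not part of the library): a
   caller holding a strictly normalised divisor {dp,n} would typically
   allocate the scratch itself and call

     mp_ptr ip = TMP_ALLOC_LIMBS (n);
     mp_ptr scratch = TMP_ALLOC_LIMBS (mpn_invertappr_itch (n));
     mp_limb_t e = mpn_invertappr (ip, dp, n, scratch);

   then use B^n + {ip,n} as an approximate reciprocal of {dp,n}, remembering
   that e == 1 means the result may be one less than the strict mpn_invert
   value.  */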