/* mpn_divexact(qp,np,nn,dp,dn) -- Divide N = {np,nn} by D = {dp,dn} storing
   the result in Q = {qp,nn-dn+1} expecting no remainder.  Overlap allowed
   between Q and N; all other overlap disallowed.

   Contributed to the GNU project by Torbjorn Granlund.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GMP RELEASE.

Copyright 2006, 2007, 2009 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */


#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"
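
/* Illustrative use (a sketch, not a documented entry point; application code
   normally reaches this through mpz_divexact): with N = {np,nn}, D = {dp,dn},
   D dividing N exactly, and dp[dn-1] nonzero,

     mp_limb_t qp[nn - dn + 1];
     mpn_divexact (qp, np, nn, dp, dn);

   leaves the exact quotient N/D in {qp, nn-dn+1}.  Q may overlap N, but no
   other operand overlap is allowed.  */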

#if 1
void
mpn_divexact (mp_ptr qp,
              mp_srcptr np, mp_size_t nn,
              mp_srcptr dp, mp_size_t dn)
{
  unsigned shift;
  mp_size_t qn;
  mp_ptr tp, wp;
  TMP_DECL;

  ASSERT (dn > 0);
  ASSERT (nn >= dn);
  ASSERT (dp[dn-1] > 0);

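  /* D divides N exactly, so any low zero limbs of D must be matched by low
     zero limbs of N; strip the common limbs.  The quotient is unchanged.  */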
  while (dp[0] == 0)
    {
      ASSERT (np[0] == 0);
      dp++;
      np++;
      dn--;
      nn--;
    }

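  /* A one-limb divisor is handled by the divide-by-limb code;
     MPN_DIVREM_OR_DIVEXACT_1 chooses between mpn_divrem_1 and mpn_divexact_1
     according to tuning.  */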
  if (dn == 1)
    {
      MPN_DIVREM_OR_DIVEXACT_1 (qp, np, nn, dp[0]);
      return;
    }

  TMP_MARK;

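  /* The quotient has qn = nn - dn + 1 limbs (the high limb may be zero).
     Since the division is exact, the quotient is determined by the low qn
     limbs of N and D alone (Q = N * D^{-1} mod B^qn once the common power of
     2 has been removed), which the 2-adic division below exploits.  */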
  qn = nn + 1 - dn;
  count_trailing_zeros (shift, dp[0]);

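  /* The bdiv (2-adic) division needs an odd divisor.  Shift the common factor
     2^shift out of both D and N; exactness guarantees that N has at least as
     many trailing zero bits as D.  */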
  if (shift > 0)
    {
      mp_size_t ss = (dn > qn) ? qn + 1 : dn;

      tp = TMP_ALLOC_LIMBS (ss);
      mpn_rshift (tp, dp, ss, shift);
      dp = tp;

      /* Since we have excluded dn == 1, we have nn > qn, and we need
         to shift one limb beyond qn.  */
      wp = TMP_ALLOC_LIMBS (qn + 1);
      mpn_rshift (wp, np, qn + 1, shift);
    }
  else
    {
      wp = TMP_ALLOC_LIMBS (qn);
      MPN_COPY (wp, np, qn);
    }

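  /* Only the low qn limbs of the divisor can affect the low qn quotient
     limbs, so a divisor longer than the quotient may be truncated.  */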
  if (dn > qn)
    dn = qn;

  tp = TMP_ALLOC_LIMBS (mpn_bdiv_q_itch (qn, dn));
  mpn_bdiv_q (qp, wp, qn, dp, dn, tp);
  TMP_FREE;
}

#else

/* We use Jebelean's bidirectional exact division algorithm.  This is
   somewhat naively implemented, with equal quotient parts done by 2-adic
   division and truncating division.  Since 2-adic division is faster, it
   should be used for a larger chunk.

   This code is horrendously ugly, in all sorts of ways.

   * It was hacked without much care or thought, but with a testing program.
   * It handles scratch space frivolously, and furthermore the itch function
     is broken.
   * It doesn't provide any measures to deal with mu_divappr_q's +3 error.  We
     have yet to provoke an error due to this, though.
   * Algorithm selection leaves a lot to be desired.  In particular, the
     crossover between DC and MU isn't a single point, but we treat it as if
     it were.
   * It makes the msb part 1 or 2 limbs larger than the lsb part, even though
     the latter is faster.  We should at least reverse this, but perhaps we
     should make the lsb part considerably larger.  (How do we tune this?)
*/
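
/* In outline, as implemented below: the quotient Q = N/D is split into a low
   part of qn0 limbs and a high part.  The low part is computed exactly as
   N * D^{-1} mod B^qn0 by a 2-adic (bdiv) division of the low limbs of N; the
   high part is computed by an approximate truncating (divappr) division of
   the high limbs.  The two parts share one limb, qp[qn0 - 1], which is used
   at the end to correct the approximate high part.  */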

mp_size_t
mpn_divexact_itch (mp_size_t nn, mp_size_t dn)
{
  return nn + dn;		/* FIXME this is not right */
}

void
mpn_divexact (mp_ptr qp,
              mp_srcptr np, mp_size_t nn,
              mp_srcptr dp, mp_size_t dn,
              mp_ptr scratch)
{
  mp_size_t qn;
  mp_size_t nn0, qn0;
  mp_size_t nn1, qn1;
  mp_ptr tp;
  mp_limb_t qml;
  mp_limb_t qh;
  int cnt;
  mp_ptr xdp;
  mp_limb_t di;
  mp_limb_t cy;
  gmp_pi1_t dinv;
  TMP_DECL;

  TMP_MARK;

  qn = nn - dn + 1;

  /* For small divisors and small quotients, don't use Jebelean's algorithm.  */
  if (dn < DIVEXACT_JEB_THRESHOLD || qn < DIVEXACT_JEB_THRESHOLD)
    {
      tp = scratch;
      MPN_COPY (tp, np, qn);
      binvert_limb (di, dp[0]);  di = -di;
      dn = MIN (dn, qn);
      mpn_sbpi1_bdiv_q (qp, tp, qn, dp, dn, di);
      TMP_FREE;
      return;
    }

  qn0 = ((nn - dn) >> 1) + 1;	/* low quotient size */

  /* If the quotient is much larger than the divisor, the bidirectional
     algorithm does not work as currently implemented.  Fall back to plain
     bdiv.  */
  if (qn0 > dn)
    {
      if (BELOW_THRESHOLD (dn, DC_BDIV_Q_THRESHOLD))
        {
          tp = scratch;
          MPN_COPY (tp, np, qn);
          binvert_limb (di, dp[0]);  di = -di;
          dn = MIN (dn, qn);
          mpn_sbpi1_bdiv_q (qp, tp, qn, dp, dn, di);
        }
      else if (BELOW_THRESHOLD (dn, MU_BDIV_Q_THRESHOLD))
        {
          tp = scratch;
          MPN_COPY (tp, np, qn);
          binvert_limb (di, dp[0]);  di = -di;
          mpn_dcpi1_bdiv_q (qp, tp, qn, dp, dn, di);
        }
      else
        {
          mpn_mu_bdiv_q (qp, np, qn, dp, dn, scratch);
        }
      TMP_FREE;
      return;
    }

  nn0 = qn0 + qn0;

  nn1 = nn0 - 1 + ((nn-dn) & 1);
  qn1 = qn0;
  if (LIKELY (qn0 != dn))
    {
      nn1 = nn1 + 1;
      qn1 = qn1 + 1;
      if (UNLIKELY (dp[dn - 1] == 1 && qn1 != dn))
        {
          /* If the leading divisor limb == 1, i.e. has just one bit, we have
             to include an extra limb in order to get the needed overlap.  */
          /* FIXME: Now with the mu_divappr_q function, we should really need
             more overlap. That indicates one of two things: (1) The test code
             is not good. (2) We actually overlap too much by default.  */
          nn1 = nn1 + 1;
          qn1 = qn1 + 1;
        }
    }

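  /* nn1 and qn1 are the dividend and quotient sizes for the most significant
     (truncating) part; qn0 is the quotient size of the least significant
     (2-adic) part.  tp must hold the shifted high part of N plus one limb for
     a possible carry out of the normalizing left shift.  */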
  tp = TMP_ALLOC_LIMBS (nn1 + 1);

  count_leading_zeros (cnt, dp[dn - 1]);

  /* Normalize divisor, store into tmp area.  */
  if (cnt != 0)
    {
      xdp = TMP_ALLOC_LIMBS (qn1);
      mpn_lshift (xdp, dp + dn - qn1, qn1, cnt);
    }
  else
    {
      xdp = (mp_ptr) dp + dn - qn1;
    }

  /* Shift dividend according to the divisor normalization.  */
  /* FIXME: We compute too much here for XX_divappr_q, but these functions'
     interfaces want a pointer to the imaginary least significant limb, not
     to the least significant *used* limb.  Of course, we could leave nn1-qn1
     rubbish limbs in the low part, to save some time.  */
  if (cnt != 0)
    {
      cy = mpn_lshift (tp, np + nn - nn1, nn1, cnt);
      if (cy != 0)
        {
          tp[nn1] = cy;
          nn1++;
        }
    }
  else
    {
      /* FIXME: This copy is not needed for mpn_mu_divappr_q, except when the
         mpn_sub_n right before it is executed.  */
      MPN_COPY (tp, np + nn - nn1, nn1);
    }

  invert_pi1 (dinv, xdp[qn1 - 1], xdp[qn1 - 2]);
  if (BELOW_THRESHOLD (qn1, DC_DIVAPPR_Q_THRESHOLD))
    {
      qp[qn0 - 1 + nn1 - qn1] = mpn_sbpi1_divappr_q (qp + qn0 - 1, tp, nn1, xdp, qn1, dinv.inv32);
    }
  else if (BELOW_THRESHOLD (qn1, MU_DIVAPPR_Q_THRESHOLD))
    {
      qp[qn0 - 1 + nn1 - qn1] = mpn_dcpi1_divappr_q (qp + qn0 - 1, tp, nn1, xdp, qn1, &dinv);
    }
  else
    {
      /* FIXME: mpn_mu_divappr_q doesn't handle qh != 0.  Work around it with a
         conditional subtraction here.  */
      qh = mpn_cmp (tp + nn1 - qn1, xdp, qn1) >= 0;
      if (qh)
        mpn_sub_n (tp + nn1 - qn1, tp + nn1 - qn1, xdp, qn1);
      mpn_mu_divappr_q (qp + qn0 - 1, tp, nn1, xdp, qn1, scratch);
      qp[qn0 - 1 + nn1 - qn1] = qh;
    }
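  /* Save the overlap limb qp[qn0 - 1] produced by the approximate division;
     the exact 2-adic division of the low part below overwrites it.  */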
  qml = qp[qn0 - 1];

  binvert_limb (di, dp[0]);  di = -di;

  if (BELOW_THRESHOLD (qn0, DC_BDIV_Q_THRESHOLD))
    {
      MPN_COPY (tp, np, qn0);
      mpn_sbpi1_bdiv_q (qp, tp, qn0, dp, qn0, di);
    }
  else if (BELOW_THRESHOLD (qn0, MU_BDIV_Q_THRESHOLD))
    {
      MPN_COPY (tp, np, qn0);
      mpn_dcpi1_bdiv_q (qp, tp, qn0, dp, qn0, di);
    }
  else
    {
      mpn_mu_bdiv_q (qp, np, qn0, dp, qn0, scratch);
    }

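  /* The approximate division may return a result that is slightly too large.
     If its overlap limb wrapped around past the exact limb just computed, a
     carry went into the higher quotient limbs; undo it here.  */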
  if (qml < qp[qn0 - 1])
    mpn_decr_u (qp + qn0, 1);

  TMP_FREE;
}
#endif