/* mpn_mod_1(dividend_ptr, dividend_size, divisor_limb) --
   Divide (DIVIDEND_PTR, DIVIDEND_SIZE) by DIVISOR_LIMB.
   Return the single-limb remainder.
   There are no constraints on the value of the divisor.

Copyright 1991, 1993, 1994, 1999, 2000, 2002 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */

#include "mpir.h"
#include "gmp-impl.h"
#include "longlong.h"

/* The size at which udiv_qrnnd_preinv starts being used rather than
   udiv_qrnnd.  "Size" here means the quotient size, i.e. how many udiv
   divisions will be done.

   The default is to use preinv always; CPUs where this doesn't suit have
   tuned thresholds.  Note in particular that preinv should certainly be
   used if it's the only division available (USE_PREINV_ALWAYS).  */

#ifndef MOD_1_NORM_THRESHOLD
#define MOD_1_NORM_THRESHOLD  0
#endif
#ifndef MOD_1_UNNORM_THRESHOLD
#define MOD_1_UNNORM_THRESHOLD  0
#endif
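
/* With these defaults of 0, BELOW_THRESHOLD (un, 0) is never true, so the
   preinv code paths below are taken for every size.  A CPU-specific
   gmp-mparam.h may override the defaults; for instance (hypothetical value)

       #define MOD_1_NORM_THRESHOLD  4

   would make normalized divisors with fewer than 4 remaining limbs use
   plain udiv_qrnnd instead.  */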


/* The comments in mpn/generic/divrem_1.c apply here too.

   As noted in the algorithms section of the manual, the shifts in the loop
   for the unnorm case can be avoided by calculating r = a%(d*2^n), followed
   by a final (r*2^n)%(d*2^n).  In fact, if it happens that a%(d*2^n) can
   skip a division where (a*2^n)%(d*2^n) cannot, there is still the same
   number of divide steps overall, though how often that happens depends on
   the assumed distributions of dividend and divisor.  In any case this idea
   is left to CPU-specific implementations to consider.  */
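
/* A small worked instance of that identity (added for illustration): with
   d = 3 and n = 6, d*2^n = 192.  For a = 200, r = a % 192 = 8, then
   (r*2^n) % (d*2^n) = 512 % 192 = 128, and 128 / 2^6 = 2, which is indeed
   200 % 3.  */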

mp_limb_t
mpn_mod_1 (mp_srcptr up, mp_size_t un, mp_limb_t d)
{
  mp_size_t  i;
  mp_limb_t  n1, n0, r;
  mp_limb_t  dummy;

  ASSERT (un >= 0);
  ASSERT (d != 0);

  /* Botch: Should this be handled at all?  Rely on callers?
     But note un==0 is currently required by mpz/fdiv_r_ui.c and possibly
     other places.  */
  if (un == 0)
    return 0;

#if HAVE_NATIVE_mpn_divrem_euclidean_r_1
  return mpn_divrem_euclidean_r_1 (up, un, d);
#endif

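  /* Position the divisor above any nail bits so that the high-bit test and
     count_leading_zeros below see it at the top of the limb (a no-op when
     there are no nails).  */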
  d <<= GMP_NAIL_BITS;

  if ((d & GMP_LIMB_HIGHBIT) != 0)
    {
      /* High limb is initial remainder, possibly with one subtract of
         d to get r<d.  */
      r = up[un - 1] << GMP_NAIL_BITS;
      if (r >= d)
        r -= d;
      r >>= GMP_NAIL_BITS;
      un--;
      if (un == 0)
        return r;

      if (BELOW_THRESHOLD (un, MOD_1_NORM_THRESHOLD))
        {
        plain:
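          /* Each pass divides the two-limb value r*B + n0, where B is
             2^GMP_LIMB_BITS, by d and keeps just the remainder in r; the
             quotient is discarded into "dummy".  */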
          for (i = un - 1; i >= 0; i--)
            {
              n0 = up[i] << GMP_NAIL_BITS;
              udiv_qrnnd (dummy, r, r, n0, d);
              r >>= GMP_NAIL_BITS;
            }
          return r;
        }
      else
        {
          mp_limb_t  inv;
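          /* Precompute an approximate reciprocal of d so udiv_qrnnd_preinv
             can replace each hardware divide in the loop with
             multiplications.  */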
          invert_limb (inv, d);
          for (i = un - 1; i >= 0; i--)
            {
              n0 = up[i] << GMP_NAIL_BITS;
              udiv_qrnnd_preinv (dummy, r, r, n0, d, inv);
              r >>= GMP_NAIL_BITS;
            }
          return r;
        }
    }
  else
    {
      int norm;

      /* Skip a division if high < divisor.  Having the test here before
         normalizing will still skip as often as possible.  */
      r = up[un - 1] << GMP_NAIL_BITS;
      if (r < d)
        {
          r >>= GMP_NAIL_BITS;
          un--;
          if (un == 0)
            return r;
        }
      else
        r = 0;

      /* If udiv_qrnnd doesn't need a normalized divisor, can use the simple
         code above. */
      if (! UDIV_NEEDS_NORMALIZATION
          && BELOW_THRESHOLD (un, MOD_1_UNNORM_THRESHOLD))
        goto plain;

      count_leading_zeros (norm, d);
      d <<= norm;

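      /* d now has its high bit set.  The dividend limbs are shifted up by
         the same amount on the fly in the loops below, and the remainder is
         shifted back down by norm before returning.  */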
      n1 = up[un - 1] << GMP_NAIL_BITS;
      r = (r << norm) | (n1 >> (GMP_LIMB_BITS - norm));

      if (UDIV_NEEDS_NORMALIZATION
          && BELOW_THRESHOLD (un, MOD_1_UNNORM_THRESHOLD))
        {
          for (i = un - 2; i >= 0; i--)
            {
              n0 = up[i] << GMP_NAIL_BITS;
              udiv_qrnnd (dummy, r, r,
                          (n1 << norm) | (n0 >> (GMP_NUMB_BITS - norm)),
                          d);
              r >>= GMP_NAIL_BITS;
              n1 = n0;
            }
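          /* One more division folds in the low bits of the least
             significant limb, which the merged words in the loop above
             have not yet covered.  */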
          udiv_qrnnd (dummy, r, r, n1 << norm, d);
          r >>= GMP_NAIL_BITS;
          return r >> norm;
        }
      else
        {
          mp_limb_t inv;
          invert_limb (inv, d);

          for (i = un - 2; i >= 0; i--)
            {
              n0 = up[i] << GMP_NAIL_BITS;
              udiv_qrnnd_preinv (dummy, r, r,
                                 (n1 << norm) | (n0 >> (GMP_NUMB_BITS - norm)),
                                 d, inv);
              r >>= GMP_NAIL_BITS;
              n1 = n0;
            }
          udiv_qrnnd_preinv (dummy, r, r, n1 << norm, d, inv);
          r >>= GMP_NAIL_BITS;
          return r >> norm;
        }
    }
}