/* mpz_powm_ui(res,base,exp,mod) -- Set RES to (base**exp) mod MOD.

Copyright 1991, 1993, 1994, 1996, 1997, 2000, 2001, 2002, 2005 Free Software
Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

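/* Illustrative use of this function (a minimal sketch, assuming <gmp.h> is
   included and the program is linked with -lgmp):

     mpz_t r, b, m;
     mpz_inits (r, b, m, NULL);
     mpz_set_ui (b, 3);
     mpz_set_ui (m, 5);
     mpz_powm_ui (r, b, 4, m);    => r = 3^4 mod 5 = 81 mod 5 = 1
     mpz_clears (r, b, m, NULL);
*/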

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

/* Compute t = a mod m, a is defined by (ap,an), m is defined by (mp,mn), and
   t is defined by (tp,mn).  */
static void
reduce (mp_ptr tp, mp_srcptr ap, mp_size_t an, mp_srcptr mp, mp_size_t mn)
{
  mp_ptr qp;
  TMP_DECL;

  TMP_MARK;
  qp = TMP_ALLOC_LIMBS (an - mn + 1);

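  /* Only the remainder is of interest here; the quotient written to qp is
     discarded when the temporary allocation is released.  */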
  mpn_tdiv_qr (qp, tp, 0L, ap, an, mp, mn);

  TMP_FREE;
}

void
mpz_powm_ui (mpz_ptr r, mpz_srcptr b, unsigned long int el, mpz_srcptr m)
{
  mp_ptr xp, tp, qp, mp, bp;
  mp_size_t xn, tn, mn, bn;
  int m_zero_cnt;
  int c;
  mp_limb_t e;
  TMP_DECL;

  mp = PTR(m);
  mn = ABSIZ(m);
  if (mn == 0)
    DIVIDE_BY_ZERO;

  if (el == 0)
    {
      /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 depending on
	 whether MOD equals 1.  */
      SIZ(r) = (mn == 1 && mp[0] == 1) ? 0 : 1;
      PTR(r)[0] = 1;
      return;
    }

  TMP_MARK;

  /* Normalize m (i.e. make its most significant bit set) as required by
     the division functions below.  */
  count_leading_zeros (m_zero_cnt, mp[mn - 1]);
  m_zero_cnt -= GMP_NAIL_BITS;
  if (m_zero_cnt != 0)
    {
      mp_ptr new_mp = TMP_ALLOC_LIMBS (mn);
      mpn_lshift (new_mp, mp, mn, m_zero_cnt);
      mp = new_mp;
    }
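  /* When m had to be shifted, the result computed against this shifted copy
     is adjusted back at finishup below.  */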

  bn = ABSIZ(b);
  bp = PTR(b);
  if (bn > mn)
    {
      /* Reduce possibly huge base.  Use a function call to reduce, since we
	 don't want the quotient allocation to live until function return.  */
      mp_ptr new_bp = TMP_ALLOC_LIMBS (mn);
      reduce (new_bp, bp, bn, mp, mn);
      bp = new_bp;
      bn = mn;
      /* Canonicalize the base, since we are potentially going to multiply with
	 it quite a few times.  */
      MPN_NORMALIZE (bp, bn);
    }

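  /* A zero limb count here means b is zero or a multiple of m; since the
     exponent is at least 1 at this point, the result is 0.  */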
  if (bn == 0)
    {
      SIZ(r) = 0;
      TMP_FREE;
      return;
    }

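  /* Scratch space: tp holds double-length squares and products, xp the
     running result reduced below m, and qp the quotients from mpn_tdiv_qr,
     which are thrown away.  */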
  tp = TMP_ALLOC_LIMBS (2 * mn + 1);
  xp = TMP_ALLOC_LIMBS (mn);

  qp = TMP_ALLOC_LIMBS (mn + 1);

  MPN_COPY (xp, bp, bn);
  xn = bn;

  e = el;
  count_leading_zeros (c, e);
  e = (e << c) << 1;		/* shift the exp bits to the left, lose msb */
  c = GMP_LIMB_BITS - 1 - c;
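
  /* Left-to-right binary exponentiation: x already holds the base, which
     accounts for the exponent's most significant bit.  The c remaining bits
     sit left-justified in e and are consumed one per loop iteration.  */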

  /* Main loop. */

  /* If m is already normalized (high bit of high limb set), and b is the
     same size but a bigger value, and e==1, then no modular reductions are
     done and we can end up with a result out of range at the end. */
  if (c == 0)
    {
      if (xn == mn && mpn_cmp (xp, mp, mn) >= 0)
        mpn_sub_n (xp, xp, mp, mn);
      goto finishup;
    }

  while (c != 0)
    {
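      /* Square the running value; the square occupies 2*xn limbs, possibly
	 with a zero top limb, and is reduced mod m unless it already has
	 fewer limbs than m.  */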
      mpn_sqr (tp, xp, xn);
      tn = 2 * xn; tn -= tp[tn - 1] == 0;
      if (tn < mn)
	{
	  MPN_COPY (xp, tp, tn);
	  xn = tn;
	}
      else
	{
	  mpn_tdiv_qr (qp, xp, 0L, tp, tn, mp, mn);
	  xn = mn;
	}

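      /* The current exponent bit is the top bit of e; when it is set,
	 multiply in the (already reduced) base and reduce again.  */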
      if ((mp_limb_signed_t) e < 0)
	{
	  mpn_mul (tp, xp, xn, bp, bn);
	  tn = xn + bn; tn -= tp[tn - 1] == 0;
	  if (tn < mn)
	    {
	      MPN_COPY (xp, tp, tn);
	      xn = tn;
	    }
	  else
	    {
	      mpn_tdiv_qr (qp, xp, 0L, tp, tn, mp, mn);
	      xn = mn;
	    }
	}
      e <<= 1;
      c--;
    }

 finishup:
  /* We shifted m left m_zero_cnt steps.  Adjust the result by reducing
     it with the original MOD.  */
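  /* Since (x * 2^k) mod (m * 2^k) == (x mod m) * 2^k, shift x up by
     m_zero_cnt, reduce against the shifted m, then shift back down.  */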
  if (m_zero_cnt != 0)
    {
      mp_limb_t cy;
      cy = mpn_lshift (tp, xp, xn, m_zero_cnt);
      tp[xn] = cy; xn += cy != 0;

      if (xn < mn)
	{
	  MPN_COPY (xp, tp, xn);
	}
      else
	{
	  mpn_tdiv_qr (qp, xp, 0L, tp, xn, mp, mn);
	  xn = mn;
	}
      mpn_rshift (xp, xp, xn, m_zero_cnt);
    }
  MPN_NORMALIZE (xp, xn);

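  /* A negative base with an odd exponent makes the true result negative;
     the canonical non-negative residue is |m| minus the value computed so
     far, unless that value is already 0.  */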
  if ((el & 1) != 0 && SIZ(b) < 0 && xn != 0)
    {
      mp = PTR(m);			/* want original, unnormalized m */
      mpn_sub (xp, mp, mn, xp, xn);
      xn = mn;
      MPN_NORMALIZE (xp, xn);
    }
  MPZ_REALLOC (r, xn);
  SIZ (r) = xn;
  MPN_COPY (PTR(r), xp, xn);

  TMP_FREE;
}