/* mpz_powm(res,base,exp,mod) -- Set R to (U^E) mod M.

   Contributed to the GNU project by Torbjorn Granlund.

Copyright 1991, 1993, 1994, 1996, 1997, 2000-2002, 2005, 2008, 2009,
2011, 2012, 2015, 2019 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */
33
34
35 #include "gmp-impl.h"
36 #include "longlong.h"
37
38
/* TODO

   * Improve handling of buffers.  It is pretty ugly now.

   * For even moduli, we compute a binvert of its odd part both here and in
     mpn_powm.  How can we avoid this recomputation?
*/
46
/*
  b ^ e mod m   res
  0   0     0    ?
  0   e     0    ?
  0   0     m    ?
  0   e     m    0
  b   0     0    ?
  b   e     0    ?
  b   0     m    1 mod m
  b   e     m    b^e mod m
*/
58
59 #define HANDLE_NEGATIVE_EXPONENT 1
60
61 void
mpz_powm(mpz_ptr r,mpz_srcptr b,mpz_srcptr e,mpz_srcptr m)62 mpz_powm (mpz_ptr r, mpz_srcptr b, mpz_srcptr e, mpz_srcptr m)
63 {
64 mp_size_t n, nodd, ncnt;
65 int cnt;
66 mp_ptr rp, tp;
67 mp_srcptr bp, ep, mp;
68 mp_size_t rn, bn, es, en, itch;
69 mpz_t new_b; /* note: value lives long via 'b' */
70 TMP_DECL;
71
72 n = ABSIZ(m);
73 if (UNLIKELY (n == 0))
74 DIVIDE_BY_ZERO;
75
76 mp = PTR(m);
77
78 TMP_MARK;
79
80 es = SIZ(e);
81 if (UNLIKELY (es <= 0))
82 {
83 if (es == 0)
84 {
85 /* b^0 mod m, b is anything and m is non-zero.
86 Result is 1 mod m, i.e., 1 or 0 depending on if m = 1. */
87 SIZ(r) = n != 1 || mp[0] != 1;
88 MPZ_NEWALLOC (r, 1)[0] = 1;
89 TMP_FREE; /* we haven't really allocated anything here */
90 return;
91 }
92 #if HANDLE_NEGATIVE_EXPONENT
93 MPZ_TMP_INIT (new_b, n + 1);
94
95 if (UNLIKELY (! mpz_invert (new_b, b, m)))
96 DIVIDE_BY_ZERO;
97 b = new_b;
98 es = -es;
99 #else
100 DIVIDE_BY_ZERO;
101 #endif
102 }
103 en = es;
104
105 bn = ABSIZ(b);
106
107 if (UNLIKELY (bn == 0))
108 {
109 SIZ(r) = 0;
110 TMP_FREE;
111 return;
112 }
113
114 ep = PTR(e);
115
116 /* Handle (b^1 mod m) early, since mpn_pow* do not handle that case. */
117 if (UNLIKELY (en == 1 && ep[0] == 1))
118 {
119 rp = TMP_ALLOC_LIMBS (n);
120 bp = PTR(b);
121 if (bn >= n)
122 {
123 mp_ptr qp = TMP_ALLOC_LIMBS (bn - n + 1);
124 mpn_tdiv_qr (qp, rp, 0L, bp, bn, mp, n);
125 rn = n;
126 MPN_NORMALIZE (rp, rn);
127
128 if (rn != 0 && SIZ(b) < 0)
129 {
130 mpn_sub (rp, mp, n, rp, rn);
131 rn = n;
132 MPN_NORMALIZE_NOT_ZERO (rp, rn);
133 }
134 }
135 else
136 {
137 if (SIZ(b) < 0)
138 {
139 mpn_sub (rp, mp, n, bp, bn);
140 rn = n;
141 MPN_NORMALIZE_NOT_ZERO (rp, rn);
142 }
143 else
144 {
145 MPN_COPY (rp, bp, bn);
146 rn = bn;
147 }
148 }
149 goto ret;
150 }
151
152 /* Remove low zero limbs from M. This loop will terminate for correctly
153 represented mpz numbers. */
154 ncnt = 0;
155 while (UNLIKELY (mp[0] == 0))
156 {
157 mp++;
158 ncnt++;
159 }
160 nodd = n - ncnt;
161 cnt = 0;
162 if (mp[0] % 2 == 0)
163 {
164 mp_ptr newmp = TMP_ALLOC_LIMBS (nodd);
165 count_trailing_zeros (cnt, mp[0]);
166 mpn_rshift (newmp, mp, nodd, cnt);
167 nodd -= newmp[nodd - 1] == 0;
168 mp = newmp;
169 ncnt++;
170 }
171
172 if (ncnt != 0)
173 {
174 /* We will call both mpn_powm and mpn_powlo. */
175 /* rp needs n, mpn_powlo needs 4n, the 2 mpn_binvert might need more */
176 mp_size_t n_largest_binvert = MAX (ncnt, nodd);
177 mp_size_t itch_binvert = mpn_binvert_itch (n_largest_binvert);
178 itch = 3 * n + MAX (itch_binvert, 2 * n);
179 }
180 else
181 {
182 /* We will call just mpn_powm. */
183 mp_size_t itch_binvert = mpn_binvert_itch (nodd);
184 itch = n + MAX (itch_binvert, 2 * n);
185 }
186 tp = TMP_ALLOC_LIMBS (itch);
187
188 rp = tp; tp += n;
189
190 bp = PTR(b);
191 mpn_powm (rp, bp, bn, ep, en, mp, nodd, tp);
192
193 rn = n;
194
195 if (ncnt != 0)
196 {
197 mp_ptr r2, xp, yp, odd_inv_2exp;
198 unsigned long t;
199 int bcnt;
200
201 if (bn < ncnt)
202 {
203 mp_ptr newbp = TMP_ALLOC_LIMBS (ncnt);
204 MPN_COPY (newbp, bp, bn);
205 MPN_ZERO (newbp + bn, ncnt - bn);
206 bp = newbp;
207 }
208
209 r2 = tp;
210
211 if (bp[0] % 2 == 0)
212 {
213 if (en > 1)
214 {
215 MPN_ZERO (r2, ncnt);
216 goto zero;
217 }
218
219 ASSERT (en == 1);
220 t = (ncnt - (cnt != 0)) * GMP_NUMB_BITS + cnt;
221
222 /* Count number of low zero bits in B, up to 3. */
223 bcnt = (0x1213 >> ((bp[0] & 7) << 1)) & 0x3;
224 /* Note that ep[0] * bcnt might overflow, but that just results
225 in a missed optimization. */
226 if (ep[0] * bcnt >= t)
227 {
228 MPN_ZERO (r2, ncnt);
229 goto zero;
230 }
231 }
232
233 mpn_powlo (r2, bp, ep, en, ncnt, tp + ncnt);
234
235 zero:
236 if (nodd < ncnt)
237 {
238 mp_ptr newmp = TMP_ALLOC_LIMBS (ncnt);
239 MPN_COPY (newmp, mp, nodd);
240 MPN_ZERO (newmp + nodd, ncnt - nodd);
241 mp = newmp;
242 }
243
244 odd_inv_2exp = tp + n;
245 mpn_binvert (odd_inv_2exp, mp, ncnt, tp + 2 * n);
246
247 mpn_sub (r2, r2, ncnt, rp, nodd > ncnt ? ncnt : nodd);
248
249 xp = tp + 2 * n;
250 mpn_mullo_n (xp, odd_inv_2exp, r2, ncnt);
251
252 if (cnt != 0)
253 xp[ncnt - 1] &= (CNST_LIMB(1) << cnt) - 1;
254
255 yp = tp;
256 if (ncnt > nodd)
257 mpn_mul (yp, xp, ncnt, mp, nodd);
258 else
259 mpn_mul (yp, mp, nodd, xp, ncnt);
260
261 mpn_add (rp, yp, n, rp, nodd);
262
263 ASSERT (nodd + ncnt >= n);
264 ASSERT (nodd + ncnt <= n + 1);
265 }
266
267 MPN_NORMALIZE (rp, rn);
268
269 if ((ep[0] & 1) && SIZ(b) < 0 && rn != 0)
270 {
271 mpn_sub (rp, PTR(m), n, rp, rn);
272 rn = n;
273 MPN_NORMALIZE (rp, rn);
274 }
275
276 ret:
277 MPZ_NEWALLOC (r, rn);
278 SIZ(r) = rn;
279 MPN_COPY (PTR(r), rp, rn);
280
281 TMP_FREE;
282 }
283