1 /* rsa-sign-tr.c
2
3 Creating RSA signatures, with some additional checks.
4
5 Copyright (C) 2001, 2015 Niels Möller
6 Copyright (C) 2012 Nikos Mavrogiannopoulos
7 Copyright (C) 2018 Red Hat Inc.
8
9 This file is part of GNU Nettle.
10
11 GNU Nettle is free software: you can redistribute it and/or
12 modify it under the terms of either:
13
14 * the GNU Lesser General Public License as published by the Free
15 Software Foundation; either version 3 of the License, or (at your
16 option) any later version.
17
18 or
19
20 * the GNU General Public License as published by the Free
21 Software Foundation; either version 2 of the License, or (at your
22 option) any later version.
23
24 or both in parallel, as here.
25
26 GNU Nettle is distributed in the hope that it will be useful,
27 but WITHOUT ANY WARRANTY; without even the implied warranty of
28 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
29 General Public License for more details.
30
31 You should have received copies of the GNU General Public License and
32 the GNU Lesser General Public License along with this program. If
33 not, see http://www.gnu.org/licenses/.
34 */
35
36 #if HAVE_CONFIG_H
37 # include "config.h"
38 #endif
39
40 #include <assert.h>
41
42 #include "gmp-glue.h"
43 #include "rsa.h"
44 #include "rsa-internal.h"
45
/* Evaluates both arguments more than once; use only with
   side-effect-free operands (all uses below pass plain variables). */
#define MAX(a, b) ((a) > (b) ? (a) : (b))
47
48 #if NETTLE_USE_MINI_GMP
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
   returns the inverse (ri), for use by rsa_unblind. Blinding
   decorrelates the private-key computation from the input message,
   as a countermeasure against timing/side-channel attacks. */
static void
rsa_blind (const struct rsa_public_key *pub,
	   void *random_ctx, nettle_random_func *random,
	   mpz_t c, mpz_t ri, const mpz_t m)
{
  mpz_t r;

  mpz_init(r);

  /* c = m*(r^e)
   * ri = r^(-1)
   */
  do
    {
      /* Draw a random r below n; retry until it is invertible mod n,
	 i.e. gcd(r, n) == 1 (mpz_invert returns 0 otherwise). */
      nettle_mpz_random(r, random_ctx, random, pub->n);
      /* invert r */
    }
  while (!mpz_invert (ri, r, pub->n));

  /* c = c*(r^e) mod n */
  mpz_powm_sec(r, r, pub->e, pub->n);
  mpz_mul(c, m, r);
  mpz_fdiv_r(c, c, pub->n);

  mpz_clear(r);
}
77
/* m = c ri mod n. Removes the blinding factor introduced by
   rsa_blind; ri is the inverse of the r used there. */
static void
rsa_unblind (const struct rsa_public_key *pub,
	     mpz_t m, const mpz_t ri, const mpz_t c)
{
  mpz_mul(m, c, ri);
  mpz_fdiv_r(m, m, pub->n);
}
86
/* Checks for any errors done in the RSA computation. That avoids
 * attacks which rely on faults on hardware, or even software MPI
 * implementation.
 *
 * Computes x, the e:th root of m mod n (the private-key operation),
 * with blinding, and verifies the result under the public key before
 * releasing it. Returns 1 on success, 0 on failure (invalid key, or
 * a miscomputation was detected); on failure x is left unchanged. */
int
rsa_compute_root_tr(const struct rsa_public_key *pub,
		    const struct rsa_private_key *key,
		    void *random_ctx, nettle_random_func *random,
		    mpz_t x, const mpz_t m)
{
  int res;
  mpz_t t, mb, xb, ri;

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    return 0;

  mpz_init (mb);
  mpz_init (xb);
  mpz_init (ri);
  mpz_init (t);

  /* Blind the input, take the private root of the blinded value. */
  rsa_blind (pub, random_ctx, random, mb, ri, m);

  rsa_compute_root (key, xb, mb);

  /* Fault check: xb^e (mod n) must reproduce the blinded input. */
  mpz_powm_sec(t, xb, pub->e, pub->n);
  res = (mpz_cmp(mb, t) == 0);

  /* Only unblind (and thus expose) the result if the check passed. */
  if (res)
    rsa_unblind (pub, x, ri, xb);

  mpz_clear (mb);
  mpz_clear (xb);
  mpz_clear (ri);
  mpz_clear (t);

  return res;
}
129
130 int
_rsa_sec_compute_root_tr(const struct rsa_public_key * pub,const struct rsa_private_key * key,void * random_ctx,nettle_random_func * random,mp_limb_t * x,const mp_limb_t * m,size_t mn)131 _rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
132 const struct rsa_private_key *key,
133 void *random_ctx, nettle_random_func *random,
134 mp_limb_t *x, const mp_limb_t *m, size_t mn)
135 {
136 mpz_t mz;
137 mpz_t xz;
138 int res;
139
140 mpz_init(mz);
141 mpz_init(xz);
142
143 mpn_copyi(mpz_limbs_write(mz, mn), m, mn);
144 mpz_limbs_finish(mz, mn);
145
146 res = rsa_compute_root_tr(pub, key, random_ctx, random, xz, mz);
147
148 if (res)
149 mpz_limbs_copy(x, xz, mpz_size(pub->n));
150
151 mpz_clear(mz);
152 mpz_clear(xz);
153 return res;
154 }
155 #else
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
   returns the inverse (ri), for use by rsa_unblind. Side-channel
   silent: uses only the mpn_sec_* interface, whose running time and
   memory access pattern depend only on operand sizes, not values. */
static void
rsa_sec_blind (const struct rsa_public_key *pub,
	       void *random_ctx, nettle_random_func *random,
	       mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m,
	       mp_size_t mn)
{
  const mp_limb_t *ep = mpz_limbs_read (pub->e);
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  mp_bitcnt_t ebn = mpz_sizeinbase (pub->e, 2);
  mp_size_t nn = mpz_size (pub->n);
  size_t itch;
  size_t i2;
  mp_limb_t *scratch;
  TMP_GMP_DECL (tp, mp_limb_t);
  TMP_GMP_DECL (rp, mp_limb_t);
  TMP_GMP_DECL (r, uint8_t);

  TMP_GMP_ALLOC (rp, nn);
  /* r is the raw random byte buffer, rp its limb interpretation. */
  TMP_GMP_ALLOC (r, nn * sizeof(mp_limb_t));

  /* c = m*(r^e) mod n */
  /* Size one shared scratch area as the maximum required by any of
     the mpn_sec_* calls below. */
  itch = mpn_sec_powm_itch(nn, ebn, nn);
  i2 = mpn_sec_mul_itch(nn, mn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_div_r_itch(nn + mn, nn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_invert_itch(nn);
  itch = MAX(itch, i2);

  /* tp holds the nn+mn limb product/remainder area, followed by the
     scratch space. */
  TMP_GMP_ALLOC (tp, nn + mn + itch);
  scratch = tp + nn + mn;

  /* ri = r^(-1) */
  do
    {
      /* Draw fresh random bytes; retry until r is invertible mod n.
	 mpn_sec_invert destroys its input, hence the copy into tp. */
      random(random_ctx, nn * sizeof(mp_limb_t), (uint8_t *)r);
      mpn_set_base256(rp, nn, r, nn * sizeof(mp_limb_t));
      mpn_copyi(tp, rp, nn);
      /* invert r */
    }
  while (!mpn_sec_invert (ri, tp, np, nn, 2 * nn * GMP_NUMB_BITS, scratch));

  mpn_sec_powm (c, rp, nn, ep, ebn, np, nn, scratch);
  /* normally mn == nn, but m can be smaller in some cases */
  mpn_sec_mul (tp, c, nn, m, mn, scratch);
  mpn_sec_div_r (tp, nn + mn, np, nn, scratch);
  mpn_copyi(c, tp, nn);

  TMP_GMP_FREE (r);
  TMP_GMP_FREE (rp);
  TMP_GMP_FREE (tp);
}
210
/* m = c ri mod n. Removes the blinding factor; ri is the inverse
   produced by rsa_sec_blind. Side-channel silent, using only
   mpn_sec_* primitives. x must hold mpz_size(pub->n) limbs. */
static void
rsa_sec_unblind (const struct rsa_public_key *pub,
		 mp_limb_t *x, mp_limb_t *ri, const mp_limb_t *c)
{
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  mp_size_t nn = mpz_size (pub->n);

  size_t itch;
  size_t i2;
  mp_limb_t *scratch;
  TMP_GMP_DECL(tp, mp_limb_t);

  /* One scratch area, sized for the larger of the two calls. */
  itch = mpn_sec_mul_itch(nn, nn);
  i2 = mpn_sec_div_r_itch(nn + nn, nn);
  itch = MAX(itch, i2);

  TMP_GMP_ALLOC (tp, nn + nn + itch);
  scratch = tp + nn + nn;

  /* tp = c * ri, then reduce mod n in place; copy low nn limbs out. */
  mpn_sec_mul (tp, c, nn, ri, nn, scratch);
  mpn_sec_div_r (tp, nn + nn, np, nn, scratch);
  mpn_copyi(x, tp, nn);

  TMP_GMP_FREE (tp);
}
237
238 static int
sec_equal(const mp_limb_t * a,const mp_limb_t * b,size_t limbs)239 sec_equal(const mp_limb_t *a, const mp_limb_t *b, size_t limbs)
240 {
241 volatile mp_limb_t z = 0;
242 size_t i;
243
244 for (i = 0; i < limbs; i++)
245 {
246 z |= (a[i] ^ b[i]);
247 }
248
249 /* FIXME: Might compile to a branch instruction on some platforms. */
250 return z == 0;
251 }
252
/* Verifies a computed RSA root against the public key: returns 1 if
   x^e = m (mod n), 0 otherwise. Used as a fault check to detect
   hardware faults or software bugs in the private-key computation.
   Side-channel silent (mpn_sec_powm plus constant-time compare). */
static int
rsa_sec_check_root(const struct rsa_public_key *pub,
		   const mp_limb_t *x, const mp_limb_t *m)
{
  mp_size_t nn = mpz_size (pub->n);
  mp_size_t ebn = mpz_sizeinbase (pub->e, 2);
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  const mp_limb_t *ep = mpz_limbs_read (pub->e);
  int ret;

  mp_size_t itch;

  mp_limb_t *scratch;
  TMP_GMP_DECL(tp, mp_limb_t);

  itch = mpn_sec_powm_itch (nn, ebn, nn);
  TMP_GMP_ALLOC (tp, nn + itch);
  scratch = tp + nn;

  /* tp = x^e mod n; compare against m in constant time. */
  mpn_sec_powm(tp, x, nn, ep, ebn, np, nn, scratch);
  ret = sec_equal(tp, m, nn);

  TMP_GMP_FREE (tp);
  return ret;
}
278
279 static void
cnd_mpn_zero(int cnd,volatile mp_ptr rp,mp_size_t n)280 cnd_mpn_zero (int cnd, volatile mp_ptr rp, mp_size_t n)
281 {
282 volatile mp_limb_t c;
283 volatile mp_limb_t mask = (mp_limb_t) cnd - 1;
284
285 while (--n >= 0)
286 {
287 c = rp[n];
288 c &= mask;
289 rp[n] = c;
290 }
291 }
292
/* Checks for any errors done in the RSA computation. That avoids
 * attacks which rely on faults on hardware, or even software MPI
 * implementation.
 * This version is side-channel silent even in case of error,
 * the destination buffer is always overwritten.
 * Returns 1 on success, 0 on failure (x is zeroed on failure). */
int
_rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
			 const struct rsa_private_key *key,
			 void *random_ctx, nettle_random_func *random,
			 mp_limb_t *x, const mp_limb_t *m, size_t mn)
{
  TMP_GMP_DECL (c, mp_limb_t);
  TMP_GMP_DECL (ri, mp_limb_t);
  TMP_GMP_DECL (scratch, mp_limb_t);
  size_t key_limb_size;
  int ret;

  key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    {
      /* Still overwrite the destination, per this function's
	 always-overwrite contract. */
      mpn_zero(x, key_limb_size);
      return 0;
    }

  assert(mpz_size(pub->n) == key_limb_size);
  /* NOTE(review): callers must guarantee mn <= key_limb_size; an
     oversized input aborts here rather than being rejected. */
  assert(mn <= key_limb_size);

  TMP_GMP_ALLOC (c, key_limb_size);
  TMP_GMP_ALLOC (ri, key_limb_size);
  TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key));

  /* Blind the input into x, take the private root into c. */
  rsa_sec_blind (pub, random_ctx, random, x, ri, m, mn);

  _rsa_sec_compute_root(key, c, x, scratch);

  /* Fault check under the public key (1 on match). */
  ret = rsa_sec_check_root(pub, c, x);

  rsa_sec_unblind(pub, x, ri, c);

  /* On failure, clear x without branching on ret. */
  cnd_mpn_zero(1 - ret, x, key_limb_size);

  TMP_GMP_FREE (scratch);
  TMP_GMP_FREE (ri);
  TMP_GMP_FREE (c);
  return ret;
}
345
/* Checks for any errors done in the RSA computation. That avoids
 * attacks which rely on faults on hardware, or even software MPI
 * implementation.
 * This version is maintained for API compatibility reasons. It
 * is not completely side-channel silent. There are conditionals
 * in buffer copying both in case of success or error.
 * Returns 1 on success, 0 on failure; on failure x is unchanged. */
int
rsa_compute_root_tr(const struct rsa_public_key *pub,
		    const struct rsa_private_key *key,
		    void *random_ctx, nettle_random_func *random,
		    mpz_t x, const mpz_t m)
{
  TMP_GMP_DECL (l, mp_limb_t);
  int res;

  mp_size_t l_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
  TMP_GMP_ALLOC (l, l_size);

  /* NOTE(review): assumes 0 <= m < n, so that mpz_size(m) does not
     exceed the key's limb size — larger inputs trip an assert in
     _rsa_sec_compute_root_tr; verify against callers. */
  res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l,
				  mpz_limbs_read(m), mpz_size(m));
  if (res) {
    /* Success: export the limb result into the mpz_t destination. */
    mp_limb_t *xp = mpz_limbs_write (x, l_size);
    mpn_copyi (xp, l, l_size);
    mpz_limbs_finish (x, l_size);
  }

  TMP_GMP_FREE (l);
  return res;
}
376 #endif
377