1 /* rsa-sign-tr.c
2 
3    Creating RSA signatures, with some additional checks.
4 
5    Copyright (C) 2001, 2015 Niels Möller
6    Copyright (C) 2012 Nikos Mavrogiannopoulos
7    Copyright (C) 2018 Red Hat Inc.
8 
9    This file is part of GNU Nettle.
10 
11    GNU Nettle is free software: you can redistribute it and/or
12    modify it under the terms of either:
13 
14      * the GNU Lesser General Public License as published by the Free
15        Software Foundation; either version 3 of the License, or (at your
16        option) any later version.
17 
18    or
19 
20      * the GNU General Public License as published by the Free
21        Software Foundation; either version 2 of the License, or (at your
22        option) any later version.
23 
24    or both in parallel, as here.
25 
26    GNU Nettle is distributed in the hope that it will be useful,
27    but WITHOUT ANY WARRANTY; without even the implied warranty of
28    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
29    General Public License for more details.
30 
31    You should have received copies of the GNU General Public License and
32    the GNU Lesser General Public License along with this program.  If
33    not, see http://www.gnu.org/licenses/.
34 */
35 
36 #if HAVE_CONFIG_H
37 # include "config.h"
38 #endif
39 
40 #include <assert.h>
41 
42 #include "gmp-glue.h"
43 #include "rsa.h"
44 #include "rsa-internal.h"
45 
46 #define MAX(a, b) ((a) > (b) ? (a) : (b))
47 
48 #if NETTLE_USE_MINI_GMP
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
   returns the inverse (ri), for use by rsa_unblind. */
static void
rsa_blind (const struct rsa_public_key *pub,
	   void *random_ctx, nettle_random_func *random,
	   mpz_t c, mpz_t ri, const mpz_t m)
{
  mpz_t r;

  mpz_init(r);

  /* c = m*(r^e)
   * ri = r^(-1)
   */
  do
    {
      /* Draw a fresh random r in [0, n); retry until r is invertible
	 mod n (for a valid key, a non-invertible r is astronomically
	 unlikely, so this loop essentially runs once). */
      nettle_mpz_random(r, random_ctx, random, pub->n);
      /* invert r */
    }
  while (!mpz_invert (ri, r, pub->n));

  /* c = m * r^e mod n. mpz_powm_sec keeps the public exponentiation
     side-channel silent; it requires an odd modulus, which the caller
     has already checked. */
  mpz_powm_sec(r, r, pub->e, pub->n);
  mpz_mul(c, m, r);
  mpz_fdiv_r(c, c, pub->n);

  mpz_clear(r);
}
77 
78 /* m = c ri mod n */
79 static void
rsa_unblind(const struct rsa_public_key * pub,mpz_t m,const mpz_t ri,const mpz_t c)80 rsa_unblind (const struct rsa_public_key *pub,
81 	     mpz_t m, const mpz_t ri, const mpz_t c)
82 {
83   mpz_mul(m, c, ri);
84   mpz_fdiv_r(m, m, pub->n);
85 }
86 
/* Checks for any errors done in the RSA computation. That avoids
 * attacks which rely on faults on hardware, or even software MPI
 * implementation. */
int
rsa_compute_root_tr(const struct rsa_public_key *pub,
		    const struct rsa_private_key *key,
		    void *random_ctx, nettle_random_func *random,
		    mpz_t x, const mpz_t m)
{
  int res;
  mpz_t t, mb, xb, ri;

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    return 0;

  mpz_init (mb);
  mpz_init (xb);
  mpz_init (ri);
  mpz_init (t);

  /* Blind m so the private CRT exponentiation never operates on the
     caller's value directly: mb = m * r^e mod n. */
  rsa_blind (pub, random_ctx, random, mb, ri, m);

  /* xb = mb^d mod n (CRT inside rsa_compute_root). */
  rsa_compute_root (key, xb, mb);

  /* Fault check: re-apply the public exponent and require
     xb^e == mb mod n. A faulted CRT result fails this test, which
     defeats Bellcore-style fault attacks that factor n from a bad
     signature. */
  mpz_powm_sec(t, xb, pub->e, pub->n);
  res = (mpz_cmp(mb, t) == 0);

  /* Only unblind into x on success; on failure x is left untouched
     and 0 is returned. */
  if (res)
    rsa_unblind (pub, x, ri, xb);

  mpz_clear (mb);
  mpz_clear (xb);
  mpz_clear (ri);
  mpz_clear (t);

  return res;
}
129 
130 int
_rsa_sec_compute_root_tr(const struct rsa_public_key * pub,const struct rsa_private_key * key,void * random_ctx,nettle_random_func * random,mp_limb_t * x,const mp_limb_t * m)131 _rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
132 			 const struct rsa_private_key *key,
133 			 void *random_ctx, nettle_random_func *random,
134 			 mp_limb_t *x, const mp_limb_t *m)
135 {
136   mp_size_t nn;
137   mpz_t mz;
138   mpz_t xz;
139   int res;
140 
141   mpz_init(xz);
142 
143   nn = mpz_size (pub->n);
144 
145   res = rsa_compute_root_tr(pub, key, random_ctx, random, xz,
146 			    mpz_roinit_n(mz, m, nn));
147 
148   if (res)
149     mpz_limbs_copy(x, xz, nn);
150 
151   mpz_clear(xz);
152   return res;
153 }
154 #else
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
   returns the inverse (ri), for use by rsa_unblind. Must have c != m,
   no in-place operation.*/
static void
rsa_sec_blind (const struct rsa_public_key *pub,
               void *random_ctx, nettle_random_func *random,
               mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m)
{
  const mp_limb_t *ep = mpz_limbs_read (pub->e);
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  mp_bitcnt_t ebn = mpz_sizeinbase (pub->e, 2);
  mp_size_t nn = mpz_size (pub->n);
  size_t itch;
  size_t i2;
  mp_limb_t *scratch;
  TMP_GMP_DECL (tp, mp_limb_t);
  TMP_GMP_DECL (rp, mp_limb_t);
  TMP_GMP_DECL (r, uint8_t);

  /* rp holds the random value as nn limbs; r is the same amount of
     raw random bytes from the caller's generator. */
  TMP_GMP_ALLOC (rp, nn);
  TMP_GMP_ALLOC (r, nn * sizeof(mp_limb_t));

  /* c = m*(r^e) mod n */
  /* One scratch area sized for the largest itch of the four
     mpn_sec_* operations used below. */
  itch = mpn_sec_powm_itch(nn, ebn, nn);
  i2 = mpn_sec_mul_itch(nn, nn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_div_r_itch(2*nn, nn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_invert_itch(nn);
  itch = MAX(itch, i2);

  /* tp doubles as a 2*nn-limb working buffer (products, invert
     input), with the mpn_sec_* scratch following it. */
  TMP_GMP_ALLOC (tp, 2*nn  + itch);
  scratch = tp + 2*nn;

  /* ri = r^(-1) */
  do
    {
      random(random_ctx, nn * sizeof(mp_limb_t), (uint8_t *)r);
      mpn_set_base256(rp, nn, r, nn * sizeof(mp_limb_t));
      /* mpn_sec_invert destroys its input, so invert a copy and keep
	 rp for the exponentiation below. Retry until r is invertible
	 mod n. */
      mpn_copyi(tp, rp, nn);
      /* invert r */
    }
  while (!mpn_sec_invert (ri, tp, np, nn, 2 * nn * GMP_NUMB_BITS, scratch));

  /* c = r^e mod n, then tp = c * m (2*nn limbs), reduced mod n back
     into c. All steps are side-channel-silent mpn_sec_* calls. */
  mpn_sec_powm (c, rp, nn, ep, ebn, np, nn, scratch);
  mpn_sec_mul (tp, c, nn, m, nn, scratch);
  mpn_sec_div_r (tp, 2*nn, np, nn, scratch);
  mpn_copyi(c, tp, nn);

  TMP_GMP_FREE (r);
  TMP_GMP_FREE (rp);
  TMP_GMP_FREE (tp);
}
208 
/* m = c ri mod n. Allows x == c. */
static void
rsa_sec_unblind (const struct rsa_public_key *pub,
                 mp_limb_t *x, mp_limb_t *ri, const mp_limb_t *c)
{
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  mp_size_t nn = mpz_size (pub->n);

  size_t itch;
  size_t i2;
  mp_limb_t *scratch;
  TMP_GMP_DECL(tp, mp_limb_t);

  /* Scratch sized for the larger of the two mpn_sec_* calls. */
  itch = mpn_sec_mul_itch(nn, nn);
  i2 = mpn_sec_div_r_itch(nn + nn, nn);
  itch = MAX(itch, i2);

  /* First 2*nn limbs of tp hold the full product; scratch follows. */
  TMP_GMP_ALLOC (tp, nn + nn + itch);
  scratch = tp + nn + nn;

  /* tp = c * ri (2*nn limbs), reduced mod n, then copied to x. Using
     the separate tp buffer is what makes x == c aliasing safe. */
  mpn_sec_mul (tp, c, nn, ri, nn, scratch);
  mpn_sec_div_r (tp, nn + nn, np, nn, scratch);
  mpn_copyi(x, tp, nn);

  TMP_GMP_FREE (tp);
}
235 
/* Constant-time equality test of two limb arrays: returns 1 if
   {a, limbs} == {b, limbs}, else 0, always examining every limb so
   the timing does not reveal the position of a difference. */
static int
sec_equal(const mp_limb_t *a, const mp_limb_t *b, size_t limbs)
{
  /* volatile discourages the compiler from short-circuiting the
     accumulation once z becomes nonzero. */
  volatile mp_limb_t z = 0;
  size_t i;

  for (i = 0; i < limbs; i++)
    {
      /* a[i] ^ b[i] is zero iff the limbs match; OR the differences
	 together. */
      z |= (a[i] ^ b[i]);
    }

  /* FIXME: Might compile to a branch instruction on some platforms. */
  return z == 0;
}
250 
/* Verifies a private-key result by redoing the public operation:
   returns 1 if x^e == m (mod n), else 0. Used as the fault check in
   _rsa_sec_compute_root_tr; both the exponentiation and the
   comparison are side-channel silent. */
static int
rsa_sec_check_root(const struct rsa_public_key *pub,
                   const mp_limb_t *x, const mp_limb_t *m)
{
  mp_size_t nn = mpz_size (pub->n);
  mp_size_t ebn = mpz_sizeinbase (pub->e, 2);
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  const mp_limb_t *ep = mpz_limbs_read (pub->e);
  int ret;

  mp_size_t itch;

  mp_limb_t *scratch;
  TMP_GMP_DECL(tp, mp_limb_t);

  /* nn limbs for the result of the powm, scratch after it. */
  itch = mpn_sec_powm_itch (nn, ebn, nn);
  TMP_GMP_ALLOC (tp, nn + itch);
  scratch = tp + nn;

  mpn_sec_powm(tp, x, nn, ep, ebn, np, nn, scratch);
  ret = sec_equal(tp, m, nn);

  TMP_GMP_FREE (tp);
  return ret;
}
276 
/* Branch-free conditional clear of {rp, n}: with cnd == 1 every limb
   is set to zero, with cnd == 0 each limb is rewritten with its own
   value, so the memory-access pattern is identical either way.
   Expects cnd in {0, 1}. */
static void
cnd_mpn_zero (int cnd, volatile mp_ptr rp, mp_size_t n)
{
  volatile mp_limb_t c;
  /* mask is 0 when cnd == 1 (clears), all-ones when cnd == 0
     (preserves). */
  volatile mp_limb_t mask = (mp_limb_t) cnd - 1;

  while (--n >= 0)
    {
      c = rp[n];
      c &= mask;
      rp[n] = c;
    }
}
290 
/* Checks for any errors done in the RSA computation. That avoids
 * attacks which rely on faults on hardware, or even software MPI
 * implementation.
 * This version is side-channel silent even in case of error,
 * the destination buffer is always overwritten */
int
_rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
			 const struct rsa_private_key *key,
			 void *random_ctx, nettle_random_func *random,
			 mp_limb_t *x, const mp_limb_t *m)
{
  TMP_GMP_DECL (c, mp_limb_t);
  TMP_GMP_DECL (ri, mp_limb_t);
  TMP_GMP_DECL (scratch, mp_limb_t);
  size_t key_limb_size;
  int ret;

  key_limb_size = mpz_size(pub->n);

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    {
      /* Even on this error path the destination is overwritten, per
	 the contract above. */
      mpn_zero(x, key_limb_size);
      return 0;
    }

  /* NOTE(review): trivially true as written, since key_limb_size was
     just assigned from mpz_size(pub->n); presumably a guard against
     future changes to how key_limb_size is derived — confirm. */
  assert(mpz_size(pub->n) == key_limb_size);

  TMP_GMP_ALLOC (c, key_limb_size);
  TMP_GMP_ALLOC (ri, key_limb_size);
  TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key));

  /* Blind the input: c = m * r^e mod n, so the CRT exponentiation
     never sees m directly. */
  rsa_sec_blind (pub, random_ctx, random, c, ri, m);

  /* x = c^d mod n via the private-key CRT routine. */
  _rsa_sec_compute_root(key, x, c, scratch);

  /* Fault check before anything depends on x: require x^e == c mod n
     (defends against CRT fault attacks). */
  ret = rsa_sec_check_root(pub, x, c);

  /* Unblind in place (rsa_sec_unblind allows x == c-style aliasing),
     unconditionally, to keep the control flow identical on failure. */
  rsa_sec_unblind(pub, x, ri, x);

  /* On failure (ret == 0), clear x with a masked, branch-free write;
     on success the same pass rewrites x with its own contents. */
  cnd_mpn_zero(1 - ret, x, key_limb_size);

  TMP_GMP_FREE (scratch);
  TMP_GMP_FREE (ri);
  TMP_GMP_FREE (c);
  return ret;
}
342 
343 /* Checks for any errors done in the RSA computation. That avoids
344  * attacks which rely on faults on hardware, or even software MPI
345  * implementation.
346  * This version is maintained for API compatibility reasons. It
347  * is not completely side-channel silent. There are conditionals
348  * in buffer copying both in case of success or error.
349  */
350 int
rsa_compute_root_tr(const struct rsa_public_key * pub,const struct rsa_private_key * key,void * random_ctx,nettle_random_func * random,mpz_t x,const mpz_t m)351 rsa_compute_root_tr(const struct rsa_public_key *pub,
352 		    const struct rsa_private_key *key,
353 		    void *random_ctx, nettle_random_func *random,
354 		    mpz_t x, const mpz_t m)
355 {
356   TMP_GMP_DECL (l, mp_limb_t);
357   mp_size_t nn = mpz_size(pub->n);
358   int res;
359 
360   TMP_GMP_ALLOC (l, nn);
361   mpz_limbs_copy(l, m, nn);
362 
363   res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l, l);
364   if (res) {
365     mp_limb_t *xp = mpz_limbs_write (x, nn);
366     mpn_copyi (xp, l, nn);
367     mpz_limbs_finish (x, nn);
368   }
369 
370   TMP_GMP_FREE (l);
371   return res;
372 }
373 #endif
374