xref: /dragonfly/contrib/gmp/mpn/generic/powm_sec.c (revision 0ca59c34)
/* mpn_powm_sec -- Compute R = U^E mod M.  Secure variant, side-channel silent
   under the assumption that the multiply instruction is side-channel silent.

   Contributed to the GNU project by Torbjorn Granlund.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2007, 2008, 2009, 2011, 2012 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */


/*
  BASIC ALGORITHM, Compute U^E mod M, where M < B^n is odd.

  1. T <- (B^n * U) mod M                Convert to REDC form

  2. Compute table U^0, U^1, U^2, ..., U^(2^k-1) of E-dependent size 2^k

  3. While there are more bits in E, scan them left to right in windows of k
     bits: square k times, then multiply by the table entry the window
     selects (see the sketch after this comment block)


  TODO:

   * Make getbits a macro, thereby allowing it to update the index operand.
     That will simplify the code using getbits.  (Perhaps make getbits' sibling
     getbit then have similar form, for symmetry.)

   * Write an itch function.  Or perhaps get rid of tp parameter since the huge
     pp area is allocated locally anyway?

   * Choose window size without looping.  (Superoptimize or think(tm).)

   * Call new division functions, not mpn_tdiv_qr.
*/
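
/* Illustrative sketch of step 3 (not the actual code; mpn_powm_sec below does
   the same thing in constant time via mpn_tabselect and mpn_redc_1_sec):

     R = table[leading k bits of E]
     while (bits remain in E)
       {
	 w = next k bits of E (most significant first);
	 repeat k times:  R = redc (R * R);
	 R = redc (R * table[w]);
       }
*/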

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

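/* When WANT_CACHE_SECURITY is non-zero, table entries are fetched with
   mpn_tabselect, which reads the whole table, instead of with a directly
   indexed MPN_COPY or mpn_mul_basecase, so that the exponent bits do not
   leak through data-cache access patterns.  */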
#define WANT_CACHE_SECURITY 1


/* Define our own mpn squaring function.  We do this since we cannot use a
   native mpn_sqr_basecase over TUNE_SQR_TOOM2_MAX, or a non-native one over
   SQR_TOOM2_THRESHOLD.  This is because of the fixed-size stack allocations
   made inside mpn_sqr_basecase.  */

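/* MPN_SQR_DIAGONAL stores the "diagonal" of a square: for each limb
   u = up[_i] it writes the double-limb product u*u to (rp)[2*_i+1],(rp)[2*_i]
   (the GMP_NAIL_BITS shifts only matter for nails builds).  */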
#if HAVE_NATIVE_mpn_sqr_diagonal
#define MPN_SQR_DIAGONAL(rp, up, n)					\
  mpn_sqr_diagonal (rp, up, n)
#else
#define MPN_SQR_DIAGONAL(rp, up, n)					\
  do {									\
    mp_size_t _i;							\
    for (_i = 0; _i < (n); _i++)					\
      {									\
	mp_limb_t ul, lpl;						\
	ul = (up)[_i];							\
	umul_ppmm ((rp)[2 * _i + 1], lpl, ul, ul << GMP_NAIL_BITS);	\
	(rp)[2 * _i] = lpl >> GMP_NAIL_BITS;				\
      }									\
  } while (0)
#endif


#if ! HAVE_NATIVE_mpn_sqr_basecase
/* The limit of the generic code is SQR_TOOM2_THRESHOLD.  */
#define SQR_BASECASE_LIM  SQR_TOOM2_THRESHOLD
#endif

#if HAVE_NATIVE_mpn_sqr_basecase
#ifdef TUNE_SQR_TOOM2_MAX
/* We slightly abuse TUNE_SQR_TOOM2_MAX here.  If it is set for an assembly
   mpn_sqr_basecase, it comes from SQR_TOOM2_THRESHOLD_MAX in the assembly
   file.  An assembly mpn_sqr_basecase that does not define it should allow
   any size.  */
#define SQR_BASECASE_LIM  SQR_TOOM2_THRESHOLD
#endif
#endif

#ifdef WANT_FAT_BINARY
/* For fat builds, we use SQR_TOOM2_THRESHOLD, which will expand to a read from
   __gmpn_cpuvec.  Perhaps any possible sqr_basecase.asm allows any size, and we
   limit its use unnecessarily.  We cannot tell, so play it safe.  FIXME.  */
#define SQR_BASECASE_LIM  SQR_TOOM2_THRESHOLD
#endif

#ifndef SQR_BASECASE_LIM
/* If SQR_BASECASE_LIM was not defined above, use mpn_sqr_basecase for any
   operand size.  */
#define mpn_local_sqr(rp,up,n,tp) mpn_sqr_basecase(rp,up,n)
#else
/* Define our own squaring function, which uses mpn_sqr_basecase for its
   allowed sizes, but its own code for larger sizes.  */
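/* For n >= SQR_BASECASE_LIM the code below uses plain schoolbook squaring,
   split into cross products and a diagonal (a sketch of the layout; indices
   as in the code):

     tp[0 .. 2n-3]    accumulates the cross products up[i]*up[j] for i < j,
                      aligned so that tp[k] has weight B^(k+1)
     rp[2i+1],rp[2i]  get the diagonal squares up[i]^2
     rp[1 ..]         += 2 * tp  (mpn_addlsh1_n, or mpn_lshift + mpn_add_n),
                      with the final carry added into rp[2n-1]  */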
static void
mpn_local_sqr (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_ptr tp)
{
  mp_size_t i;

  ASSERT (n >= 1);
  ASSERT (! MPN_OVERLAP_P (rp, 2*n, up, n));

  if (BELOW_THRESHOLD (n, SQR_BASECASE_LIM))
    {
      mpn_sqr_basecase (rp, up, n);
      return;
    }

  {
    mp_limb_t ul, lpl;
    ul = up[0];
    umul_ppmm (rp[1], lpl, ul, ul << GMP_NAIL_BITS);
    rp[0] = lpl >> GMP_NAIL_BITS;
  }
  if (n > 1)
    {
      mp_limb_t cy;

      cy = mpn_mul_1 (tp, up + 1, n - 1, up[0]);
      tp[n - 1] = cy;
      for (i = 2; i < n; i++)
	{
	  mp_limb_t cy;
	  cy = mpn_addmul_1 (tp + 2 * i - 2, up + i, n - i, up[i - 1]);
	  tp[n + i - 2] = cy;
	}
      MPN_SQR_DIAGONAL (rp + 2, up + 1, n - 1);

      {
	mp_limb_t cy;
#if HAVE_NATIVE_mpn_addlsh1_n
	cy = mpn_addlsh1_n (rp + 1, rp + 1, tp, 2 * n - 2);
#else
	cy = mpn_lshift (tp, tp, 2 * n - 2, 1);
	cy += mpn_add_n (rp + 1, rp + 1, tp, 2 * n - 2);
#endif
	rp[2 * n - 1] += cy;
      }
    }
}
#endif

#define getbit(p,bi) \
  ((p[(bi - 1) / GMP_LIMB_BITS] >> (bi - 1) % GMP_LIMB_BITS) & 1)

static inline mp_limb_t
getbits (const mp_limb_t *p, mp_bitcnt_t bi, int nbits)
{
  int nbits_in_r;
  mp_limb_t r;
  mp_size_t i;

  if (bi < nbits)
    {
      return p[0] & (((mp_limb_t) 1 << bi) - 1);
    }
  else
    {
      bi -= nbits;			/* bit index of low bit to extract */
      i = bi / GMP_LIMB_BITS;		/* word index of low bit to extract */
      bi %= GMP_LIMB_BITS;		/* bit index in low word */
      r = p[i] >> bi;			/* extract (low) bits */
      nbits_in_r = GMP_LIMB_BITS - bi;	/* number of bits now in r */
      if (nbits_in_r < nbits)		/* did we get enough bits? */
	r += p[i + 1] << nbits_in_r;	/* prepend bits from higher word */
      return r & (((mp_limb_t) 1 << nbits) - 1);
    }
}
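
/* Example (illustrative, assuming 64-bit limbs and no nails):
   getbits (p, 70, 6) extracts bits 64..69 of p, i.e. p[1] & 0x3f, while
   getbits (p, 3, 6) takes the bi < nbits branch and returns p[0] & 7.  In
   mpn_powm_sec below, bi counts down from the exponent's bit length, so
   successive calls return the exponent's windows from the most significant
   end downwards.  */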

static inline int
win_size (mp_bitcnt_t eb)
{
  int k;
  static mp_bitcnt_t x[] = {0,4,27,100,325,1026,2905,7848,20457,51670,~(mp_bitcnt_t)0};
  for (k = 1; eb > x[k]; k++)
    ;
  return k;
}
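
/* win_size maps an exponent bit count eb to the window size k used for the
   precomputed table: with the thresholds above, eb <= 4 gives k = 1,
   eb <= 27 gives k = 2, eb <= 100 gives k = 3, and so on; the ~0 entry is a
   sentinel that stops the scan.  */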

/* Convert U to REDC form, U_r = B^n * U mod M */
static void
redcify (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr mp, mp_size_t n, mp_ptr tp)
{
  mp_ptr qp;

  qp = tp + un + n;

  MPN_ZERO (tp, n);
  MPN_COPY (tp + n, up, un);
  mpn_tdiv_qr (qp, rp, 0L, tp, un + n, mp, n);
}
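
/* With operands in REDC form, mpn_redc_1_sec (rp, tp, mp, n, minv) reduces a
   2n-limb product tp to tp * B^-n mod mp, so multiplying two REDC-form
   numbers and reducing gives (a*B^n)(b*B^n)*B^-n = (a*b)*B^n mod mp, i.e. the
   product stays in REDC form.  A final reduction of the result padded with n
   zero limbs (done at the end of mpn_powm_sec) divides by B^n once more and
   recovers the plain residue.  */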

/* rp[n-1..0] = bp[bn-1..0] ^ ep[en-1..0] mod mp[n-1..0]
   Requires that mp[n-1..0] is odd (asserted below; binvert_limb and the redc
   code depend on it).
   Requires that ep[en-1..0] is > 1.
   Uses scratch space at tp as returned by mpn_powm_sec_itch (bn, en, n).  */
void
mpn_powm_sec (mp_ptr rp, mp_srcptr bp, mp_size_t bn,
	      mp_srcptr ep, mp_size_t en,
	      mp_srcptr mp, mp_size_t n, mp_ptr tp)
{
  mp_limb_t minv;
  int cnt;
  mp_bitcnt_t ebi;
  int windowsize, this_windowsize;
  mp_limb_t expbits;
  mp_ptr pp, this_pp;
  long i;
  int cnd;

  ASSERT (en > 1 || (en == 1 && ep[0] > 0));
  ASSERT (n >= 1 && ((mp[0] & 1) != 0));

  count_leading_zeros (cnt, ep[en - 1]);
  ebi = (mp_bitcnt_t) en * GMP_LIMB_BITS - cnt;

  windowsize = win_size (ebi);

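  /* The mpn_redc_1_sec calls below expect the negated modular inverse
     -mp[0]^-1 mod B, so negate what binvert_limb computes.  */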
  binvert_limb (minv, mp[0]);
  minv = -minv;

  pp = tp + 4 * n;

  this_pp = pp;
  this_pp[n] = 1;
  redcify (this_pp, this_pp + n, 1, mp, n, tp + 6 * n);
  this_pp += n;
  redcify (this_pp, bp, bn, mp, n, tp + 6 * n);

  /* Precompute powers of b and put them in the temporary area at pp.  */
  for (i = (1 << windowsize) - 2; i > 0; i--)
    {
      mpn_mul_basecase (tp, this_pp, n, pp + n, n);
      this_pp += n;
      mpn_redc_1_sec (this_pp, tp, mp, n, minv);
    }
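
  /* The table at pp now holds 2^windowsize entries of n limbs each:
     pp + k * n is b^k in REDC form, for k = 0 .. 2^windowsize - 1.  */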

  expbits = getbits (ep, ebi, windowsize);
  if (ebi < windowsize)
    ebi = 0;
  else
    ebi -= windowsize;

#if WANT_CACHE_SECURITY
  mpn_tabselect (rp, pp, n, 1 << windowsize, expbits);
#else
  MPN_COPY (rp, pp + n * expbits, n);
#endif
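
  /* rp now holds b^expbits in REDC form, where expbits are the most
     significant windowsize bits of the exponent; this first window needs no
     squarings.  */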

  while (ebi != 0)
    {
      expbits = getbits (ep, ebi, windowsize);
      this_windowsize = windowsize;
      if (ebi < windowsize)
	{
	  this_windowsize -= windowsize - ebi;
	  ebi = 0;
	}
      else
	ebi -= windowsize;

      do
	{
	  mpn_local_sqr (tp, rp, n, tp + 2 * n);
	  mpn_redc_1_sec (rp, tp, mp, n, minv);
	  this_windowsize--;
	}
      while (this_windowsize != 0);

#if WANT_CACHE_SECURITY
      mpn_tabselect (tp + 2*n, pp, n, 1 << windowsize, expbits);
      mpn_mul_basecase (tp, rp, n, tp + 2*n, n);
#else
      mpn_mul_basecase (tp, rp, n, pp + n * expbits, n);
#endif
      mpn_redc_1_sec (rp, tp, mp, n, minv);
    }

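  /* Convert rp back from REDC form: append n zero limbs and reduce once more,
     which divides by B^n.  The result may still be >= mp, so compute rp - mp
     just for its borrow, then let mpn_subcnd_n subtract mp exactly when
     rp >= mp; both operations are performed whatever the outcome, keeping
     this tail side-channel silent.  */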
  MPN_COPY (tp, rp, n);
  MPN_ZERO (tp + n, n);
  mpn_redc_1_sec (rp, tp, mp, n, minv);
  cnd = mpn_sub_n (tp, rp, mp, n);	/* we need just retval */
  mpn_subcnd_n (rp, rp, mp, n, !cnd);
}

#if ! HAVE_NATIVE_mpn_tabselect
/* Select entry `which' from table `tab', which has nents entries, each `n'
   limbs.  Store the selected entry at rp.  Reads entire table to avoid
   side-channel information leaks.  O(n*nents).
   FIXME: Move to its own file.  */
void
mpn_tabselect (volatile mp_limb_t *rp, volatile mp_limb_t *tab, mp_size_t n,
	       mp_size_t nents, mp_size_t which)
{
  mp_size_t k, i;
  mp_limb_t mask;
  volatile mp_limb_t *tp;

  for (k = 0; k < nents; k++)
    {
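      /* mask is all ones when k equals which, and zero otherwise; every table
         entry is read, but only the selected one ends up copied into rp.  */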
      mask = -(mp_limb_t) (which == k);
      tp = tab + n * k;
      for (i = 0; i < n; i++)
	{
	  rp[i] = (rp[i] & ~mask) | (tp[i] & mask);
	}
    }
}
#endif

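/* Return the number of scratch limbs mpn_powm_sec needs for operand sizes bn,
   en, n.  mpn_powm_sec lays its scratch out as 4 * n working limbs followed
   by the table pp of (1 << windowsize) entries of n limbs, and hands redcify
   the area starting at tp + 6 * n; the MAX below covers both uses.  */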
mp_size_t
mpn_powm_sec_itch (mp_size_t bn, mp_size_t en, mp_size_t n)
{
  int windowsize;
  mp_size_t redcify_itch, itch;

  windowsize = win_size (en * GMP_NUMB_BITS); /* slight over-estimate of the
						 exponent's bit count */
  itch = 4 * n + (n << windowsize);
  redcify_itch = 2 * bn + n + 1;
  /* The 6 * n term accounts for redcify's scratch being placed at offset
     6 * n into the scratch area (tp + 6 * n in mpn_powm_sec).  */
  return MAX (itch, redcify_itch + 6 * n);
}
341