/* mpn_powm_sec -- Compute R = U^E mod M.  Secure variant, side-channel silent
   under the assumption that the multiply instruction is side-channel silent.

   Contributed to the GNU project by Torbjorn Granlund.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */


/*
  BASIC ALGORITHM: Compute U^E mod M, where M < B^n is odd.

  1. T <- (B^n * U) mod M                Convert to REDC form

  2. Compute a table U^0, U^1, U^2, ... of E-dependent size

  3. While there are more bits in E
       W <- power left-to-right base-k

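  As a rough illustration of step 3 (the code below differs in detail; in
  particular it keeps W in REDC form and selects table entries in a
  side-channel silent way), one iteration of the left-to-right base-k loop
  looks like:

       w <- next k bits of E, most significant first
       W <- W^(2^k) mod M            k modular squarings
       W <- W * U^w mod M            one multiply by a table entry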

  TODO:

   * Make getbits a macro, thereby allowing it to update the index operand.
     That will simplify the code using getbits.  (Perhaps then give getbits'
     sibling getbit a similar form, for symmetry.)

   * Write an itch function.  Or perhaps get rid of the tp parameter, since
     the huge pp area is allocated locally anyway?

   * Choose window size without looping.  (Superoptimize or think(tm).)

   * Call new division functions, not mpn_tdiv_qr.
*/

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

#define WANT_CACHE_SECURITY 1


/* Define our own mpn squaring function.  We do this since we cannot use a
   native mpn_sqr_basecase over TUNE_SQR_TOOM2_MAX, or a non-native one over
   SQR_TOOM2_THRESHOLD.  This is because of fixed-size stack allocations
   made inside mpn_sqr_basecase.  */

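/* MPN_SQR_DIAGONAL (rp, up, n) squares each of the n limbs of {up,n},
   storing the two-limb results at rp[2*i] (low) and rp[2*i+1] (high).  */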
#if HAVE_NATIVE_mpn_sqr_diagonal
#define MPN_SQR_DIAGONAL(rp, up, n)                                     \
  mpn_sqr_diagonal (rp, up, n)
#else
#define MPN_SQR_DIAGONAL(rp, up, n)                                     \
  do {                                                                  \
    mp_size_t _i;                                                       \
    for (_i = 0; _i < (n); _i++)                                        \
      {                                                                 \
        mp_limb_t ul, lpl;                                              \
        ul = (up)[_i];                                                  \
        umul_ppmm ((rp)[2 * _i + 1], lpl, ul, ul << GMP_NAIL_BITS);     \
        (rp)[2 * _i] = lpl >> GMP_NAIL_BITS;                            \
      }                                                                 \
  } while (0)
#endif


#if ! HAVE_NATIVE_mpn_sqr_basecase
/* The limit of the generic code is SQR_TOOM2_THRESHOLD.  */
#define SQR_BASECASE_MAX  SQR_TOOM2_THRESHOLD
#endif

#if HAVE_NATIVE_mpn_sqr_basecase
#ifdef TUNE_SQR_TOOM2_MAX
/* We slightly abuse TUNE_SQR_TOOM2_MAX here.  If it is set for an assembly
   mpn_sqr_basecase, it comes from SQR_TOOM2_THRESHOLD_MAX in the assembly
   file.  An assembly mpn_sqr_basecase that does not define it should allow
   any size.  */
#define SQR_BASECASE_MAX  SQR_TOOM2_THRESHOLD
#endif
#endif

#ifndef SQR_BASECASE_MAX
/* If SQR_BASECASE_MAX is now not defined, use mpn_sqr_basecase for any operand
   size.  */
#define mpn_local_sqr(rp,up,n,tp) mpn_sqr_basecase(rp,up,n)
#else
/* Define our own squaring function, which uses mpn_sqr_basecase for its
   allowed sizes, but its own code for larger sizes.  */
static void
mpn_local_sqr (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_ptr tp)
{
  mp_size_t i;

  ASSERT (n >= 1);
  ASSERT (! MPN_OVERLAP_P (rp, 2*n, up, n));

  if (n < SQR_BASECASE_MAX)
    {
      mpn_sqr_basecase (rp, up, n);
      return;
    }

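  /* Schoolbook squaring: accumulate the cross products up[i]*up[j] (i < j)
     in tp, put the squares up[i]^2 directly in rp, then add tp into rp
     twice, i.e. shifted left by one bit.  */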
  {
    mp_limb_t ul, lpl;
    ul = up[0];
    umul_ppmm (rp[1], lpl, ul, ul << GMP_NAIL_BITS);
    rp[0] = lpl >> GMP_NAIL_BITS;
  }
  if (n > 1)
    {
      mp_limb_t cy;
      TMP_DECL;
      TMP_MARK;

      cy = mpn_mul_1 (tp, up + 1, n - 1, up[0]);
      tp[n - 1] = cy;
      for (i = 2; i < n; i++)
        {
          mp_limb_t cy;
          cy = mpn_addmul_1 (tp + 2 * i - 2, up + i, n - i, up[i - 1]);
          tp[n + i - 2] = cy;
        }
      MPN_SQR_DIAGONAL (rp + 2, up + 1, n - 1);

      {
        mp_limb_t cy;
#if HAVE_NATIVE_mpn_addlsh1_n
        cy = mpn_addlsh1_n (rp + 1, rp + 1, tp, 2 * n - 2);
#else
        cy = mpn_lshift (tp, tp, 2 * n - 2, 1);
        cy += mpn_add_n (rp + 1, rp + 1, tp, 2 * n - 2);
#endif
        rp[2 * n - 1] += cy;
      }

      TMP_FREE;
    }
}
#endif

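/* getbit (p, bi) returns bit number bi-1 of {p,..}; the bit index bi is thus
   1-based, with getbit (p, 1) giving the least significant bit of p[0].
   getbits (p, bi, nbits) below returns the nbits bits just below bit index
   bi, i.e. bits bi-1 down to bi-nbits (or just the low bi bits when
   bi < nbits).  For example, with p[0] = 0xb4 (binary 10110100),
   getbits (p, 6, 3) returns binary 110 = 6.  */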
#define getbit(p,bi) \
  ((p[(bi - 1) / GMP_LIMB_BITS] >> (bi - 1) % GMP_LIMB_BITS) & 1)

static inline mp_limb_t
getbits (const mp_limb_t *p, mp_bitcnt_t bi, int nbits)
{
  int nbits_in_r;
  mp_limb_t r;
  mp_size_t i;

  if (bi < nbits)
    {
      return p[0] & (((mp_limb_t) 1 << bi) - 1);
    }
  else
    {
      bi -= nbits;                      /* bit index of low bit to extract */
      i = bi / GMP_LIMB_BITS;           /* word index of low bit to extract */
      bi %= GMP_LIMB_BITS;              /* bit index in low word */
      r = p[i] >> bi;                   /* extract (low) bits */
      nbits_in_r = GMP_LIMB_BITS - bi;  /* number of bits now in r */
      if (nbits_in_r < nbits)           /* did we get enough bits? */
        r += p[i + 1] << nbits_in_r;    /* prepend bits from higher word */
      return r & (((mp_limb_t) 1 << nbits) - 1);
    }
}

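/* win_size (eb) returns the window size to use for an eb-bit exponent,
   namely the smallest k >= 1 with eb <= x[k]; exponents of more than x[k-1]
   and at most x[k] bits are thus handled with a k-bit window, i.e. a
   2^k-entry table.  */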
static inline int
win_size (mp_bitcnt_t eb)
{
  int k;
  static mp_bitcnt_t x[] = {0,4,27,100,325,1026,2905,7848,20457,51670,~(mp_bitcnt_t)0};
  for (k = 1; eb > x[k]; k++)
    ;
  return k;
}

/* Convert U to REDC form, U_r = B^n * U mod M */
static void
redcify (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr mp, mp_size_t n, mp_ptr tp)
{
  mp_ptr qp;
  TMP_DECL;
  TMP_MARK;

  qp = tp + un + n;

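  /* Form B^n * U in {tp, un + n} by shifting U up by n limbs, then divide by
     M; the remainder left at rp is U in REDC (Montgomery) form.  */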
  MPN_ZERO (tp, n);
  MPN_COPY (tp + n, up, un);
  mpn_tdiv_qr (qp, rp, 0L, tp, un + n, mp, n);
  TMP_FREE;
}

/* rp[n-1..0] = bp[bn-1..0] ^ ep[en-1..0] mod mp[n-1..0]
   Requires that mp[n-1..0] is odd.  FIXME: is this true?
   Requires that ep[en-1..0] is > 1.
   Uses scratch space at tp; the amount needed is given by mpn_powm_sec_itch.  */
void
mpn_powm_sec (mp_ptr rp, mp_srcptr bp, mp_size_t bn,
              mp_srcptr ep, mp_size_t en,
              mp_srcptr mp, mp_size_t n, mp_ptr tp)
{
  mp_limb_t minv;
  int cnt;
  mp_bitcnt_t ebi;
  int windowsize, this_windowsize;
  mp_limb_t expbits;
  mp_ptr pp, this_pp;
  long i;
  int cnd;
  TMP_DECL;

  ASSERT (en > 1 || (en == 1 && ep[0] > 0));
  ASSERT (n >= 1 && ((mp[0] & 1) != 0));

  TMP_MARK;

  count_leading_zeros (cnt, ep[en - 1]);
  ebi = (mp_bitcnt_t) en * GMP_LIMB_BITS - cnt;

  windowsize = win_size (ebi);

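  /* Compute minv = -1/mp[0] mod B, the one-limb constant needed by
     mpn_redc_1_sec for Montgomery reduction.  */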
  binvert_limb (minv, mp[0]);
  minv = -minv;

  pp = tp + 4 * n;

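  /* The table at pp holds the REDC forms of b^0, b^1, ..., b^(2^windowsize-1),
     n limbs per entry.  Start with b^0 = 1 and b^1 = b.  */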
  this_pp = pp;
  this_pp[n] = 1;
  redcify (this_pp, this_pp + n, 1, mp, n, tp + 6 * n);
  this_pp += n;
  redcify (this_pp, bp, bn, mp, n, tp + 6 * n);

  /* Precompute powers of b and put them in the temporary area at pp.  */
  for (i = (1 << windowsize) - 2; i > 0; i--)
    {
      mpn_mul_basecase (tp, this_pp, n, pp + n, n);
      this_pp += n;
      mpn_redc_1_sec (this_pp, tp, mp, n, minv);
    }

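  /* The exponent is scanned from the most significant end.  Extract its top
     window and use it to pick the initial residue from the table.  */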
  expbits = getbits (ep, ebi, windowsize);
  if (ebi < windowsize)
    ebi = 0;
  else
    ebi -= windowsize;

  MPN_COPY (rp, pp + n * expbits, n);

  while (ebi != 0)
    {
      expbits = getbits (ep, ebi, windowsize);
      this_windowsize = windowsize;
      if (ebi < windowsize)
        {
          this_windowsize -= windowsize - ebi;
          ebi = 0;
        }
      else
        ebi -= windowsize;

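      /* Square this_windowsize times, then multiply by the table entry
         selected by the current window.  */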
      do
        {
          mpn_local_sqr (tp, rp, n, tp + 2 * n);
          mpn_redc_1_sec (rp, tp, mp, n, minv);
          this_windowsize--;
        }
      while (this_windowsize != 0);

#if WANT_CACHE_SECURITY
      mpn_tabselect (tp + 2*n, pp, n, 1 << windowsize, expbits);
      mpn_mul_basecase (tp, rp, n, tp + 2*n, n);
#else
      mpn_mul_basecase (tp, rp, n, pp + n * expbits, n);
#endif
      mpn_redc_1_sec (rp, tp, mp, n, minv);
    }

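  /* Convert the result back from REDC form, then subtract m once if needed.
     The conditional subtraction is expressed with mpn_sub_n/mpn_subcnd_n so
     that it stays side-channel silent: the full operands are processed
     regardless of the outcome of the comparison.  */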
  MPN_COPY (tp, rp, n);
  MPN_ZERO (tp + n, n);
  mpn_redc_1_sec (rp, tp, mp, n, minv);
  cnd = mpn_sub_n (tp, rp, mp, n);	/* we need just retval */
  mpn_subcnd_n (rp, rp, mp, n, !cnd);
  TMP_FREE;
}

#if ! HAVE_NATIVE_mpn_tabselect
/* Select entry `which' from table `tab', which has nents entries, each `n'
   limbs.  Store the selected entry at rp.  Reads the entire table to avoid
   side-channel information leaks.  O(n*nents).
   FIXME: Move to its own file.  */
void
mpn_tabselect (volatile mp_limb_t *rp, volatile mp_limb_t *tab, mp_size_t n,
               mp_size_t nents, mp_size_t which)
{
  mp_size_t k, i;
  mp_limb_t mask;
  volatile mp_limb_t *tp;

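  /* For each table entry, mask is all ones when k == which and zero
     otherwise, so every entry is read but only the selected one is copied
     into rp.  */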
  for (k = 0; k < nents; k++)
    {
      mask = -(mp_limb_t) (which == k);
      tp = tab + n * k;
      for (i = 0; i < n; i++)
        {
          rp[i] = (rp[i] & ~mask) | (tp[i] & mask);
        }
    }
}
#endif

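/* Return the number of scratch limbs mpn_powm_sec needs for a bn-limb base,
   an en-limb exponent and an n-limb modulus.  */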
mp_size_t
mpn_powm_sec_itch (mp_size_t bn, mp_size_t en, mp_size_t n)
{
  int windowsize;
  mp_size_t redcify_itch, itch;

  windowsize = win_size (en * GMP_NUMB_BITS); /* slight over-estimate of the exponent bit count */
  itch = 4 * n + (n << windowsize);
  redcify_itch = 2 * bn + n + 1;
  /* The 6n accounts for the redcify scratch area being placed 6n limbs into
     the main scratch area.  */
  return MAX (itch, redcify_itch + 6 * n);
}