/* mpih-mul.c  -  MPI helper functions
 * Copyright (C) 1994, 1996, 1998, 1999, 2000,
 *               2001, 2002 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 */

#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mpi-internal.h"
#include "longlong.h"
#include "g10lib.h"

#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
    do {						\
	if( (size) < KARATSUBA_THRESHOLD )		\
	    mul_n_basecase (prodp, up, vp, size);	\
	else						\
	    mul_n (prodp, up, vp, size, tspace);	\
    } while (0)

#define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \
    do {					    \
	if ((size) < KARATSUBA_THRESHOLD)	    \
	    _gcry_mpih_sqr_n_basecase (prodp, up, size);	 \
	else					    \
	    _gcry_mpih_sqr_n (prodp, up, size, tspace);	 \
    } while (0)
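
/* Both helpers dispatch on KARATSUBA_THRESHOLD (from mpi-internal.h):
 * operands shorter than the threshold use the quadratic base case,
 * larger ones go through the Karatsuba routines below, which in turn
 * invoke these macros again for their half-sized subproducts.  */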


/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
 * both with SIZE limbs, and store the result at PRODP.  2 * SIZE limbs are
 * always stored.  Return the most significant limb.
 *
 * Argument constraints:
 * 1. PRODP != UP and PRODP != VP, i.e. the destination
 *    must be distinct from the multiplier and the multiplicand.
 *
 *
 * Handle simple cases with traditional multiplication.
 *
 * This is the most critical code of multiplication.  All multiplies rely
 * on this, both small and huge.  Small ones arrive here immediately.  Huge
 * ones arrive here as this is the base case for Karatsuba's recursive
 * algorithm below.
 */

static mpi_limb_t
mul_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up,
				 mpi_ptr_t vp, mpi_size_t size)
{
    mpi_size_t i;
    mpi_limb_t cy;
    mpi_limb_t v_limb;

    /* Multiply by the first limb in V separately, as the result can be
     * stored (not added) to PROD.  We also avoid a loop for zeroing.  */
    v_limb = vp[0];
    if( v_limb <= 1 ) {
	if( v_limb == 1 )
	    MPN_COPY( prodp, up, size );
	else
	    MPN_ZERO( prodp, size );
	cy = 0;
    }
    else
	cy = _gcry_mpih_mul_1( prodp, up, size, v_limb );

    prodp[size] = cy;
    prodp++;

    /* For each iteration in the outer loop, multiply one limb from
     * U with one limb from V, and add it to PROD.  */
    for( i = 1; i < size; i++ ) {
	v_limb = vp[i];
	if( v_limb <= 1 ) {
	    cy = 0;
	    if( v_limb == 1 )
	       cy = _gcry_mpih_add_n(prodp, prodp, up, size);
	}
	else
	    cy = _gcry_mpih_addmul_1(prodp, up, size, v_limb);

	prodp[size] = cy;
	prodp++;
    }

    return cy;
}
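
/* Illustrative note: with B = 2**BITS_PER_MP_LIMB and size = 2, and
 * setting aside the shortcuts for limb values 0 and 1, the first pass
 * stores u * v[0] via _gcry_mpih_mul_1 and the second pass adds
 * u * v[1] one limb higher via _gcry_mpih_addmul_1, giving
 * prod = u*v[0] + u*v[1]*B -- the schoolbook method in base B.  */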


static void
mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
			mpi_size_t size, mpi_ptr_t tspace )
{
    if( size & 1 ) {
      /* The size is odd, and the code below doesn't handle that.
       * Multiply the least significant (size - 1) limbs with a recursive
       * call, and handle the most significant limbs of U and V
       * separately.
       * A slightly faster way to do this would be to make the Karatsuba
       * code below behave as if the size were even, and let it check for
       * odd size in the end.  I.e., in essence move this code to the end.
       * Doing so would save us a recursive call, and potentially make the
       * stack grow a lot less.
       */
      mpi_size_t esize = size - 1;	 /* even size */
      mpi_limb_t cy_limb;

      MPN_MUL_N_RECURSE( prodp, up, vp, esize, tspace );
      cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, vp[esize] );
      prodp[esize + esize] = cy_limb;
      cy_limb = _gcry_mpih_addmul_1( prodp + esize, vp, size, up[esize] );
      prodp[esize + size] = cy_limb;
    }
    else {
	/* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
	 *
	 * Split U in two pieces, U1 and U0, such that
	 * U = U0 + U1*(B**n),
	 * and V in V1 and V0, such that
	 * V = V0 + V1*(B**n).
	 *
	 * UV is then computed recursively using the identity
	 *
	 *   UV = (B**2n + B**n)*U1*V1 + B**n*(U1-U0)*(V0-V1) + (B**n + 1)*U0*V0
	 *
	 * where B = 2**BITS_PER_MP_LIMB.
	 */
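	/* Illustrative decimal example: with B = 10 and n = 1, U = 23
	 * and V = 41 give U1 = 2, U0 = 3, V1 = 4, V0 = 1, so U1*V1 = 8,
	 * (U1-U0)*(V0-V1) = (-1)*(-3) = 3 and U0*V0 = 3.  The identity
	 * yields (100+10)*8 + 10*3 + 11*3 = 880 + 30 + 33 = 943 = 23*41.
	 */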
	mpi_size_t hsize = size >> 1;
	mpi_limb_t cy;
	int negflg;

	/* Product H.	   ________________  ________________
	 *		  |_____U1 x V1____||____U0 x V0_____|
	 * Put result in upper part of PROD and pass low part of TSPACE
	 * as new TSPACE.
	 */
	MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize, tspace);

	/* Product M.	   ________________
	 *		  |_(U1-U0)(V0-V1)_|
	 */
	if( _gcry_mpih_cmp(up + hsize, up, hsize) >= 0 ) {
	    _gcry_mpih_sub_n(prodp, up + hsize, up, hsize);
	    negflg = 0;
	}
	else {
	    _gcry_mpih_sub_n(prodp, up, up + hsize, hsize);
	    negflg = 1;
	}
	if( _gcry_mpih_cmp(vp + hsize, vp, hsize) >= 0 ) {
	    _gcry_mpih_sub_n(prodp + hsize, vp + hsize, vp, hsize);
	    negflg ^= 1;
	}
	else {
	    _gcry_mpih_sub_n(prodp + hsize, vp, vp + hsize, hsize);
	    /* No change of NEGFLG.  */
	}
	/* Read temporary operands from low part of PROD.
	 * Put result in low part of TSPACE using upper part of TSPACE
	 * as new TSPACE.
	 */
	MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize, tspace + size);

	/* Add/copy product H. */
	MPN_COPY (prodp + hsize, prodp + size, hsize);
	cy = _gcry_mpih_add_n( prodp + size, prodp + size,
			    prodp + size + hsize, hsize);
	/* Add product M (if NEGFLG is set, M is negative). */
	if(negflg)
	    cy -= _gcry_mpih_sub_n(prodp + hsize, prodp + hsize, tspace, size);
	else
	    cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size);

	/* Product L.	   ________________  ________________
	 *		  |________________||____U0 x V0_____|
	 * Read temporary operands from low part of PROD.
	 * Put result in low part of TSPACE using upper part of TSPACE
	 * as new TSPACE.
	 */
	MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size);

	/* Add/copy Product L (twice) */

	cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size);
	if( cy )
	  _gcry_mpih_add_1(prodp + hsize + size, prodp + hsize + size, hsize, cy);

	MPN_COPY(prodp, tspace, hsize);
	cy = _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace + hsize, hsize);
	if( cy )
	    _gcry_mpih_add_1(prodp + size, prodp + size, size, 1);
    }
}
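
/* Note: TSPACE must provide 2 * SIZE limbs.  Each level stores its
 * middle and low subproducts in the lower SIZE limbs and hands the
 * upper SIZE limbs (tspace + size) to the half-sized recursive calls;
 * the callers below allocate exactly 2 * size limbs for it.  */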


void
_gcry_mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size )
{
    mpi_size_t i;
    mpi_limb_t cy_limb;
    mpi_limb_t v_limb;

    /* Multiply by the first limb of U separately, as the result can be
     * stored (not added) to PROD.  We also avoid a loop for zeroing.  */
    v_limb = up[0];
    if( v_limb <= 1 ) {
	if( v_limb == 1 )
	    MPN_COPY( prodp, up, size );
	else
	    MPN_ZERO(prodp, size);
	cy_limb = 0;
    }
    else
	cy_limb = _gcry_mpih_mul_1( prodp, up, size, v_limb );

    prodp[size] = cy_limb;
    prodp++;

    /* For each iteration in the outer loop, multiply U by one further
     * limb of U, and add it to PROD.  */
    for( i=1; i < size; i++) {
	v_limb = up[i];
	if( v_limb <= 1 ) {
	    cy_limb = 0;
	    if( v_limb == 1 )
		cy_limb = _gcry_mpih_add_n(prodp, prodp, up, size);
	}
	else
	    cy_limb = _gcry_mpih_addmul_1(prodp, up, size, v_limb);

	prodp[size] = cy_limb;
	prodp++;
    }
}
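
/* Note: this is the squaring analogue of mul_n_basecase above.  U
 * supplies both operands, so the same mul_1/addmul_1 loop structure is
 * reused rather than a dedicated squaring kernel.  */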


void
_gcry_mpih_sqr_n( mpi_ptr_t prodp,
                  mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
{
    if( size & 1 ) {
	/* The size is odd, and the code below doesn't handle that.
	 * Multiply the least significant (size - 1) limbs with a recursive
	 * call, and handle the most significant limb of U separately.
	 * A slightly faster way to do this would be to make the Karatsuba
	 * code below behave as if the size were even, and let it check for
	 * odd size in the end.  I.e., in essence move this code to the end.
	 * Doing so would save us a recursive call, and potentially make the
	 * stack grow a lot less.
	 */
	mpi_size_t esize = size - 1;	   /* even size */
	mpi_limb_t cy_limb;

	MPN_SQR_N_RECURSE( prodp, up, esize, tspace );
	cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, up[esize] );
	prodp[esize + esize] = cy_limb;
	cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, size, up[esize] );

	prodp[esize + size] = cy_limb;
    }
    else {
	mpi_size_t hsize = size >> 1;
	mpi_limb_t cy;

	/* Product H.	   ________________  ________________
	 *		  |_____U1 x U1____||____U0 x U0_____|
	 * Put result in upper part of PROD and pass low part of TSPACE
	 * as new TSPACE.
	 */
	MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace);

	/* Product M.	   ________________
	 *		  |_(U1-U0)(U0-U1)_|
	 */
	if( _gcry_mpih_cmp( up + hsize, up, hsize) >= 0 )
	    _gcry_mpih_sub_n( prodp, up + hsize, up, hsize);
	else
	    _gcry_mpih_sub_n (prodp, up, up + hsize, hsize);

	/* Read temporary operands from low part of PROD.
	 * Put result in low part of TSPACE using upper part of TSPACE
	 * as new TSPACE.  */
	MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size);

	/* Add/copy product H  */
	MPN_COPY(prodp + hsize, prodp + size, hsize);
	cy = _gcry_mpih_add_n(prodp + size, prodp + size,
			   prodp + size + hsize, hsize);
	/* Add product M.  Here M = -(U1-U0)**2 is never positive, so it
	 * is always subtracted; no NEGFLG is needed.  */
	cy -= _gcry_mpih_sub_n (prodp + hsize, prodp + hsize, tspace, size);

	/* Product L.	   ________________  ________________
	 *		  |________________||____U0 x U0_____|
	 * Read temporary operands from low part of PROD.
	 * Put result in low part of TSPACE using upper part of TSPACE
	 * as new TSPACE.  */
	MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size);

	/* Add/copy Product L (twice).	*/
	cy += _gcry_mpih_add_n (prodp + hsize, prodp + hsize, tspace, size);
	if( cy )
	    _gcry_mpih_add_1(prodp + hsize + size, prodp + hsize + size,
							    hsize, cy);

	MPN_COPY(prodp, tspace, hsize);
	cy = _gcry_mpih_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
	if( cy )
	    _gcry_mpih_add_1 (prodp + size, prodp + size, size, 1);
    }
}


/* This should be made into an inline function in gmp.h.  */
void
_gcry_mpih_mul_n( mpi_ptr_t prodp,
                     mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
{
    int secure;

    if( up == vp ) {
	if( size < KARATSUBA_THRESHOLD )
	    _gcry_mpih_sqr_n_basecase( prodp, up, size );
	else {
	    mpi_ptr_t tspace;
	    secure = _gcry_is_secure( up );
	    tspace = mpi_alloc_limb_space( 2 * size, secure );
	    _gcry_mpih_sqr_n( prodp, up, size, tspace );
	    _gcry_mpi_free_limb_space (tspace, 2 * size );
	}
    }
    else {
	if( size < KARATSUBA_THRESHOLD )
	    mul_n_basecase( prodp, up, vp, size );
	else {
	    mpi_ptr_t tspace;
	    secure = _gcry_is_secure( up ) || _gcry_is_secure( vp );
	    tspace = mpi_alloc_limb_space( 2 * size, secure );
	    mul_n (prodp, up, vp, size, tspace);
	    _gcry_mpi_free_limb_space (tspace, 2 * size );
	}
    }
}
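
/* Illustrative usage sketch (hypothetical caller): PRODP must provide
 * 2 * SIZE limbs and, as for the base case above, must be distinct
 * from UP and VP, e.g.:
 *
 *     mpi_limb_t a[4], b[4], prod[8];      (SIZE = 4 limbs)
 *     ... fill a and b ...
 *     _gcry_mpih_mul_n( prod, a, b, 4 );   (prod now holds the 8-limb product)
 */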



void
_gcry_mpih_mul_karatsuba_case( mpi_ptr_t prodp,
                                  mpi_ptr_t up, mpi_size_t usize,
                                  mpi_ptr_t vp, mpi_size_t vsize,
                                  struct karatsuba_ctx *ctx )
{
    mpi_limb_t cy;

    if( !ctx->tspace || ctx->tspace_size < vsize ) {
	if( ctx->tspace )
	    _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs );
        ctx->tspace_nlimbs = 2 * vsize;
	ctx->tspace = mpi_alloc_limb_space (2 * vsize,
				            (_gcry_is_secure (up)
                                             || _gcry_is_secure (vp)));
	ctx->tspace_size = vsize;
    }

    MPN_MUL_N_RECURSE( prodp, up, vp, vsize, ctx->tspace );

    prodp += vsize;
    up += vsize;
    usize -= vsize;
    if( usize >= vsize ) {
	if( !ctx->tp || ctx->tp_size < vsize ) {
	    if( ctx->tp )
		_gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs );
            ctx->tp_nlimbs = 2 * vsize;
	    ctx->tp = mpi_alloc_limb_space (2 * vsize,
                                            (_gcry_is_secure (up)
                                             || _gcry_is_secure (vp)));
	    ctx->tp_size = vsize;
	}

	do {
	    MPN_MUL_N_RECURSE( ctx->tp, up, vp, vsize, ctx->tspace );
	    cy = _gcry_mpih_add_n( prodp, prodp, ctx->tp, vsize );
	    _gcry_mpih_add_1( prodp + vsize, ctx->tp + vsize, vsize, cy );
	    prodp += vsize;
	    up += vsize;
	    usize -= vsize;
	} while( usize >= vsize );
    }

    if( usize ) {
	if( usize < KARATSUBA_THRESHOLD ) {
	    _gcry_mpih_mul( ctx->tspace, vp, vsize, up, usize );
	}
	else {
	    if( !ctx->next ) {
		ctx->next = xcalloc( 1, sizeof *ctx );
	    }
	    _gcry_mpih_mul_karatsuba_case( ctx->tspace,
					vp, vsize,
					up, usize,
					ctx->next );
	}

	cy = _gcry_mpih_add_n( prodp, prodp, ctx->tspace, vsize);
	_gcry_mpih_add_1( prodp + vsize, ctx->tspace + vsize, usize, cy );
    }
}
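
/* Illustrative example: with usize = 10 and vsize = 4 the code above
 * first multiplies the low 4 limbs of U by V, then one further full
 * 4-limb chunk in the do/while loop, and finally the remaining 2 limbs
 * of U via _gcry_mpih_mul, accumulating each partial product at the
 * proper limb offset in PRODP.  */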


void
_gcry_mpih_release_karatsuba_ctx( struct karatsuba_ctx *ctx )
{
    struct karatsuba_ctx *ctx2;

    if( ctx->tp )
	_gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs );
    if( ctx->tspace )
	_gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs );
    for( ctx=ctx->next; ctx; ctx = ctx2 ) {
	ctx2 = ctx->next;
	if( ctx->tp )
            _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs );
	if( ctx->tspace )
	    _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs );
	xfree( ctx );
    }
}

/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
 * and v (pointed to by VP, with VSIZE limbs), and store the result at
 * PRODP.  USIZE + VSIZE limbs are always stored, even if the most
 * significant limb of the result turns out to be zero.  Return the
 * most significant limb of the result.
 *
 * NOTE: The space pointed to by PRODP is overwritten before UP and VP
 * are fully consumed, so overlapping the result with either input is
 * an error.
 *
 * Argument constraints:
 * 1. USIZE >= VSIZE.
 * 2. PRODP != UP and PRODP != VP, i.e. the destination
 *    must be distinct from the multiplier and the multiplicand.
 */

mpi_limb_t
_gcry_mpih_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
                   mpi_ptr_t vp, mpi_size_t vsize)
{
    mpi_ptr_t prod_endp = prodp + usize + vsize - 1;
    mpi_limb_t cy;
    struct karatsuba_ctx ctx;

    if( vsize < KARATSUBA_THRESHOLD ) {
	mpi_size_t i;
	mpi_limb_t v_limb;

	if( !vsize )
	    return 0;

	/* Multiply by the first limb in V separately, as the result can be
	 * stored (not added) to PROD.	We also avoid a loop for zeroing.  */
	v_limb = vp[0];
	if( v_limb <= 1 ) {
	    if( v_limb == 1 )
		MPN_COPY( prodp, up, usize );
	    else
		MPN_ZERO( prodp, usize );
	    cy = 0;
	}
	else
	    cy = _gcry_mpih_mul_1( prodp, up, usize, v_limb );

	prodp[usize] = cy;
	prodp++;

	/* For each iteration in the outer loop, multiply one limb from
	 * U with one limb from V, and add it to PROD.	*/
	for( i = 1; i < vsize; i++ ) {
	    v_limb = vp[i];
	    if( v_limb <= 1 ) {
		cy = 0;
		if( v_limb == 1 )
		   cy = _gcry_mpih_add_n(prodp, prodp, up, usize);
	    }
	    else
		cy = _gcry_mpih_addmul_1(prodp, up, usize, v_limb);

	    prodp[usize] = cy;
	    prodp++;
	}

	return cy;
    }

    memset( &ctx, 0, sizeof ctx );
    _gcry_mpih_mul_karatsuba_case( prodp, up, usize, vp, vsize, &ctx );
    _gcry_mpih_release_karatsuba_ctx( &ctx );
    return *prod_endp;
}
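
/* Illustrative usage sketch (hypothetical caller): multiply a 4-limb U
 * by a 2-limb V.  PRODP gets usize + vsize = 6 limbs and must not
 * overlap the inputs:
 *
 *     mpi_limb_t u[4], v[2], prod[6];
 *     ... fill u and v, keeping usize (4) >= vsize (2) ...
 *     mpi_limb_t msl = _gcry_mpih_mul( prod, u, 4, v, 2 );
 *     (msl is the most significant limb of the 6-limb result)
 */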