/*
 * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include "internal/cryptlib.h"
#include "internal/constant_time.h"
#include "bn_local.h"

#include <stdlib.h>
#ifdef _WIN32
# include <malloc.h>
# ifndef alloca
#  define alloca _alloca
# endif
#elif defined(__GNUC__)
# ifndef alloca
#  define alloca(s) __builtin_alloca((s))
# endif
#elif defined(__sun)
# include <alloca.h>
#endif

#include "rsaz_exp.h"

#undef SPARC_T4_MONT
#if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
# include "sparc_arch.h"
extern unsigned int OPENSSL_sparcv9cap_P[];
# define SPARC_T4_MONT
#endif

/* maximum precomputation table size for *variable* sliding windows */
#define TABLE_SIZE      32

/*
 * Beyond this limit the constant time code is disabled due to
 * the possible overflow in the computation of powerbufLen in
 * BN_mod_exp_mont_consttime.
 * When this limit is exceeded, the computation will be done using
 * non-constant time code, but it will take very long.
 */
#define BN_CONSTTIME_SIZE_LIMIT (INT_MAX / BN_BYTES / 256)

/*
 * Plain (non-modular) exponentiation: r = a^p.
 *
 * The result grows with every multiplication, so this is only sensible for
 * small operands.  It is explicitly NOT constant-time: inputs carrying
 * BN_FLG_CONSTTIME are rejected, since only BN_mod_exp_mont() honours that
 * flag.  Returns 1 on success, 0 on error.
 */
/* this one works - simple but works */
int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
    int i, bits, ret = 0;
    BIGNUM *v, *rr;

    if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
            || BN_get_flags(a, BN_FLG_CONSTTIME) != 0) {
        /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
        BNerr(BN_F_BN_EXP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
        return 0;
    }

    BN_CTX_start(ctx);
    /* If the result aliases an input, accumulate into a temporary instead. */
    rr = ((r == a) || (r == p)) ? BN_CTX_get(ctx) : r;
    v = BN_CTX_get(ctx);
    if (rr == NULL || v == NULL)
        goto err;

    if (BN_copy(v, a) == NULL)
        goto err;
    bits = BN_num_bits(p);

    /* Bit 0 of the exponent decides whether the accumulator starts at a or 1. */
    if (BN_is_odd(p)) {
        if (BN_copy(rr, a) == NULL)
            goto err;
    } else {
        if (!BN_one(rr))
            goto err;
    }

    /* Classic square-and-multiply over the remaining exponent bits. */
    for (i = 1; i < bits; i++) {
        if (!BN_sqr(v, v, ctx))
            goto err;
        if (BN_is_bit_set(p, i)) {
            if (!BN_mul(rr, rr, v, ctx))
                goto err;
        }
    }
    if (r != rr && BN_copy(r, rr) == NULL)
        goto err;

    ret = 1;
 err:
    BN_CTX_end(ctx);
    bn_check_top(r);
    return ret;
}

/*
 * General modular exponentiation dispatcher: r = a^p mod m.
 *
 * Picks among the word-based, Montgomery, and reciprocal implementations
 * depending on the operands (see the discussion below).  Returns 1 on
 * success, 0 on error.
 */
int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,
               BN_CTX *ctx)
{
    int ret;

    bn_check_top(a);
    bn_check_top(p);
    bn_check_top(m);

    /*-
     * For even modulus  m = 2^k*m_odd, it might make sense to compute
     * a^p mod m_odd  and  a^p mod 2^k  separately (with Montgomery
     * exponentiation for the odd part), using appropriate exponent
     * reductions, and combine the results using the CRT.
     *
     * For now, we use Montgomery only if the modulus is odd; otherwise,
     * exponentiation using the reciprocal-based quick remaindering
     * algorithm is used.
     *
     * (Timing obtained with expspeed.c [computations  a^p mod m
     * where  a, p, m  are of the same length: 256, 512, 1024, 2048,
     * 4096, 8192 bits], compared to the running time of the
     * standard algorithm:
     *
     *   BN_mod_exp_mont   33 .. 40 %  [AMD K6-2, Linux, debug configuration]
     *                     55 .. 77 %  [UltraSparc processor, but
     *                                  debug-solaris-sparcv8-gcc conf.]
     *
     *   BN_mod_exp_recp   50 .. 70 %  [AMD K6-2, Linux, debug configuration]
     *                     62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc]
     *
     * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont
     * at 2048 and more bits, but at 512 and 1024 bits, it was
     * slower even than the standard algorithm!
     *
     * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations]
     * should be obtained when the new Montgomery reduction code
     * has been integrated into OpenSSL.)
     */

#define MONT_MUL_MOD
#define MONT_EXP_WORD
#define RECP_MUL_MOD

#ifdef MONT_MUL_MOD
    if (BN_is_odd(m)) {
# ifdef MONT_EXP_WORD
        /*
         * Single-word base without any CONSTTIME flag set: the specialised
         * word-based routine is cheapest.  Any CONSTTIME flag forces the
         * generic Montgomery path, which is the only constant-time one.
         */
        if (a->top == 1 && !a->neg
            && (BN_get_flags(p, BN_FLG_CONSTTIME) == 0)
            && (BN_get_flags(a, BN_FLG_CONSTTIME) == 0)
            && (BN_get_flags(m, BN_FLG_CONSTTIME) == 0)) {
            BN_ULONG A = a->d[0];
            ret = BN_mod_exp_mont_word(r, A, p, m, ctx, NULL);
        } else
# endif
            ret = BN_mod_exp_mont(r, a, p, m, ctx, NULL);
    } else
#endif
#ifdef RECP_MUL_MOD
    {
        /* Even modulus: Montgomery does not apply; use reciprocal division. */
        ret = BN_mod_exp_recp(r, a, p, m, ctx);
    }
#else
    {
        ret = BN_mod_exp_simple(r, a, p, m, ctx);
    }
#endif

    bn_check_top(r);
    return ret;
}

/*
 * Modular exponentiation using reciprocal-based (BN_RECP_CTX) reduction and
 * a sliding window.  Works for even moduli (unlike Montgomery), but is NOT
 * constant-time, so inputs carrying BN_FLG_CONSTTIME are rejected.
 * Returns 1 on success, 0 on error.
 */
int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
                    const BIGNUM *m, BN_CTX *ctx)
{
    int i, j, bits, ret = 0, wstart, wend, window, wvalue;
    int start = 1;
    BIGNUM *aa;
    /* Table of variables obtained from 'ctx' */
    BIGNUM *val[TABLE_SIZE];
    BN_RECP_CTX recp;

    if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
            || BN_get_flags(a, BN_FLG_CONSTTIME) != 0
            || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) {
        /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
        BNerr(BN_F_BN_MOD_EXP_RECP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
        return 0;
    }

    bits = BN_num_bits(p);
    if (bits == 0) {
        /* x**0 mod 1, or x**0 mod -1 is still zero. */
        if (BN_abs_is_word(m, 1)) {
            ret = 1;
            BN_zero(r);
        } else {
            ret = BN_one(r);
        }
        return ret;
    }

    BN_RECP_CTX_init(&recp);

    BN_CTX_start(ctx);
    aa = BN_CTX_get(ctx);
    val[0] = BN_CTX_get(ctx);
    if (val[0] == NULL)
        goto err;

    if (m->neg) {
        /* ignore sign of 'm' */
        if (!BN_copy(aa, m))
            goto err;
        aa->neg = 0;
        if (BN_RECP_CTX_set(&recp, aa, ctx) <= 0)
            goto err;
    } else {
        if (BN_RECP_CTX_set(&recp, m, ctx) <= 0)
            goto err;
    }

    if (!BN_nnmod(val[0], a, m, ctx))
        goto err;               /* 1 */
    if (BN_is_zero(val[0])) {
        /* 0^p mod m is 0 for any p > 0. */
        BN_zero(r);
        ret = 1;
        goto err;
    }

    /* Precompute the odd powers a^1, a^3, ..., a^(2^window - 1). */
    window = BN_window_bits_for_exponent_size(bits);
    if (window > 1) {
        if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx))
            goto err;           /* 2 */
        j = 1 << (window - 1);
        for (i = 1; i < j; i++) {
            if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
                !BN_mod_mul_reciprocal(val[i], val[i - 1], aa, &recp, ctx))
                goto err;
        }
    }

    start = 1;                  /* This is used to avoid multiplication etc
                                 * when there is only the value '1' in the
                                 * buffer. */
    wvalue = 0;                 /* The 'value' of the window */
    wstart = bits - 1;          /* The top bit of the window */
    wend = 0;                   /* The bottom bit of the window */

    if (!BN_one(r))
        goto err;

    for (;;) {
        if (BN_is_bit_set(p, wstart) == 0) {
            if (!start)
                if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx))
                    goto err;
            if (wstart == 0)
                break;
            wstart--;
            continue;
        }
        /*
         * We now have wstart on a 'set' bit, we now need to work out how big
         * a window to do.  To do this we need to scan forward until the last
         * set bit before the end of the window
         */
        j = wstart;
        wvalue = 1;
        wend = 0;
        for (i = 1; i < window; i++) {
            if (wstart - i < 0)
                break;
            if (BN_is_bit_set(p, wstart - i)) {
                wvalue <<= (i - wend);
                wvalue |= 1;
                wend = i;
            }
        }

        /* wend is the size of the current window */
        j = wend + 1;
        /* add the 'bytes above' */
        if (!start)
            for (i = 0; i < j; i++) {
                if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx))
                    goto err;
            }

        /* wvalue will be an odd number < 2^window */
        if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx))
            goto err;

        /* move the 'window' down further */
        wstart -= wend + 1;
        wvalue = 0;
        start = 0;
        if (wstart < 0)
            break;
    }
    ret = 1;
 err:
    BN_CTX_end(ctx);
    BN_RECP_CTX_free(&recp);
    bn_check_top(r);
    return ret;
}

/*
 * Montgomery modular exponentiation with a sliding window: rr = a^p mod m.
 * Requires an odd modulus.  When any operand carries BN_FLG_CONSTTIME (and
 * the modulus is small enough not to overflow the constant-time buffer-size
 * computation), the work is delegated to BN_mod_exp_mont_consttime().
 * Returns 1 on success, 0 on error.
 */
int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
                    const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
{
    int i, j, bits, ret = 0, wstart, wend, window, wvalue;
    int start = 1;
    BIGNUM *d, *r;
    const BIGNUM *aa;
    /* Table of variables obtained from 'ctx' */
    BIGNUM *val[TABLE_SIZE];
    BN_MONT_CTX *mont = NULL;

    bn_check_top(a);
    bn_check_top(p);
    bn_check_top(m);

    if (!BN_is_odd(m)) {
        BNerr(BN_F_BN_MOD_EXP_MONT, BN_R_CALLED_WITH_EVEN_MODULUS);
        return 0;
    }

    if (m->top <= BN_CONSTTIME_SIZE_LIMIT
        && (BN_get_flags(p, BN_FLG_CONSTTIME) != 0
            || BN_get_flags(a, BN_FLG_CONSTTIME) != 0
            || BN_get_flags(m, BN_FLG_CONSTTIME) != 0)) {
        return BN_mod_exp_mont_consttime(rr, a, p, m, ctx, in_mont);
    }

    bits = BN_num_bits(p);
    if (bits == 0) {
        /* x**0 mod 1, or x**0 mod -1 is still zero.
         */
        if (BN_abs_is_word(m, 1)) {
            ret = 1;
            BN_zero(rr);
        } else {
            ret = BN_one(rr);
        }
        return ret;
    }

    BN_CTX_start(ctx);
    d = BN_CTX_get(ctx);
    r = BN_CTX_get(ctx);
    val[0] = BN_CTX_get(ctx);
    if (val[0] == NULL)
        goto err;

    /*
     * If this is not done, things will break in the montgomery part
     */

    if (in_mont != NULL)
        mont = in_mont;
    else {
        if ((mont = BN_MONT_CTX_new()) == NULL)
            goto err;
        if (!BN_MONT_CTX_set(mont, m, ctx))
            goto err;
    }

    /* Reduce the base into [0, m) before entering the Montgomery domain. */
    if (a->neg || BN_ucmp(a, m) >= 0) {
        if (!BN_nnmod(val[0], a, m, ctx))
            goto err;
        aa = val[0];
    } else
        aa = a;
    if (!bn_to_mont_fixed_top(val[0], aa, mont, ctx))
        goto err;               /* 1 */

    /* Precompute the odd powers a^1, a^3, ..., a^(2^window - 1). */
    window = BN_window_bits_for_exponent_size(bits);
    if (window > 1) {
        if (!bn_mul_mont_fixed_top(d, val[0], val[0], mont, ctx))
            goto err;           /* 2 */
        j = 1 << (window - 1);
        for (i = 1; i < j; i++) {
            if (((val[i] = BN_CTX_get(ctx)) == NULL) ||
                !bn_mul_mont_fixed_top(val[i], val[i - 1], d, mont, ctx))
                goto err;
        }
    }

    start = 1;                  /* This is used to avoid multiplication etc
                                 * when there is only the value '1' in the
                                 * buffer. */
    wvalue = 0;                 /* The 'value' of the window */
    wstart = bits - 1;          /* The top bit of the window */
    wend = 0;                   /* The bottom bit of the window */

#if 1                           /* by Shay Gueron's suggestion */
    j = m->top;                 /* borrow j */
    if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
        /*
         * The top bit of m is set, so R mod m (= 1 in Montgomery form) can
         * be computed cheaply as 2^(top*BN_BITS2) - m, avoiding a
         * conversion.
         */
        if (bn_wexpand(r, j) == NULL)
            goto err;
        /* 2^(top*BN_BITS2) - m */
        r->d[0] = (0 - m->d[0]) & BN_MASK2;
        for (i = 1; i < j; i++)
            r->d[i] = (~m->d[i]) & BN_MASK2;
        r->top = j;
        r->flags |= BN_FLG_FIXED_TOP;
    } else
#endif
    if (!bn_to_mont_fixed_top(r, BN_value_one(), mont, ctx))
        goto err;
    for (;;) {
        if (BN_is_bit_set(p, wstart) == 0) {
            if (!start) {
                if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx))
                    goto err;
            }
            if (wstart == 0)
                break;
            wstart--;
            continue;
        }
        /*
         * We now have wstart on a 'set' bit, we now need to work out how big
         * a window to do.  To do this we need to scan forward until the last
         * set bit before the end of the window
         */
        j = wstart;
        wvalue = 1;
        wend = 0;
        for (i = 1; i < window; i++) {
            if (wstart - i < 0)
                break;
            if (BN_is_bit_set(p, wstart - i)) {
                wvalue <<= (i - wend);
                wvalue |= 1;
                wend = i;
            }
        }

        /* wend is the size of the current window */
        j = wend + 1;
        /* add the 'bytes above' */
        if (!start)
            for (i = 0; i < j; i++) {
                if (!bn_mul_mont_fixed_top(r, r, r, mont, ctx))
                    goto err;
            }

        /* wvalue will be an odd number < 2^window */
        if (!bn_mul_mont_fixed_top(r, r, val[wvalue >> 1], mont, ctx))
            goto err;

        /* move the 'window' down further */
        wstart -= wend + 1;
        wvalue = 0;
        start = 0;
        if (wstart < 0)
            break;
    }
    /*
     * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery
     * removes padding [if any] and makes return value suitable for public
     * API consumer.
     */
#if defined(SPARC_T4_MONT)
    if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
        j = mont->N.top;        /* borrow j */
        val[0]->d[0] = 1;       /* borrow val[0] */
        for (i = 1; i < j; i++)
            val[0]->d[i] = 0;
        val[0]->top = j;
        if (!BN_mod_mul_montgomery(rr, r, val[0], mont, ctx))
            goto err;
    } else
#endif
    if (!BN_from_montgomery(rr, r, mont, ctx))
        goto err;
    ret = 1;
 err:
    if (in_mont == NULL)
        BN_MONT_CTX_free(mont);
    BN_CTX_end(ctx);
    bn_check_top(rr);
    return ret;
}

/*
 * Return up to BN_BITS2 bits of |a| starting at bit position |bitpos|,
 * packed into the low bits of the result.  Bits beyond the number's words
 * read as zero, so callers may ask for a window that overhangs the top.
 */
static BN_ULONG bn_get_bits(const BIGNUM *a, int bitpos)
{
    BN_ULONG ret = 0;
    int wordpos;

    wordpos = bitpos / BN_BITS2;
    bitpos %= BN_BITS2;
    if (wordpos >= 0 && wordpos < a->top) {
        ret = a->d[wordpos] & BN_MASK2;
        if (bitpos) {
            ret >>= bitpos;
            /* Pull the remaining high bits from the next word, if any. */
            if (++wordpos < a->top)
                ret |= a->d[wordpos] << (BN_BITS2 - bitpos);
        }
    }

    return ret & BN_MASK2;
}

/*
 * BN_mod_exp_mont_consttime() stores the precomputed powers in a specific
 * layout so that accessing any of these table values shows the same access
 * pattern as far as cache lines are concerned. The following functions are
 * used to transfer a BIGNUM from/to that table.
 */

/*
 * Scatter |b| into column |idx| of the interleaved power table: word i of
 * |b| is stored at table[idx + i * 2^window], so a later gather of any one
 * entry touches the same cache lines regardless of idx.
 */
static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top,
                                        unsigned char *buf, int idx,
                                        int window)
{
    int i, j;
    int width = 1 << window;
    BN_ULONG *table = (BN_ULONG *)buf;

    if (top > b->top)
        top = b->top;           /* this works because 'buf' is explicitly
                                 * zeroed */
    for (i = 0, j = idx; i < top; i++, j += width) {
        table[j] = b->d[i];
    }

    return 1;
}

/*
 * Gather entry |idx| from the interleaved power table into |b| without a
 * data-dependent memory access pattern: every table word is read and masked
 * with a constant-time equality test, so the selected index never influences
 * which cache lines are touched.
 */
static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top,
                                          unsigned char *buf, int idx,
                                          int window)
{
    int i, j;
    int width = 1 << window;
    /*
     * We declare table 'volatile' in order to discourage compiler
     * from reordering loads from the table.
     * Concern is that if
     * reordered in specific manner loads might give away the
     * information we are trying to conceal. Some would argue that
     * compiler can reorder them anyway, but it can as well be
     * argued that doing so would be violation of standard...
     */
    volatile BN_ULONG *table = (volatile BN_ULONG *)buf;

    if (bn_wexpand(b, top) == NULL)
        return 0;

    if (window <= 3) {
        /* Small windows: mask-select across the full row of 2^window words. */
        for (i = 0; i < top; i++, table += width) {
            BN_ULONG acc = 0;

            for (j = 0; j < width; j++) {
                acc |= table[j] &
                       ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
            }

            b->d[i] = acc;
        }
    } else {
        /*
         * Larger windows: split idx into a 2-bit "quadrant" selector (y0..y3)
         * and an offset within the quadrant, quartering the inner-loop work
         * while still reading every word of the row.
         */
        int xstride = 1 << (window - 2);
        BN_ULONG y0, y1, y2, y3;

        i = idx >> (window - 2);        /* equivalent of idx / xstride */
        idx &= xstride - 1;             /* equivalent of idx % xstride */

        y0 = (BN_ULONG)0 - (constant_time_eq_int(i,0)&1);
        y1 = (BN_ULONG)0 - (constant_time_eq_int(i,1)&1);
        y2 = (BN_ULONG)0 - (constant_time_eq_int(i,2)&1);
        y3 = (BN_ULONG)0 - (constant_time_eq_int(i,3)&1);

        for (i = 0; i < top; i++, table += width) {
            BN_ULONG acc = 0;

            for (j = 0; j < xstride; j++) {
                acc |= ( (table[j + 0 * xstride] & y0) |
                         (table[j + 1 * xstride] & y1) |
                         (table[j + 2 * xstride] & y2) |
                         (table[j + 3 * xstride] & y3) )
                       & ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1));
            }

            b->d[i] = acc;
        }
    }

    b->top = top;
    b->flags |= BN_FLG_FIXED_TOP;
    return 1;
}

/*
 * Given a pointer value, compute the next address that is a cache line
 * multiple.
 */
#define MOD_EXP_CTIME_ALIGN(x_) \
        ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))

/*
 * This variant of BN_mod_exp_mont() uses fixed windows and the special
 * precomputation memory layout to limit data-dependency to a minimum to
 * protect secret exponents (cf. the hyper-threading timing attacks pointed
 * out by Colin Percival,
 * http://www.daemonology.net/hyperthreading-considered-harmful/)
 */
int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
                              const BIGNUM *m, BN_CTX *ctx,
                              BN_MONT_CTX *in_mont)
{
    int i, bits, ret = 0, window, wvalue, wmask, window0;
    int top;
    BN_MONT_CTX *mont = NULL;

    int numPowers;
    unsigned char *powerbufFree = NULL;
    int powerbufLen = 0;
    unsigned char *powerbuf = NULL;
    BIGNUM tmp, am;
#if defined(SPARC_T4_MONT)
    unsigned int t4 = 0;
#endif

    bn_check_top(a);
    bn_check_top(p);
    bn_check_top(m);

    if (!BN_is_odd(m)) {
        BNerr(BN_F_BN_MOD_EXP_MONT_CONSTTIME, BN_R_CALLED_WITH_EVEN_MODULUS);
        return 0;
    }

    top = m->top;

    if (top > BN_CONSTTIME_SIZE_LIMIT) {
        /* Prevent overflowing the powerbufLen computation below */
        return BN_mod_exp_mont(rr, a, p, m, ctx, in_mont);
    }

    /*
     * Use all bits stored in |p|, rather than |BN_num_bits|, so we do not leak
     * whether the top bits are zero.
     */
    bits = p->top * BN_BITS2;
    if (bits == 0) {
        /* x**0 mod 1, or x**0 mod -1 is still zero. */
        if (BN_abs_is_word(m, 1)) {
            ret = 1;
            BN_zero(rr);
        } else {
            ret = BN_one(rr);
        }
        return ret;
    }

    BN_CTX_start(ctx);

    /*
     * Allocate a montgomery context if it was not supplied by the caller. If
     * this is not done, things will break in the montgomery part.
     */
    if (in_mont != NULL)
        mont = in_mont;
    else {
        if ((mont = BN_MONT_CTX_new()) == NULL)
            goto err;
        if (!BN_MONT_CTX_set(mont, m, ctx))
            goto err;
    }

    /* Reduce the base into [0, m) before entering the Montgomery domain. */
    if (a->neg || BN_ucmp(a, m) >= 0) {
        BIGNUM *reduced = BN_CTX_get(ctx);
        if (reduced == NULL
            || !BN_nnmod(reduced, a, m, ctx)) {
            goto err;
        }
        a = reduced;
    }

#ifdef RSAZ_ENABLED
    /*
     * If the size of the operands allow it, perform the optimized
     * RSAZ exponentiation. For further information see
     * crypto/bn/rsaz_exp.c and accompanying assembly modules.
     */
    if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024)
        && rsaz_avx2_eligible()) {
        if (NULL == bn_wexpand(rr, 16))
            goto err;
        RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d,
                               mont->n0[0]);
        rr->top = 16;
        rr->neg = 0;
        bn_correct_top(rr);
        ret = 1;
        goto err;
    } else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) {
        if (NULL == bn_wexpand(rr, 8))
            goto err;
        RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d);
        rr->top = 8;
        rr->neg = 0;
        bn_correct_top(rr);
        ret = 1;
        goto err;
    }
#endif

    /* Get the window size to use with size of p. */
    window = BN_window_bits_for_ctime_exponent_size(bits);
#if defined(SPARC_T4_MONT)
    if (window >= 5 && (top & 15) == 0 && top <= 64 &&
        (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) ==
        (CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0]))
        window = 5;
    else
#endif
#if defined(OPENSSL_BN_ASM_MONT5)
    if (window >= 5 && top <= BN_SOFT_LIMIT) {
        window = 5;             /* ~5% improvement for RSA2048 sign, and even
                                 * for RSA4096 */
        /* reserve space for mont->N.d[] copy */
        powerbufLen += top * sizeof(mont->N.d[0]);
    }
#endif
    (void)0;

    /*
     * Allocate a buffer large enough to hold all of the pre-computed powers
     * of am, am itself and tmp.
     */
    numPowers = 1 << window;
    powerbufLen += sizeof(m->d[0]) * (top * numPowers +
                                      ((2 * top) >
                                       numPowers ? (2 * top) : numPowers));
#ifdef alloca
    if (powerbufLen < 3072)
        powerbufFree =
            alloca(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH);
    else
#endif
        if ((powerbufFree =
             OPENSSL_malloc(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH))
            == NULL)
            goto err;

    powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree);
    memset(powerbuf, 0, powerbufLen);

#ifdef alloca
    /* Stack allocation must not be passed to OPENSSL_free() at the end. */
    if (powerbufLen < 3072)
        powerbufFree = NULL;
#endif

    /* lay down tmp and am right after powers table */
    tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers);
    am.d = tmp.d + top;
    tmp.top = am.top = 0;
    tmp.dmax = am.dmax = top;
    tmp.neg = am.neg = 0;
    tmp.flags = am.flags = BN_FLG_STATIC_DATA;

    /* prepare a^0 in Montgomery domain */
#if 1                           /* by Shay Gueron's suggestion */
    if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) {
        /* 2^(top*BN_BITS2) - m */
        tmp.d[0] = (0 - m->d[0]) & BN_MASK2;
        for (i = 1; i < top; i++)
            tmp.d[i] = (~m->d[i]) & BN_MASK2;
        tmp.top = top;
    } else
#endif
    if (!bn_to_mont_fixed_top(&tmp, BN_value_one(), mont, ctx))
        goto err;

    /* prepare a^1 in Montgomery domain */
    if (!bn_to_mont_fixed_top(&am, a, mont, ctx))
        goto err;

    if (top > BN_SOFT_LIMIT)
        goto fallback;

#if defined(SPARC_T4_MONT)
    if (t4) {
        typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np,
                                       const BN_ULONG *n0, const void *table,
                                       int power, int bits);
        int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np,
                              const BN_ULONG *n0, const void *table,
                              int power, int bits);
        int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np,
                               const BN_ULONG *n0, const void *table,
                               int power, int bits);
        int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np,
                               const BN_ULONG *n0, const void *table,
                               int power, int bits);
        int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np,
                               const BN_ULONG *n0, const void *table,
                               int power, int bits);
        static const bn_pwr5_mont_f pwr5_funcs[4] = {
            bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16,
            bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32
        };
        bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1];

        typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap,
                                      const void *bp, const BN_ULONG *np,
                                      const BN_ULONG *n0);
        int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp,
                             const BN_ULONG *np, const BN_ULONG *n0);
        int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap,
                              const void *bp, const BN_ULONG *np,
                              const BN_ULONG *n0);
        int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap,
                              const void *bp, const BN_ULONG *np,
                              const BN_ULONG *n0);
        int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap,
                              const void *bp, const BN_ULONG *np,
                              const BN_ULONG *n0);
        static const bn_mul_mont_f mul_funcs[4] = {
            bn_mul_mont_t4_8, bn_mul_mont_t4_16,
            bn_mul_mont_t4_24, bn_mul_mont_t4_32
        };
        bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1];

        void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap,
                              const void *bp, const BN_ULONG *np,
                              const BN_ULONG *n0, int num);
        void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap,
                            const void *bp, const BN_ULONG *np,
                            const BN_ULONG *n0, int num);
        void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap,
                                    const void *table, const BN_ULONG *np,
                                    const BN_ULONG *n0, int num, int power);
        void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num,
                                   void *table, size_t power);
        void bn_gather5_t4(BN_ULONG *out, size_t num,
                           void *table, size_t power);
        void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num);

        BN_ULONG *np = mont->N.d, *n0 = mont->n0;
        int stride = 5 * (6 - (top / 16 - 1));  /* multiple of 5, but less
                                                 * than 32 */

        /*
         * BN_to_montgomery can contaminate words above .top [in
         * BN_DEBUG[_DEBUG] build]...
         */
        for (i = am.top; i < top; i++)
            am.d[i] = 0;
        for (i = tmp.top; i < top; i++)
            tmp.d[i] = 0;

        bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0);
        bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1);
        if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) &&
            !(*mul_worker) (tmp.d, am.d, am.d, np, n0))
            bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top);
        bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2);

        for (i = 3; i < 32; i++) {
            /* Calculate a^i = a^(i-1) * a */
            if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) &&
                !(*mul_worker) (tmp.d, tmp.d, am.d, np, n0))
                bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top);
            bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i);
        }

        /* switch to 64-bit domain */
        np = alloca(top * sizeof(BN_ULONG));
        top /= 2;
        bn_flip_t4(np, mont->N.d, top);

        /*
         * The exponent may not have a whole number of fixed-size windows.
         * To simplify the main loop, the initial window has between 1 and
         * full-window-size bits such that what remains is always a whole
         * number of windows
         */
        window0 = (bits - 1) % 5 + 1;
        wmask = (1 << window0) - 1;
        bits -= window0;
        wvalue = bn_get_bits(p, bits) & wmask;
        bn_gather5_t4(tmp.d, top, powerbuf, wvalue);

        /*
         * Scan the exponent one window at a time starting from the most
         * significant bits.
         */
        while (bits > 0) {
            if (bits < stride)
                stride = bits;
            bits -= stride;
            wvalue = bn_get_bits(p, bits);

            if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
                continue;
            /* retry once and fall back */
            if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride))
                continue;

            bits += stride - 5;
            wvalue >>= stride - 5;
            wvalue &= 31;
            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
            bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top);
            bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top,
                                   wvalue);
        }

        bn_flip_t4(tmp.d, tmp.d, top);
        top *= 2;
        /* back to 32-bit domain */
        tmp.top = top;
        bn_correct_top(&tmp);
        OPENSSL_cleanse(np, top * sizeof(BN_ULONG));
    } else
#endif
#if defined(OPENSSL_BN_ASM_MONT5)
    if (window == 5 && top > 1) {
        /*
         * This optimization uses ideas from https://eprint.iacr.org/2011/239,
         * specifically optimization of cache-timing attack countermeasures,
         * pre-computation optimization, and Almost Montgomery Multiplication.
         *
         * The paper discusses a 4-bit window to optimize 512-bit modular
         * exponentiation, used in RSA-1024 with CRT, but RSA-1024 is no longer
         * important.
         *
         * |bn_mul_mont_gather5| and |bn_power5| implement the "almost"
         * reduction variant, so the values here may not be fully reduced.
         * They are bounded by R (i.e. they fit in |top| words), not |m|.
         * Additionally, we pass these "almost" reduced inputs into
         * |bn_mul_mont|, which implements the normal reduction variant.
         * Given those inputs, |bn_mul_mont| may not give reduced
         * output, but it will still produce "almost" reduced output.
         */
        void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap,
                                 const void *table, const BN_ULONG *np,
                                 const BN_ULONG *n0, int num, int power);
        void bn_scatter5(const BN_ULONG *inp, size_t num,
                         void *table, size_t power);
        void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power);
        void bn_power5(BN_ULONG *rp, const BN_ULONG *ap,
                       const void *table, const BN_ULONG *np,
                       const BN_ULONG *n0, int num, int power);
        int bn_get_bits5(const BN_ULONG *ap, int off);

        BN_ULONG *n0 = mont->n0, *np;

        /*
         * BN_to_montgomery can contaminate words above .top [in
         * BN_DEBUG[_DEBUG] build]...
         */
        for (i = am.top; i < top; i++)
            am.d[i] = 0;
        for (i = tmp.top; i < top; i++)
            tmp.d[i] = 0;

        /*
         * copy mont->N.d[] to improve cache locality
         */
        for (np = am.d + top, i = 0; i < top; i++)
            np[i] = mont->N.d[i];

        bn_scatter5(tmp.d, top, powerbuf, 0);
        bn_scatter5(am.d, am.top, powerbuf, 1);
        bn_mul_mont(tmp.d, am.d, am.d, np, n0, top);
        bn_scatter5(tmp.d, top, powerbuf, 2);

# if 0
        for (i = 3; i < 32; i++) {
            /* Calculate a^i = a^(i-1) * a */
            bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
            bn_scatter5(tmp.d, top, powerbuf, i);
        }
# else
        /* same as above, but uses squaring for 1/2 of operations */
        for (i = 4; i < 32; i *= 2) {
            bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
            bn_scatter5(tmp.d, top, powerbuf, i);
        }
        for (i = 3; i < 8; i += 2) {
            int j;
            bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
            bn_scatter5(tmp.d, top, powerbuf, i);
            for (j = 2 * i; j < 32; j *= 2) {
                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
                bn_scatter5(tmp.d, top, powerbuf, j);
            }
        }
        for (; i < 16; i += 2) {
            bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
            bn_scatter5(tmp.d, top, powerbuf, i);
            bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
            bn_scatter5(tmp.d, top, powerbuf, 2 * i);
        }
        for (; i < 32; i += 2) {
            bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np, n0, top, i - 1);
            bn_scatter5(tmp.d, top, powerbuf, i);
        }
# endif
        /*
         * The exponent may not have a whole number of fixed-size windows.
         * To simplify the main loop, the initial window has between 1 and
         * full-window-size bits such that what remains is always a whole
         * number of windows
         */
        window0 = (bits - 1) % 5 + 1;
        wmask = (1 << window0) - 1;
        bits -= window0;
        wvalue = bn_get_bits(p, bits) & wmask;
        bn_gather5(tmp.d, top, powerbuf, wvalue);

        /*
         * Scan the exponent one window at a time starting from the most
         * significant bits.
         */
        if (top & 7) {
            while (bits > 0) {
                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
                bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top);
                bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top,
                                    bn_get_bits5(p->d, bits -= 5));
            }
        } else {
            while (bits > 0) {
                bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top,
                          bn_get_bits5(p->d, bits -= 5));
            }
        }

        tmp.top = top;
        /*
         * The result is now in |tmp| in Montgomery form, but it may not be
         * fully reduced. This is within bounds for |BN_from_montgomery|
         * (tmp < R <= m*R) so it will, when converting from Montgomery form,
         * produce a fully reduced result.
         *
         * This differs from Figure 2 of the paper, which uses AMM(h, 1) to
         * convert from Montgomery form with unreduced output, followed by an
         * extra reduction step. In the paper's terminology, we replace
         * steps 9 and 10 with MM(h, 1).
         */
    } else
#endif
    {
 fallback:
        if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, window))
            goto err;
        if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, window))
            goto err;

        /*
         * If the window size is greater than 1, then calculate
         * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) (even
         * powers could instead be computed as (a^(i/2))^2 to use the slight
         * performance advantage of sqr over mul).
         */
        if (window > 1) {
            if (!bn_mul_mont_fixed_top(&tmp, &am, &am, mont, ctx))
                goto err;
            if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2,
                                              window))
                goto err;
            for (i = 3; i < numPowers; i++) {
                /* Calculate a^i = a^(i-1) * a */
                if (!bn_mul_mont_fixed_top(&tmp, &am, &tmp, mont, ctx))
                    goto err;
                if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i,
                                                  window))
                    goto err;
            }
        }

        /*
         * The exponent may not have a whole number of fixed-size windows.
         * To simplify the main loop, the initial window has between 1 and
         * full-window-size bits such that what remains is always a whole
         * number of windows
         */
        window0 = (bits - 1) % window + 1;
        wmask = (1 << window0) - 1;
        bits -= window0;
        wvalue = bn_get_bits(p, bits) & wmask;
        if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp, top, powerbuf, wvalue,
                                            window))
            goto err;

        wmask = (1 << window) - 1;
        /*
         * Scan the exponent one window at a time starting from the most
         * significant bits.
         */
        while (bits > 0) {

            /* Square the result window-size times */
            for (i = 0; i < window; i++)
                if (!bn_mul_mont_fixed_top(&tmp, &tmp, &tmp, mont, ctx))
                    goto err;

            /*
             * Get a window's worth of bits from the exponent
             * This avoids calling BN_is_bit_set for each bit, which
             * is not only slower but also makes each bit vulnerable to
             * EM (and likely other) side-channel attacks like One&Done
             * (for details see "One&Done: A Single-Decryption EM-Based
             * Attack on OpenSSL's Constant-Time Blinded RSA" by M. Alam,
             * H. Khan, M. Dey, N. Sinha, R. Callan, A. Zajic, and
             * M. Prvulovic, in USENIX Security'18)
             */
            bits -= window;
            wvalue = bn_get_bits(p, bits) & wmask;
            /*
             * Fetch the appropriate pre-computed value from the pre-buf
             */
            if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue,
                                                window))
                goto err;

            /* Multiply the result into the intermediate result */
            if (!bn_mul_mont_fixed_top(&tmp, &tmp, &am, mont, ctx))
                goto err;
        }
    }

    /*
     * Done with zero-padded intermediate BIGNUMs. Final BN_from_montgomery
     * removes padding [if any] and makes return value suitable for public
     * API consumer.
     */
#if defined(SPARC_T4_MONT)
    if (OPENSSL_sparcv9cap_P[0] & (SPARCV9_VIS3 | SPARCV9_PREFER_FPU)) {
        am.d[0] = 1;            /* borrow am */
        for (i = 1; i < top; i++)
            am.d[i] = 0;
        if (!BN_mod_mul_montgomery(rr, &tmp, &am, mont, ctx))
            goto err;
    } else
#endif
    if (!BN_from_montgomery(rr, &tmp, mont, ctx))
        goto err;
    ret = 1;
 err:
    if (in_mont == NULL)
        BN_MONT_CTX_free(mont);
    if (powerbuf != NULL) {
        /* The table held secret-derived data; wipe it before release. */
        OPENSSL_cleanse(powerbuf, powerbufLen);
        OPENSSL_free(powerbufFree);
    }
    BN_CTX_end(ctx);
    return ret;
}

int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p,
                         const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
{
    BN_MONT_CTX *mont = NULL;
    int b, bits, ret = 0;
    int r_is_one;
    BN_ULONG w, next_w;
    BIGNUM *r, *t;
    BIGNUM *swap_tmp;
#define BN_MOD_MUL_WORD(r, w, m) \
        (BN_mul_word(r, (w)) && \
        (/* BN_ucmp(r, (m)) < 0 ? 1 :*/  \
          (BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1))))
    /*
     * BN_MOD_MUL_WORD is only used with 'w' large, so the BN_ucmp test is
     * probably more overhead than always using BN_mod (which uses BN_copy if
     * a similar test returns true).
     */
    /*
     * We can use BN_mod and do not need BN_nnmod because our accumulator is
     * never negative (the result of BN_mod does not depend on the sign of
     * the modulus).
1180 */ 1181 #define BN_TO_MONTGOMERY_WORD(r, w, mont) \ 1182 (BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx)) 1183 1184 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0 1185 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) { 1186 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */ 1187 BNerr(BN_F_BN_MOD_EXP_MONT_WORD, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); 1188 return 0; 1189 } 1190 1191 bn_check_top(p); 1192 bn_check_top(m); 1193 1194 if (!BN_is_odd(m)) { 1195 BNerr(BN_F_BN_MOD_EXP_MONT_WORD, BN_R_CALLED_WITH_EVEN_MODULUS); 1196 return 0; 1197 } 1198 if (m->top == 1) 1199 a %= m->d[0]; /* make sure that 'a' is reduced */ 1200 1201 bits = BN_num_bits(p); 1202 if (bits == 0) { 1203 /* x**0 mod 1, or x**0 mod -1 is still zero. */ 1204 if (BN_abs_is_word(m, 1)) { 1205 ret = 1; 1206 BN_zero(rr); 1207 } else { 1208 ret = BN_one(rr); 1209 } 1210 return ret; 1211 } 1212 if (a == 0) { 1213 BN_zero(rr); 1214 ret = 1; 1215 return ret; 1216 } 1217 1218 BN_CTX_start(ctx); 1219 r = BN_CTX_get(ctx); 1220 t = BN_CTX_get(ctx); 1221 if (t == NULL) 1222 goto err; 1223 1224 if (in_mont != NULL) 1225 mont = in_mont; 1226 else { 1227 if ((mont = BN_MONT_CTX_new()) == NULL) 1228 goto err; 1229 if (!BN_MONT_CTX_set(mont, m, ctx)) 1230 goto err; 1231 } 1232 1233 r_is_one = 1; /* except for Montgomery factor */ 1234 1235 /* bits-1 >= 0 */ 1236 1237 /* The result is accumulated in the product r*w. */ 1238 w = a; /* bit 'bits-1' of 'p' is always set */ 1239 for (b = bits - 2; b >= 0; b--) { 1240 /* First, square r*w. */ 1241 next_w = w * w; 1242 if ((next_w / w) != w) { /* overflow */ 1243 if (r_is_one) { 1244 if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) 1245 goto err; 1246 r_is_one = 0; 1247 } else { 1248 if (!BN_MOD_MUL_WORD(r, w, m)) 1249 goto err; 1250 } 1251 next_w = 1; 1252 } 1253 w = next_w; 1254 if (!r_is_one) { 1255 if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) 1256 goto err; 1257 } 1258 1259 /* Second, multiply r*w by 'a' if exponent bit is set. 
*/ 1260 if (BN_is_bit_set(p, b)) { 1261 next_w = w * a; 1262 if ((next_w / a) != w) { /* overflow */ 1263 if (r_is_one) { 1264 if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) 1265 goto err; 1266 r_is_one = 0; 1267 } else { 1268 if (!BN_MOD_MUL_WORD(r, w, m)) 1269 goto err; 1270 } 1271 next_w = a; 1272 } 1273 w = next_w; 1274 } 1275 } 1276 1277 /* Finally, set r:=r*w. */ 1278 if (w != 1) { 1279 if (r_is_one) { 1280 if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) 1281 goto err; 1282 r_is_one = 0; 1283 } else { 1284 if (!BN_MOD_MUL_WORD(r, w, m)) 1285 goto err; 1286 } 1287 } 1288 1289 if (r_is_one) { /* can happen only if a == 1 */ 1290 if (!BN_one(rr)) 1291 goto err; 1292 } else { 1293 if (!BN_from_montgomery(rr, r, mont, ctx)) 1294 goto err; 1295 } 1296 ret = 1; 1297 err: 1298 if (in_mont == NULL) 1299 BN_MONT_CTX_free(mont); 1300 BN_CTX_end(ctx); 1301 bn_check_top(rr); 1302 return ret; 1303 } 1304 1305 /* The old fallback, simple version :-) */ 1306 int BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, 1307 const BIGNUM *m, BN_CTX *ctx) 1308 { 1309 int i, j, bits, ret = 0, wstart, wend, window, wvalue; 1310 int start = 1; 1311 BIGNUM *d; 1312 /* Table of variables obtained from 'ctx' */ 1313 BIGNUM *val[TABLE_SIZE]; 1314 1315 if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0 1316 || BN_get_flags(a, BN_FLG_CONSTTIME) != 0 1317 || BN_get_flags(m, BN_FLG_CONSTTIME) != 0) { 1318 /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */ 1319 BNerr(BN_F_BN_MOD_EXP_SIMPLE, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); 1320 return 0; 1321 } 1322 1323 bits = BN_num_bits(p); 1324 if (bits == 0) { 1325 /* x**0 mod 1, or x**0 mod -1 is still zero. 
*/ 1326 if (BN_abs_is_word(m, 1)) { 1327 ret = 1; 1328 BN_zero(r); 1329 } else { 1330 ret = BN_one(r); 1331 } 1332 return ret; 1333 } 1334 1335 BN_CTX_start(ctx); 1336 d = BN_CTX_get(ctx); 1337 val[0] = BN_CTX_get(ctx); 1338 if (val[0] == NULL) 1339 goto err; 1340 1341 if (!BN_nnmod(val[0], a, m, ctx)) 1342 goto err; /* 1 */ 1343 if (BN_is_zero(val[0])) { 1344 BN_zero(r); 1345 ret = 1; 1346 goto err; 1347 } 1348 1349 window = BN_window_bits_for_exponent_size(bits); 1350 if (window > 1) { 1351 if (!BN_mod_mul(d, val[0], val[0], m, ctx)) 1352 goto err; /* 2 */ 1353 j = 1 << (window - 1); 1354 for (i = 1; i < j; i++) { 1355 if (((val[i] = BN_CTX_get(ctx)) == NULL) || 1356 !BN_mod_mul(val[i], val[i - 1], d, m, ctx)) 1357 goto err; 1358 } 1359 } 1360 1361 start = 1; /* This is used to avoid multiplication etc 1362 * when there is only the value '1' in the 1363 * buffer. */ 1364 wvalue = 0; /* The 'value' of the window */ 1365 wstart = bits - 1; /* The top bit of the window */ 1366 wend = 0; /* The bottom bit of the window */ 1367 1368 if (!BN_one(r)) 1369 goto err; 1370 1371 for (;;) { 1372 if (BN_is_bit_set(p, wstart) == 0) { 1373 if (!start) 1374 if (!BN_mod_mul(r, r, r, m, ctx)) 1375 goto err; 1376 if (wstart == 0) 1377 break; 1378 wstart--; 1379 continue; 1380 } 1381 /* 1382 * We now have wstart on a 'set' bit, we now need to work out how bit 1383 * a window to do. 
To do this we need to scan forward until the last 1384 * set bit before the end of the window 1385 */ 1386 j = wstart; 1387 wvalue = 1; 1388 wend = 0; 1389 for (i = 1; i < window; i++) { 1390 if (wstart - i < 0) 1391 break; 1392 if (BN_is_bit_set(p, wstart - i)) { 1393 wvalue <<= (i - wend); 1394 wvalue |= 1; 1395 wend = i; 1396 } 1397 } 1398 1399 /* wend is the size of the current window */ 1400 j = wend + 1; 1401 /* add the 'bytes above' */ 1402 if (!start) 1403 for (i = 0; i < j; i++) { 1404 if (!BN_mod_mul(r, r, r, m, ctx)) 1405 goto err; 1406 } 1407 1408 /* wvalue will be an odd number < 2^window */ 1409 if (!BN_mod_mul(r, r, val[wvalue >> 1], m, ctx)) 1410 goto err; 1411 1412 /* move the 'window' down further */ 1413 wstart -= wend + 1; 1414 wvalue = 0; 1415 start = 0; 1416 if (wstart < 0) 1417 break; 1418 } 1419 ret = 1; 1420 err: 1421 BN_CTX_end(ctx); 1422 bn_check_top(r); 1423 return ret; 1424 } 1425