1 /* crypto/ec/ecp_nistputil.c */ 2 /* 3 * Written by Bodo Moeller for the OpenSSL project. 4 */ 5 /* Copyright 2011 Google Inc. 6 * 7 * Licensed under the Apache License, Version 2.0 (the "License"); 8 * 9 * you may not use this file except in compliance with the License. 10 * You may obtain a copy of the License at 11 * 12 * http://www.apache.org/licenses/LICENSE-2.0 13 * 14 * Unless required by applicable law or agreed to in writing, software 15 * distributed under the License is distributed on an "AS IS" BASIS, 16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 * See the License for the specific language governing permissions and 18 * limitations under the License. 19 */ 20 21 #include <openssl/opensslconf.h> 22 #ifndef OPENSSL_NO_EC_NISTP_64_GCC_128 23 24 /* 25 * Common utility functions for ecp_nistp224.c, ecp_nistp256.c, ecp_nistp521.c. 26 */ 27 28 #include <stddef.h> 29 #include "ec_lcl.h" 30 31 /* Convert an array of points into affine coordinates. 32 * (If the point at infinity is found (Z = 0), it remains unchanged.) 33 * This function is essentially an equivalent to EC_POINTs_make_affine(), but 34 * works with the internal representation of points as used by ecp_nistp###.c 35 * rather than with (BIGNUM-based) EC_POINT data structures. 36 * 37 * point_array is the input/output buffer ('num' points in projective form, 38 * i.e. three coordinates each), based on an internal representation of 39 * field elements of size 'felem_size'. 40 * 41 * tmp_felems needs to point to a temporary array of 'num'+1 field elements 42 * for storage of intermediate values. 
43 */ 44 void ec_GFp_nistp_points_make_affine_internal(size_t num, void *point_array, 45 size_t felem_size, void *tmp_felems, 46 void (*felem_one)(void *out), 47 int (*felem_is_zero)(const void *in), 48 void (*felem_assign)(void *out, const void *in), 49 void (*felem_square)(void *out, const void *in), 50 void (*felem_mul)(void *out, const void *in1, const void *in2), 51 void (*felem_inv)(void *out, const void *in), 52 void (*felem_contract)(void *out, const void *in)) 53 { 54 int i = 0; 55 56 #define tmp_felem(I) (&((char *)tmp_felems)[(I) * felem_size]) 57 #define X(I) (&((char *)point_array)[3*(I) * felem_size]) 58 #define Y(I) (&((char *)point_array)[(3*(I) + 1) * felem_size]) 59 #define Z(I) (&((char *)point_array)[(3*(I) + 2) * felem_size]) 60 61 if (!felem_is_zero(Z(0))) 62 felem_assign(tmp_felem(0), Z(0)); 63 else 64 felem_one(tmp_felem(0)); 65 for (i = 1; i < (int)num; i++) 66 { 67 if (!felem_is_zero(Z(i))) 68 felem_mul(tmp_felem(i), tmp_felem(i-1), Z(i)); 69 else 70 felem_assign(tmp_felem(i), tmp_felem(i-1)); 71 } 72 /* Now each tmp_felem(i) is the product of Z(0) .. Z(i), skipping any zero-valued factors: 73 * if Z(i) = 0, we essentially pretend that Z(i) = 1 */ 74 75 felem_inv(tmp_felem(num-1), tmp_felem(num-1)); 76 for (i = num - 1; i >= 0; i--) 77 { 78 if (i > 0) 79 /* tmp_felem(i-1) is the product of Z(0) .. Z(i-1), 80 * tmp_felem(i) is the inverse of the product of Z(0) .. 
Z(i) 81 */ 82 felem_mul(tmp_felem(num), tmp_felem(i-1), tmp_felem(i)); /* 1/Z(i) */ 83 else 84 felem_assign(tmp_felem(num), tmp_felem(0)); /* 1/Z(0) */ 85 86 if (!felem_is_zero(Z(i))) 87 { 88 if (i > 0) 89 /* For next iteration, replace tmp_felem(i-1) by its inverse */ 90 felem_mul(tmp_felem(i-1), tmp_felem(i), Z(i)); 91 92 /* Convert point (X, Y, Z) into affine form (X/(Z^2), Y/(Z^3), 1) */ 93 felem_square(Z(i), tmp_felem(num)); /* 1/(Z^2) */ 94 felem_mul(X(i), X(i), Z(i)); /* X/(Z^2) */ 95 felem_mul(Z(i), Z(i), tmp_felem(num)); /* 1/(Z^3) */ 96 felem_mul(Y(i), Y(i), Z(i)); /* Y/(Z^3) */ 97 felem_contract(X(i), X(i)); 98 felem_contract(Y(i), Y(i)); 99 felem_one(Z(i)); 100 } 101 else 102 { 103 if (i > 0) 104 /* For next iteration, replace tmp_felem(i-1) by its inverse */ 105 felem_assign(tmp_felem(i-1), tmp_felem(i)); 106 } 107 } 108 } 109 110 /* 111 * This function looks at 5+1 scalar bits (5 current, 1 adjacent less 112 * significant bit), and recodes them into a signed digit for use in fast point 113 * multiplication: the use of signed rather than unsigned digits means that 114 * fewer points need to be precomputed, given that point inversion is easy 115 * (a precomputed point dP makes -dP available as well). 116 * 117 * BACKGROUND: 118 * 119 * Signed digits for multiplication were introduced by Booth ("A signed binary 120 * multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV, 121 * pt. 2 (1951), pp. 236-240), in that case for multiplication of integers. 122 * Booth's original encoding did not generally improve the density of nonzero 123 * digits over the binary representation, and was merely meant to simplify the 124 * handling of signed factors given in two's complement; but it has since been 125 * shown to be the basis of various signed-digit representations that do have 126 * further advantages, including the wNAF, using the following general approach: 127 * 128 * (1) Given a binary representation 129 * 130 * b_k ... 
b_2 b_1 b_0,
 *
 *     of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1
 *     by using bit-wise subtraction as follows:
 *
 *          b_k     b_(k-1)  ...  b_2  b_1  b_0
 *      -           b_k      ...  b_3  b_2  b_1  b_0
 *      ---------------------------------------------
 *          s_(k+1) s_k      ...  s_3  s_2  s_1  s_0
 *
 *     A left-shift followed by subtraction of the original value yields a new
 *     representation of the same value, using signed bits s_i = b_(i-1) - b_i.
 *     This representation from Booth's paper has since appeared in the
 *     literature under a variety of different names including "reversed binary
 *     form", "alternating greedy expansion", "mutual opposite form", and
 *     "sign-alternating {+-1}-representation".
 *
 *     An interesting property is that among the nonzero bits, values 1 and -1
 *     strictly alternate.
 *
 * (2) Various window schemes can be applied to the Booth representation of
 *     integers: for example, right-to-left sliding windows yield the wNAF
 *     (a signed-digit encoding independently discovered by various researchers
 *     in the 1990s), and left-to-right sliding windows yield a left-to-right
 *     equivalent of the wNAF (independently discovered by various researchers
 *     around 2004).
 *
 * To prevent leaking information through side channels in point multiplication,
 * we need to recode the given integer into a regular pattern: sliding windows
 * as in wNAFs won't do, we need their fixed-window equivalent -- which is a few
 * decades older: we'll be using the so-called "modified Booth encoding" due to
 * MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49
 * (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five
 * signed bits into a signed digit:
 *
 *       s_(5j + 4) s_(5j + 3) s_(5j + 2) s_(5j + 1) s_(5j)
 *
 * The sign-alternating property implies that the resulting digit values are
 * integers from -16 to 16.
169 * 170 * Of course, we don't actually need to compute the signed digits s_i as an 171 * intermediate step (that's just a nice way to see how this scheme relates 172 * to the wNAF): a direct computation obtains the recoded digit from the 173 * six bits b_(4j + 4) ... b_(4j - 1). 174 * 175 * This function takes those five bits as an integer (0 .. 63), writing the 176 * recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute 177 * value, in the range 0 .. 8). Note that this integer essentially provides the 178 * input bits "shifted to the left" by one position: for example, the input to 179 * compute the least significant recoded digit, given that there's no bit b_-1, 180 * has to be b_4 b_3 b_2 b_1 b_0 0. 181 * 182 */ 183 void ec_GFp_nistp_recode_scalar_bits(unsigned char *sign, unsigned char *digit, unsigned char in) 184 { 185 unsigned char s, d; 186 187 s = ~((in >> 5) - 1); /* sets all bits to MSB(in), 'in' seen as 6-bit value */ 188 d = (1 << 6) - in - 1; 189 d = (d & s) | (in & ~s); 190 d = (d >> 1) + (d & 1); 191 192 *sign = s & 1; 193 *digit = d; 194 } 195 #else 196 static void *dummy=&dummy; 197 #endif 198