/*	$OpenBSD: curve25519.c,v 1.2 2020/07/22 13:54:30 tobhe Exp $ */
/*
 * Copyright (C) 2018-2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright (C) 2015-2016 The fiat-crypto Authors.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * This contains a machine-generated, formally verified implementation of
 * Curve25519 ECDH from:
 * <https://github.com/mit-plv/fiat-crypto>. Though originally machine
 * generated, it has been tweaked to be suitable for use in the kernel. It is
 * optimized for 32-bit machines and machines that cannot work efficiently with
 * 128-bit integer types.
 */

#include <sys/types.h>
#include <sys/systm.h>

#include <crypto/curve25519.h>

#define __always_inline __inline __attribute__((__always_inline__))

static const uint8_t null_point[CURVE25519_KEY_SIZE];
static const uint8_t base_point[CURVE25519_KEY_SIZE] = { 9 };

int curve25519_generate_public(uint8_t pub[CURVE25519_KEY_SIZE],
    const uint8_t secret[CURVE25519_KEY_SIZE])
{
	if (timingsafe_bcmp(secret, null_point, CURVE25519_KEY_SIZE) == 0)
		return 0;
	return curve25519(pub, secret, base_point);
}

static __always_inline uint32_t get_unaligned_le32(const uint8_t *a)
{
	uint32_t l;
	__builtin_memcpy(&l, a, sizeof(l));
	return letoh32(l);
}

/* fe means field element. Here the field is \Z/(2^255-19). An element t,
 * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
 * t[3]+2^102 t[4]+...+2^230 t[9].
 * fe limbs are bounded by 1.125*2^26,1.125*2^25,1.125*2^26,1.125*2^25,etc.
 * Multiplication and carrying produce fe from fe_loose.
 */
typedef struct fe { uint32_t v[10]; } fe;

/* fe_loose limbs are bounded by 3.375*2^26,3.375*2^25,3.375*2^26,3.375*2^25,etc
 * Addition and subtraction produce fe_loose from (fe, fe).
 */
typedef struct fe_loose { uint32_t v[10]; } fe_loose;

static __always_inline void fe_frombytes_impl(uint32_t h[10], const uint8_t *s)
{
	/* Ignores top bit of s. */
	uint32_t a0 = get_unaligned_le32(s);
	uint32_t a1 = get_unaligned_le32(s+4);
	uint32_t a2 = get_unaligned_le32(s+8);
	uint32_t a3 = get_unaligned_le32(s+12);
	uint32_t a4 = get_unaligned_le32(s+16);
	uint32_t a5 = get_unaligned_le32(s+20);
	uint32_t a6 = get_unaligned_le32(s+24);
	uint32_t a7 = get_unaligned_le32(s+28);
	h[0] = a0&((1<<26)-1);				/* 26 used, 32-26 left. 26 */
	h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6);	/* (32-26) + 19 = 6+19 = 25 */
	h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13);	/* (32-19) + 13 = 13+13 = 26 */
	h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19);	/* (32-13) + 6 = 19+ 6 = 25 */
	h[4] = (a3>> 6);				/* (32- 6) = 26 */
	h[5] = a4&((1<<25)-1);				/* 25 */
	h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7);	/* (32-25) + 19 = 7+19 = 26 */
	h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13);	/* (32-19) + 12 = 13+12 = 25 */
	h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20);	/* (32-12) + 6 = 20+ 6 = 26 */
	h[9] = (a7>> 6)&((1<<25)-1);			/* 25 */
}

static __always_inline void fe_frombytes(fe *h, const uint8_t *s)
{
	fe_frombytes_impl(h->v, s);
}

static __always_inline uint8_t /*bool*/
addcarryx_u25(uint8_t /*bool*/ c, uint32_t a, uint32_t b, uint32_t *low)
{
	/* This function extracts 25 bits of result and 1 bit of carry
	 * (26 total), so a 32-bit intermediate is sufficient.
	 */
	uint32_t x = a + b + c;
	*low = x & ((1 << 25) - 1);
	return (x >> 25) & 1;
}

static __always_inline uint8_t /*bool*/
addcarryx_u26(uint8_t /*bool*/ c, uint32_t a, uint32_t b, uint32_t *low)
{
	/* This function extracts 26 bits of result and 1 bit of carry
	 * (27 total), so a 32-bit intermediate is sufficient.
	 */
	uint32_t x = a + b + c;
	*low = x & ((1 << 26) - 1);
	return (x >> 26) & 1;
}

static __always_inline uint8_t /*bool*/
subborrow_u25(uint8_t /*bool*/ c, uint32_t a, uint32_t b, uint32_t *low)
{
	/* This function extracts 25 bits of result and 1 bit of borrow
	 * (26 total), so a 32-bit intermediate is sufficient.
	 */
	uint32_t x = a - b - c;
	*low = x & ((1 << 25) - 1);
	return x >> 31;
}

static __always_inline uint8_t /*bool*/
subborrow_u26(uint8_t /*bool*/ c, uint32_t a, uint32_t b, uint32_t *low)
{
	/* This function extracts 26 bits of result and 1 bit of borrow
	 * (27 total), so a 32-bit intermediate is sufficient.
	 */
	uint32_t x = a - b - c;
	*low = x & ((1 << 26) - 1);
	return x >> 31;
}

static __always_inline uint32_t cmovznz32(uint32_t t, uint32_t z, uint32_t nz)
{
	t = -!!t; /* all set if nonzero, 0 if 0 */
	return (t&nz) | ((~t)&z);
}

static __always_inline void fe_freeze(uint32_t out[10], const uint32_t in1[10])
{
	const uint32_t x17 = in1[9];
	const uint32_t x18 = in1[8];
	const uint32_t x16 = in1[7];
	const uint32_t x14 = in1[6];
	const uint32_t x12 = in1[5];
	const uint32_t x10 = in1[4];
	const uint32_t x8 = in1[3];
	const uint32_t x6 = in1[2];
	const uint32_t x4 = in1[1];
	const uint32_t x2 = in1[0];
	uint32_t x20; uint8_t/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20);
	uint32_t x23; uint8_t/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23);
	uint32_t x26; uint8_t/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26);
	uint32_t x29; uint8_t/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29);
	uint32_t x32; uint8_t/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32);
	uint32_t x35; uint8_t/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35);
	uint32_t x38; uint8_t/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38);
	uint32_t x41; uint8_t/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41);
	uint32_t x44; uint8_t/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44);
	uint32_t x47; uint8_t/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47);
	uint32_t x49 = cmovznz32(x48, 0x0, 0xffffffff);
	uint32_t x50 = (x49 & 0x3ffffed);
	uint32_t x52; uint8_t/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52);
	uint32_t x54 = (x49 & 0x1ffffff);
	uint32_t x56; uint8_t/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56);
	uint32_t x58 = (x49 & 0x3ffffff);
	uint32_t x60; uint8_t/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60);
	uint32_t x62 = (x49 & 0x1ffffff);
	uint32_t x64; uint8_t/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64);
	uint32_t x66 = (x49 & 0x3ffffff);
	uint32_t x68; uint8_t/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68);
	uint32_t x70 = (x49 & 0x1ffffff);
	uint32_t x72; uint8_t/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72);
	uint32_t x74 = (x49 & 0x3ffffff);
	uint32_t x76; uint8_t/*bool*/ x77 = addcarryx_u26(x73, x38, x74, &x76);
	uint32_t x78 = (x49 & 0x1ffffff);
	uint32_t x80; uint8_t/*bool*/ x81 = addcarryx_u25(x77, x41, x78, &x80);
	uint32_t x82 = (x49 & 0x3ffffff);
	uint32_t x84; uint8_t/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84);
	uint32_t x86 = (x49 & 0x1ffffff);
	uint32_t x88; addcarryx_u25(x85, x47, x86, &x88);
	out[0] = x52;
	out[1] = x56;
	out[2] = x60;
	out[3] = x64;
	out[4] = x68;
	out[5] = x72;
	out[6] = x76;
	out[7] = x80;
	out[8] = x84;
	out[9] = x88;
}

static __always_inline void fe_tobytes(uint8_t s[32], const fe *f)
{
	uint32_t h[10];
	fe_freeze(h, f->v);
	s[0] = h[0] >> 0;
	s[1] = h[0] >> 8;
	s[2] = h[0] >> 16;
	s[3] = (h[0] >> 24) | (h[1] << 2);
	s[4] = h[1] >> 6;
	s[5] = h[1] >> 14;
	s[6] = (h[1] >> 22) | (h[2] << 3);
	s[7] = h[2] >> 5;
	s[8] = h[2] >> 13;
	s[9] = (h[2] >> 21) | (h[3] << 5);
	s[10] = h[3] >> 3;
	s[11] = h[3] >> 11;
	s[12] = (h[3] >> 19) | (h[4] << 6);
	s[13] = h[4] >> 2;
	s[14] = h[4] >> 10;
	s[15] = h[4] >> 18;
	s[16] = h[5] >> 0;
	s[17] = h[5] >> 8;
	s[18] = h[5] >> 16;
	s[19] = (h[5] >> 24) | (h[6] << 1);
	s[20] = h[6] >> 7;
	s[21] = h[6] >> 15;
	s[22] = (h[6] >> 23) | (h[7] << 3);
	s[23] = h[7] >> 5;
	s[24] = h[7] >> 13;
	s[25] = (h[7] >> 21) | (h[8] << 4);
	s[26] = h[8] >> 4;
	s[27] = h[8] >> 12;
	s[28] = (h[8] >> 20) | (h[9] << 6);
	s[29] = h[9] >> 2;
	s[30] = h[9] >> 10;
	s[31] = h[9] >> 18;
}

/* h = f */
static __always_inline void fe_copy(fe *h, const fe *f)
{
	memmove(h, f, sizeof(uint32_t) * 10);
}

static __always_inline void fe_copy_lt(fe_loose *h, const fe *f)
{
	memmove(h, f, sizeof(uint32_t) * 10);
}

/* h = 0 */
static __always_inline void fe_0(fe *h)
{
	memset(h, 0, sizeof(uint32_t) * 10);
}

/* h = 1 */
static __always_inline void fe_1(fe *h)
{
	memset(h, 0, sizeof(uint32_t) * 10);
	h->v[0] = 1;
}

static void fe_add_impl(uint32_t out[10], const uint32_t in1[10], const uint32_t in2[10])
{
	const uint32_t x20 = in1[9];
	const uint32_t x21 = in1[8];
	const uint32_t x19 = in1[7];
	const uint32_t x17 = in1[6];
	const uint32_t x15 = in1[5];
	const uint32_t x13 = in1[4];
	const uint32_t x11 = in1[3];
	const uint32_t x9 = in1[2];
	const uint32_t x7 = in1[1];
	const uint32_t x5 = in1[0];
	const uint32_t x38 = in2[9];
	const uint32_t x39 = in2[8];
	const uint32_t x37 = in2[7];
	const uint32_t x35 = in2[6];
	const uint32_t x33 = in2[5];
	const uint32_t x31 = in2[4];
	const uint32_t x29 = in2[3];
	const uint32_t x27 = in2[2];
	const uint32_t x25 = in2[1];
	const uint32_t x23 = in2[0];
	out[0] = (x5 + x23);
	out[1] = (x7 + x25);
	out[2] = (x9 + x27);
	out[3] = (x11 + x29);
	out[4] = (x13 + x31);
	out[5] = (x15 + x33);
	out[6] = (x17 + x35);
	out[7] = (x19 + x37);
	out[8] = (x21 + x39);
	out[9] = (x20 + x38);
}

/* h = f + g
 * Can overlap h with f or g.
 */
static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g)
{
	fe_add_impl(h->v, f->v, g->v);
}

static void fe_sub_impl(uint32_t out[10], const uint32_t in1[10], const uint32_t in2[10])
{
	const uint32_t x20 = in1[9];
	const uint32_t x21 = in1[8];
	const uint32_t x19 = in1[7];
	const uint32_t x17 = in1[6];
	const uint32_t x15 = in1[5];
	const uint32_t x13 = in1[4];
	const uint32_t x11 = in1[3];
	const uint32_t x9 = in1[2];
	const uint32_t x7 = in1[1];
	const uint32_t x5 = in1[0];
	const uint32_t x38 = in2[9];
	const uint32_t x39 = in2[8];
	const uint32_t x37 = in2[7];
	const uint32_t x35 = in2[6];
	const uint32_t x33 = in2[5];
	const uint32_t x31 = in2[4];
	const uint32_t x29 = in2[3];
	const uint32_t x27 = in2[2];
	const uint32_t x25 = in2[1];
	const uint32_t x23 = in2[0];
	out[0] = ((0x7ffffda + x5) - x23);
	out[1] = ((0x3fffffe + x7) - x25);
	out[2] = ((0x7fffffe + x9) - x27);
	out[3] = ((0x3fffffe + x11) - x29);
	out[4] = ((0x7fffffe + x13) - x31);
	out[5] = ((0x3fffffe + x15) - x33);
	out[6] = ((0x7fffffe + x17) - x35);
	out[7] = ((0x3fffffe + x19) - x37);
	out[8] = ((0x7fffffe + x21) - x39);
	out[9] = ((0x3fffffe + x20) - x38);
}

/* h = f - g
 * Can overlap h with f or g.
 */
static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g)
{
	fe_sub_impl(h->v, f->v, g->v);
}

static void fe_mul_impl(uint32_t out[10], const uint32_t in1[10], const uint32_t in2[10])
{
	const uint32_t x20 = in1[9];
	const uint32_t x21 = in1[8];
	const uint32_t x19 = in1[7];
	const uint32_t x17 = in1[6];
	const uint32_t x15 = in1[5];
	const uint32_t x13 = in1[4];
	const uint32_t x11 = in1[3];
	const uint32_t x9 = in1[2];
	const uint32_t x7 = in1[1];
	const uint32_t x5 = in1[0];
	const uint32_t x38 = in2[9];
	const uint32_t x39 = in2[8];
	const uint32_t x37 = in2[7];
	const uint32_t x35 = in2[6];
	const uint32_t x33 = in2[5];
	const uint32_t x31 = in2[4];
	const uint32_t x29 = in2[3];
	const uint32_t x27 = in2[2];
	const uint32_t x25 = in2[1];
	const uint32_t x23 = in2[0];
	uint64_t x40 = ((uint64_t)x23 * x5);
	uint64_t x41 = (((uint64_t)x23 * x7) + ((uint64_t)x25 * x5));
	uint64_t x42 = ((((uint64_t)(0x2 * x25) * x7) + ((uint64_t)x23 * x9)) + ((uint64_t)x27 * x5));
	uint64_t x43 = (((((uint64_t)x25 * x9) + ((uint64_t)x27 * x7)) + ((uint64_t)x23 * x11)) + ((uint64_t)x29 * x5));
	uint64_t x44 = (((((uint64_t)x27 * x9) + (0x2 * (((uint64_t)x25 * x11) + ((uint64_t)x29 * x7)))) + ((uint64_t)x23 * x13)) + ((uint64_t)x31 * x5));
	uint64_t x45 = (((((((uint64_t)x27 * x11) + ((uint64_t)x29 * x9)) + ((uint64_t)x25 * x13)) + ((uint64_t)x31 * x7)) + ((uint64_t)x23 * x15)) + ((uint64_t)x33 * x5));
	uint64_t x46 = (((((0x2 * ((((uint64_t)x29 * x11) + ((uint64_t)x25 * x15)) + ((uint64_t)x33 * x7))) + ((uint64_t)x27 * x13)) + ((uint64_t)x31 * x9)) + ((uint64_t)x23 * x17)) + ((uint64_t)x35 * x5));
	uint64_t x47 = (((((((((uint64_t)x29 * x13) + ((uint64_t)x31 * x11)) + ((uint64_t)x27 * x15)) + ((uint64_t)x33 * x9)) + ((uint64_t)x25 * x17)) + ((uint64_t)x35 * x7)) + ((uint64_t)x23 * x19)) + ((uint64_t)x37 * x5));
	uint64_t x48 = (((((((uint64_t)x31 * x13) + (0x2 * (((((uint64_t)x29 * x15) + ((uint64_t)x33 * x11)) + ((uint64_t)x25 * x19)) + ((uint64_t)x37 * x7)))) + ((uint64_t)x27 * x17)) + ((uint64_t)x35 * x9)) + ((uint64_t)x23 * x21)) + ((uint64_t)x39 * x5));
	uint64_t x49 = (((((((((((uint64_t)x31 * x15) + ((uint64_t)x33 * x13)) + ((uint64_t)x29 * x17)) + ((uint64_t)x35 * x11)) + ((uint64_t)x27 * x19)) + ((uint64_t)x37 * x9)) + ((uint64_t)x25 * x21)) + ((uint64_t)x39 * x7)) + ((uint64_t)x23 * x20)) + ((uint64_t)x38 * x5));
	uint64_t x50 = (((((0x2 * ((((((uint64_t)x33 * x15) + ((uint64_t)x29 * x19)) + ((uint64_t)x37 * x11)) + ((uint64_t)x25 * x20)) + ((uint64_t)x38 * x7))) + ((uint64_t)x31 * x17)) + ((uint64_t)x35 * x13)) + ((uint64_t)x27 * x21)) + ((uint64_t)x39 * x9));
	uint64_t x51 = (((((((((uint64_t)x33 * x17) + ((uint64_t)x35 * x15)) + ((uint64_t)x31 * x19)) + ((uint64_t)x37 * x13)) + ((uint64_t)x29 * x21)) + ((uint64_t)x39 * x11)) + ((uint64_t)x27 * x20)) + ((uint64_t)x38 * x9));
	uint64_t x52 = (((((uint64_t)x35 * x17) + (0x2 * (((((uint64_t)x33 * x19) + ((uint64_t)x37 * x15)) + ((uint64_t)x29 * x20)) + ((uint64_t)x38 * x11)))) + ((uint64_t)x31 * x21)) + ((uint64_t)x39 * x13));
	uint64_t x53 = (((((((uint64_t)x35 * x19) + ((uint64_t)x37 * x17)) + ((uint64_t)x33 * x21)) + ((uint64_t)x39 * x15)) + ((uint64_t)x31 * x20)) + ((uint64_t)x38 * x13));
	uint64_t x54 = (((0x2 * ((((uint64_t)x37 * x19) + ((uint64_t)x33 * x20)) + ((uint64_t)x38 * x15))) + ((uint64_t)x35 * x21)) + ((uint64_t)x39 * x17));
	uint64_t x55 = (((((uint64_t)x37 * x21) + ((uint64_t)x39 * x19)) + ((uint64_t)x35 * x20)) + ((uint64_t)x38 * x17));
	uint64_t x56 = (((uint64_t)x39 * x21) + (0x2 * (((uint64_t)x37 * x20) + ((uint64_t)x38 * x19))));
	uint64_t x57 = (((uint64_t)x39 * x20) + ((uint64_t)x38 * x21));
	uint64_t x58 = ((uint64_t)(0x2 * x38) * x20);
	uint64_t x59 = (x48 + (x58 << 0x4));
	uint64_t x60 = (x59 + (x58 << 0x1));
	uint64_t x61 = (x60 + x58);
	uint64_t x62 = (x47 + (x57 << 0x4));
	uint64_t x63 = (x62 + (x57 << 0x1));
	uint64_t x64 = (x63 + x57);
	uint64_t x65 = (x46 + (x56 << 0x4));
	uint64_t x66 = (x65 + (x56 << 0x1));
	uint64_t x67 = (x66 + x56);
	uint64_t x68 = (x45 + (x55 << 0x4));
	uint64_t x69 = (x68 + (x55 << 0x1));
	uint64_t x70 = (x69 + x55);
	uint64_t x71 = (x44 + (x54 << 0x4));
	uint64_t x72 = (x71 + (x54 << 0x1));
	uint64_t x73 = (x72 + x54);
	uint64_t x74 = (x43 + (x53 << 0x4));
	uint64_t x75 = (x74 + (x53 << 0x1));
	uint64_t x76 = (x75 + x53);
	uint64_t x77 = (x42 + (x52 << 0x4));
	uint64_t x78 = (x77 + (x52 << 0x1));
	uint64_t x79 = (x78 + x52);
	uint64_t x80 = (x41 + (x51 << 0x4));
	uint64_t x81 = (x80 + (x51 << 0x1));
	uint64_t x82 = (x81 + x51);
	uint64_t x83 = (x40 + (x50 << 0x4));
	uint64_t x84 = (x83 + (x50 << 0x1));
	uint64_t x85 = (x84 + x50);
	uint64_t x86 = (x85 >> 0x1a);
	uint32_t x87 = ((uint32_t)x85 & 0x3ffffff);
	uint64_t x88 = (x86 + x82);
	uint64_t x89 = (x88 >> 0x19);
	uint32_t x90 = ((uint32_t)x88 & 0x1ffffff);
	uint64_t x91 = (x89 + x79);
	uint64_t x92 = (x91 >> 0x1a);
	uint32_t x93 = ((uint32_t)x91 & 0x3ffffff);
	uint64_t x94 = (x92 + x76);
	uint64_t x95 = (x94 >> 0x19);
	uint32_t x96 = ((uint32_t)x94 & 0x1ffffff);
	uint64_t x97 = (x95 + x73);
	uint64_t x98 = (x97 >> 0x1a);
	uint32_t x99 = ((uint32_t)x97 & 0x3ffffff);
	uint64_t x100 = (x98 + x70);
	uint64_t x101 = (x100 >> 0x19);
	uint32_t x102 = ((uint32_t)x100 & 0x1ffffff);
	uint64_t x103 = (x101 + x67);
	uint64_t x104 = (x103 >> 0x1a);
	uint32_t x105 = ((uint32_t)x103 & 0x3ffffff);
	uint64_t x106 = (x104 + x64);
	uint64_t x107 = (x106 >> 0x19);
	uint32_t x108 = ((uint32_t)x106 & 0x1ffffff);
	uint64_t x109 = (x107 + x61);
	uint64_t x110 = (x109 >> 0x1a);
	uint32_t x111 = ((uint32_t)x109 & 0x3ffffff);
	uint64_t x112 = (x110 + x49);
	uint64_t x113 = (x112 >> 0x19);
	uint32_t x114 = ((uint32_t)x112 & 0x1ffffff);
	uint64_t x115 = (x87 + (0x13 * x113));
	uint32_t x116 = (uint32_t) (x115 >> 0x1a);
	uint32_t x117 = ((uint32_t)x115 & 0x3ffffff);
	uint32_t x118 = (x116 + x90);
	uint32_t x119 = (x118 >> 0x19);
	uint32_t x120 = (x118 & 0x1ffffff);
	out[0] = x117;
	out[1] = x120;
	out[2] = (x119 + x93);
	out[3] = x96;
	out[4] = x99;
	out[5] = x102;
	out[6] = x105;
	out[7] = x108;
	out[8] = x111;
	out[9] = x114;
}

static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g)
{
	fe_mul_impl(h->v, f->v, g->v);
}

static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g)
{
	fe_mul_impl(h->v, f->v, g->v);
}

static __always_inline void
fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g)
{
	fe_mul_impl(h->v, f->v, g->v);
}

static void fe_sqr_impl(uint32_t out[10], const uint32_t in1[10])
{
	const uint32_t x17 = in1[9];
	const uint32_t x18 = in1[8];
	const uint32_t x16 = in1[7];
	const uint32_t x14 = in1[6];
	const uint32_t x12 = in1[5];
	const uint32_t x10 = in1[4];
	const uint32_t x8 = in1[3];
	const uint32_t x6 = in1[2];
	const uint32_t x4 = in1[1];
	const uint32_t x2 = in1[0];
	uint64_t x19 = ((uint64_t)x2 * x2);
	uint64_t x20 = ((uint64_t)(0x2 * x2) * x4);
	uint64_t x21 = (0x2 * (((uint64_t)x4 * x4) + ((uint64_t)x2 * x6)));
	uint64_t x22 = (0x2 * (((uint64_t)x4 * x6) + ((uint64_t)x2 * x8)));
	uint64_t x23 = ((((uint64_t)x6 * x6) + ((uint64_t)(0x4 * x4) * x8)) + ((uint64_t)(0x2 * x2) * x10));
	uint64_t x24 = (0x2 * ((((uint64_t)x6 * x8) + ((uint64_t)x4 * x10)) + ((uint64_t)x2 * x12)));
	uint64_t x25 = (0x2 * (((((uint64_t)x8 * x8) + ((uint64_t)x6 * x10)) + ((uint64_t)x2 * x14)) + ((uint64_t)(0x2 * x4) * x12)));
	uint64_t x26 = (0x2 * (((((uint64_t)x8 * x10) + ((uint64_t)x6 * x12)) + ((uint64_t)x4 * x14)) + ((uint64_t)x2 * x16)));
	uint64_t x27 = (((uint64_t)x10 * x10) + (0x2 * ((((uint64_t)x6 * x14) + ((uint64_t)x2 * x18)) + (0x2 * (((uint64_t)x4 * x16) + ((uint64_t)x8 * x12))))));
	uint64_t x28 = (0x2 * ((((((uint64_t)x10 * x12) + ((uint64_t)x8 * x14)) + ((uint64_t)x6 * x16)) + ((uint64_t)x4 * x18)) + ((uint64_t)x2 * x17)));
	uint64_t x29 = (0x2 * (((((uint64_t)x12 * x12) + ((uint64_t)x10 * x14)) + ((uint64_t)x6 * x18)) + (0x2 * (((uint64_t)x8 * x16) + ((uint64_t)x4 * x17)))));
	uint64_t x30 = (0x2 * (((((uint64_t)x12 * x14) + ((uint64_t)x10 * x16)) + ((uint64_t)x8 * x18)) + ((uint64_t)x6 * x17)));
	uint64_t x31 = (((uint64_t)x14 * x14) + (0x2 * (((uint64_t)x10 * x18) + (0x2 * (((uint64_t)x12 * x16) + ((uint64_t)x8 * x17))))));
	uint64_t x32 = (0x2 * ((((uint64_t)x14 * x16) + ((uint64_t)x12 * x18)) + ((uint64_t)x10 * x17)));
	uint64_t x33 = (0x2 * ((((uint64_t)x16 * x16) + ((uint64_t)x14 * x18)) + ((uint64_t)(0x2 * x12) * x17)));
	uint64_t x34 = (0x2 * (((uint64_t)x16 * x18) + ((uint64_t)x14 * x17)));
	uint64_t x35 = (((uint64_t)x18 * x18) + ((uint64_t)(0x4 * x16) * x17));
	uint64_t x36 = ((uint64_t)(0x2 * x18) * x17);
	uint64_t x37 = ((uint64_t)(0x2 * x17) * x17);
	uint64_t x38 = (x27 + (x37 << 0x4));
	uint64_t x39 = (x38 + (x37 << 0x1));
	uint64_t x40 = (x39 + x37);
	uint64_t x41 = (x26 + (x36 << 0x4));
	uint64_t x42 = (x41 + (x36 << 0x1));
	uint64_t x43 = (x42 + x36);
	uint64_t x44 = (x25 + (x35 << 0x4));
	uint64_t x45 = (x44 + (x35 << 0x1));
	uint64_t x46 = (x45 + x35);
	uint64_t x47 = (x24 + (x34 << 0x4));
	uint64_t x48 = (x47 + (x34 << 0x1));
	uint64_t x49 = (x48 + x34);
	uint64_t x50 = (x23 + (x33 << 0x4));
	uint64_t x51 = (x50 + (x33 << 0x1));
	uint64_t x52 = (x51 + x33);
	uint64_t x53 = (x22 + (x32 << 0x4));
	uint64_t x54 = (x53 + (x32 << 0x1));
	uint64_t x55 = (x54 + x32);
	uint64_t x56 = (x21 + (x31 << 0x4));
	uint64_t x57 = (x56 + (x31 << 0x1));
	uint64_t x58 = (x57 + x31);
	uint64_t x59 = (x20 + (x30 << 0x4));
	uint64_t x60 = (x59 + (x30 << 0x1));
	uint64_t x61 = (x60 + x30);
	uint64_t x62 = (x19 + (x29 << 0x4));
	uint64_t x63 = (x62 + (x29 << 0x1));
	uint64_t x64 = (x63 + x29);
	uint64_t x65 = (x64 >> 0x1a);
	uint32_t x66 = ((uint32_t)x64 & 0x3ffffff);
	uint64_t x67 = (x65 + x61);
	uint64_t x68 = (x67 >> 0x19);
	uint32_t x69 = ((uint32_t)x67 & 0x1ffffff);
	uint64_t x70 = (x68 + x58);
	uint64_t x71 = (x70 >> 0x1a);
	uint32_t x72 = ((uint32_t)x70 & 0x3ffffff);
	uint64_t x73 = (x71 + x55);
	uint64_t x74 = (x73 >> 0x19);
	uint32_t x75 = ((uint32_t)x73 & 0x1ffffff);
	uint64_t x76 = (x74 + x52);
	uint64_t x77 = (x76 >> 0x1a);
	uint32_t x78 = ((uint32_t)x76 & 0x3ffffff);
	uint64_t x79 = (x77 + x49);
	uint64_t x80 = (x79 >> 0x19);
	uint32_t x81 = ((uint32_t)x79 & 0x1ffffff);
	uint64_t x82 = (x80 + x46);
	uint64_t x83 = (x82 >> 0x1a);
	uint32_t x84 = ((uint32_t)x82 & 0x3ffffff);
	uint64_t x85 = (x83 + x43);
	uint64_t x86 = (x85 >> 0x19);
	uint32_t x87 = ((uint32_t)x85 & 0x1ffffff);
	uint64_t x88 = (x86 + x40);
	uint64_t x89 = (x88 >> 0x1a);
	uint32_t x90 = ((uint32_t)x88 & 0x3ffffff);
	uint64_t x91 = (x89 + x28);
	uint64_t x92 = (x91 >> 0x19);
	uint32_t x93 = ((uint32_t)x91 & 0x1ffffff);
	uint64_t x94 = (x66 + (0x13 * x92));
	uint32_t x95 = (uint32_t) (x94 >> 0x1a);
	uint32_t x96 = ((uint32_t)x94 & 0x3ffffff);
	uint32_t x97 = (x95 + x69);
	uint32_t x98 = (x97 >> 0x19);
	uint32_t x99 = (x97 & 0x1ffffff);
	out[0] = x96;
	out[1] = x99;
	out[2] = (x98 + x72);
	out[3] = x75;
	out[4] = x78;
	out[5] = x81;
	out[6] = x84;
	out[7] = x87;
	out[8] = x90;
	out[9] = x93;
}

static __always_inline void fe_sq_tl(fe *h, const fe_loose *f)
{
	fe_sqr_impl(h->v, f->v);
}

static __always_inline void fe_sq_tt(fe *h, const fe *f)
{
	fe_sqr_impl(h->v, f->v);
}

static __always_inline void fe_loose_invert(fe *out, const fe_loose *z)
{
	fe t0;
	fe t1;
	fe t2;
	fe t3;
	int i;

	fe_sq_tl(&t0, z);
	fe_sq_tt(&t1, &t0);
	for (i = 1; i < 2; ++i)
		fe_sq_tt(&t1, &t1);
	fe_mul_tlt(&t1, z, &t1);
	fe_mul_ttt(&t0, &t0, &t1);
	fe_sq_tt(&t2, &t0);
	fe_mul_ttt(&t1, &t1, &t2);
	fe_sq_tt(&t2, &t1);
	for (i = 1; i < 5; ++i)
		fe_sq_tt(&t2, &t2);
	fe_mul_ttt(&t1, &t2, &t1);
	fe_sq_tt(&t2, &t1);
	for (i = 1; i < 10; ++i)
		fe_sq_tt(&t2, &t2);
	fe_mul_ttt(&t2, &t2, &t1);
	fe_sq_tt(&t3, &t2);
	for (i = 1; i < 20; ++i)
		fe_sq_tt(&t3, &t3);
	fe_mul_ttt(&t2, &t3, &t2);
	fe_sq_tt(&t2, &t2);
	for (i = 1; i < 10; ++i)
		fe_sq_tt(&t2, &t2);
	fe_mul_ttt(&t1, &t2, &t1);
	fe_sq_tt(&t2, &t1);
	for (i = 1; i < 50; ++i)
		fe_sq_tt(&t2, &t2);
	fe_mul_ttt(&t2, &t2, &t1);
	fe_sq_tt(&t3, &t2);
	for (i = 1; i < 100; ++i)
		fe_sq_tt(&t3, &t3);
	fe_mul_ttt(&t2, &t3, &t2);
	fe_sq_tt(&t2, &t2);
	for (i = 1; i < 50; ++i)
		fe_sq_tt(&t2, &t2);
	fe_mul_ttt(&t1, &t2, &t1);
	fe_sq_tt(&t1, &t1);
	for (i = 1; i < 5; ++i)
		fe_sq_tt(&t1, &t1);
	fe_mul_ttt(out, &t1, &t0);
}

static __always_inline void fe_invert(fe *out, const fe *z)
{
	fe_loose l;
	fe_copy_lt(&l, z);
	fe_loose_invert(out, &l);
}

/* Replace (f,g) with (g,f) if b == 1;
 * replace (f,g) with (f,g) if b == 0.
 *
 * Preconditions: b in {0,1}
 */
static __always_inline void fe_cswap(fe *f, fe *g, unsigned int b)
{
	unsigned i;
	b = 0 - b;
	for (i = 0; i < 10; i++) {
		uint32_t x = f->v[i] ^ g->v[i];
		x &= b;
		f->v[i] ^= x;
		g->v[i] ^= x;
	}
}

/* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0. */
static __always_inline void fe_mul_121666_impl(uint32_t out[10], const uint32_t in1[10])
{
	const uint32_t x20 = in1[9];
	const uint32_t x21 = in1[8];
	const uint32_t x19 = in1[7];
	const uint32_t x17 = in1[6];
	const uint32_t x15 = in1[5];
	const uint32_t x13 = in1[4];
	const uint32_t x11 = in1[3];
	const uint32_t x9 = in1[2];
	const uint32_t x7 = in1[1];
	const uint32_t x5 = in1[0];
	const uint32_t x38 = 0;
	const uint32_t x39 = 0;
	const uint32_t x37 = 0;
	const uint32_t x35 = 0;
	const uint32_t x33 = 0;
	const uint32_t x31 = 0;
	const uint32_t x29 = 0;
	const uint32_t x27 = 0;
	const uint32_t x25 = 0;
	const uint32_t x23 = 121666;
	uint64_t x40 = ((uint64_t)x23 * x5);
	uint64_t x41 = (((uint64_t)x23 * x7) + ((uint64_t)x25 * x5));
	uint64_t x42 = ((((uint64_t)(0x2 * x25) * x7) + ((uint64_t)x23 * x9)) + ((uint64_t)x27 * x5));
	uint64_t x43 = (((((uint64_t)x25 * x9) + ((uint64_t)x27 * x7)) + ((uint64_t)x23 * x11)) + ((uint64_t)x29 * x5));
	uint64_t x44 = (((((uint64_t)x27 * x9) + (0x2 * (((uint64_t)x25 * x11) + ((uint64_t)x29 * x7)))) + ((uint64_t)x23 * x13)) + ((uint64_t)x31 * x5));
	uint64_t x45 = (((((((uint64_t)x27 * x11) + ((uint64_t)x29 * x9)) + ((uint64_t)x25 * x13)) + ((uint64_t)x31 * x7)) + ((uint64_t)x23 * x15)) + ((uint64_t)x33 * x5));
	uint64_t x46 = (((((0x2 * ((((uint64_t)x29 * x11) + ((uint64_t)x25 * x15)) + ((uint64_t)x33 * x7))) + ((uint64_t)x27 * x13)) + ((uint64_t)x31 * x9)) + ((uint64_t)x23 * x17)) + ((uint64_t)x35 * x5));
	uint64_t x47 = (((((((((uint64_t)x29 * x13) + ((uint64_t)x31 * x11)) + ((uint64_t)x27 * x15)) + ((uint64_t)x33 * x9)) + ((uint64_t)x25 * x17)) + ((uint64_t)x35 * x7)) + ((uint64_t)x23 * x19)) + ((uint64_t)x37 * x5));
	uint64_t x48 = (((((((uint64_t)x31 * x13) + (0x2 * (((((uint64_t)x29 * x15) + ((uint64_t)x33 * x11)) + ((uint64_t)x25 * x19)) + ((uint64_t)x37 * x7)))) + ((uint64_t)x27 * x17)) + ((uint64_t)x35 * x9)) + ((uint64_t)x23 * x21)) + ((uint64_t)x39 * x5));
	uint64_t x49 = (((((((((((uint64_t)x31 * x15) + ((uint64_t)x33 * x13)) + ((uint64_t)x29 * x17)) + ((uint64_t)x35 * x11)) + ((uint64_t)x27 * x19)) + ((uint64_t)x37 * x9)) + ((uint64_t)x25 * x21)) + ((uint64_t)x39 * x7)) + ((uint64_t)x23 * x20)) + ((uint64_t)x38 * x5));
	uint64_t x50 = (((((0x2 * ((((((uint64_t)x33 * x15) + ((uint64_t)x29 * x19)) + ((uint64_t)x37 * x11)) + ((uint64_t)x25 * x20)) + ((uint64_t)x38 * x7))) + ((uint64_t)x31 * x17)) + ((uint64_t)x35 * x13)) + ((uint64_t)x27 * x21)) + ((uint64_t)x39 * x9));
	uint64_t x51 = (((((((((uint64_t)x33 * x17) + ((uint64_t)x35 * x15)) + ((uint64_t)x31 * x19)) + ((uint64_t)x37 * x13)) + ((uint64_t)x29 * x21)) + ((uint64_t)x39 * x11)) + ((uint64_t)x27 * x20)) + ((uint64_t)x38 * x9));
	uint64_t x52 = (((((uint64_t)x35 * x17) + (0x2 * (((((uint64_t)x33 * x19) + ((uint64_t)x37 * x15)) + ((uint64_t)x29 * x20)) + ((uint64_t)x38 * x11)))) + ((uint64_t)x31 * x21)) + ((uint64_t)x39 * x13));
	uint64_t x53 = (((((((uint64_t)x35 * x19) + ((uint64_t)x37 * x17)) + ((uint64_t)x33 * x21)) + ((uint64_t)x39 * x15)) + ((uint64_t)x31 * x20)) + ((uint64_t)x38 * x13));
	uint64_t x54 = (((0x2 * ((((uint64_t)x37 * x19) + ((uint64_t)x33 * x20)) + ((uint64_t)x38 * x15))) + ((uint64_t)x35 * x21)) + ((uint64_t)x39 * x17));
	uint64_t x55 = (((((uint64_t)x37 * x21) + ((uint64_t)x39 * x19)) + ((uint64_t)x35 * x20)) + ((uint64_t)x38 * x17));
	uint64_t x56 = (((uint64_t)x39 * x21) + (0x2 * (((uint64_t)x37 * x20) + ((uint64_t)x38 * x19))));
	uint64_t x57 = (((uint64_t)x39 * x20) + ((uint64_t)x38 * x21));
	uint64_t x58 = ((uint64_t)(0x2 * x38) * x20);
	uint64_t x59 = (x48 + (x58 << 0x4));
	uint64_t x60 = (x59 + (x58 << 0x1));
	uint64_t x61 = (x60 + x58);
	uint64_t x62 = (x47 + (x57 << 0x4));
	uint64_t x63 = (x62 + (x57 << 0x1));
	uint64_t x64 = (x63 + x57);
	uint64_t x65 = (x46 + (x56 << 0x4));
	uint64_t x66 = (x65 + (x56 << 0x1));
	uint64_t x67 = (x66 + x56);
	uint64_t x68 = (x45 + (x55 << 0x4));
	uint64_t x69 = (x68 + (x55 << 0x1));
	uint64_t x70 = (x69 + x55);
	uint64_t x71 = (x44 + (x54 << 0x4));
	uint64_t x72 = (x71 + (x54 << 0x1));
	uint64_t x73 = (x72 + x54);
	uint64_t x74 = (x43 + (x53 << 0x4));
	uint64_t x75 = (x74 + (x53 << 0x1));
	uint64_t x76 = (x75 + x53);
	uint64_t x77 = (x42 + (x52 << 0x4));
	uint64_t x78 = (x77 + (x52 << 0x1));
	uint64_t x79 = (x78 + x52);
	uint64_t x80 = (x41 + (x51 << 0x4));
	uint64_t x81 = (x80 + (x51 << 0x1));
	uint64_t x82 = (x81 + x51);
	uint64_t x83 = (x40 + (x50 << 0x4));
	uint64_t x84 = (x83 + (x50 << 0x1));
	uint64_t x85 = (x84 + x50);
	uint64_t x86 = (x85 >> 0x1a);
	uint32_t x87 = ((uint32_t)x85 & 0x3ffffff);
	uint64_t x88 = (x86 + x82);
	uint64_t x89 = (x88 >> 0x19);
	uint32_t x90 = ((uint32_t)x88 & 0x1ffffff);
	uint64_t x91 = (x89 + x79);
	uint64_t x92 = (x91 >> 0x1a);
	uint32_t x93 = ((uint32_t)x91 & 0x3ffffff);
	uint64_t x94 = (x92 + x76);
	uint64_t x95 = (x94 >> 0x19);
	uint32_t x96 = ((uint32_t)x94 & 0x1ffffff);
	uint64_t x97 = (x95 + x73);
	uint64_t x98 = (x97 >> 0x1a);
	uint32_t x99 = ((uint32_t)x97 & 0x3ffffff);
	uint64_t x100 = (x98 + x70);
	uint64_t x101 = (x100 >> 0x19);
	uint32_t x102 = ((uint32_t)x100 & 0x1ffffff);
	uint64_t x103 = (x101 + x67);
	uint64_t x104 = (x103 >> 0x1a);
	uint32_t x105 = ((uint32_t)x103 & 0x3ffffff);
	uint64_t x106 = (x104 + x64);
	uint64_t x107 = (x106 >> 0x19);
	uint32_t x108 = ((uint32_t)x106 & 0x1ffffff);
	uint64_t x109 = (x107 + x61);
	uint64_t x110 = (x109 >> 0x1a);
	uint32_t x111 = ((uint32_t)x109 & 0x3ffffff);
	uint64_t x112 = (x110 + x49);
	uint64_t x113 = (x112 >> 0x19);
	uint32_t x114 = ((uint32_t)x112 & 0x1ffffff);
	uint64_t x115 = (x87 + (0x13 * x113));
	uint32_t x116 = (uint32_t) (x115 >> 0x1a);
	uint32_t x117 = ((uint32_t)x115 & 0x3ffffff);
	uint32_t x118 = (x116 + x90);
	uint32_t x119 = (x118 >> 0x19);
	uint32_t x120 = (x118 & 0x1ffffff);
	out[0] = x117;
	out[1] = x120;
	out[2] = (x119 + x93);
	out[3] = x96;
	out[4] = x99;
	out[5] = x102;
	out[6] = x105;
	out[7] = x108;
	out[8] = x111;
	out[9] = x114;
}

static __always_inline void fe_mul121666(fe *h, const fe_loose *f)
{
	fe_mul_121666_impl(h->v, f->v);
}

int curve25519(uint8_t out[CURVE25519_KEY_SIZE],
    const uint8_t scalar[CURVE25519_KEY_SIZE],
    const uint8_t point[CURVE25519_KEY_SIZE])
{
	fe x1, x2, z2, x3, z3;
	fe_loose x2l, z2l, x3l;
	unsigned swap = 0;
	int pos;
	uint8_t e[32];

	memcpy(e, scalar, 32);
	curve25519_clamp_secret(e);

	/* The following implementation was transcribed to Coq and proven to
	 * correspond to unary scalar multiplication in affine coordinates given
	 * that x1 != 0 is the x coordinate of some point on the curve. It was
	 * also checked in Coq that doing a ladderstep with x1 = x3 = 0 gives
	 * z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was
	 * quantified over the underlying field, so it applies to Curve25519
	 * itself and the quadratic twist of Curve25519. It was not proven in
	 * Coq that prime-field arithmetic correctly simulates extension-field
	 * arithmetic on prime-field values. The decoding of the byte array
	 * representation of e was not considered.
	 *
	 * Specification of Montgomery curves in affine coordinates:
	 * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Spec/MontgomeryCurve.v#L27>
	 *
	 * Proof that these form a group that is isomorphic to a Weierstrass
	 * curve:
	 * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/AffineProofs.v#L35>
	 *
	 * Coq transcription and correctness proof of the loop
	 * (where scalarbits=255):
	 * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L118>
	 * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L278>
	 * preconditions: 0 <= e < 2^255 (not necessarily e < order),
	 * fe_invert(0) = 0
	 */
	fe_frombytes(&x1, point);
	fe_1(&x2);
	fe_0(&z2);
	fe_copy(&x3, &x1);
	fe_1(&z3);

	for (pos = 254; pos >= 0; --pos) {
		fe tmp0, tmp1;
		fe_loose tmp0l, tmp1l;
		/* loop invariant as of right before the test, for the case
		 * where x1 != 0:
		 *   pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3
		 *   is nonzero
		 *   let r := e >> (pos+1) in the following equalities of
		 *   projective points:
		 *   to_xz (r*P)     === if swap then (x3, z3) else (x2, z2)
		 *   to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3)
		 *   x1 is the nonzero x coordinate of the nonzero
		 *   point (r*P-(r+1)*P)
		 */
		unsigned b = 1 & (e[pos / 8] >> (pos & 7));
		swap ^= b;
		fe_cswap(&x2, &x3, swap);
		fe_cswap(&z2, &z3, swap);
		swap = b;
		/* Coq transcription of ladderstep formula (called from
		 * transcribed loop):
		 * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L89>
		 * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L131>
		 * x1 != 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L217>
		 * x1  = 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L147>
		 */
		fe_sub(&tmp0l, &x3, &z3);
		fe_sub(&tmp1l, &x2, &z2);
		fe_add(&x2l, &x2, &z2);
		fe_add(&z2l, &x3, &z3);
		fe_mul_tll(&z3, &tmp0l, &x2l);
		fe_mul_tll(&z2, &z2l, &tmp1l);
		fe_sq_tl(&tmp0, &tmp1l);
		fe_sq_tl(&tmp1, &x2l);
		fe_add(&x3l, &z3, &z2);
		fe_sub(&z2l, &z3, &z2);
		fe_mul_ttt(&x2, &tmp1, &tmp0);
		fe_sub(&tmp1l, &tmp1, &tmp0);
		fe_sq_tl(&z2, &z2l);
		fe_mul121666(&z3, &tmp1l);
		fe_sq_tl(&x3, &x3l);
		fe_add(&tmp0l, &tmp0, &z3);
		fe_mul_ttt(&z3, &x1, &z2);
		fe_mul_tll(&z2, &tmp1l, &tmp0l);
	}
	/* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3)
	 * else (x2, z2)
	 */
	fe_cswap(&x2, &x3, swap);
	fe_cswap(&z2, &z3, swap);

	fe_invert(&z2, &z2);
	fe_mul_ttt(&x2, &x2, &z2);
	fe_tobytes(out, &x2);

	explicit_bzero(&x1, sizeof(x1));
	explicit_bzero(&x2, sizeof(x2));
	explicit_bzero(&z2, sizeof(z2));
	explicit_bzero(&x3, sizeof(x3));
	explicit_bzero(&z3, sizeof(z3));
	explicit_bzero(&x2l, sizeof(x2l));
	explicit_bzero(&z2l, sizeof(z2l));
	explicit_bzero(&x3l, sizeof(x3l));
	explicit_bzero(&e, sizeof(e));
	return timingsafe_bcmp(out, null_point, CURVE25519_KEY_SIZE);
}
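
/*
 * Usage sketch (illustrative only, not compiled as part of this file): a
 * kernel caller derives a public key from a freshly generated secret,
 * assuming arc4random_buf() is available to it.  curve25519() clamps an
 * internal copy of the scalar itself, so the explicit clamp below is shown
 * only for clarity.  A zero return from curve25519_generate_public() flags
 * the all-zero result and should be treated as an error by the caller.
 *
 *	uint8_t secret[CURVE25519_KEY_SIZE], pub[CURVE25519_KEY_SIZE];
 *	int ok;
 *
 *	arc4random_buf(secret, sizeof(secret));
 *	curve25519_clamp_secret(secret);
 *	ok = curve25519_generate_public(pub, secret);
 *	if (ok == 0)
 *		goto fail;
 */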