/*
* Based on curve25519-donna-c64.c from github.com/agl/curve25519-donna
* revision 80ad9b9930c9baef5829dd2a235b6b7646d32a8e
*
* Further changes
* (C) 2014,2018 Jack Lloyd
*
* Botan is released under the Simplified BSD License (see license.txt)
*/

/* Copyright 2008, Google Inc.
* All rights reserved.
*
* Code released into the public domain.
*
* curve25519-donna: Curve25519 elliptic curve, public key function
*
* https://code.google.com/p/curve25519-donna/
*
* Adam Langley <agl@imperialviolet.org>
*
* Derived from public domain C code by Daniel J. Bernstein <djb@cr.yp.to>
*
* More information about curve25519 can be found here
*   https://cr.yp.to/ecdh.html
*
* djb's sample implementation of curve25519 is written in a special assembly
* language called qhasm and uses the floating point registers.
*
* This is, almost, a clean room reimplementation from the curve25519 paper. It
* uses many of the tricks described therein. Only the crecip function is taken
* from the sample implementation.
*/

#include <botan/curve25519.h>
#include <botan/mul128.h>
#include <botan/internal/ct_utils.h>
#include <botan/internal/donna128.h>
#include <botan/loadstor.h>

namespace Botan {

namespace {

#if !defined(BOTAN_TARGET_HAS_NATIVE_UINT128)
typedef donna128 uint128_t;
#endif
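
/*
* Implementation note (added explanation, not from the original source):
* field elements are held in 5 limbs of 51 bits each (radix 2^51), giving
* 255 bits in total. Reduction modulo p = 2^255 - 19 relies on the identity
* 2^255 = 19 (mod p): any carry out of the top limb is multiplied by 19 and
* folded back into the lowest limb.
*/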

/* Sum two numbers: output += in */
inline void fsum(uint64_t out[5], const uint64_t in[5])
   {
   out[0] += in[0];
   out[1] += in[1];
   out[2] += in[2];
   out[3] += in[3];
   out[4] += in[4];
   }

/* Find the difference of two numbers: out = in - out
* (note the order of the arguments!)
*
* Assumes that out[i] < 2**52
* On return, out[i] < 2**55
*/
inline void fdifference_backwards(uint64_t out[5], const uint64_t in[5])
   {
   /* 152 is 19 << 3 */
   const uint64_t two54m152 = (static_cast<uint64_t>(1) << 54) - 152;
   const uint64_t two54m8 = (static_cast<uint64_t>(1) << 54) - 8;
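
   /*
   * Added note: taken together these per-limb constants are a radix-2^51
   * encoding of 8*p = 2^258 - 152, so adding them before the subtraction
   * changes nothing modulo p but guarantees that no limb underflows
   * (each out[i] is below 2^52, well under the added constants).
   */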

   out[0] = in[0] + two54m152 - out[0];
   out[1] = in[1] + two54m8 - out[1];
   out[2] = in[2] + two54m8 - out[2];
   out[3] = in[3] + two54m8 - out[3];
   out[4] = in[4] + two54m8 - out[4];
   }

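/* Simultaneously compute x <- y - x and y <- x + y
* (descriptive comment added; x and y refer to the values on entry)
*/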
inline void fadd_sub(uint64_t x[5],
                     uint64_t y[5])
   {
   // TODO merge these and avoid the tmp array
   uint64_t tmp[5];
   copy_mem(tmp, y, 5);
   fsum(y, x);
   fdifference_backwards(x, tmp); // does x = y - x (y as on entry)
   }

/* Multiply a number by a scalar: out = in * scalar */
inline void fscalar_product(uint64_t out[5], const uint64_t in[5], const uint64_t scalar)
   {
   uint128_t a = uint128_t(in[0]) * scalar;
   out[0] = a & 0x7ffffffffffff;

   a = uint128_t(in[1]) * scalar + carry_shift(a, 51);
   out[1] = a & 0x7ffffffffffff;

   a = uint128_t(in[2]) * scalar + carry_shift(a, 51);
   out[2] = a & 0x7ffffffffffff;

   a = uint128_t(in[3]) * scalar + carry_shift(a, 51);
   out[3] = a & 0x7ffffffffffff;

   a = uint128_t(in[4]) * scalar + carry_shift(a, 51);
   out[4] = a & 0x7ffffffffffff;

   out[0] += carry_shift(a, 51) * 19;
   }

/* Multiply two numbers: out = in2 * in
*
* out must be distinct from both inputs. The inputs are in reduced coefficient
* form, the output is not.
*
* Assumes that in[i] < 2**55 and likewise for in2.
* On return, out[i] < 2**52
*/
inline void fmul(uint64_t out[5], const uint64_t in[5], const uint64_t in2[5])
   {
   const uint128_t s0 = in2[0];
   const uint128_t s1 = in2[1];
   const uint128_t s2 = in2[2];
   const uint128_t s3 = in2[3];
   const uint128_t s4 = in2[4];

   uint64_t r0 = in[0];
   uint64_t r1 = in[1];
   uint64_t r2 = in[2];
   uint64_t r3 = in[3];
   uint64_t r4 = in[4];

   uint128_t t0 = r0 * s0;
   uint128_t t1 = r0 * s1 + r1 * s0;
   uint128_t t2 = r0 * s2 + r2 * s0 + r1 * s1;
   uint128_t t3 = r0 * s3 + r3 * s0 + r1 * s2 + r2 * s1;
   uint128_t t4 = r0 * s4 + r4 * s0 + r3 * s1 + r1 * s3 + r2 * s2;

   r4 *= 19;
   r1 *= 19;
   r2 *= 19;
   r3 *= 19;

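   /*
   * Added note: the products r_i * s_j with i + j >= 5 carry weight
   * 2^(51*(i+j)) = 2^(51*(i+j-5)) * 2^255, and 2^255 = 19 (mod p), so
   * premultiplying r1..r4 by 19 lets those terms be folded into t0..t3.
   */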
   t0 += r4 * s1 + r1 * s4 + r2 * s3 + r3 * s2;
   t1 += r4 * s2 + r2 * s4 + r3 * s3;
   t2 += r4 * s3 + r3 * s4;
   t3 += r4 * s4;

   r0 = t0 & 0x7ffffffffffff; t1 += carry_shift(t0, 51);
   r1 = t1 & 0x7ffffffffffff; t2 += carry_shift(t1, 51);
   r2 = t2 & 0x7ffffffffffff; t3 += carry_shift(t2, 51);
   r3 = t3 & 0x7ffffffffffff; t4 += carry_shift(t3, 51);
   r4 = t4 & 0x7ffffffffffff; uint64_t c = carry_shift(t4, 51);

   r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffff;
   r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffff;
   r2 += c;

   out[0] = r0;
   out[1] = r1;
   out[2] = r2;
   out[3] = r3;
   out[4] = r4;
   }

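/* Repeatedly square a number: out = in^(2^count)
* (descriptive comment added; the original file documents this function only
* through its signature)
*/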
inline void fsquare(uint64_t out[5], const uint64_t in[5], size_t count = 1)
   {
   uint64_t r0 = in[0];
   uint64_t r1 = in[1];
   uint64_t r2 = in[2];
   uint64_t r3 = in[3];
   uint64_t r4 = in[4];

   for(size_t i = 0; i != count; ++i)
      {
      const uint64_t d0 = r0 * 2;
      const uint64_t d1 = r1 * 2;
      const uint64_t d2 = r2 * 2 * 19;
      const uint64_t d419 = r4 * 19;
      const uint64_t d4 = d419 * 2;

      uint128_t t0 = uint128_t(r0) * r0 + uint128_t(d4) * r1 + uint128_t(d2) * r3;
      uint128_t t1 = uint128_t(d0) * r1 + uint128_t(d4) * r2 + uint128_t(r3) * (r3 * 19);
      uint128_t t2 = uint128_t(d0) * r2 + uint128_t(r1) * r1 + uint128_t(d4) * r3;
      uint128_t t3 = uint128_t(d0) * r3 + uint128_t(d1) * r2 + uint128_t(r4) * d419;
      uint128_t t4 = uint128_t(d0) * r4 + uint128_t(d1) * r3 + uint128_t(r2) * r2;

      r0 = t0 & 0x7ffffffffffff; t1 += carry_shift(t0, 51);
      r1 = t1 & 0x7ffffffffffff; t2 += carry_shift(t1, 51);
      r2 = t2 & 0x7ffffffffffff; t3 += carry_shift(t2, 51);
      r3 = t3 & 0x7ffffffffffff; t4 += carry_shift(t3, 51);
      r4 = t4 & 0x7ffffffffffff; uint64_t c = carry_shift(t4, 51);

      r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffff;
      r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffff;
      r2 += c;
      }

   out[0] = r0;
   out[1] = r1;
   out[2] = r2;
   out[3] = r3;
   out[4] = r4;
   }

/* Take a little-endian, 32-byte number and expand it into polynomial form */
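/* (added note) Each load below reads 8 bytes starting at byte offset b and
* shifts right by s, with 8*b + s = 51*i, so limb i receives bits
* [51*i, 51*i + 50] of the 255-bit little-endian value.
*/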
inline void fexpand(uint64_t *out, const uint8_t *in)
   {
   out[0] = load_le<uint64_t>(in, 0) & 0x7ffffffffffff;
   out[1] = (load_le<uint64_t>(in+6, 0) >> 3) & 0x7ffffffffffff;
   out[2] = (load_le<uint64_t>(in+12, 0) >> 6) & 0x7ffffffffffff;
   out[3] = (load_le<uint64_t>(in+19, 0) >> 1) & 0x7ffffffffffff;
   out[4] = (load_le<uint64_t>(in+24, 0) >> 12) & 0x7ffffffffffff;
   }

/* Take a fully reduced polynomial form number and contract it into a
* little-endian, 32-byte array
*/
inline void fcontract(uint8_t *out, const uint64_t input[5])
   {
   uint128_t t0 = input[0];
   uint128_t t1 = input[1];
   uint128_t t2 = input[2];
   uint128_t t3 = input[3];
   uint128_t t4 = input[4];

   for(size_t i = 0; i != 2; ++i)
      {
      t1 += t0 >> 51; t0 &= 0x7ffffffffffff;
      t2 += t1 >> 51; t1 &= 0x7ffffffffffff;
      t3 += t2 >> 51; t2 &= 0x7ffffffffffff;
      t4 += t3 >> 51; t3 &= 0x7ffffffffffff;
      t0 += (t4 >> 51) * 19; t4 &= 0x7ffffffffffff;
      }

   /* now t is between 0 and 2^255-1, properly carried. */
   /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */

   t0 += 19;

   t1 += t0 >> 51; t0 &= 0x7ffffffffffff;
   t2 += t1 >> 51; t1 &= 0x7ffffffffffff;
   t3 += t2 >> 51; t2 &= 0x7ffffffffffff;
   t4 += t3 >> 51; t3 &= 0x7ffffffffffff;
   t0 += (t4 >> 51) * 19; t4 &= 0x7ffffffffffff;

   /* now between 19 and 2^255-1 in both cases, and offset by 19. */

   t0 += 0x8000000000000 - 19;
   t1 += 0x8000000000000 - 1;
   t2 += 0x8000000000000 - 1;
   t3 += 0x8000000000000 - 1;
   t4 += 0x8000000000000 - 1;

   /* now between 2^255 and 2^256-20, and offset by 2^255. */

   t1 += t0 >> 51; t0 &= 0x7ffffffffffff;
   t2 += t1 >> 51; t1 &= 0x7ffffffffffff;
   t3 += t2 >> 51; t2 &= 0x7ffffffffffff;
   t4 += t3 >> 51; t3 &= 0x7ffffffffffff;
   t4 &= 0x7ffffffffffff;

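   /*
   * Added note (assumes the combine_lower helper from donna128.h): it yields
   * the low 64 bits of (a >> s1) | (b << s2), so the four words stored below
   * are bits [0,63], [64,127], [128,191] and [192,254] of the reduced value.
   */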
   store_le(out,
            combine_lower(t0, 0, t1, 51),
            combine_lower(t1, 13, t2, 38),
            combine_lower(t2, 26, t3, 25),
            combine_lower(t3, 39, t4, 12));
   }

/* Input: Q, Q', Q-Q'
* Out: 2Q, Q+Q'
*
*   result.two_q (2*Q): long form
*   result.q_plus_q_dash (Q + Q'): long form
*   in_q: short form, destroyed
*   in_q_dash: short form, destroyed
*   in_q_minus_q_dash: short form, preserved
*/
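/* (added note) This is one step of the x-only Montgomery ladder: a combined
* differential addition and doubling costing 5 multiplications, 4 squarings
* and one multiplication by the small constant 121665 = (486662 - 2) / 4.
*/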
void fmonty(uint64_t result_two_q_x[5],
            uint64_t result_two_q_z[5],
            uint64_t result_q_plus_q_dash_x[5],
            uint64_t result_q_plus_q_dash_z[5],
            uint64_t in_q_x[5],
            uint64_t in_q_z[5],
            uint64_t in_q_dash_x[5],
            uint64_t in_q_dash_z[5],
            const uint64_t q_minus_q_dash[5])
   {
   uint64_t zzz[5];
   uint64_t xx[5];
   uint64_t zz[5];
   uint64_t xxprime[5];
   uint64_t zzprime[5];
   uint64_t zzzprime[5];

   fadd_sub(in_q_z, in_q_x);
   fadd_sub(in_q_dash_z, in_q_dash_x);

   fmul(xxprime, in_q_dash_x, in_q_z);
   fmul(zzprime, in_q_dash_z, in_q_x);

   fadd_sub(zzprime, xxprime);

   fsquare(result_q_plus_q_dash_x, xxprime);
   fsquare(zzzprime, zzprime);
   fmul(result_q_plus_q_dash_z, zzzprime, q_minus_q_dash);

   fsquare(xx, in_q_x);
   fsquare(zz, in_q_z);
   fmul(result_two_q_x, xx, zz);

   fdifference_backwards(zz, xx); // does zz = xx - zz
   fscalar_product(zzz, zz, 121665);
   fsum(zzz, xx);

   fmul(result_two_q_z, zz, zzz);
   }

/*
* Conditionally swap the contents of the uint64_t array pairs (a, b) and
* (c, d). Param iswap is assumed to be either 0 or 1.
*
* This function performs the swaps without leaking any side-channel
* information.
*/
inline void swap_conditional(uint64_t a[5], uint64_t b[5],
                             uint64_t c[5], uint64_t d[5],
                             uint64_t iswap)
   {
   const uint64_t swap = 0 - iswap;
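   // Added note: swap is all-ones when iswap == 1 and zero when iswap == 0,
   // so the XOR masks below either exchange every limb or change nothing,
   // with no data-dependent branch.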

   for(size_t i = 0; i < 5; ++i)
      {
      const uint64_t x0 = swap & (a[i] ^ b[i]);
      const uint64_t x1 = swap & (c[i] ^ d[i]);
      a[i] ^= x0;
      b[i] ^= x0;
      c[i] ^= x1;
      d[i] ^= x1;
      }
   }

/* Calculates nQ where Q is the x-coordinate of a point on the curve
*
* resultx/resultz: the x/z coordinate of the resulting curve point (short form)
* n: a little endian, 32-byte number
* q: a point of the curve (short form)
*/
void cmult(uint64_t resultx[5], uint64_t resultz[5], const uint8_t n[32], const uint64_t q[5])
   {
   uint64_t a[5] = {0}; // nqpqx
   uint64_t b[5] = {1}; // nqpqz
   uint64_t c[5] = {1}; // nqx
   uint64_t d[5] = {0}; // nqz
   uint64_t e[5] = {0}; // nqpqx2
   uint64_t f[5] = {1}; // nqpqz2
   uint64_t g[5] = {0}; // nqx2
   uint64_t h[5] = {1}; // nqz2

   copy_mem(a, q, 5);

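   // Added note: this is a Montgomery ladder over the bits of n, most
   // significant bit first. At the top of each byte iteration, (c, d) holds
   // the x/z coordinates of [k]Q and (a, b) holds [k+1]Q, where k is the
   // value of the bits processed so far; the conditional swaps keyed on XORs
   // of adjacent bits keep the ladder operands ordered without an
   // unconditional swap back after each step.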
   for(size_t i = 0; i < 32; ++i)
      {
      const uint64_t bit0 = (n[31 - i] >> 7) & 1;
      const uint64_t bit1 = (n[31 - i] >> 6) & 1;
      const uint64_t bit2 = (n[31 - i] >> 5) & 1;
      const uint64_t bit3 = (n[31 - i] >> 4) & 1;
      const uint64_t bit4 = (n[31 - i] >> 3) & 1;
      const uint64_t bit5 = (n[31 - i] >> 2) & 1;
      const uint64_t bit6 = (n[31 - i] >> 1) & 1;
      const uint64_t bit7 = (n[31 - i] >> 0) & 1;

      swap_conditional(c, a, d, b, bit0);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, h, f, bit0 ^ bit1);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, d, b, bit1 ^ bit2);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, h, f, bit2 ^ bit3);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, d, b, bit3 ^ bit4);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, h, f, bit4 ^ bit5);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, d, b, bit5 ^ bit6);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, h, f, bit6 ^ bit7);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, d, b, bit7);
      }

   copy_mem(resultx, c, 5);
   copy_mem(resultz, d, 5);
   }


// -----------------------------------------------------------------------------
// Shamelessly copied from djb's code, tightened a little
// -----------------------------------------------------------------------------
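// Added note: crecip computes the multiplicative inverse as
// z^(p-2) = z^(2^255 - 21) mod p (Fermat's little theorem), using the
// addition chain traced by the comments below.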
void crecip(uint64_t out[5], const uint64_t z[5])
   {
   uint64_t a[5];
   uint64_t b[5];
   uint64_t c[5];
   uint64_t t0[5];

   fsquare(a, z);       // 2
   fsquare(t0, a, 2);   // 8
   fmul(b, t0, z);      // 9
   fmul(a, b, a);       // 11
   fsquare(t0, a);      // 22
   fmul(b, t0, b);      // 2^5 - 2^0 = 31
   fsquare(t0, b, 5);   // 2^10 - 2^5
   fmul(b, t0, b);      // 2^10 - 2^0
   fsquare(t0, b, 10);  // 2^20 - 2^10
   fmul(c, t0, b);      // 2^20 - 2^0
   fsquare(t0, c, 20);  // 2^40 - 2^20
   fmul(t0, t0, c);     // 2^40 - 2^0
   fsquare(t0, t0, 10); // 2^50 - 2^10
   fmul(b, t0, b);      // 2^50 - 2^0
   fsquare(t0, b, 50);  // 2^100 - 2^50
   fmul(c, t0, b);      // 2^100 - 2^0
   fsquare(t0, c, 100); // 2^200 - 2^100
   fmul(t0, t0, c);     // 2^200 - 2^0
   fsquare(t0, t0, 50); // 2^250 - 2^50
   fmul(t0, t0, b);     // 2^250 - 2^0
   fsquare(t0, t0, 5);  // 2^255 - 2^5
   fmul(out, t0, a);    // 2^255 - 21
   }

}

void
curve25519_donna(uint8_t mypublic[32], const uint8_t secret[32], const uint8_t basepoint[32])
   {
   CT::poison(secret, 32);
   CT::poison(basepoint, 32);

   uint64_t bp[5], x[5], z[5], zmone[5];
   uint8_t e[32];

   copy_mem(e, secret, 32);
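   // Added note: clamp the scalar as specified for X25519 (RFC 7748):
   // clear the low 3 bits (cofactor 8), clear the top bit, and set bit 254.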
   e[ 0] &= 248;
   e[31] &= 127;
   e[31] |= 64;

   fexpand(bp, basepoint);
   cmult(x, z, e, bp);
   crecip(zmone, z);
   fmul(z, x, zmone);
   fcontract(mypublic, z);

   CT::unpoison(secret, 32);
   CT::unpoison(basepoint, 32);
   CT::unpoison(mypublic, 32);
   }
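
/*
* Usage sketch (illustrative only, not part of the original file): to derive
* an X25519 public key, multiply the standard base point u = 9 by a 32-byte
* secret:
*
*   uint8_t secret[32]    = { ... };  // 32 random bytes from a CSPRNG
*   uint8_t basepoint[32] = { 9 };    // 9 followed by 31 zero bytes
*   uint8_t pubkey[32];
*   curve25519_donna(pubkey, secret, basepoint);
*
* A shared secret is computed the same way, with the peer's public key in
* place of the base point.
*/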

}