/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_GROUP_IMPL_H
#define SECP256K1_GROUP_IMPL_H

#include "num.h"
#include "field.h"
#include "group.h"

/* These points can be generated in sage as follows:
 *
 * 0. Set up a worksheet with the following parameters.
 *   b = 4  # whatever CURVE_B will be set to
 *   F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
 *   C = EllipticCurve ([F (0), F (b)])
 *
 * 1. Determine all the small orders available to you. (If there are
 *    no satisfactory ones, go back and change b.)
 *   print C.order().factor(limit=1000)
 *
 * 2. Choose an order as one of the prime factors listed in the above step.
 *    (You can also multiply some to get a composite order, though the
 *    tests will crash trying to invert scalars during signing.) We take a
 *    random point and scale it to drop its order to the desired value.
 *    There is some probability this won't work; just try again.
 *   order = 199
 *   P = C.random_point()
 *   P = (int(P.order()) / int(order)) * P
 *   assert(P.order() == order)
 *
 * 3. Print the values. You'll need to use a vim macro or something to
 *    split the hex output into 4-byte chunks.
 *   print "%x %x" % P.xy()
 */
#if defined(EXHAUSTIVE_TEST_ORDER)
#  if EXHAUSTIVE_TEST_ORDER == 199
const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
    0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069,
    0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18,
    0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
    0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
);

const int CURVE_B = 4;
#  elif EXHAUSTIVE_TEST_ORDER == 13
const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
    0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
    0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
    0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
    0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
);
const int CURVE_B = 2;
#  else
#    error No known generator for the specified exhaustive test group order.
#  endif
#else
/** Generator for secp256k1, value 'g' defined in
 *  "Standards for Efficient Cryptography" (SEC2) 2.7.1.
 */
static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
    0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL,
    0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL,
    0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL,
    0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);

const int CURVE_B = 7;
#endif

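/** Set r to the affine coordinates of the Jacobian point a, given zi = 1/a->z:
 *  x = a->x * zi^2 and y = a->y * zi^3. The caller supplies the inverse so that
 *  several conversions can share a single field inversion. */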
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
    secp256k1_fe zi2;
    secp256k1_fe zi3;
    secp256k1_fe_sqr(&zi2, zi);
    secp256k1_fe_mul(&zi3, &zi2, zi);
    secp256k1_fe_mul(&r->x, &a->x, &zi2);
    secp256k1_fe_mul(&r->y, &a->y, &zi3);
    r->infinity = a->infinity;
}

static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) {
    r->infinity = 0;
    r->x = *x;
    r->y = *y;
}

static int secp256k1_ge_is_infinity(const secp256k1_ge *a) {
    return a->infinity;
}

static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) {
    *r = *a;
    secp256k1_fe_normalize_weak(&r->y);
    secp256k1_fe_negate(&r->y, &r->y, 1);
}

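/** Set r to the affine coordinates of the Jacobian point a. Note that a is
 *  modified as well: its coordinates are rewritten in place so that a->z ends
 *  up equal to 1. The _var variant below is identical except that it uses the
 *  variable-time field inversion and returns early for the point at infinity. */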
static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
    secp256k1_fe z2, z3;
    r->infinity = a->infinity;
    secp256k1_fe_inv(&a->z, &a->z);
    secp256k1_fe_sqr(&z2, &a->z);
    secp256k1_fe_mul(&z3, &a->z, &z2);
    secp256k1_fe_mul(&a->x, &a->x, &z2);
    secp256k1_fe_mul(&a->y, &a->y, &z3);
    secp256k1_fe_set_int(&a->z, 1);
    r->x = a->x;
    r->y = a->y;
}

static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
    secp256k1_fe z2, z3;
    r->infinity = a->infinity;
    if (a->infinity) {
        return;
    }
    secp256k1_fe_inv_var(&a->z, &a->z);
    secp256k1_fe_sqr(&z2, &a->z);
    secp256k1_fe_mul(&z3, &a->z, &z2);
    secp256k1_fe_mul(&a->x, &a->x, &z2);
    secp256k1_fe_mul(&a->y, &a->y, &z3);
    secp256k1_fe_set_int(&a->z, 1);
    r->x = a->x;
    r->y = a->y;
}

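/** Convert an array of len points from Jacobian to affine coordinates. The z
 *  coordinates of all non-infinity inputs are collected and inverted together
 *  with secp256k1_fe_inv_all_var, so only a single field inversion is needed
 *  regardless of len; scratch memory is allocated via checked_malloc(cb, ...). */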
static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb) {
    secp256k1_fe *az;
    secp256k1_fe *azi;
    size_t i;
    size_t count = 0;
    az = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * len);
    for (i = 0; i < len; i++) {
        if (!a[i].infinity) {
            az[count++] = a[i].z;
        }
    }

    azi = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * count);
    secp256k1_fe_inv_all_var(azi, az, count);
    free(az);

    count = 0;
    for (i = 0; i < len; i++) {
        r[i].infinity = a[i].infinity;
        if (!a[i].infinity) {
            secp256k1_ge_set_gej_zinv(&r[i], &a[i], &azi[count++]);
        }
    }
    free(azi);
}

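/** Convert a table of Jacobian points to affine using known z-ratios instead of
 *  a batched inversion. Assuming zr[i] holds the ratio a[i].z / a[i-1].z, only
 *  the last z coordinate needs inverting: walking backwards, multiplying the
 *  running inverse by zr[i] turns 1/a[i].z into 1/a[i-1].z. */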
static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len) {
    size_t i = len - 1;
    secp256k1_fe zi;

    if (len > 0) {
        /* Compute the inverse of the last z coordinate, and use it to compute the last affine output. */
        secp256k1_fe_inv(&zi, &a[i].z);
        secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi);

        /* Work our way backwards, using the z-ratios to scale the x/y values. */
        while (i > 0) {
            secp256k1_fe_mul(&zi, &zi, &zr[i]);
            i--;
            secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi);
        }
    }
}

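/** Like secp256k1_ge_set_table_gej_var above (same assumption about zr), but
 *  instead of producing fully affine points it brings the whole table to one
 *  shared denominator: each r[i] represents a[i] with the common Z coordinate
 *  written to *globalz (the z of the last input), so no inversion is needed. */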
static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr) {
    size_t i = len - 1;
    secp256k1_fe zs;

    if (len > 0) {
        /* The z of the final point gives us the "global Z" for the table. */
        r[i].x = a[i].x;
        r[i].y = a[i].y;
        *globalz = a[i].z;
        r[i].infinity = 0;
        zs = zr[i];

        /* Work our way backwards, using the z-ratios to scale the x/y values. */
        while (i > 0) {
            if (i != len - 1) {
                secp256k1_fe_mul(&zs, &zs, &zr[i]);
            }
            i--;
            secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs);
        }
    }
}

static void secp256k1_gej_set_infinity(secp256k1_gej *r) {
    r->infinity = 1;
    secp256k1_fe_clear(&r->x);
    secp256k1_fe_clear(&r->y);
    secp256k1_fe_clear(&r->z);
}

static void secp256k1_gej_clear(secp256k1_gej *r) {
    r->infinity = 0;
    secp256k1_fe_clear(&r->x);
    secp256k1_fe_clear(&r->y);
    secp256k1_fe_clear(&r->z);
}

static void secp256k1_ge_clear(secp256k1_ge *r) {
    r->infinity = 0;
    secp256k1_fe_clear(&r->x);
    secp256k1_fe_clear(&r->y);
}

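/** Set r to the group element whose x coordinate is the given x and whose y
 *  coordinate is a square root of x^3 + CURVE_B. Returns 1 if such a y exists
 *  (i.e. x is on the curve), 0 otherwise; the y that is chosen is the one
 *  computed by secp256k1_fe_sqrt. The _var variant that follows additionally
 *  selects the root with the requested parity. */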
static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) {
    secp256k1_fe x2, x3, c;
    r->x = *x;
    secp256k1_fe_sqr(&x2, x);
    secp256k1_fe_mul(&x3, x, &x2);
    r->infinity = 0;
    secp256k1_fe_set_int(&c, CURVE_B);
    secp256k1_fe_add(&c, &x3);
    return secp256k1_fe_sqrt(&r->y, &c);
}

static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
    if (!secp256k1_ge_set_xquad(r, x)) {
        return 0;
    }
    secp256k1_fe_normalize_var(&r->y);
    if (secp256k1_fe_is_odd(&r->y) != odd) {
        secp256k1_fe_negate(&r->y, &r->y, 1);
    }
    return 1;
}

static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) {
    r->infinity = a->infinity;
    r->x = a->x;
    r->y = a->y;
    secp256k1_fe_set_int(&r->z, 1);
}

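/** Determine whether x equals the affine x coordinate of the Jacobian point a,
 *  without an inversion: since the affine x is X/Z^2, it suffices to compare
 *  x*Z^2 against X. a must not be infinity. */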
static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a) {
    secp256k1_fe r, r2;
    VERIFY_CHECK(!a->infinity);
    secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x);
    r2 = a->x; secp256k1_fe_normalize_weak(&r2);
    return secp256k1_fe_equal_var(&r, &r2);
}

static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
    r->infinity = a->infinity;
    r->x = a->x;
    r->y = a->y;
    r->z = a->z;
    secp256k1_fe_normalize_weak(&r->y);
    secp256k1_fe_negate(&r->y, &r->y, 1);
}

static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
    return a->infinity;
}

static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) {
    secp256k1_fe y2, x3, z2, z6;
    if (a->infinity) {
        return 0;
    }
    /** y^2 = x^3 + 7
     *  (Y/Z^3)^2 = (X/Z^2)^3 + 7
     *  Y^2 / Z^6 = X^3 / Z^6 + 7
     *  Y^2 = X^3 + 7*Z^6
     */
    secp256k1_fe_sqr(&y2, &a->y);
    secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
    secp256k1_fe_sqr(&z2, &a->z);
    secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
    secp256k1_fe_mul_int(&z6, CURVE_B);
    secp256k1_fe_add(&x3, &z6);
    secp256k1_fe_normalize_weak(&x3);
    return secp256k1_fe_equal_var(&y2, &x3);
}

static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
    secp256k1_fe y2, x3, c;
    if (a->infinity) {
        return 0;
    }
    /* y^2 = x^3 + 7 */
    secp256k1_fe_sqr(&y2, &a->y);
    secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
    secp256k1_fe_set_int(&c, CURVE_B);
    secp256k1_fe_add(&x3, &c);
    secp256k1_fe_normalize_weak(&x3);
    return secp256k1_fe_equal_var(&y2, &x3);
}

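/** Double the Jacobian point a into r. If rzr is non-NULL it receives the ratio
 *  r->z / a->z (here 2*a->y), so callers that track z-ratios can recover the new
 *  denominator without a division. */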
static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
    /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate.
     *
     * Note that there is an implementation described at
     *     https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
     * which trades a multiply for a square, but in practice this is actually slower,
     * mainly because it requires more normalizations.
     */
    secp256k1_fe t1, t2, t3, t4;
    /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
     *  Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
     *  y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
     *
     *  Having said this, if this function receives a point on a sextic twist, e.g. by
     *  a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6,
     *  since -6 does have a cube root mod p. For this point, this function will not set
     *  the infinity flag even though the point doubles to infinity, and the result
     *  point will be gibberish (z = 0 but infinity = 0).
     */
    r->infinity = a->infinity;
    if (r->infinity) {
        if (rzr != NULL) {
            secp256k1_fe_set_int(rzr, 1);
        }
        return;
    }

    if (rzr != NULL) {
        *rzr = a->y;
        secp256k1_fe_normalize_weak(rzr);
        secp256k1_fe_mul_int(rzr, 2);
    }

    secp256k1_fe_mul(&r->z, &a->z, &a->y);
    secp256k1_fe_mul_int(&r->z, 2);       /* Z' = 2*Y*Z (2) */
    secp256k1_fe_sqr(&t1, &a->x);
    secp256k1_fe_mul_int(&t1, 3);         /* T1 = 3*X^2 (3) */
    secp256k1_fe_sqr(&t2, &t1);           /* T2 = 9*X^4 (1) */
    secp256k1_fe_sqr(&t3, &a->y);
    secp256k1_fe_mul_int(&t3, 2);         /* T3 = 2*Y^2 (2) */
    secp256k1_fe_sqr(&t4, &t3);
    secp256k1_fe_mul_int(&t4, 2);         /* T4 = 8*Y^4 (2) */
    secp256k1_fe_mul(&t3, &t3, &a->x);    /* T3 = 2*X*Y^2 (1) */
    r->x = t3;
    secp256k1_fe_mul_int(&r->x, 4);       /* X' = 8*X*Y^2 (4) */
    secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
    secp256k1_fe_add(&r->x, &t2);         /* X' = 9*X^4 - 8*X*Y^2 (6) */
    secp256k1_fe_negate(&t2, &t2, 1);     /* T2 = -9*X^4 (2) */
    secp256k1_fe_mul_int(&t3, 6);         /* T3 = 12*X*Y^2 (6) */
    secp256k1_fe_add(&t3, &t2);           /* T3 = 12*X*Y^2 - 9*X^4 (8) */
    secp256k1_fe_mul(&r->y, &t1, &t3);    /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
    secp256k1_fe_negate(&t2, &t4, 2);     /* T2 = -8*Y^4 (3) */
    secp256k1_fe_add(&r->y, &t2);         /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
}

static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
    VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
    secp256k1_gej_double_var(r, a, rzr);
}

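/** Add the Jacobian points a and b into r, with explicit handling of the cases
 *  where either input is infinity or a equals b or -b. As with doubling, a
 *  non-NULL rzr receives r->z / a->z; that ratio is undefined when a is
 *  infinity, hence the VERIFY_CHECK that rzr is NULL on that path. */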
static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
    /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
    secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;

    if (a->infinity) {
        VERIFY_CHECK(rzr == NULL);
        *r = *b;
        return;
    }

    if (b->infinity) {
        if (rzr != NULL) {
            secp256k1_fe_set_int(rzr, 1);
        }
        *r = *a;
        return;
    }

    r->infinity = 0;
    secp256k1_fe_sqr(&z22, &b->z);
    secp256k1_fe_sqr(&z12, &a->z);
    secp256k1_fe_mul(&u1, &a->x, &z22);
    secp256k1_fe_mul(&u2, &b->x, &z12);
    secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z);
    secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
    secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
    if (secp256k1_fe_normalizes_to_zero_var(&h)) {
        if (secp256k1_fe_normalizes_to_zero_var(&i)) {
            secp256k1_gej_double_var(r, a, rzr);
        } else {
            if (rzr != NULL) {
                secp256k1_fe_set_int(rzr, 0);
            }
            r->infinity = 1;
        }
        return;
    }
    secp256k1_fe_sqr(&i2, &i);
    secp256k1_fe_sqr(&h2, &h);
    secp256k1_fe_mul(&h3, &h, &h2);
    secp256k1_fe_mul(&h, &h, &b->z);
    if (rzr != NULL) {
        *rzr = h;
    }
    secp256k1_fe_mul(&r->z, &a->z, &h);
    secp256k1_fe_mul(&t, &u1, &h2);
    r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
    secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
    secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
    secp256k1_fe_add(&r->y, &h3);
}

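/** Same addition as secp256k1_gej_add_var above, but with b given in affine
 *  coordinates (an implicit Z2 = 1), which saves the multiplications by Z2^2
 *  and Z2^3. */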
static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
    /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
    secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
    if (a->infinity) {
        VERIFY_CHECK(rzr == NULL);
        secp256k1_gej_set_ge(r, b);
        return;
    }
    if (b->infinity) {
        if (rzr != NULL) {
            secp256k1_fe_set_int(rzr, 1);
        }
        *r = *a;
        return;
    }
    r->infinity = 0;

    secp256k1_fe_sqr(&z12, &a->z);
    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
    secp256k1_fe_mul(&u2, &b->x, &z12);
    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
    secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
    secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
    if (secp256k1_fe_normalizes_to_zero_var(&h)) {
        if (secp256k1_fe_normalizes_to_zero_var(&i)) {
            secp256k1_gej_double_var(r, a, rzr);
        } else {
            if (rzr != NULL) {
                secp256k1_fe_set_int(rzr, 0);
            }
            r->infinity = 1;
        }
        return;
    }
    secp256k1_fe_sqr(&i2, &i);
    secp256k1_fe_sqr(&h2, &h);
    secp256k1_fe_mul(&h3, &h, &h2);
    if (rzr != NULL) {
        *rzr = h;
    }
    secp256k1_fe_mul(&r->z, &a->z, &h);
    secp256k1_fe_mul(&t, &u1, &h2);
    r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
    secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
    secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
    secp256k1_fe_add(&r->y, &h3);
}

static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
    /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
    secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;

    if (b->infinity) {
        *r = *a;
        return;
    }
    if (a->infinity) {
        secp256k1_fe bzinv2, bzinv3;
        r->infinity = b->infinity;
        secp256k1_fe_sqr(&bzinv2, bzinv);
        secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv);
        secp256k1_fe_mul(&r->x, &b->x, &bzinv2);
        secp256k1_fe_mul(&r->y, &b->y, &bzinv3);
        secp256k1_fe_set_int(&r->z, 1);
        return;
    }
    r->infinity = 0;

    /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to
     *  secp256k1's isomorphism we can multiply the Z coordinates on both sides
     *  by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1).
     *  This means that (rx,ry,rz) can be calculated as
     *  (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz.
     *  The variable az below holds the modified Z coordinate for a, which is used
     *  for the computation of rx and ry, but not for rz.
     */
    secp256k1_fe_mul(&az, &a->z, bzinv);

    secp256k1_fe_sqr(&z12, &az);
    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
    secp256k1_fe_mul(&u2, &b->x, &z12);
    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
    secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az);
    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
    secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
    if (secp256k1_fe_normalizes_to_zero_var(&h)) {
        if (secp256k1_fe_normalizes_to_zero_var(&i)) {
            secp256k1_gej_double_var(r, a, NULL);
        } else {
            r->infinity = 1;
        }
        return;
    }
    secp256k1_fe_sqr(&i2, &i);
    secp256k1_fe_sqr(&h2, &h);
    secp256k1_fe_mul(&h3, &h, &h2);
    r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h);
    secp256k1_fe_mul(&t, &u1, &h2);
    r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
    secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
    secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
    secp256k1_fe_add(&r->y, &h3);
}

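/** Add the affine point b to the Jacobian point a, in constant time: there are
 *  no data-dependent branches, and the special cases (a at infinity, a == -b,
 *  degenerate lambda) are resolved with cmovs and flag arithmetic as described
 *  in the comment below. b must not be infinity. */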
static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) {
    /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */
    static const secp256k1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
    secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
    secp256k1_fe m_alt, rr_alt;
    int infinity, degenerate;
    VERIFY_CHECK(!b->infinity);
    VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);

    /** In:
     *    Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks.
     *    In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002.
     *  we find as solution for a unified addition/doubling formula:
     *    lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation.
     *    x3 = lambda^2 - (x1 + x2)
     *    2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2).
     *
     *  Substituting x_i = Xi / Zi^2 and y_i = Yi / Zi^3, for i=1,2,3, gives:
     *    U1 = X1*Z2^2, U2 = X2*Z1^2
     *    S1 = Y1*Z2^3, S2 = Y2*Z1^3
     *    Z = Z1*Z2
     *    T = U1+U2
     *    M = S1+S2
     *    Q = T*M^2
     *    R = T^2-U1*U2
     *    X3 = 4*(R^2-Q)
     *    Y3 = 4*(R*(3*Q-2*R^2)-M^4)
     *    Z3 = 2*M*Z
     *  (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.)
     *
     *  This formula has the benefit of being the same for both addition
     *  of distinct points and doubling. However, it breaks down in the
     *  case that either point is infinity, or that y1 = -y2. We handle
     *  these cases in the following ways:
     *
     *    - If b is infinity we simply bail by means of a VERIFY_CHECK.
     *
     *    - If a is infinity, we detect this, and at the end of the
     *      computation replace the result (which will be meaningless,
     *      but we compute to be constant-time) with b.x : b.y : 1.
     *
     *    - If a = -b, we have y1 = -y2, which is a degenerate case.
     *      But here the answer is infinity, so we simply set the
     *      infinity flag of the result, overriding the computed values
     *      without even needing to cmov.
     *
     *    - If y1 = -y2 but x1 != x2, which does occur thanks to certain
     *      properties of our curve (specifically, 1 has nontrivial cube
     *      roots in our field, and the curve equation has no x coefficient)
     *      then the answer is not infinity but also not given by the above
     *      equation. In this case, we cmov in place an alternate expression
     *      for lambda, specifically (y1 - y2)/(x1 - x2). Where both these
     *      expressions for lambda are defined, they are equal, and can be
     *      obtained from each other by multiplication by (y1 + y2)/(y1 + y2)
     *      followed by substitution of x^3 + 7 for y^2 (using the curve equation).
     *      For all pairs of nonzero points (a, b) at least one is defined,
     *      so this covers everything.
     */

    secp256k1_fe_sqr(&zz, &a->z);                       /* zz = Z1^2 */
    u1 = a->x; secp256k1_fe_normalize_weak(&u1);        /* u1 = U1 = X1*Z2^2 (1) */
    secp256k1_fe_mul(&u2, &b->x, &zz);                  /* u2 = U2 = X2*Z1^2 (1) */
    s1 = a->y; secp256k1_fe_normalize_weak(&s1);        /* s1 = S1 = Y1*Z2^3 (1) */
    secp256k1_fe_mul(&s2, &b->y, &zz);                  /* s2 = Y2*Z1^2 (1) */
    secp256k1_fe_mul(&s2, &s2, &a->z);                  /* s2 = S2 = Y2*Z1^3 (1) */
    t = u1; secp256k1_fe_add(&t, &u2);                  /* t = T = U1+U2 (2) */
    m = s1; secp256k1_fe_add(&m, &s2);                  /* m = M = S1+S2 (2) */
    secp256k1_fe_sqr(&rr, &t);                          /* rr = T^2 (1) */
    secp256k1_fe_negate(&m_alt, &u2, 1);                /* Malt = -X2*Z1^2 */
    secp256k1_fe_mul(&tt, &u1, &m_alt);                 /* tt = -U1*U2 (2) */
    secp256k1_fe_add(&rr, &tt);                         /* rr = R = T^2-U1*U2 (3) */
    /** If lambda = R/M = 0/0 we have a problem (except in the "trivial"
     *  case that Z = z1z2 = 0, and this is special-cased later on). */
    degenerate = secp256k1_fe_normalizes_to_zero(&m) &
                 secp256k1_fe_normalizes_to_zero(&rr);
    /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2.
     * This means either x1 == beta*x2 or beta*x1 == x2, where beta is
     * a nontrivial cube root of one. In either case, an alternate
     * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
     * so we set R/M equal to this. */
    rr_alt = s1;
    secp256k1_fe_mul_int(&rr_alt, 2);       /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
    secp256k1_fe_add(&m_alt, &u1);          /* Malt = X1*Z2^2 - X2*Z1^2 */

    secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);
    secp256k1_fe_cmov(&m_alt, &m, !degenerate);
    /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0.
     * From here on out Ralt and Malt represent the numerator
     * and denominator of lambda; R and M represent the explicit
     * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
    secp256k1_fe_sqr(&n, &m_alt);                       /* n = Malt^2 (1) */
    secp256k1_fe_mul(&q, &n, &t);                       /* q = Q = T*Malt^2 (1) */
    /* These two lines use the observation that either M == Malt or M == 0,
     * so M^3 * Malt is either Malt^4 (which is computed by squaring), or
     * zero (which is "computed" by cmov). So the cost is one squaring
     * versus two multiplications. */
    secp256k1_fe_sqr(&n, &n);
    secp256k1_fe_cmov(&n, &m, degenerate);              /* n = M^3 * Malt (2) */
    secp256k1_fe_sqr(&t, &rr_alt);                      /* t = Ralt^2 (1) */
    secp256k1_fe_mul(&r->z, &a->z, &m_alt);             /* r->z = Malt*Z (1) */
    infinity = secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity);
    secp256k1_fe_mul_int(&r->z, 2);                     /* r->z = Z3 = 2*Malt*Z (2) */
    secp256k1_fe_negate(&q, &q, 1);                     /* q = -Q (2) */
    secp256k1_fe_add(&t, &q);                           /* t = Ralt^2-Q (3) */
    secp256k1_fe_normalize_weak(&t);
    r->x = t;                                           /* r->x = Ralt^2-Q (1) */
    secp256k1_fe_mul_int(&t, 2);                        /* t = 2*x3 (2) */
    secp256k1_fe_add(&t, &q);                           /* t = 2*x3 - Q: (4) */
    secp256k1_fe_mul(&t, &t, &rr_alt);                  /* t = Ralt*(2*x3 - Q) (1) */
    secp256k1_fe_add(&t, &n);                           /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
    secp256k1_fe_negate(&r->y, &t, 3);                  /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
    secp256k1_fe_normalize_weak(&r->y);
    secp256k1_fe_mul_int(&r->x, 4);                     /* r->x = X3 = 4*(Ralt^2-Q) */
    secp256k1_fe_mul_int(&r->y, 4);                     /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */

    /** In case a->infinity == 1, replace r with (b->x, b->y, 1). */
    secp256k1_fe_cmov(&r->x, &b->x, a->infinity);
    secp256k1_fe_cmov(&r->y, &b->y, a->infinity);
    secp256k1_fe_cmov(&r->z, &fe_1, a->infinity);
    r->infinity = infinity;
}

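/** Rescale the Jacobian representation of a point: multiply Z by s and adjust
 *  X and Y by s^2 and s^3 respectively, so the affine point represented by r is
 *  unchanged. s must be nonzero. */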
static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
    /* Operations: 4 mul, 1 sqr */
    secp256k1_fe zz;
    VERIFY_CHECK(!secp256k1_fe_is_zero(s));
    secp256k1_fe_sqr(&zz, s);
    secp256k1_fe_mul(&r->x, &r->x, &zz);                /* r->x *= s^2 */
    secp256k1_fe_mul(&r->y, &r->y, &zz);
    secp256k1_fe_mul(&r->y, &r->y, s);                  /* r->y *= s^3 */
    secp256k1_fe_mul(&r->z, &r->z, s);                  /* r->z *= s   */
}

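/** Convert a group element to the compact storage type. The element must not be
 *  the point at infinity, and both coordinates are fully normalized before being
 *  packed; secp256k1_ge_from_storage below performs the inverse conversion. */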
static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) {
    secp256k1_fe x, y;
    VERIFY_CHECK(!a->infinity);
    x = a->x;
    secp256k1_fe_normalize(&x);
    y = a->y;
    secp256k1_fe_normalize(&y);
    secp256k1_fe_to_storage(&r->x, &x);
    secp256k1_fe_to_storage(&r->y, &y);
}

static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a) {
    secp256k1_fe_from_storage(&r->x, &a->x);
    secp256k1_fe_from_storage(&r->y, &a->y);
    r->infinity = 0;
}

static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) {
    secp256k1_fe_storage_cmov(&r->x, &a->x, flag);
    secp256k1_fe_storage_cmov(&r->y, &a->y, flag);
}

#ifdef USE_ENDOMORPHISM
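/** Apply the secp256k1 endomorphism: because the field contains a nontrivial
 *  cube root of unity beta, the map (x, y) -> (beta*x, y) sends curve points to
 *  curve points and corresponds to multiplying the point by the cube root of
 *  unity lambda in the scalar group, so computing lambda*P costs only a single
 *  field multiplication. */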
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
    static const secp256k1_fe beta = SECP256K1_FE_CONST(
        0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
        0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul
    );
    *r = *a;
    secp256k1_fe_mul(&r->x, &r->x, &beta);
}
#endif

static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
    secp256k1_fe yz;

    if (a->infinity) {
        return 0;
    }

    /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as
     * that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z
     * is. */
    secp256k1_fe_mul(&yz, &a->y, &a->z);
    return secp256k1_fe_is_quad_var(&yz);
}

#endif /* SECP256K1_GROUP_IMPL_H */