/***********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                              *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_GROUP_IMPL_H
#define SECP256K1_GROUP_IMPL_H

#include "field.h"
#include "group.h"

/* These exhaustive group test orders and generators are chosen such that:
 * - The field size is equal to that of secp256k1, so field code is the same.
 * - The curve equation is of the form y^2=x^3+B for some constant B.
 * - The subgroup has a generator 2*P, where P.x=1.
 * - The subgroup has size less than 1000 to permit exhaustive testing.
 * - The subgroup admits an endomorphism of the form lambda*(x,y) == (beta*x,y).
 *
 * These parameters are generated using sage/gen_exhaustive_groups.sage.
 */
#if defined(EXHAUSTIVE_TEST_ORDER)
#  if EXHAUSTIVE_TEST_ORDER == 13
static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST(
    0xc3459c3d, 0x35326167, 0xcd86cce8, 0x07a2417f,
    0x5b8bd567, 0xde8538ee, 0x0d507b0c, 0xd128f5bb,
    0x8e467fec, 0xcd30000a, 0x6cc1184e, 0x25d382c2,
    0xa2f4494e, 0x2fbe9abc, 0x8b64abac, 0xd005fb24
);
static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST(
    0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc,
    0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417
);
#  elif EXHAUSTIVE_TEST_ORDER == 199
static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST(
    0x226e653f, 0xc8df7744, 0x9bacbf12, 0x7d1dcbf9,
    0x87f05b2a, 0xe7edbd28, 0x1f564575, 0xc48dcf18,
    0xa13872c2, 0xe933bb17, 0x5d9ffd5b, 0xb5b6e10c,
    0x57fe3c00, 0xbaaaa15a, 0xe003ec3e, 0x9c269bae
);
static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST(
    0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1,
    0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb
);
#  else
#    error No known generator for the specified exhaustive test group order.
#  endif
#else
/** Generator for secp256k1, value 'g' defined in
 *  "Standards for Efficient Cryptography" (SEC2) 2.7.1.
 */
static const rustsecp256k1_v0_4_1_ge rustsecp256k1_v0_4_1_ge_const_g = SECP256K1_GE_CONST(
    0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL,
    0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL,
    0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL,
    0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);

static const rustsecp256k1_v0_4_1_fe rustsecp256k1_v0_4_1_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7);
#endif

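/** Convert a Jacobian point (X, Y, Z) to affine coordinates given a
 *  precomputed zi = Z^-1: x = X*zi^2, y = Y*zi^3. Keeping the (expensive)
 *  field inversion outside this helper lets callers share one inversion
 *  across many conversions.
 */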
static void rustsecp256k1_v0_4_1_ge_set_gej_zinv(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_fe *zi) {
    rustsecp256k1_v0_4_1_fe zi2;
    rustsecp256k1_v0_4_1_fe zi3;
    rustsecp256k1_v0_4_1_fe_sqr(&zi2, zi);
    rustsecp256k1_v0_4_1_fe_mul(&zi3, &zi2, zi);
    rustsecp256k1_v0_4_1_fe_mul(&r->x, &a->x, &zi2);
    rustsecp256k1_v0_4_1_fe_mul(&r->y, &a->y, &zi3);
    r->infinity = a->infinity;
}

static void rustsecp256k1_v0_4_1_ge_set_xy(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_fe *y) {
    r->infinity = 0;
    r->x = *x;
    r->y = *y;
}

static int rustsecp256k1_v0_4_1_ge_is_infinity(const rustsecp256k1_v0_4_1_ge *a) {
    return a->infinity;
}

static void rustsecp256k1_v0_4_1_ge_neg(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a) {
    *r = *a;
    rustsecp256k1_v0_4_1_fe_normalize_weak(&r->y);
    rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1);
}

static void rustsecp256k1_v0_4_1_ge_set_gej(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a) {
    rustsecp256k1_v0_4_1_fe z2, z3;
    r->infinity = a->infinity;
    rustsecp256k1_v0_4_1_fe_inv(&a->z, &a->z);
    rustsecp256k1_v0_4_1_fe_sqr(&z2, &a->z);
    rustsecp256k1_v0_4_1_fe_mul(&z3, &a->z, &z2);
    rustsecp256k1_v0_4_1_fe_mul(&a->x, &a->x, &z2);
    rustsecp256k1_v0_4_1_fe_mul(&a->y, &a->y, &z3);
    rustsecp256k1_v0_4_1_fe_set_int(&a->z, 1);
    r->x = a->x;
    r->y = a->y;
}

static void rustsecp256k1_v0_4_1_ge_set_gej_var(rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_gej *a) {
    rustsecp256k1_v0_4_1_fe z2, z3;
    if (a->infinity) {
        rustsecp256k1_v0_4_1_ge_set_infinity(r);
        return;
    }
    rustsecp256k1_v0_4_1_fe_inv_var(&a->z, &a->z);
    rustsecp256k1_v0_4_1_fe_sqr(&z2, &a->z);
    rustsecp256k1_v0_4_1_fe_mul(&z3, &a->z, &z2);
    rustsecp256k1_v0_4_1_fe_mul(&a->x, &a->x, &z2);
    rustsecp256k1_v0_4_1_fe_mul(&a->y, &a->y, &z3);
    rustsecp256k1_v0_4_1_fe_set_int(&a->z, 1);
    rustsecp256k1_v0_4_1_ge_set_xy(r, &a->x, &a->y);
}

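/** Convert a batch of Jacobian points to affine form using a single field
 *  inversion (Montgomery's simultaneous-inversion trick): accumulate running
 *  products of the z values, invert the final product once, then walk
 *  backwards multiplying by the stored partial products to recover each
 *  individual z^-1. Points at infinity are skipped and set to the affine
 *  infinity directly.
 */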
static void rustsecp256k1_v0_4_1_ge_set_all_gej_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_gej *a, size_t len) {
    rustsecp256k1_v0_4_1_fe u;
    size_t i;
    size_t last_i = SIZE_MAX;

    for (i = 0; i < len; i++) {
        if (a[i].infinity) {
            rustsecp256k1_v0_4_1_ge_set_infinity(&r[i]);
        } else {
            /* Use destination's x coordinates as scratch space */
            if (last_i == SIZE_MAX) {
                r[i].x = a[i].z;
            } else {
                rustsecp256k1_v0_4_1_fe_mul(&r[i].x, &r[last_i].x, &a[i].z);
            }
            last_i = i;
        }
    }
    if (last_i == SIZE_MAX) {
        return;
    }
    rustsecp256k1_v0_4_1_fe_inv_var(&u, &r[last_i].x);

    i = last_i;
    while (i > 0) {
        i--;
        if (!a[i].infinity) {
            rustsecp256k1_v0_4_1_fe_mul(&r[last_i].x, &r[i].x, &u);
            rustsecp256k1_v0_4_1_fe_mul(&u, &u, &a[last_i].z);
            last_i = i;
        }
    }
    VERIFY_CHECK(!a[last_i].infinity);
    r[last_i].x = u;

    for (i = 0; i < len; i++) {
        if (!a[i].infinity) {
            rustsecp256k1_v0_4_1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x);
        }
    }
}

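/** Bring a table of Jacobian points to a common denominator ("global Z"),
 *  so that each entry can be treated as affine relative to that shared Z.
 *  The caller supplies zr, where zr[i] is the ratio a[i].z / a[i-1].z for
 *  i > 0; working backwards from the last entry, the accumulated ratios
 *  rescale every x/y pair without any field inversion.
 */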
static void rustsecp256k1_v0_4_1_ge_globalz_set_table_gej(size_t len, rustsecp256k1_v0_4_1_ge *r, rustsecp256k1_v0_4_1_fe *globalz, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_fe *zr) {
    size_t i = len - 1;
    rustsecp256k1_v0_4_1_fe zs;

    if (len > 0) {
        /* The z of the final point gives us the "global Z" for the table. */
        r[i].x = a[i].x;
        r[i].y = a[i].y;
        /* Ensure all y values are in weak normal form for fast negation of points */
        rustsecp256k1_v0_4_1_fe_normalize_weak(&r[i].y);
        *globalz = a[i].z;
        r[i].infinity = 0;
        zs = zr[i];

        /* Work our way backwards, using the z-ratios to scale the x/y values. */
        while (i > 0) {
            if (i != len - 1) {
                rustsecp256k1_v0_4_1_fe_mul(&zs, &zs, &zr[i]);
            }
            i--;
            rustsecp256k1_v0_4_1_ge_set_gej_zinv(&r[i], &a[i], &zs);
        }
    }
}

static void rustsecp256k1_v0_4_1_gej_set_infinity(rustsecp256k1_v0_4_1_gej *r) {
    r->infinity = 1;
    rustsecp256k1_v0_4_1_fe_clear(&r->x);
    rustsecp256k1_v0_4_1_fe_clear(&r->y);
    rustsecp256k1_v0_4_1_fe_clear(&r->z);
}

static void rustsecp256k1_v0_4_1_ge_set_infinity(rustsecp256k1_v0_4_1_ge *r) {
    r->infinity = 1;
    rustsecp256k1_v0_4_1_fe_clear(&r->x);
    rustsecp256k1_v0_4_1_fe_clear(&r->y);
}

static void rustsecp256k1_v0_4_1_gej_clear(rustsecp256k1_v0_4_1_gej *r) {
    r->infinity = 0;
    rustsecp256k1_v0_4_1_fe_clear(&r->x);
    rustsecp256k1_v0_4_1_fe_clear(&r->y);
    rustsecp256k1_v0_4_1_fe_clear(&r->z);
}

static void rustsecp256k1_v0_4_1_ge_clear(rustsecp256k1_v0_4_1_ge *r) {
    r->infinity = 0;
    rustsecp256k1_v0_4_1_fe_clear(&r->x);
    rustsecp256k1_v0_4_1_fe_clear(&r->y);
}

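/** Recover a point from an x coordinate alone: compute y = sqrt(x^3 + b)
 *  and pick the root whose parity matches 'odd'. Returns 0 when x^3 + b is
 *  not a square, i.e. when there is no curve point with that x coordinate.
 */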
static int rustsecp256k1_v0_4_1_ge_set_xo_var(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_fe *x, int odd) {
    rustsecp256k1_v0_4_1_fe x2, x3;
    r->x = *x;
    rustsecp256k1_v0_4_1_fe_sqr(&x2, x);
    rustsecp256k1_v0_4_1_fe_mul(&x3, x, &x2);
    r->infinity = 0;
    rustsecp256k1_v0_4_1_fe_add(&x3, &rustsecp256k1_v0_4_1_fe_const_b);
    if (!rustsecp256k1_v0_4_1_fe_sqrt(&r->y, &x3)) {
        return 0;
    }
    rustsecp256k1_v0_4_1_fe_normalize_var(&r->y);
    if (rustsecp256k1_v0_4_1_fe_is_odd(&r->y) != odd) {
        rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1);
    }
    return 1;
}

static void rustsecp256k1_v0_4_1_gej_set_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_ge *a) {
    r->infinity = a->infinity;
    r->x = a->x;
    r->y = a->y;
    rustsecp256k1_v0_4_1_fe_set_int(&r->z, 1);
}

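/** Check whether a Jacobian point has affine x coordinate equal to 'x'
 *  without performing an inversion: a.x / a.z^2 == x is equivalent to
 *  a.x == x * a.z^2.
 */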
static int rustsecp256k1_v0_4_1_gej_eq_x_var(const rustsecp256k1_v0_4_1_fe *x, const rustsecp256k1_v0_4_1_gej *a) {
    rustsecp256k1_v0_4_1_fe r, r2;
    VERIFY_CHECK(!a->infinity);
    rustsecp256k1_v0_4_1_fe_sqr(&r, &a->z); rustsecp256k1_v0_4_1_fe_mul(&r, &r, x);
    r2 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&r2);
    return rustsecp256k1_v0_4_1_fe_equal_var(&r, &r2);
}

static void rustsecp256k1_v0_4_1_gej_neg(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a) {
    r->infinity = a->infinity;
    r->x = a->x;
    r->y = a->y;
    r->z = a->z;
    rustsecp256k1_v0_4_1_fe_normalize_weak(&r->y);
    rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->y, 1);
}

static int rustsecp256k1_v0_4_1_gej_is_infinity(const rustsecp256k1_v0_4_1_gej *a) {
    return a->infinity;
}

static int rustsecp256k1_v0_4_1_ge_is_valid_var(const rustsecp256k1_v0_4_1_ge *a) {
    rustsecp256k1_v0_4_1_fe y2, x3;
    if (a->infinity) {
        return 0;
    }
    /* y^2 = x^3 + 7 */
    rustsecp256k1_v0_4_1_fe_sqr(&y2, &a->y);
    rustsecp256k1_v0_4_1_fe_sqr(&x3, &a->x); rustsecp256k1_v0_4_1_fe_mul(&x3, &x3, &a->x);
    rustsecp256k1_v0_4_1_fe_add(&x3, &rustsecp256k1_v0_4_1_fe_const_b);
    rustsecp256k1_v0_4_1_fe_normalize_weak(&x3);
    return rustsecp256k1_v0_4_1_fe_equal_var(&y2, &x3);
}

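/** For reference, the affine doubling formulas computed below in Jacobian
 *  form (curve coefficient a = 0):
 *      lambda = 3*x^2 / (2*y)
 *      x' = lambda^2 - 2*x
 *      y' = lambda*(x - x') - y
 *  Clearing denominators with Z' = 2*Y*Z yields the X' and Y' expressions
 *  in the step-by-step comments, e.g. X' = 9*X^4 - 8*X*Y^2.
 */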
static SECP256K1_INLINE void rustsecp256k1_v0_4_1_gej_double(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a) {
    /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate.
     *
     * Note that there is an implementation described at
     *     https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
     * which trades a multiply for a square, but in practice this is actually slower,
     * mainly because it requires more normalizations.
     */
    rustsecp256k1_v0_4_1_fe t1, t2, t3, t4;

    r->infinity = a->infinity;

    rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &a->y);
    rustsecp256k1_v0_4_1_fe_mul_int(&r->z, 2);       /* Z' = 2*Y*Z (2) */
    rustsecp256k1_v0_4_1_fe_sqr(&t1, &a->x);
    rustsecp256k1_v0_4_1_fe_mul_int(&t1, 3);         /* T1 = 3*X^2 (3) */
    rustsecp256k1_v0_4_1_fe_sqr(&t2, &t1);           /* T2 = 9*X^4 (1) */
    rustsecp256k1_v0_4_1_fe_sqr(&t3, &a->y);
    rustsecp256k1_v0_4_1_fe_mul_int(&t3, 2);         /* T3 = 2*Y^2 (2) */
    rustsecp256k1_v0_4_1_fe_sqr(&t4, &t3);
    rustsecp256k1_v0_4_1_fe_mul_int(&t4, 2);         /* T4 = 8*Y^4 (2) */
    rustsecp256k1_v0_4_1_fe_mul(&t3, &t3, &a->x);    /* T3 = 2*X*Y^2 (1) */
    r->x = t3;
    rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 4);       /* X' = 8*X*Y^2 (4) */
    rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
    rustsecp256k1_v0_4_1_fe_add(&r->x, &t2);         /* X' = 9*X^4 - 8*X*Y^2 (6) */
    rustsecp256k1_v0_4_1_fe_negate(&t2, &t2, 1);     /* T2 = -9*X^4 (2) */
    rustsecp256k1_v0_4_1_fe_mul_int(&t3, 6);         /* T3 = 12*X*Y^2 (6) */
    rustsecp256k1_v0_4_1_fe_add(&t3, &t2);           /* T3 = 12*X*Y^2 - 9*X^4 (8) */
    rustsecp256k1_v0_4_1_fe_mul(&r->y, &t1, &t3);    /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
    rustsecp256k1_v0_4_1_fe_negate(&t2, &t4, 2);     /* T2 = -8*Y^4 (3) */
    rustsecp256k1_v0_4_1_fe_add(&r->y, &t2);         /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
}

static void rustsecp256k1_v0_4_1_gej_double_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, rustsecp256k1_v0_4_1_fe *rzr) {
    /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
     *  Q must equal -Q, i.e. Q.y == -(Q.y), i.e. Q.y is 0. For a point on y^2 = x^3 + 7 to have
     *  y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
     *
     *  Having said this, if this function receives a point on a sextic twist, e.g. by
     *  a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6,
     *  since -6 does have a cube root mod p. For such a point, this function will not set
     *  the infinity flag even though the point doubles to infinity, and the result
     *  point will be gibberish (z = 0 but infinity = 0).
     */
    if (a->infinity) {
        rustsecp256k1_v0_4_1_gej_set_infinity(r);
        if (rzr != NULL) {
            rustsecp256k1_v0_4_1_fe_set_int(rzr, 1);
        }
        return;
    }

    if (rzr != NULL) {
        *rzr = a->y;
        rustsecp256k1_v0_4_1_fe_normalize_weak(rzr);
        rustsecp256k1_v0_4_1_fe_mul_int(rzr, 2);
    }

    rustsecp256k1_v0_4_1_gej_double(r, a);
}

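/** For reference, the affine addition formulas computed below in Jacobian
 *  form (P1 != +-P2):
 *      lambda = (y2 - y1) / (x2 - x1)
 *      x3 = lambda^2 - x1 - x2
 *      y3 = lambda*(x1 - x3) - y1
 *  After scaling both inputs to the common denominator Z1*Z2, h = U2-U1 and
 *  i = S2-S1 play the roles of (x2 - x1) and (y2 - y1). h == 0 with i == 0
 *  means the inputs are equal (fall back to doubling); h == 0 with i != 0
 *  means they are opposite (the result is infinity).
 */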
static void rustsecp256k1_v0_4_1_gej_add_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_gej *b, rustsecp256k1_v0_4_1_fe *rzr) {
    /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
    rustsecp256k1_v0_4_1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;

    if (a->infinity) {
        VERIFY_CHECK(rzr == NULL);
        *r = *b;
        return;
    }

    if (b->infinity) {
        if (rzr != NULL) {
            rustsecp256k1_v0_4_1_fe_set_int(rzr, 1);
        }
        *r = *a;
        return;
    }

    r->infinity = 0;
    rustsecp256k1_v0_4_1_fe_sqr(&z22, &b->z);
    rustsecp256k1_v0_4_1_fe_sqr(&z12, &a->z);
    rustsecp256k1_v0_4_1_fe_mul(&u1, &a->x, &z22);
    rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &z12);
    rustsecp256k1_v0_4_1_fe_mul(&s1, &a->y, &z22); rustsecp256k1_v0_4_1_fe_mul(&s1, &s1, &b->z);
    rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &a->z);
    rustsecp256k1_v0_4_1_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_1_fe_add(&h, &u2);
    rustsecp256k1_v0_4_1_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_1_fe_add(&i, &s2);
    if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&h)) {
        if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&i)) {
            rustsecp256k1_v0_4_1_gej_double_var(r, a, rzr);
        } else {
            if (rzr != NULL) {
                rustsecp256k1_v0_4_1_fe_set_int(rzr, 0);
            }
            rustsecp256k1_v0_4_1_gej_set_infinity(r);
        }
        return;
    }
    rustsecp256k1_v0_4_1_fe_sqr(&i2, &i);
    rustsecp256k1_v0_4_1_fe_sqr(&h2, &h);
    rustsecp256k1_v0_4_1_fe_mul(&h3, &h, &h2);
    rustsecp256k1_v0_4_1_fe_mul(&h, &h, &b->z);
    if (rzr != NULL) {
        *rzr = h;
    }
    rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &h);
    rustsecp256k1_v0_4_1_fe_mul(&t, &u1, &h2);
    r->x = t; rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_1_fe_add(&r->x, &h3); rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_1_fe_add(&r->x, &i2);
    rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_1_fe_add(&r->y, &t); rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &i);
    rustsecp256k1_v0_4_1_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_1_fe_negate(&h3, &h3, 1);
    rustsecp256k1_v0_4_1_fe_add(&r->y, &h3);
}

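/** Mixed addition: the same computation as rustsecp256k1_v0_4_1_gej_add_var,
 *  but with b given in affine coordinates (implicitly Z2 = 1), which saves
 *  the multiplications and squaring involving b's z coordinate.
 */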
static void rustsecp256k1_v0_4_1_gej_add_ge_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, rustsecp256k1_v0_4_1_fe *rzr) {
    /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
    rustsecp256k1_v0_4_1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
    if (a->infinity) {
        VERIFY_CHECK(rzr == NULL);
        rustsecp256k1_v0_4_1_gej_set_ge(r, b);
        return;
    }
    if (b->infinity) {
        if (rzr != NULL) {
            rustsecp256k1_v0_4_1_fe_set_int(rzr, 1);
        }
        *r = *a;
        return;
    }
    r->infinity = 0;

    rustsecp256k1_v0_4_1_fe_sqr(&z12, &a->z);
    u1 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u1);
    rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &z12);
    s1 = a->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s1);
    rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &a->z);
    rustsecp256k1_v0_4_1_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_1_fe_add(&h, &u2);
    rustsecp256k1_v0_4_1_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_1_fe_add(&i, &s2);
    if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&h)) {
        if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&i)) {
            rustsecp256k1_v0_4_1_gej_double_var(r, a, rzr);
        } else {
            if (rzr != NULL) {
                rustsecp256k1_v0_4_1_fe_set_int(rzr, 0);
            }
            rustsecp256k1_v0_4_1_gej_set_infinity(r);
        }
        return;
    }
    rustsecp256k1_v0_4_1_fe_sqr(&i2, &i);
    rustsecp256k1_v0_4_1_fe_sqr(&h2, &h);
    rustsecp256k1_v0_4_1_fe_mul(&h3, &h, &h2);
    if (rzr != NULL) {
        *rzr = h;
    }
    rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &h);
    rustsecp256k1_v0_4_1_fe_mul(&t, &u1, &h2);
    r->x = t; rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_1_fe_add(&r->x, &h3); rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_1_fe_add(&r->x, &i2);
    rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_1_fe_add(&r->y, &t); rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &i);
    rustsecp256k1_v0_4_1_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_1_fe_negate(&h3, &h3, 1);
    rustsecp256k1_v0_4_1_fe_add(&r->y, &h3);
}

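/** Like the mixed addition above, but b's coordinates come with a separately
 *  precomputed inverse: (b->x, b->y, bzinv) together encode the Jacobian
 *  point (b->x, b->y, 1/bzinv). Passing the inverse lets a caller that adds
 *  many points with known z inverses avoid a per-addition field inversion;
 *  the isomorphism trick used is explained in the comment inside the body.
 */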
static void rustsecp256k1_v0_4_1_gej_add_zinv_var(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b, const rustsecp256k1_v0_4_1_fe *bzinv) {
    /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
    rustsecp256k1_v0_4_1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;

    if (b->infinity) {
        *r = *a;
        return;
    }
    if (a->infinity) {
        rustsecp256k1_v0_4_1_fe bzinv2, bzinv3;
        r->infinity = b->infinity;
        rustsecp256k1_v0_4_1_fe_sqr(&bzinv2, bzinv);
        rustsecp256k1_v0_4_1_fe_mul(&bzinv3, &bzinv2, bzinv);
        rustsecp256k1_v0_4_1_fe_mul(&r->x, &b->x, &bzinv2);
        rustsecp256k1_v0_4_1_fe_mul(&r->y, &b->y, &bzinv3);
        rustsecp256k1_v0_4_1_fe_set_int(&r->z, 1);
        return;
    }
    r->infinity = 0;

    /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to
     *  secp256k1's isomorphism we can multiply the Z coordinates on both sides
     *  by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1).
     *  This means that (rx,ry,rz) can be calculated as
     *  (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz.
     *  The variable az below holds the modified Z coordinate for a, which is used
     *  for the computation of rx and ry, but not for rz.
     */
    rustsecp256k1_v0_4_1_fe_mul(&az, &a->z, bzinv);

    rustsecp256k1_v0_4_1_fe_sqr(&z12, &az);
    u1 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u1);
    rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &z12);
    s1 = a->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s1);
    rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &z12); rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &az);
    rustsecp256k1_v0_4_1_fe_negate(&h, &u1, 1); rustsecp256k1_v0_4_1_fe_add(&h, &u2);
    rustsecp256k1_v0_4_1_fe_negate(&i, &s1, 1); rustsecp256k1_v0_4_1_fe_add(&i, &s2);
    if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&h)) {
        if (rustsecp256k1_v0_4_1_fe_normalizes_to_zero_var(&i)) {
            rustsecp256k1_v0_4_1_gej_double_var(r, a, NULL);
        } else {
            rustsecp256k1_v0_4_1_gej_set_infinity(r);
        }
        return;
    }
    rustsecp256k1_v0_4_1_fe_sqr(&i2, &i);
    rustsecp256k1_v0_4_1_fe_sqr(&h2, &h);
    rustsecp256k1_v0_4_1_fe_mul(&h3, &h, &h2);
    r->z = a->z; rustsecp256k1_v0_4_1_fe_mul(&r->z, &r->z, &h);
    rustsecp256k1_v0_4_1_fe_mul(&t, &u1, &h2);
    r->x = t; rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 2); rustsecp256k1_v0_4_1_fe_add(&r->x, &h3); rustsecp256k1_v0_4_1_fe_negate(&r->x, &r->x, 3); rustsecp256k1_v0_4_1_fe_add(&r->x, &i2);
    rustsecp256k1_v0_4_1_fe_negate(&r->y, &r->x, 5); rustsecp256k1_v0_4_1_fe_add(&r->y, &t); rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &i);
    rustsecp256k1_v0_4_1_fe_mul(&h3, &h3, &s1); rustsecp256k1_v0_4_1_fe_negate(&h3, &h3, 1);
    rustsecp256k1_v0_4_1_fe_add(&r->y, &h3);
}

static void rustsecp256k1_v0_4_1_gej_add_ge(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_gej *a, const rustsecp256k1_v0_4_1_ge *b) {
    /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */
    static const rustsecp256k1_v0_4_1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
    rustsecp256k1_v0_4_1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
    rustsecp256k1_v0_4_1_fe m_alt, rr_alt;
    int infinity, degenerate;
    VERIFY_CHECK(!b->infinity);
    VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);

    /** In:
     *    Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks.
     *    In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002.
     *  we find as solution for a unified addition/doubling formula:
     *    lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation.
     *    x3 = lambda^2 - (x1 + x2)
     *    2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2).
     *
     *  Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives:
     *    U1 = X1*Z2^2, U2 = X2*Z1^2
     *    S1 = Y1*Z2^3, S2 = Y2*Z1^3
     *    Z = Z1*Z2
     *    T = U1+U2
     *    M = S1+S2
     *    Q = T*M^2
     *    R = T^2-U1*U2
     *    X3 = 4*(R^2-Q)
     *    Y3 = 4*(R*(3*Q-2*R^2)-M^4)
     *    Z3 = 2*M*Z
     *  (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.)
     *
     *  This formula has the benefit of being the same for both addition
     *  of distinct points and doubling. However, it breaks down in the
     *  case that either point is infinity, or that y1 = -y2. We handle
     *  these cases in the following ways:
     *
     *    - If b is infinity we simply bail by means of a VERIFY_CHECK.
     *
     *    - If a is infinity, we detect this, and at the end of the
     *      computation replace the result (which will be meaningless,
     *      but we compute to be constant-time) with b.x : b.y : 1.
     *
     *    - If a = -b, we have y1 = -y2, which is a degenerate case.
     *      But here the answer is infinity, so we simply set the
     *      infinity flag of the result, overriding the computed values
     *      without even needing to cmov.
     *
     *    - If y1 = -y2 but x1 != x2, which does occur thanks to certain
     *      properties of our curve (specifically, 1 has nontrivial cube
     *      roots in our field, and the curve equation has no x coefficient)
     *      then the answer is not infinity but also not given by the above
     *      equation. In this case, we cmov in place an alternate expression
     *      for lambda. Specifically (y1 - y2)/(x1 - x2). Where both these
     *      expressions for lambda are defined, they are equal, and can be
     *      obtained from each other by multiplication by (y1 + y2)/(y1 + y2)
     *      then substitution of x^3 + 7 for y^2 (using the curve equation).
     *      For all pairs of nonzero points (a, b) at least one is defined,
     *      so this covers everything.
     */

    rustsecp256k1_v0_4_1_fe_sqr(&zz, &a->z);                       /* z = Z1^2 */
    u1 = a->x; rustsecp256k1_v0_4_1_fe_normalize_weak(&u1);        /* u1 = U1 = X1*Z2^2 (1) */
    rustsecp256k1_v0_4_1_fe_mul(&u2, &b->x, &zz);                  /* u2 = U2 = X2*Z1^2 (1) */
    s1 = a->y; rustsecp256k1_v0_4_1_fe_normalize_weak(&s1);        /* s1 = S1 = Y1*Z2^3 (1) */
    rustsecp256k1_v0_4_1_fe_mul(&s2, &b->y, &zz);                  /* s2 = Y2*Z1^2 (1) */
    rustsecp256k1_v0_4_1_fe_mul(&s2, &s2, &a->z);                  /* s2 = S2 = Y2*Z1^3 (1) */
    t = u1; rustsecp256k1_v0_4_1_fe_add(&t, &u2);                  /* t = T = U1+U2 (2) */
    m = s1; rustsecp256k1_v0_4_1_fe_add(&m, &s2);                  /* m = M = S1+S2 (2) */
    rustsecp256k1_v0_4_1_fe_sqr(&rr, &t);                          /* rr = T^2 (1) */
    rustsecp256k1_v0_4_1_fe_negate(&m_alt, &u2, 1);                /* Malt = -X2*Z1^2 */
    rustsecp256k1_v0_4_1_fe_mul(&tt, &u1, &m_alt);                 /* tt = -U1*U2 (2) */
    rustsecp256k1_v0_4_1_fe_add(&rr, &tt);                         /* rr = R = T^2-U1*U2 (3) */
    /** If lambda = R/M = 0/0 we have a problem (except in the "trivial"
     *  case that Z = z1z2 = 0, and this is special-cased later on). */
    degenerate = rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&m) &
                 rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&rr);
    /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2.
     * This means either x1 == beta*x2 or beta*x1 == x2, where beta is
     * a nontrivial cube root of one. In either case, an alternate
     * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
     * so we set R/M equal to this. */
    rr_alt = s1;
    rustsecp256k1_v0_4_1_fe_mul_int(&rr_alt, 2);       /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
    rustsecp256k1_v0_4_1_fe_add(&m_alt, &u1);          /* Malt = X1*Z2^2 - X2*Z1^2 */

    rustsecp256k1_v0_4_1_fe_cmov(&rr_alt, &rr, !degenerate);
    rustsecp256k1_v0_4_1_fe_cmov(&m_alt, &m, !degenerate);
    /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0.
     * From here on out Ralt and Malt represent the numerator
     * and denominator of lambda; R and M represent the explicit
     * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
    rustsecp256k1_v0_4_1_fe_sqr(&n, &m_alt);                       /* n = Malt^2 (1) */
    rustsecp256k1_v0_4_1_fe_mul(&q, &n, &t);                       /* q = Q = T*Malt^2 (1) */
    /* These two lines use the observation that either M == Malt or M == 0,
     * so M^3 * Malt is either Malt^4 (which is computed by squaring), or
     * zero (which is "computed" by cmov). So the cost is one squaring
     * versus two multiplications. */
    rustsecp256k1_v0_4_1_fe_sqr(&n, &n);
    rustsecp256k1_v0_4_1_fe_cmov(&n, &m, degenerate);              /* n = M^3 * Malt (2) */
    rustsecp256k1_v0_4_1_fe_sqr(&t, &rr_alt);                      /* t = Ralt^2 (1) */
    rustsecp256k1_v0_4_1_fe_mul(&r->z, &a->z, &m_alt);             /* r->z = Malt*Z (1) */
    infinity = rustsecp256k1_v0_4_1_fe_normalizes_to_zero(&r->z) & ~a->infinity;
    rustsecp256k1_v0_4_1_fe_mul_int(&r->z, 2);                     /* r->z = Z3 = 2*Malt*Z (2) */
    rustsecp256k1_v0_4_1_fe_negate(&q, &q, 1);                     /* q = -Q (2) */
    rustsecp256k1_v0_4_1_fe_add(&t, &q);                           /* t = Ralt^2-Q (3) */
    rustsecp256k1_v0_4_1_fe_normalize_weak(&t);
    r->x = t;                                                      /* r->x = Ralt^2-Q (1) */
    rustsecp256k1_v0_4_1_fe_mul_int(&t, 2);                        /* t = 2*x3 (2) */
    rustsecp256k1_v0_4_1_fe_add(&t, &q);                           /* t = 2*x3 - Q: (4) */
    rustsecp256k1_v0_4_1_fe_mul(&t, &t, &rr_alt);                  /* t = Ralt*(2*x3 - Q) (1) */
    rustsecp256k1_v0_4_1_fe_add(&t, &n);                           /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
    rustsecp256k1_v0_4_1_fe_negate(&r->y, &t, 3);                  /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
    rustsecp256k1_v0_4_1_fe_normalize_weak(&r->y);
    rustsecp256k1_v0_4_1_fe_mul_int(&r->x, 4);                     /* r->x = X3 = 4*(Ralt^2-Q) */
    rustsecp256k1_v0_4_1_fe_mul_int(&r->y, 4);                     /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */

    /** In case a->infinity == 1, replace r with (b->x, b->y, 1). */
    rustsecp256k1_v0_4_1_fe_cmov(&r->x, &b->x, a->infinity);
    rustsecp256k1_v0_4_1_fe_cmov(&r->y, &b->y, a->infinity);
    rustsecp256k1_v0_4_1_fe_cmov(&r->z, &fe_1, a->infinity);
    r->infinity = infinity;
}

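/** Rescale a Jacobian point by a nonzero factor s: (X, Y, Z) becomes
 *  (X*s^2, Y*s^3, Z*s), which represents the same affine point. Useful,
 *  for example, to re-randomize the projective representation as a
 *  side-channel countermeasure.
 */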
static void rustsecp256k1_v0_4_1_gej_rescale(rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_fe *s) {
    /* Operations: 4 mul, 1 sqr */
    rustsecp256k1_v0_4_1_fe zz;
    VERIFY_CHECK(!rustsecp256k1_v0_4_1_fe_is_zero(s));
    rustsecp256k1_v0_4_1_fe_sqr(&zz, s);
    rustsecp256k1_v0_4_1_fe_mul(&r->x, &r->x, &zz);                /* r->x *= s^2 */
    rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, &zz);
    rustsecp256k1_v0_4_1_fe_mul(&r->y, &r->y, s);                  /* r->y *= s^3 */
    rustsecp256k1_v0_4_1_fe_mul(&r->z, &r->z, s);                  /* r->z *= s   */
}

static void rustsecp256k1_v0_4_1_ge_to_storage(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge *a) {
    rustsecp256k1_v0_4_1_fe x, y;
    VERIFY_CHECK(!a->infinity);
    x = a->x;
    rustsecp256k1_v0_4_1_fe_normalize(&x);
    y = a->y;
    rustsecp256k1_v0_4_1_fe_normalize(&y);
    rustsecp256k1_v0_4_1_fe_to_storage(&r->x, &x);
    rustsecp256k1_v0_4_1_fe_to_storage(&r->y, &y);
}

static void rustsecp256k1_v0_4_1_ge_from_storage(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge_storage *a) {
    rustsecp256k1_v0_4_1_fe_from_storage(&r->x, &a->x);
    rustsecp256k1_v0_4_1_fe_from_storage(&r->y, &a->y);
    r->infinity = 0;
}

static SECP256K1_INLINE void rustsecp256k1_v0_4_1_ge_storage_cmov(rustsecp256k1_v0_4_1_ge_storage *r, const rustsecp256k1_v0_4_1_ge_storage *a, int flag) {
    rustsecp256k1_v0_4_1_fe_storage_cmov(&r->x, &a->x, flag);
    rustsecp256k1_v0_4_1_fe_storage_cmov(&r->y, &a->y, flag);
}

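/** Apply the secp256k1 endomorphism lambda * (x, y) == (beta * x, y), where
 *  beta is a nontrivial cube root of unity modulo the field prime p (and
 *  lambda the corresponding cube root of unity modulo the group order n).
 *  This cheap map is what the GLV/endomorphism optimization exploits.
 */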
static void rustsecp256k1_v0_4_1_ge_mul_lambda(rustsecp256k1_v0_4_1_ge *r, const rustsecp256k1_v0_4_1_ge *a) {
    static const rustsecp256k1_v0_4_1_fe beta = SECP256K1_FE_CONST(
        0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
        0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul
    );
    *r = *a;
    rustsecp256k1_v0_4_1_fe_mul(&r->x, &r->x, &beta);
}

static int rustsecp256k1_v0_4_1_ge_is_in_correct_subgroup(const rustsecp256k1_v0_4_1_ge* ge) {
#ifdef EXHAUSTIVE_TEST_ORDER
    rustsecp256k1_v0_4_1_gej out;
    int i;

    /* A very simple EC multiplication ladder that avoids a dependency on ecmult. */
    rustsecp256k1_v0_4_1_gej_set_infinity(&out);
    for (i = 0; i < 32; ++i) {
        rustsecp256k1_v0_4_1_gej_double_var(&out, &out, NULL);
        if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) {
            rustsecp256k1_v0_4_1_gej_add_ge_var(&out, &out, ge, NULL);
        }
    }
    return rustsecp256k1_v0_4_1_gej_is_infinity(&out);
#else
    (void)ge;
    /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */
    return 1;
#endif
}

#endif /* SECP256K1_GROUP_IMPL_H */