/***********************************************************************
 * Copyright (c) 2020 Peter Dettman                                    *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_MODINV64_IMPL_H
#define SECP256K1_MODINV64_IMPL_H

#include "modinv64.h"

#include "util.h"

/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and
 * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang.
 *
 * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an
 * implementation for N=62, using 62-bit signed limbs represented as int64_t.
 */
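
/* Illustrative note (a sketch, not needed by the code below): a signed62 number with limbs
 * v[0..4] (see modinv64.h for the type definition) represents the value
 *
 *     v[0] + v[1]*2^62 + v[2]*2^124 + v[3]*2^186 + v[4]*2^248,
 *
 * so e.g. the constant 1 is {{1, 0, 0, 0, 0}} and 2^62 is {{0, 1, 0, 0, 0}}. Individual limbs are
 * signed and may temporarily leave the canonical range [0,2^62) inside the functions below. */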

#ifdef VERIFY
/* Helper function to compute the absolute value of an int64_t.
 * (we don't use abs/labs/llabs as it depends on the int sizes). */
static int64_t rustsecp256k1_v0_4_1_modinv64_abs(int64_t v) {
    VERIFY_CHECK(v > INT64_MIN);
    if (v < 0) return -v;
    return v;
}

static const rustsecp256k1_v0_4_1_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}};

/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). */
static void rustsecp256k1_v0_4_1_modinv64_mul_62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, const rustsecp256k1_v0_4_1_modinv64_signed62 *a, int alen, int64_t factor) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    int128_t c = 0;
    int i;
    for (i = 0; i < 4; ++i) {
        if (i < alen) c += (int128_t)a->v[i] * factor;
        r->v[i] = (int64_t)c & M62; c >>= 62;
    }
    if (4 < alen) c += (int128_t)a->v[4] * factor;
    VERIFY_CHECK(c == (int64_t)c);
    r->v[4] = (int64_t)c;
}

/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A has alen limbs; b has 5. */
static int rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(const rustsecp256k1_v0_4_1_modinv64_signed62 *a, int alen, const rustsecp256k1_v0_4_1_modinv64_signed62 *b, int64_t factor) {
    int i;
    rustsecp256k1_v0_4_1_modinv64_signed62 am, bm;
    rustsecp256k1_v0_4_1_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */
    rustsecp256k1_v0_4_1_modinv64_mul_62(&bm, b, 5, factor);
    for (i = 0; i < 4; ++i) {
        /* Verify that all but the top limb of a and b are normalized. */
        VERIFY_CHECK(am.v[i] >> 62 == 0);
        VERIFY_CHECK(bm.v[i] >> 62 == 0);
    }
    for (i = 4; i >= 0; --i) {
        if (am.v[i] < bm.v[i]) return -1;
        if (am.v[i] > bm.v[i]) return 1;
    }
    return 0;
}
#endif

/* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus
 * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the
 * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range
 * [0,2^62). */
static void rustsecp256k1_v0_4_1_modinv64_normalize_62(rustsecp256k1_v0_4_1_modinv64_signed62 *r, int64_t sign, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4];
    int64_t cond_add, cond_negate;

#ifdef VERIFY
    /* Verify that all limbs are in range (-2^62,2^62). */
    int i;
    for (i = 0; i < 5; ++i) {
        VERIFY_CHECK(r->v[i] >= -M62);
        VERIFY_CHECK(r->v[i] <= M62);
    }
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif

    /* In a first step, add the modulus if the input is negative, and then negate if requested.
     * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input
     * limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that the right
     * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is
     * indeed the behavior of the right shift operator). */
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
    cond_negate = sign >> 63;
    r0 = (r0 ^ cond_negate) - cond_negate;
    r1 = (r1 ^ cond_negate) - cond_negate;
    r2 = (r2 ^ cond_negate) - cond_negate;
    r3 = (r3 ^ cond_negate) - cond_negate;
    r4 = (r4 ^ cond_negate) - cond_negate;
    /* Propagate the top bits, to bring limbs back to range (-2^62,2^62). */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;

    /* In a second step add the modulus again if the result is still negative, bringing
     * r to range [0,modulus). */
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
    /* And propagate again. */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;

    r->v[0] = r0;
    r->v[1] = r1;
    r->v[2] = r2;
    r->v[3] = r3;
    r->v[4] = r4;

#ifdef VERIFY
    VERIFY_CHECK(r0 >> 62 == 0);
    VERIFY_CHECK(r1 >> 62 == 0);
    VERIFY_CHECK(r2 >> 62 == 0);
    VERIFY_CHECK(r3 >> 62 == 0);
    VERIFY_CHECK(r4 >> 62 == 0);
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
}

/* Data type for transition matrices (see section 3 of explanation).
 *
 * t = [ u  v ]
 *     [ q  r ]
 */
typedef struct {
    int64_t u, v, q, r;
} rustsecp256k1_v0_4_1_modinv64_trans2x2;
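
/* Illustrative note (a sketch, not used by the code): a transition matrix t acts on a column
 * vector [f, g] as
 *
 *     [ f' ]   [ u  v ] [ f ]
 *     [ g' ] = [ q  r ] [ g ] / 2^62,
 *
 * i.e. f' = (u*f + v*g)/2^62 and g' = (q*f + r*g)/2^62; the entries are scaled by 2^62 so that
 * these divisions are exact (see the update_fg functions below). The same matrix is applied to
 * [d, e], except there the division by 2^62 is performed modulo the modulus (see update_de). */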

/* Compute the transition matrix and zeta for 59 divsteps (where zeta=-(delta+1/2)).
 * Note that the transformation matrix is scaled by 2^62 and not 2^59.
 *
 * Input:  zeta: initial zeta
 *         f0:   bottom limb of initial f
 *         g0:   bottom limb of initial g
 * Output: t: transition matrix
 * Return: final zeta
 *
 * Implements the divsteps_n_matrix function from the explanation.
 */
static int64_t rustsecp256k1_v0_4_1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
    /* u,v,q,r are the elements of the transformation matrix being built up,
     * starting with the identity matrix times 8 (because the caller expects
     * a result scaled by 2^62). Semantically they are signed integers
     * in range [-2^62,2^62], but here represented as unsigned mod 2^64. This
     * permits left shifting (which is UB for negative numbers). The range
     * being inside [-2^63,2^63) means that casting to signed works correctly.
     */
    uint64_t u = 8, v = 0, q = 0, r = 8;
    uint64_t c1, c2, f = f0, g = g0, x, y, z;
    int i;

    for (i = 3; i < 62; ++i) {
        VERIFY_CHECK((f & 1) == 1); /* f must always be odd */
        VERIFY_CHECK((u * f0 + v * g0) == f << i);
        VERIFY_CHECK((q * f0 + r * g0) == g << i);
        /* Compute conditional masks for (zeta < 0) and for (g & 1). */
        c1 = zeta >> 63;
        c2 = -(g & 1);
        /* Compute x,y,z, conditionally negated versions of f,u,v. */
        x = (f ^ c1) - c1;
        y = (u ^ c1) - c1;
        z = (v ^ c1) - c1;
        /* Conditionally add x,y,z to g,q,r. */
        g += x & c2;
        q += y & c2;
        r += z & c2;
        /* In what follows, c1 is a condition mask for (zeta < 0) and (g & 1) both being true. */
        c1 &= c2;
        /* Conditionally change zeta into -zeta-2 or zeta-1. */
        zeta = (zeta ^ c1) - 1;
        /* Conditionally add g,q,r to f,u,v. */
        f += g & c1;
        u += q & c1;
        v += r & c1;
        /* Shifts */
        g >>= 1;
        u <<= 1;
        v <<= 1;
        /* Bounds on zeta that follow from the bounds on iteration count (max 10*59 divsteps). */
        VERIFY_CHECK(zeta >= -591 && zeta <= 591);
    }
    /* Return data in t and return value. */
    t->u = (int64_t)u;
    t->v = (int64_t)v;
    t->q = (int64_t)q;
    t->r = (int64_t)r;
    /* The determinant of t must be a power of two. This guarantees that multiplication with t
     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
     * will be divided out again). As each divstep's individual matrix has determinant 2, the
     * aggregate of 59 of them will have determinant 2^59. Multiplying with the initial
     * 8*identity (which has determinant 2^6) means the overall output has determinant
     * 2^65. */
    VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 65);
    return zeta;
}
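
/* For reference, a single divstep in the zeta formulation used above (a sketch, transcribed from
 * the explanation; the loop in rustsecp256k1_v0_4_1_modinv64_divsteps_59 batches 59 of these and
 * records their combined effect in t rather than executing them one by one):
 *
 *     if (zeta < 0 && (g & 1)) { zeta = -zeta - 2; tmp = f; f = g; g = (g - tmp) >> 1; }
 *     else if (g & 1)          { zeta = zeta - 1;                  g = (g + f) >> 1; }
 *     else                     { zeta = zeta - 1;                  g = g >> 1; }
 */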

/* Compute the transition matrix and eta for 62 divsteps (variable time, eta=-delta).
 *
 * Input:  eta: initial eta
 *         f0:  bottom limb of initial f
 *         g0:  bottom limb of initial g
 * Output: t: transition matrix
 * Return: final eta
 *
 * Implements the divsteps_n_matrix_var function from the explanation.
 */
static int64_t rustsecp256k1_v0_4_1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
    /* Transformation matrix; see comments in rustsecp256k1_v0_4_1_modinv64_divsteps_59. */
    uint64_t u = 1, v = 0, q = 0, r = 1;
    uint64_t f = f0, g = g0, m;
    uint32_t w;
    int i = 62, limit, zeros;

    for (;;) {
        /* Use a sentinel bit to count zeros only up to i. */
        zeros = rustsecp256k1_v0_4_1_ctz64_var(g | (UINT64_MAX << i));
        /* Perform zeros divsteps at once; they all just divide g by two. */
        g >>= zeros;
        u <<= zeros;
        v <<= zeros;
        eta -= zeros;
        i -= zeros;
        /* We're done once we've done 62 divsteps. */
        if (i == 0) break;
        VERIFY_CHECK((f & 1) == 1);
        VERIFY_CHECK((g & 1) == 1);
        VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i));
        VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i));
        /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */
        VERIFY_CHECK(eta >= -745 && eta <= 745);
        /* If eta is negative, negate it and replace f,g with g,-f. */
        if (eta < 0) {
            uint64_t tmp;
            eta = -eta;
            tmp = f; f = g; g = -tmp;
            tmp = u; u = q; q = -tmp;
            tmp = v; v = r; r = -tmp;
            /* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled
             * out (as we'd be done before that point), and no more than eta+1 can be done as its
             * sign will flip again once that happens. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 6) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 63U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6)
             * bits. */
            w = (f * g * (f * f - 2)) & m;
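            /* Why this formula works (sketch): for odd f, f^2 == 1 (mod 8), so
             * (f^2 - 1)^2 == 0 (mod 64) and f^2*(f^2 - 2) == (f^2 - 1)^2 - 1 == -1 (mod 64).
             * Hence f*(f*f - 2) == -1/f (mod 64), making w == -g/f modulo 2^min(limit,6), so
             * that g + f*w has its bottom min(limit,6) bits cleared. */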
        } else {
            /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as
             * eta tends to be smaller here. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 4) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 15U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4)
             * bits. */
            w = f + (((f + 1) & 4) << 1);
            w = (-w * g) & m;
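            /* Why this formula works (sketch): f + (((f + 1) & 4) << 1) == 1/f (mod 16) for every
             * odd f (check f = 1,3,...,15), so w == -g/f modulo 2^min(limit,4) and g + f*w has its
             * bottom min(limit,4) bits cleared. */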
        }
        g += f * w;
        q += u * w;
        r += v * w;
        VERIFY_CHECK((g & m) == 0);
    }
    /* Return data in t and return value. */
    t->u = (int64_t)u;
    t->v = (int64_t)v;
    t->q = (int64_t)q;
    t->r = (int64_t)r;
    /* The determinant of t must be a power of two. This guarantees that multiplication with t
     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
     * will be divided out again). As each divstep's individual matrix has determinant 2, the
     * aggregate of 62 of them will have determinant 2^62. */
    VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 62);
    return eta;
}

/* Compute (t/2^62) * [d, e] mod modulus, where t is a transition matrix scaled by 2^62.
 *
 * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range
 * (-2^62,2^62).
 *
 * This implements the update_de function from the explanation.
 */
static void rustsecp256k1_v0_4_1_modinv64_update_de_62(rustsecp256k1_v0_4_1_modinv64_signed62 *d, rustsecp256k1_v0_4_1_modinv64_signed62 *e, const rustsecp256k1_v0_4_1_modinv64_trans2x2 *t, const rustsecp256k1_v0_4_1_modinv64_modinfo* modinfo) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4];
    const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t md, me, sd, se;
    int128_t cd, ce;
#ifdef VERIFY
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d <    modulus */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e <    modulus */
    VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(u) + rustsecp256k1_v0_4_1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */
    VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(q) + rustsecp256k1_v0_4_1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */
    VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(u) + rustsecp256k1_v0_4_1_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */
    VERIFY_CHECK((rustsecp256k1_v0_4_1_modinv64_abs(q) + rustsecp256k1_v0_4_1_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */
#endif
    /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
    sd = d4 >> 63;
    se = e4 >> 63;
    md = (u & sd) + (v & se);
    me = (q & sd) + (r & se);
    /* Begin computing t*[d,e]. */
    cd = (int128_t)u * d0 + (int128_t)v * e0;
    ce = (int128_t)q * d0 + (int128_t)r * e0;
    /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */
    md -= (modinfo->modulus_inv62 * (uint64_t)cd + md) & M62;
    me -= (modinfo->modulus_inv62 * (uint64_t)ce + me) & M62;
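    /* Sketch of the algebra (assuming modulus_inv62 holds the inverse of the modulus modulo 2^62,
     * as documented in modinv64.h): modulo 2^62 the update above turns md into
     * md - (cd/modulus + md) = -cd/modulus, so cd + modulus*md == 0 (mod 2^62), and likewise for
     * ce and me. Only md,me mod 2^62 matter for this cancellation; the [u,q]/[v,r] offsets added
     * above for negative d,e therefore do not disturb it. */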
    /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */
    cd += (int128_t)modinfo->modulus.v[0] * md;
    ce += (int128_t)modinfo->modulus.v[0] * me;
    /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */
    VERIFY_CHECK(((int64_t)cd & M62) == 0); cd >>= 62;
    VERIFY_CHECK(((int64_t)ce & M62) == 0); ce >>= 62;
    /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */
    cd += (int128_t)u * d1 + (int128_t)v * e1;
    ce += (int128_t)q * d1 + (int128_t)r * e1;
    if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */
        cd += (int128_t)modinfo->modulus.v[1] * md;
        ce += (int128_t)modinfo->modulus.v[1] * me;
    }
    d->v[0] = (int64_t)cd & M62; cd >>= 62;
    e->v[0] = (int64_t)ce & M62; ce >>= 62;
    /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */
    cd += (int128_t)u * d2 + (int128_t)v * e2;
    ce += (int128_t)q * d2 + (int128_t)r * e2;
    if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. */
        cd += (int128_t)modinfo->modulus.v[2] * md;
        ce += (int128_t)modinfo->modulus.v[2] * me;
    }
    d->v[1] = (int64_t)cd & M62; cd >>= 62;
    e->v[1] = (int64_t)ce & M62; ce >>= 62;
    /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */
    cd += (int128_t)u * d3 + (int128_t)v * e3;
    ce += (int128_t)q * d3 + (int128_t)r * e3;
    if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. */
        cd += (int128_t)modinfo->modulus.v[3] * md;
        ce += (int128_t)modinfo->modulus.v[3] * me;
    }
    d->v[2] = (int64_t)cd & M62; cd >>= 62;
    e->v[2] = (int64_t)ce & M62; ce >>= 62;
    /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */
    cd += (int128_t)u * d4 + (int128_t)v * e4;
    ce += (int128_t)q * d4 + (int128_t)r * e4;
    cd += (int128_t)modinfo->modulus.v[4] * md;
    ce += (int128_t)modinfo->modulus.v[4] * me;
    d->v[3] = (int64_t)cd & M62; cd >>= 62;
    e->v[3] = (int64_t)ce & M62; ce >>= 62;
    /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
    d->v[4] = (int64_t)cd;
    e->v[4] = (int64_t)ce;
#ifdef VERIFY
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d <    modulus */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e <    modulus */
#endif
}

/* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62.
 *
 * This implements the update_fg function from the explanation.
 */
static void rustsecp256k1_v0_4_1_modinv64_update_fg_62(rustsecp256k1_v0_4_1_modinv64_signed62 *f, rustsecp256k1_v0_4_1_modinv64_signed62 *g, const rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4];
    const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int128_t cf, cg;
    /* Start computing t*[f,g]. */
    cf = (int128_t)u * f0 + (int128_t)v * g0;
    cg = (int128_t)q * f0 + (int128_t)r * g0;
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62;
    VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62;
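    /* Why those bottom bits are zero (sketch): the divsteps functions above were run on these same
     * bottom limbs f0,g0 and maintain u*f0 + v*g0 == (final f) << 62 and
     * q*f0 + r*g0 == (final g) << 62 as 64-bit values, while f == f0 and g == g0 (mod 2^62), so
     * u*f + v*g and q*f + r*g are multiples of 2^62. */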
    /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */
    cf += (int128_t)u * f1 + (int128_t)v * g1;
    cg += (int128_t)q * f1 + (int128_t)r * g1;
    f->v[0] = (int64_t)cf & M62; cf >>= 62;
    g->v[0] = (int64_t)cg & M62; cg >>= 62;
    /* Compute limb 2 of t*[f,g], and store it as output limb 1. */
    cf += (int128_t)u * f2 + (int128_t)v * g2;
    cg += (int128_t)q * f2 + (int128_t)r * g2;
    f->v[1] = (int64_t)cf & M62; cf >>= 62;
    g->v[1] = (int64_t)cg & M62; cg >>= 62;
    /* Compute limb 3 of t*[f,g], and store it as output limb 2. */
    cf += (int128_t)u * f3 + (int128_t)v * g3;
    cg += (int128_t)q * f3 + (int128_t)r * g3;
    f->v[2] = (int64_t)cf & M62; cf >>= 62;
    g->v[2] = (int64_t)cg & M62; cg >>= 62;
    /* Compute limb 4 of t*[f,g], and store it as output limb 3. */
    cf += (int128_t)u * f4 + (int128_t)v * g4;
    cg += (int128_t)q * f4 + (int128_t)r * g4;
    f->v[3] = (int64_t)cf & M62; cf >>= 62;
    g->v[3] = (int64_t)cg & M62; cg >>= 62;
    /* What remains is limb 5 of t*[f,g]; store it as output limb 4. */
    f->v[4] = (int64_t)cf;
    g->v[4] = (int64_t)cg;
}

/* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps.
 *
 * Version that operates on a variable number of limbs in f and g.
 *
 * This implements the update_fg function from the explanation.
 */
static void rustsecp256k1_v0_4_1_modinv64_update_fg_62_var(int len, rustsecp256k1_v0_4_1_modinv64_signed62 *f, rustsecp256k1_v0_4_1_modinv64_signed62 *g, const rustsecp256k1_v0_4_1_modinv64_trans2x2 *t) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t fi, gi;
    int128_t cf, cg;
    int i;
    VERIFY_CHECK(len > 0);
    /* Start computing t*[f,g]. */
    fi = f->v[0];
    gi = g->v[0];
    cf = (int128_t)u * fi + (int128_t)v * gi;
    cg = (int128_t)q * fi + (int128_t)r * gi;
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62;
    VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62;
    /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting
     * down by 62 bits). */
    for (i = 1; i < len; ++i) {
        fi = f->v[i];
        gi = g->v[i];
        cf += (int128_t)u * fi + (int128_t)v * gi;
        cg += (int128_t)q * fi + (int128_t)r * gi;
        f->v[i - 1] = (int64_t)cf & M62; cf >>= 62;
        g->v[i - 1] = (int64_t)cg & M62; cg >>= 62;
    }
    /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */
    f->v[len - 1] = (int64_t)cf;
    g->v[len - 1] = (int64_t)cg;
}

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */
static void rustsecp256k1_v0_4_1_modinv64(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */
    rustsecp256k1_v0_4_1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    rustsecp256k1_v0_4_1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    rustsecp256k1_v0_4_1_modinv64_signed62 f = modinfo->modulus;
    rustsecp256k1_v0_4_1_modinv64_signed62 g = *x;
    int i;
    int64_t zeta = -1; /* zeta = -(delta+1/2); delta starts at 1/2. */

    /* Do 10 iterations of 59 divsteps each = 590 divsteps. This suffices for 256-bit inputs. */
    for (i = 0; i < 10; ++i) {
        /* Compute transition matrix and new zeta after 59 divsteps. */
        rustsecp256k1_v0_4_1_modinv64_trans2x2 t;
        zeta = rustsecp256k1_v0_4_1_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        rustsecp256k1_v0_4_1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
#ifdef VERIFY
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g <  modulus */
#endif
        rustsecp256k1_v0_4_1_modinv64_update_fg_62(&f, &g, &t);
#ifdef VERIFY
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g <  modulus */
#endif
    }

    /* At this point sufficient iterations have been performed that g must have reached 0
     * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
     * values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
    /* g == 0 */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
                   rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
#endif

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    rustsecp256k1_v0_4_1_modinv64_normalize_62(&d, f.v[4], modinfo);
    *x = d;
}
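
/* Why d ends up holding the inverse (sketch): both rustsecp256k1_v0_4_1_modinv64 above and
 * rustsecp256k1_v0_4_1_modinv64_var below maintain the invariants
 *
 *     f == d * (input x)  (mod modulus)
 *     g == e * (input x)  (mod modulus)
 *
 * which hold initially (d=0, f=modulus and e=1, g=x) and are preserved because each iteration
 * applies the same transition matrix, divided by 2^62, to [f,g] and (modulo the modulus) to
 * [d,e]. Once g = 0 and f = +/-1, the first invariant gives d == +/-1/x (mod modulus). */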

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */
static void rustsecp256k1_v0_4_1_modinv64_var(rustsecp256k1_v0_4_1_modinv64_signed62 *x, const rustsecp256k1_v0_4_1_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */
    rustsecp256k1_v0_4_1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    rustsecp256k1_v0_4_1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    rustsecp256k1_v0_4_1_modinv64_signed62 f = modinfo->modulus;
    rustsecp256k1_v0_4_1_modinv64_signed62 g = *x;
#ifdef VERIFY
    int i = 0;
#endif
    int j, len = 5;
    int64_t eta = -1; /* eta = -delta; delta is initially 1 */
    int64_t cond, fn, gn;

    /* Do iterations of 62 divsteps each until g=0. */
    while (1) {
        /* Compute transition matrix and new eta after 62 divsteps. */
        rustsecp256k1_v0_4_1_modinv64_trans2x2 t;
        eta = rustsecp256k1_v0_4_1_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        rustsecp256k1_v0_4_1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
#ifdef VERIFY
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g <  modulus */
#endif
        rustsecp256k1_v0_4_1_modinv64_update_fg_62_var(len, &f, &g, &t);
        /* If the bottom limb of g is zero, there is a chance that g=0. */
        if (g.v[0] == 0) {
            cond = 0;
            /* Check if the other limbs are also 0. */
            for (j = 1; j < len; ++j) {
                cond |= g.v[j];
            }
            /* If so, we're done. */
            if (cond == 0) break;
        }

        /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */
        fn = f.v[len - 1];
        gn = g.v[len - 1];
        cond = ((int64_t)len - 2) >> 63;
        cond |= fn ^ (fn >> 63);
        cond |= gn ^ (gn >> 63);
        /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */
        if (cond == 0) {
            f.v[len - 2] |= (uint64_t)fn << 62;
            g.v[len - 2] |= (uint64_t)gn << 62;
            --len;
        }
#ifdef VERIFY
        VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g <  modulus */
#endif
    }

    /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
     * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
    /* g == 0 */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
    VERIFY_CHECK(rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
                   rustsecp256k1_v0_4_1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
#endif

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    rustsecp256k1_v0_4_1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
    *x = d;
}

#endif /* SECP256K1_MODINV64_IMPL_H */