/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_FIELD_REPR_IMPL_H
#define SECP256K1_FIELD_REPR_IMPL_H

#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif

#include "util.h"
#include "field.h"

#if defined(USE_ASM_X86_64)
#include "field_5x52_asm_impl.h"
#else
#include "field_5x52_int128_impl.h"
#endif
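
/* Both included headers provide secp256k1_fe_mul_inner and secp256k1_fe_sqr_inner:
 * the x86_64 assembly version when USE_ASM_X86_64 is defined, otherwise the portable
 * version based on 128-bit integer arithmetic. */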

/** Implements arithmetic modulo FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F,
 *  represented as 5 uint64_t's in base 2^52. The values are allowed to contain >52 bits each. In particular,
 *  each FieldElem has a 'magnitude' associated with it. Internally, a magnitude M means each element
 *  is at most M*(2^53-1), except the most significant one, which is limited to M*(2^49-1). All operations
 *  accept any input with magnitude at most M, and have different rules for propagating magnitude to their
 *  output.
 */
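
/* Limb layout (for a fully reduced value): n[0] holds bits 0..51, n[1] bits 52..103,
 * n[2] bits 104..155, n[3] bits 156..207, and n[4] bits 208..255 (so only 48 bits).
 * Since p = 2^256 - 0x1000003D1, any excess above bit 255 can be folded back into the
 * low limb by multiplying it by 0x1000003D1, which is what the reduction passes below do. */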

#ifdef VERIFY
static void secp256k1_fe_verify(const secp256k1_fe *a) {
    const uint64_t *d = a->n;
    int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
    /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
    r &= (d[0] <= 0xFFFFFFFFFFFFFULL * m);
    r &= (d[1] <= 0xFFFFFFFFFFFFFULL * m);
    r &= (d[2] <= 0xFFFFFFFFFFFFFULL * m);
    r &= (d[3] <= 0xFFFFFFFFFFFFFULL * m);
    r &= (d[4] <= 0x0FFFFFFFFFFFFULL * m);
    r &= (a->magnitude >= 0);
    r &= (a->magnitude <= 2048);
    if (a->normalized) {
        r &= (a->magnitude <= 1);
        if (r && (d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) {
            r &= (d[0] < 0xFFFFEFFFFFC2FULL);
        }
    }
    VERIFY_CHECK(r == 1);
}
#endif

static void secp256k1_fe_normalize(secp256k1_fe *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    uint64_t m;
    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
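    /* x now holds the part of the value at or above 2^256; since 2^256 == 0x1000003D1 (mod p),
     * the first pass below folds it back into t0. */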

    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    /* At most a single final reduction is needed; check if the value is >= the field characteristic */
    x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
        & (t0 >= 0xFFFFEFFFFFC2FULL));
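    /* x is 1 exactly when the value is still >= p: either a carry reached bit 256, or
     * t1..t4 match the corresponding limbs of p (m is the AND of t1..t3) and t0 is at
     * least p's lowest limb. */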

    /* Apply the final reduction (for constant-time behaviour, we do it always) */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;

    /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
    VERIFY_CHECK(t4 >> 48 == x);

    /* Mask off the possible multiple of 2^256 from the final reduction */
    t4 &= 0x0FFFFFFFFFFFFULL;

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;

#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
}

static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;

    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;

#ifdef VERIFY
    r->magnitude = 1;
    secp256k1_fe_verify(r);
#endif
}

static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    uint64_t m;
    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;

    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    /* At most a single final reduction is needed; check if the value is >= the field characteristic */
    x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
        & (t0 >= 0xFFFFEFFFFFC2FULL));

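    /* Unlike the constant-time secp256k1_fe_normalize above, the final reduction is only
     * performed when it is actually needed (this function may run in variable time). */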
    if (x) {
        t0 += 0x1000003D1ULL;
        t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
        t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
        t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
        t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;

        /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
        VERIFY_CHECK(t4 >> 48 == x);

        /* Mask off the possible multiple of 2^256 from the final reduction */
        t4 &= 0x0FFFFFFFFFFFFULL;
    }

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;

#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
}

static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    uint64_t z0, z1;

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;

    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; z0  = t0; z1  = t0 ^ 0x1000003D0ULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
                                                z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
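    /* z0 is zero iff all limbs are zero. The XOR constants are chosen so that
     * (t0 ^ 0x1000003D0ULL) and (t4 ^ 0xF000000000000ULL) are all ones exactly when t0 and t4
     * equal the corresponding limbs of p; hence z1 is all ones iff the raw value is exactly p. */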

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}

static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
    uint64_t t0, t1, t2, t3, t4;
    uint64_t z0, z1;
    uint64_t x;

    t0 = r->n[0];
    t4 = r->n[4];

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    x = t4 >> 48;

    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x1000003D1ULL;

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    z0 = t0 & 0xFFFFFFFFFFFFFULL;
    z1 = z0 ^ 0x1000003D0ULL;

    /* Fast return path should catch the majority of cases */
    if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) {
        return 0;
    }

    t1 = r->n[1];
    t2 = r->n[2];
    t3 = r->n[3];

    t4 &= 0x0FFFFFFFFFFFFULL;

    t1 += (t0 >> 52);
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
                                                z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}

SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
    r->n[0] = a;
    r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
}

SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
    const uint64_t *t = a->n;
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    secp256k1_fe_verify(a);
#endif
    return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
}

SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    secp256k1_fe_verify(a);
#endif
    return a->n[0] & 1;
}

SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
    int i;
#ifdef VERIFY
    a->magnitude = 0;
    a->normalized = 1;
#endif
    for (i=0; i<5; i++) {
        a->n[i] = 0;
    }
}

static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
    int i;
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    VERIFY_CHECK(b->normalized);
    secp256k1_fe_verify(a);
    secp256k1_fe_verify(b);
#endif
    for (i = 4; i >= 0; i--) {
        if (a->n[i] > b->n[i]) {
            return 1;
        }
        if (a->n[i] < b->n[i]) {
            return -1;
        }
    }
    return 0;
}

static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
    r->n[0] = (uint64_t)a[31]
            | ((uint64_t)a[30] << 8)
            | ((uint64_t)a[29] << 16)
            | ((uint64_t)a[28] << 24)
            | ((uint64_t)a[27] << 32)
            | ((uint64_t)a[26] << 40)
            | ((uint64_t)(a[25] & 0xF)  << 48);
    r->n[1] = (uint64_t)((a[25] >> 4) & 0xF)
            | ((uint64_t)a[24] << 4)
            | ((uint64_t)a[23] << 12)
            | ((uint64_t)a[22] << 20)
            | ((uint64_t)a[21] << 28)
            | ((uint64_t)a[20] << 36)
            | ((uint64_t)a[19] << 44);
    r->n[2] = (uint64_t)a[18]
            | ((uint64_t)a[17] << 8)
            | ((uint64_t)a[16] << 16)
            | ((uint64_t)a[15] << 24)
            | ((uint64_t)a[14] << 32)
            | ((uint64_t)a[13] << 40)
            | ((uint64_t)(a[12] & 0xF) << 48);
    r->n[3] = (uint64_t)((a[12] >> 4) & 0xF)
            | ((uint64_t)a[11] << 4)
            | ((uint64_t)a[10] << 12)
            | ((uint64_t)a[9]  << 20)
            | ((uint64_t)a[8]  << 28)
            | ((uint64_t)a[7]  << 36)
            | ((uint64_t)a[6]  << 44);
    r->n[4] = (uint64_t)a[5]
            | ((uint64_t)a[4] << 8)
            | ((uint64_t)a[3] << 16)
            | ((uint64_t)a[2] << 24)
            | ((uint64_t)a[1] << 32)
            | ((uint64_t)a[0] << 40);
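    /* Reject encodings of values >= p: the limbs of p are 0x0FFFFFFFFFFFF (top),
     * 0xFFFFFFFFFFFFF (middle three) and 0xFFFFEFFFFFC2F (bottom). */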
    if (r->n[4] == 0x0FFFFFFFFFFFFULL && (r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL && r->n[0] >= 0xFFFFEFFFFFC2FULL) {
        return 0;
    }
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
    return 1;
}

/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    secp256k1_fe_verify(a);
#endif
    r[0] = (a->n[4] >> 40) & 0xFF;
    r[1] = (a->n[4] >> 32) & 0xFF;
    r[2] = (a->n[4] >> 24) & 0xFF;
    r[3] = (a->n[4] >> 16) & 0xFF;
    r[4] = (a->n[4] >> 8) & 0xFF;
    r[5] = a->n[4] & 0xFF;
    r[6] = (a->n[3] >> 44) & 0xFF;
    r[7] = (a->n[3] >> 36) & 0xFF;
    r[8] = (a->n[3] >> 28) & 0xFF;
    r[9] = (a->n[3] >> 20) & 0xFF;
    r[10] = (a->n[3] >> 12) & 0xFF;
    r[11] = (a->n[3] >> 4) & 0xFF;
    r[12] = ((a->n[2] >> 48) & 0xF) | ((a->n[3] & 0xF) << 4);
    r[13] = (a->n[2] >> 40) & 0xFF;
    r[14] = (a->n[2] >> 32) & 0xFF;
    r[15] = (a->n[2] >> 24) & 0xFF;
    r[16] = (a->n[2] >> 16) & 0xFF;
    r[17] = (a->n[2] >> 8) & 0xFF;
    r[18] = a->n[2] & 0xFF;
    r[19] = (a->n[1] >> 44) & 0xFF;
    r[20] = (a->n[1] >> 36) & 0xFF;
    r[21] = (a->n[1] >> 28) & 0xFF;
    r[22] = (a->n[1] >> 20) & 0xFF;
    r[23] = (a->n[1] >> 12) & 0xFF;
    r[24] = (a->n[1] >> 4) & 0xFF;
    r[25] = ((a->n[0] >> 48) & 0xF) | ((a->n[1] & 0xF) << 4);
    r[26] = (a->n[0] >> 40) & 0xFF;
    r[27] = (a->n[0] >> 32) & 0xFF;
    r[28] = (a->n[0] >> 24) & 0xFF;
    r[29] = (a->n[0] >> 16) & 0xFF;
    r[30] = (a->n[0] >> 8) & 0xFF;
    r[31] = a->n[0] & 0xFF;
}

SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) {
#ifdef VERIFY
    VERIFY_CHECK(a->magnitude <= m);
    secp256k1_fe_verify(a);
#endif
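    /* Subtract each limb of a from the corresponding limb of 2*(m+1)*p. Since a has
     * magnitude at most m, no limb can underflow; the result is congruent to -a mod p
     * and has magnitude m + 1. */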
    r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
    r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
    r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2];
    r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3];
    r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4];
#ifdef VERIFY
    r->magnitude = m + 1;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}

SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
    r->n[0] *= a;
    r->n[1] *= a;
    r->n[2] *= a;
    r->n[3] *= a;
    r->n[4] *= a;
#ifdef VERIFY
    r->magnitude *= a;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}

SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
#ifdef VERIFY
    secp256k1_fe_verify(a);
#endif
    r->n[0] += a->n[0];
    r->n[1] += a->n[1];
    r->n[2] += a->n[2];
    r->n[3] += a->n[3];
    r->n[4] += a->n[4];
#ifdef VERIFY
    r->magnitude += a->magnitude;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}

static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
#ifdef VERIFY
    VERIFY_CHECK(a->magnitude <= 8);
    VERIFY_CHECK(b->magnitude <= 8);
    secp256k1_fe_verify(a);
    secp256k1_fe_verify(b);
    VERIFY_CHECK(r != b);
    VERIFY_CHECK(a != b);
#endif
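    /* secp256k1_fe_mul_inner computes the product and reduces it, leaving a result with
     * magnitude 1 that is not necessarily fully normalized. */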
    secp256k1_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}

static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->magnitude <= 8);
    secp256k1_fe_verify(a);
#endif
    secp256k1_fe_sqr_inner(r->n, a->n);
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}

static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
    uint64_t mask0, mask1;
    mask0 = flag + ~((uint64_t)0);
    mask1 = ~mask0;
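    /* mask0 is all ones when flag is 0 and all zeros when flag is 1 (flag + ~0 wraps to
     * flag - 1 mod 2^64), so the limb-wise selection below is branch-free. */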
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
#ifdef VERIFY
    if (a->magnitude > r->magnitude) {
        r->magnitude = a->magnitude;
    }
    r->normalized &= a->normalized;
#endif
}

static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
    uint64_t mask0, mask1;
    mask0 = flag + ~((uint64_t)0);
    mask1 = ~mask0;
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
}

static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
#endif
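    /* Pack the five 52-bit limbs into four 64-bit words, with bits 0..255 laid out contiguously. */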
    r->n[0] = a->n[0] | a->n[1] << 52;
    r->n[1] = a->n[1] >> 12 | a->n[2] << 40;
    r->n[2] = a->n[2] >> 24 | a->n[3] << 28;
    r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
}

static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
    r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
    r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
    r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);
    r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL);
    r->n[4] = a->n[3] >> 16;
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
#endif
}

#endif /* SECP256K1_FIELD_REPR_IMPL_H */