/***********************************************************************
 * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell       *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_ECMULT_GEN_IMPL_H
#define SECP256K1_ECMULT_GEN_IMPL_H

#include "util.h"
#include "scalar.h"
#include "group.h"
#include "ecmult_gen.h"
#include "hash_impl.h"
#ifdef USE_ECMULT_STATIC_PRECOMPUTATION
#include "ecmult_static_context.h"
#endif

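/* Size of the per-context precomputed table when it is built at runtime.
 * With USE_ECMULT_STATIC_PRECOMPUTATION the table is compiled in instead,
 * so no preallocated space is needed. */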
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((rustsecp256k1_v0_4_1_ecmult_gen_context*) NULL)->prec));
#else
    static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0;
#endif

static void rustsecp256k1_v0_4_1_ecmult_gen_context_init(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx) {
    ctx->prec = NULL;
}

static void rustsecp256k1_v0_4_1_ecmult_gen_context_build(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, void **prealloc) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    rustsecp256k1_v0_4_1_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G];
    rustsecp256k1_v0_4_1_gej gj;
    rustsecp256k1_v0_4_1_gej nums_gej;
    int i, j;
    size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
    void* const base = *prealloc;
#endif

    if (ctx->prec != NULL) {
        return;
    }
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
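    /* Carve the table out of the caller-provided preallocated buffer. */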
    ctx->prec = (rustsecp256k1_v0_4_1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])manual_alloc(prealloc, prealloc_size, base, prealloc_size);

    /* get the generator */
    rustsecp256k1_v0_4_1_gej_set_ge(&gj, &rustsecp256k1_v0_4_1_ge_const_g);

    /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
    {
        static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
        rustsecp256k1_v0_4_1_fe nums_x;
        rustsecp256k1_v0_4_1_ge nums_ge;
        int r;
        r = rustsecp256k1_v0_4_1_fe_set_b32(&nums_x, nums_b32);
        (void)r;
        VERIFY_CHECK(r);
        r = rustsecp256k1_v0_4_1_ge_set_xo_var(&nums_ge, &nums_x, 0);
        (void)r;
        VERIFY_CHECK(r);
        rustsecp256k1_v0_4_1_gej_set_ge(&nums_gej, &nums_ge);
        /* Add G to make the bits in x uniformly distributed. */
        rustsecp256k1_v0_4_1_gej_add_ge_var(&nums_gej, &nums_gej, &rustsecp256k1_v0_4_1_ge_const_g, NULL);
    }
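    /* Each table row built below is offset by a distinct multiple of this
     * NUMS point. The offsets are chosen to sum to zero across all rows, so
     * they cancel in the final result, while making the table entries and
     * intermediate sums unpredictable multiples of G; in particular this
     * helps avoid degenerate cases such as the point at infinity during the
     * additions in rustsecp256k1_v0_4_1_ecmult_gen. */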

    /* compute prec. */
    {
        rustsecp256k1_v0_4_1_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */
        rustsecp256k1_v0_4_1_gej gbase;
        rustsecp256k1_v0_4_1_gej numsbase;
        gbase = gj; /* PREC_G^j * G */
        numsbase = nums_gej; /* 2^j * nums. */
        for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
            /* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */
            precj[j*ECMULT_GEN_PREC_G] = numsbase;
            for (i = 1; i < ECMULT_GEN_PREC_G; i++) {
                rustsecp256k1_v0_4_1_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL);
            }
            /* Multiply gbase by PREC_G. */
            for (i = 0; i < ECMULT_GEN_PREC_B; i++) {
                rustsecp256k1_v0_4_1_gej_double_var(&gbase, &gbase, NULL);
            }
            /* Multiply numsbase by 2. */
            rustsecp256k1_v0_4_1_gej_double_var(&numsbase, &numsbase, NULL);
            if (j == ECMULT_GEN_PREC_N - 2) {
                /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
                rustsecp256k1_v0_4_1_gej_neg(&numsbase, &numsbase);
                rustsecp256k1_v0_4_1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
            }
        }
        rustsecp256k1_v0_4_1_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G);
    }
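    /* The table now satisfies, with N = ECMULT_GEN_PREC_N, B = ECMULT_GEN_PREC_B
     * and PREC_G = 2^B:
     *   prec[j][i]   = i * PREC_G^j * G + 2^j * nums           for 0 <= j < N-1,
     *   prec[N-1][i] = i * PREC_G^(N-1) * G + (1 - 2^(N-1)) * nums.
     * For any B-bit digits n_0..n_(N-1) of a scalar n, the nums terms
     * telescope to zero:
     *   sum_j prec[j][n_j] = n*G + (2^(N-1) - 1 + 1 - 2^(N-1)) * nums = n*G. */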
    for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
        for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
            rustsecp256k1_v0_4_1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]);
        }
    }
#else
    (void)prealloc;
    ctx->prec = (rustsecp256k1_v0_4_1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])rustsecp256k1_v0_4_1_ecmult_static_context;
#endif
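    /* Set up deterministic default blinding (no entropy is available yet);
     * callers can later supply a seed to obtain unpredictable blinding. */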
    rustsecp256k1_v0_4_1_ecmult_gen_blind(ctx, NULL);
}

static int rustsecp256k1_v0_4_1_ecmult_gen_context_is_built(const rustsecp256k1_v0_4_1_ecmult_gen_context* ctx) {
    return ctx->prec != NULL;
}

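/* Fix up the interior prec pointer after the caller has memcpy'd a
 * preallocated context, so that it points into dst's buffer rather than
 * src's. */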
static void rustsecp256k1_v0_4_1_ecmult_gen_context_finalize_memcpy(rustsecp256k1_v0_4_1_ecmult_gen_context *dst, const rustsecp256k1_v0_4_1_ecmult_gen_context *src) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    if (src->prec != NULL) {
        /* We cast to void* first to suppress a -Wcast-align warning. */
        dst->prec = (rustsecp256k1_v0_4_1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
    }
#else
    (void)dst, (void)src;
#endif
}

static void rustsecp256k1_v0_4_1_ecmult_gen_context_clear(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx) {
    rustsecp256k1_v0_4_1_scalar_clear(&ctx->blind);
    rustsecp256k1_v0_4_1_gej_clear(&ctx->initial);
    ctx->prec = NULL;
}

static void rustsecp256k1_v0_4_1_ecmult_gen(const rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, rustsecp256k1_v0_4_1_gej *r, const rustsecp256k1_v0_4_1_scalar *gn) {
    rustsecp256k1_v0_4_1_ge add;
    rustsecp256k1_v0_4_1_ge_storage adds;
    rustsecp256k1_v0_4_1_scalar gnb;
    int bits;
    int i, j;
    memset(&adds, 0, sizeof(adds));
    *r = ctx->initial;
    /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
    rustsecp256k1_v0_4_1_scalar_add(&gnb, gn, &ctx->blind);
    add.infinity = 0;
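    /* With ctx->blind = -b and ctx->initial = b*G (randomized below in
     * ..._ecmult_gen_blind), the loop adds one table entry per B-bit window
     * of gnb, so r = b*G + sum_j prec[j][bits_j] = b*G + (gn - b)*G = gn*G. */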
    for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
        bits = rustsecp256k1_v0_4_1_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B);
        for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
            /** This uses a conditional move to avoid any secret data in array indexes.
             *   _Any_ use of secret indexes has been demonstrated to result in timing
             *   sidechannels, even when the cache-line access patterns are uniform.
             *  See also:
             *   "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
             *    (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
             *   "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
             *    by Dag Arne Osvik, Adi Shamir, and Eran Tromer
             *    (https://www.tau.ac.il/~tromer/papers/cache.pdf)
             */
            rustsecp256k1_v0_4_1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
        }
        rustsecp256k1_v0_4_1_ge_from_storage(&add, &adds);
        rustsecp256k1_v0_4_1_gej_add_ge(r, r, &add);
    }
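    /* Erase stack data that depends on the secret scalar. */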
    bits = 0;
    rustsecp256k1_v0_4_1_ge_clear(&add);
    rustsecp256k1_v0_4_1_scalar_clear(&gnb);
}

/* Setup blinding values for rustsecp256k1_v0_4_1_ecmult_gen. */
static void rustsecp256k1_v0_4_1_ecmult_gen_blind(rustsecp256k1_v0_4_1_ecmult_gen_context *ctx, const unsigned char *seed32) {
    rustsecp256k1_v0_4_1_scalar b;
    rustsecp256k1_v0_4_1_gej gb;
    rustsecp256k1_v0_4_1_fe s;
    unsigned char nonce32[32];
    rustsecp256k1_v0_4_1_rfc6979_hmac_sha256 rng;
    int overflow;
    unsigned char keydata[64] = {0};
    if (seed32 == NULL) {
        /* When seed is NULL, reset the initial point and blinding value. */
        rustsecp256k1_v0_4_1_gej_set_ge(&ctx->initial, &rustsecp256k1_v0_4_1_ge_const_g);
        rustsecp256k1_v0_4_1_gej_neg(&ctx->initial, &ctx->initial);
        rustsecp256k1_v0_4_1_scalar_set_int(&ctx->blind, 1);
    }
    /* The prior blinding value (if not reset) is chained forward by including it in the hash. */
    rustsecp256k1_v0_4_1_scalar_get_b32(nonce32, &ctx->blind);
    /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data,
     *   and guards against weak or adversarial seeds.  This is a simpler and safer interface than
     *   asking the caller for blinding values directly and expecting them to retry on failure.
     */
    memcpy(keydata, nonce32, 32);
    if (seed32 != NULL) {
        memcpy(keydata + 32, seed32, 32);
    }
    rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
    memset(keydata, 0, sizeof(keydata));
    /* Accept unobservably small non-uniformity. */
    rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
    overflow = !rustsecp256k1_v0_4_1_fe_set_b32(&s, nonce32);
    overflow |= rustsecp256k1_v0_4_1_fe_is_zero(&s);
    rustsecp256k1_v0_4_1_fe_cmov(&s, &rustsecp256k1_v0_4_1_fe_one, overflow);
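    /* (s is forced to 1 in the negligible case that the hash output is not a
     *  valid nonzero field element; rescaling by 1 leaves the point unchanged
     *  but keeps the code path uniform.) */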
    /* Randomize the projection to defend against multiplier sidechannels. */
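    /* (Rescaling by s maps the Jacobian representation (X : Y : Z) to
     *  (s^2*X : s^3*Y : s*Z), a fresh encoding of the same affine point.) */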
    rustsecp256k1_v0_4_1_gej_rescale(&ctx->initial, &s);
    rustsecp256k1_v0_4_1_fe_clear(&s);
    rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
    rustsecp256k1_v0_4_1_scalar_set_b32(&b, nonce32, NULL);
    /* A blinding value of 0 works, but would undermine the projection hardening. */
    rustsecp256k1_v0_4_1_scalar_cmov(&b, &rustsecp256k1_v0_4_1_scalar_one, rustsecp256k1_v0_4_1_scalar_is_zero(&b));
    rustsecp256k1_v0_4_1_rfc6979_hmac_sha256_finalize(&rng);
    memset(nonce32, 0, 32);
    rustsecp256k1_v0_4_1_ecmult_gen(ctx, &gb, &b);
    rustsecp256k1_v0_4_1_scalar_negate(&b, &b);
    ctx->blind = b;
    ctx->initial = gb;
    rustsecp256k1_v0_4_1_scalar_clear(&b);
    rustsecp256k1_v0_4_1_gej_clear(&gb);
}
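/* A minimal usage sketch (hypothetical caller; in this library the public
 * context-randomization API in secp256k1.c is what forwards a user-supplied
 * seed here):
 *
 *     unsigned char seed[32];
 *     // ... fill seed with 32 bytes of fresh randomness ...
 *     rustsecp256k1_v0_4_1_ecmult_gen_blind(ctx, seed);
 *
 * Passing seed32 == NULL re-derives a deterministic default blinding with no
 * secret entropy, as done once at context build time. */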

#endif /* SECP256K1_ECMULT_GEN_IMPL_H */