/* Copyright 2016 Brian Smith.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#include "../../limbs/limbs.h"

#include <assert.h>
#include <string.h>

#include "ecp_nistz384.h"
#include "../bn/internal.h"
#include "../../internal.h"

#include "../../limbs/limbs.inl"

/* XXX: Here we assume that the conversion from |Carry| to |Limb| is
 * constant-time, but we haven't verified that assumption. TODO: Fix it so
 * we don't need to make that assumption. */


typedef Limb Elem[P384_LIMBS];
typedef Limb ScalarMont[P384_LIMBS];
typedef Limb Scalar[P384_LIMBS];


/* Prototypes to avoid -Wmissing-prototypes warnings. */
void GFp_p384_elem_add(Elem r, const Elem a, const Elem b);
void GFp_p384_elem_sub(Elem r, const Elem a, const Elem b);
void GFp_p384_elem_div_by_2(Elem r, const Elem a);
void GFp_p384_elem_mul_mont(Elem r, const Elem a, const Elem b);
void GFp_p384_elem_neg(Elem r, const Elem a);
void GFp_p384_scalar_inv_to_mont(ScalarMont r, const Scalar a);
void GFp_p384_scalar_mul_mont(ScalarMont r, const ScalarMont a,
                              const ScalarMont b);


static const BN_ULONG Q[P384_LIMBS] = {
  TOBN(0x00000000, 0xffffffff),
  TOBN(0xffffffff, 0x00000000),
  TOBN(0xffffffff, 0xfffffffe),
  TOBN(0xffffffff, 0xffffffff),
  TOBN(0xffffffff, 0xffffffff),
  TOBN(0xffffffff, 0xffffffff),
};
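
/* The limbs of |Q| above (least significant first) encode the P-384 field
 * prime q = 2**384 - 2**128 - 2**96 + 2**32 - 1. A sanity-check sketch,
 * assuming 64-bit limbs:
 *
 * ```python
 * limbs = [0x00000000ffffffff, 0xffffffff00000000, 0xfffffffffffffffe,
 *          0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]
 * q = sum(limb << (64 * i) for i, limb in enumerate(limbs))
 * assert q == 2**384 - 2**128 - 2**96 + 2**32 - 1
 * ```
 */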

static const BN_ULONG N[P384_LIMBS] = {
  TOBN(0xecec196a, 0xccc52973),
  TOBN(0x581a0db2, 0x48b0a77a),
  TOBN(0xc7634d81, 0xf4372ddf),
  TOBN(0xffffffff, 0xffffffff),
  TOBN(0xffffffff, 0xffffffff),
  TOBN(0xffffffff, 0xffffffff),
};
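
/* |N| is the order of the P-384 group. A sanity-check sketch against the
 * published value, again assuming 64-bit limbs:
 *
 * ```python
 * limbs = [0xecec196accc52973, 0x581a0db248b0a77a, 0xc7634d81f4372ddf,
 *          0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]
 * n = sum(limb << (64 * i) for i, limb in enumerate(limbs))
 * assert n == int("ffffffffffffffffffffffffffffffffffffffffffffffff"
 *                 "c7634d81f4372ddf581a0db248b0a77aecec196accc52973", 16)
 * ```
 */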


static const BN_ULONG ONE[P384_LIMBS] = {
  TOBN(0xffffffff, 1), TOBN(0, 0xffffffff), TOBN(0, 1), TOBN(0, 0), TOBN(0, 0),
  TOBN(0, 0),
};
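
/* |ONE| is 1 in the Montgomery domain, i.e. R mod q with R = 2**384. A
 * sanity-check sketch, assuming 64-bit limbs:
 *
 * ```python
 * q = 2**384 - 2**128 - 2**96 + 2**32 - 1
 * limbs = [0xffffffff00000001, 0x00000000ffffffff, 0x1, 0, 0, 0]
 * one_mont = sum(limb << (64 * i) for i, limb in enumerate(limbs))
 * assert one_mont == 2**384 % q
 * ```
 */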


/* XXX: MSVC for x86 warns when it fails to inline these functions, which it
 * probably should inline. */
#if defined(_MSC_VER) && defined(OPENSSL_X86)
#define INLINE_IF_POSSIBLE __forceinline
#else
#define INLINE_IF_POSSIBLE inline
#endif


static INLINE_IF_POSSIBLE Limb is_equal(const Elem a, const Elem b) {
  return LIMBS_equal(a, b, P384_LIMBS);
}

static INLINE_IF_POSSIBLE void copy_conditional(Elem r, const Elem a,
                                                const Limb condition) {
  for (size_t i = 0; i < P384_LIMBS; ++i) {
    r[i] = constant_time_select_w(condition, a[i], r[i]);
  }
}
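
/* `copy_conditional` relies on |condition| being either all-ones or all-zero
 * so that the select is branch-free. A Python model of that masking (Python
 * itself gives no constant-time guarantees; this only illustrates the logic):
 *
 * ```python
 * M = 2**64 - 1  # an all-ones 64-bit limb
 *
 * def copy_conditional(r, a, condition):  # condition is 0 or M
 *     return [(condition & x) | (~condition & M & y) for x, y in zip(a, r)]
 *
 * assert copy_conditional([1, 2], [3, 4], M) == [3, 4]
 * assert copy_conditional([1, 2], [3, 4], 0) == [1, 2]
 * ```
 */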


static void elem_add(Elem r, const Elem a, const Elem b) {
  LIMBS_add_mod(r, a, b, Q, P384_LIMBS);
}

static void elem_sub(Elem r, const Elem a, const Elem b) {
  LIMBS_sub_mod(r, a, b, Q, P384_LIMBS);
}

static void elem_div_by_2(Elem r, const Elem a) {
  /* Consider the case where `a` is even. Then we can shift `a` right one bit
   * and the result will still be valid because we didn't lose any bits and so
   * `(a >> 1) * 2 == a (mod q)`, which is the invariant we must satisfy.
   *
   * The remainder of this comment is considering the case where `a` is odd.
   *
   * Since `a` is odd, it isn't the case that `(a >> 1) * 2 == a (mod q)`
   * because the lowest bit is lost during the shift. For example, consider:
   *
   * ```python
   * q = 2**384 - 2**128 - 2**96 + 2**32 - 1
   * a = 2**383
   * two_a = a * 2 % q
   * assert two_a == 0x100000000ffffffffffffffff00000001
   * ```
   *
   * Notice there how `(2 * a) % q` wrapped around to a smaller odd value. When
   * we divide `two_a` by two (mod q), we need to get the value `2**383`, which
   * we obviously can't get with just a right shift.
   *
   * `q` is odd, and `a` is odd, so `a + q` is even. We could calculate
   * `(a + q) >> 1` and then reduce it mod `q`. However, then we would have to
   * keep track of an extra most significant bit. We can avoid that by instead
   * calculating `(a >> 1) + ((q + 1) >> 1)`. The `1` in `q + 1` takes the
   * place of the least significant bit of `a` that the shift discards.
   * `q + 1` is even, which means it can be shifted right without losing any
   * bits. Since `q` is odd, `q - 1` is even, so the largest odd field element
   * is `q - 2`. Thus we know that `a <= q - 2`. We know `(q + 1) >> 1` is
   * `(q + 1) / 2` since `q + 1` is even. The value of `a >> 1` is
   * `(a - 1) / 2` since the shift drops the least significant bit of `a`,
   * which is 1. Thus:
   *
   * sum  =  ((q + 1) >> 1) + (a >> 1)
   *      =  (q + 1)/2 + (a - 1)/2      (substituting the identities above)
   *     <=  (q + 1)/2 + (q - 2 - 1)/2  (substituting a <= q - 2)
   *      =  (q + 1)/2 + (q - 3)/2      (simplifying)
   *      =  (q + 1 + q - 3)/2          (combining over the common denominator)
   *      =  (2q - 2)/2                 (simplifying)
   *      =  q - 1                      (simplifying)
   *
   * Thus, no reduction of the sum mod `q` is necessary. */

  Limb is_odd = constant_time_is_nonzero_w(a[0] & 1);

  /* r = a >> 1. */
  Limb carry = a[P384_LIMBS - 1] & 1;
  r[P384_LIMBS - 1] = a[P384_LIMBS - 1] >> 1;
  for (size_t i = 1; i < P384_LIMBS; ++i) {
    /* No `& 1` is needed here; the `<< (LIMB_BITS - 1)` below discards all
     * but the least significant bit anyway. */
    Limb new_carry = a[P384_LIMBS - i - 1];
    r[P384_LIMBS - i - 1] =
        (a[P384_LIMBS - i - 1] >> 1) | (carry << (LIMB_BITS - 1));
    carry = new_carry;
  }

  static const Elem Q_PLUS_1_SHR_1 = {
    TOBN(0x00000000, 0x80000000), TOBN(0x7fffffff, 0x80000000),
    TOBN(0xffffffff, 0xffffffff), TOBN(0xffffffff, 0xffffffff),
    TOBN(0xffffffff, 0xffffffff), TOBN(0x7fffffff, 0xffffffff),
  };

  Elem adjusted;
  BN_ULONG carry2 = limbs_add(adjusted, r, Q_PLUS_1_SHR_1, P384_LIMBS);
#if defined(NDEBUG)
  (void)carry2;
#endif
  assert(carry2 == 0);

  copy_conditional(r, adjusted, is_odd);
}
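
/* A sketch checking the invariant `2 * (a / 2) == a (mod q)` that the
 * derivation above establishes, including the wrap-around example:
 *
 * ```python
 * q = 2**384 - 2**128 - 2**96 + 2**32 - 1
 * half = (q + 1) // 2  # the integer value of Q_PLUS_1_SHR_1
 * for a in (0, 1, 2, 2**383, q - 2, q - 1):
 *     r = (a >> 1) + (half if a & 1 else 0)
 *     assert r < q and 2 * r % q == a
 * ```
 */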

static inline void elem_mul_mont(Elem r, const Elem a, const Elem b) {
  static const BN_ULONG Q_N0[] = {
    BN_MONT_CTX_N0(0x1, 0x1)
  };
  /* XXX: Not (clearly) constant-time; inefficient. */
  GFp_bn_mul_mont(r, a, b, Q, Q_N0, P384_LIMBS);
}
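
/* A sketch of the relationship `Q_N0 == -q**-1 (mod 2**64)` that Montgomery
 * multiplication requires, assuming 64-bit limbs, where
 * BN_MONT_CTX_N0(0x1, 0x1) evaluates to 0x0000000100000001:
 *
 * ```python
 * q = 2**384 - 2**128 - 2**96 + 2**32 - 1
 * n0 = 0x0000000100000001
 * assert q * n0 % 2**64 == 2**64 - 1
 * ```
 */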

static inline void elem_mul_by_2(Elem r, const Elem a) {
  LIMBS_shl_mod(r, a, Q, P384_LIMBS);
}

static INLINE_IF_POSSIBLE void elem_mul_by_3(Elem r, const Elem a) {
  /* XXX: Inefficient. TODO: Replace with an integrated shift + add. */
  Elem doubled;
  elem_add(doubled, a, a);
  elem_add(r, doubled, a);
}

static inline void elem_sqr_mont(Elem r, const Elem a) {
  /* XXX: Inefficient. TODO: Add a dedicated squaring routine. */
  elem_mul_mont(r, a, a);
}

void GFp_p384_elem_add(Elem r, const Elem a, const Elem b) {
  elem_add(r, a, b);
}

void GFp_p384_elem_sub(Elem r, const Elem a, const Elem b) {
  elem_sub(r, a, b);
}

void GFp_p384_elem_div_by_2(Elem r, const Elem a) {
  elem_div_by_2(r, a);
}

void GFp_p384_elem_mul_mont(Elem r, const Elem a, const Elem b) {
  elem_mul_mont(r, a, b);
}

void GFp_p384_elem_neg(Elem r, const Elem a) {
  Limb is_zero = LIMBS_are_zero(a, P384_LIMBS);
  Carry borrow = limbs_sub(r, Q, a, P384_LIMBS);
#if defined(NDEBUG)
  (void)borrow;
#endif
  assert(borrow == 0);
  for (size_t i = 0; i < P384_LIMBS; ++i) {
    r[i] = constant_time_select_w(is_zero, 0, r[i]);
  }
}
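
/* Negation computes `q - a`, except that zero must map to zero rather than
 * to `q` (which is not a reduced field element). A sketch of that behavior:
 *
 * ```python
 * q = 2**384 - 2**128 - 2**96 + 2**32 - 1
 * for a in (0, 1, q - 1):
 *     r = 0 if a == 0 else q - a
 *     assert r < q and (r + a) % q == 0
 * ```
 */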


void GFp_p384_scalar_mul_mont(ScalarMont r, const ScalarMont a,
                              const ScalarMont b) {
  static const BN_ULONG N_N0[] = {
    BN_MONT_CTX_N0(0x6ed46089, 0xe88fdc45)
  };
  /* XXX: Inefficient. TODO: Add a dedicated multiplication routine. */
  GFp_bn_mul_mont(r, a, b, N, N_N0, P384_LIMBS);
}
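
/* As with |Q_N0| above, a sketch of `N_N0 == -n**-1 (mod 2**64)`, assuming
 * 64-bit limbs:
 *
 * ```python
 * n = int("ffffffffffffffffffffffffffffffffffffffffffffffff"
 *         "c7634d81f4372ddf581a0db248b0a77aecec196accc52973", 16)
 * n0 = 0x6ed46089e88fdc45  # BN_MONT_CTX_N0(0x6ed46089, 0xe88fdc45)
 * assert n * n0 % 2**64 == 2**64 - 1
 * ```
 */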


/* TODO(perf): Optimize this. */

static void gfp_p384_point_select_w5(P384_POINT *out,
                                     const P384_POINT table[16], size_t index) {
  Elem x; memset(x, 0, sizeof(x));
  Elem y; memset(y, 0, sizeof(y));
  Elem z; memset(z, 0, sizeof(z));

  for (size_t i = 0; i < 16; ++i) {
    Limb mask = constant_time_eq_w(index, i + 1);
    for (size_t j = 0; j < P384_LIMBS; ++j) {
      x[j] |= table[i].X[j] & mask;
      y[j] |= table[i].Y[j] & mask;
      z[j] |= table[i].Z[j] & mask;
    }
  }

  limbs_copy(out->X, x, P384_LIMBS);
  limbs_copy(out->Y, y, P384_LIMBS);
  limbs_copy(out->Z, z, P384_LIMBS);
}
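
/* A Python model of the selection above: every table entry is scanned and
 * OR-ed in under an all-ones/all-zero mask, so the memory access pattern does
 * not depend on |index|, and an |index| of zero yields the all-zero point.
 * (Python gives no constant-time guarantees; this only illustrates the
 * masking.)
 *
 * ```python
 * M = 2**64 - 1  # an all-ones limb, as produced by constant_time_eq_w
 *
 * def select(table, index):
 *     acc = [0] * len(table[0])
 *     for i, entry in enumerate(table):
 *         mask = M if index == i + 1 else 0
 *         for j, limb in enumerate(entry):
 *             acc[j] |= limb & mask
 *     return acc
 *
 * assert select([[7, 8], [9, 10]], 2) == [9, 10]
 * assert select([[7, 8], [9, 10]], 0) == [0, 0]
 * ```
 */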


#include "ecp_nistz384.inl"