typedef uint32_t bignum25519[10];
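/*
 * A bignum25519 holds a field element mod p = 2^255 - 19 in ten unsigned
 * limbs of alternating 26 and 25 bits ("base 2^25.5"):
 *
 *   value = f[0] + f[1]*2^26 + f[2]*2^51 + f[3]*2^77 + f[4]*2^102
 *         + f[5]*2^128 + f[6]*2^153 + f[7]*2^179 + f[8]*2^204 + f[9]*2^230
 *
 * Limbs may carry a little slack beyond their nominal width between
 * reductions; the 64-bit accumulators below absorb that growth.
 */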

static const uint32_t reduce_mask_26 = (1 << 26) - 1;
static const uint32_t reduce_mask_25 = (1 << 25) - 1;

/* out = in */
DONNA_INLINE static void
curve25519_copy(bignum25519 out, const bignum25519 in) {
	out[0] = in[0];
	out[1] = in[1];
	out[2] = in[2];
	out[3] = in[3];
	out[4] = in[4];
	out[5] = in[5];
	out[6] = in[6];
	out[7] = in[7];
	out[8] = in[8];
	out[9] = in[9];
}

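/*
 * Limb-wise addition with no carry propagation: each input limb is at most
 * a few bits over its nominal 26/25-bit width, so the 32-bit sums cannot
 * overflow, and the next multiplication/reduction absorbs the growth.
 */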
/* out = a + b */
DONNA_INLINE static void
curve25519_add(bignum25519 out, const bignum25519 a, const bignum25519 b) {
	out[0] = a[0] + b[0];
	out[1] = a[1] + b[1];
	out[2] = a[2] + b[2];
	out[3] = a[3] + b[3];
	out[4] = a[4] + b[4];
	out[5] = a[5] + b[5];
	out[6] = a[6] + b[6];
	out[7] = a[7] + b[7];
	out[8] = a[8] + b[8];
	out[9] = a[9] + b[9];
}

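/*
 * To keep every limb difference non-negative, a per-limb bias whose limbs
 * sum to 2*p = 2^256 - 38 (0x7ffffda, then alternating 0x3fffffe and
 * 0x7fffffe) is added along with a, which leaves the value unchanged mod p.
 * One carry pass follows; the carry out of limb 9 is folded back into
 * limb 0 times 19, since 2^255 = 19 (mod p).
 */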
/* out = a - b */
DONNA_INLINE static void
curve25519_sub(bignum25519 out, const bignum25519 a, const bignum25519 b) {
	uint32_t c;
	out[0] = 0x7ffffda + a[0] - b[0]    ; c = (out[0] >> 26); out[0] &= reduce_mask_26;
	out[1] = 0x3fffffe + a[1] - b[1] + c; c = (out[1] >> 25); out[1] &= reduce_mask_25;
	out[2] = 0x7fffffe + a[2] - b[2] + c; c = (out[2] >> 26); out[2] &= reduce_mask_26;
	out[3] = 0x3fffffe + a[3] - b[3] + c; c = (out[3] >> 25); out[3] &= reduce_mask_25;
	out[4] = 0x7fffffe + a[4] - b[4] + c; c = (out[4] >> 26); out[4] &= reduce_mask_26;
	out[5] = 0x3fffffe + a[5] - b[5] + c; c = (out[5] >> 25); out[5] &= reduce_mask_25;
	out[6] = 0x7fffffe + a[6] - b[6] + c; c = (out[6] >> 26); out[6] &= reduce_mask_26;
	out[7] = 0x3fffffe + a[7] - b[7] + c; c = (out[7] >> 25); out[7] &= reduce_mask_25;
	out[8] = 0x7fffffe + a[8] - b[8] + c; c = (out[8] >> 26); out[8] &= reduce_mask_26;
	out[9] = 0x3fffffe + a[9] - b[9] + c; c = (out[9] >> 25); out[9] &= reduce_mask_25;
	out[0] += 19 * c;
}

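/*
 * Multiplication by a small scalar: each limb is multiplied in a
 * 32x32->64 bit product, the carry is pushed into the next limb
 * immediately, and the final carry off limb 9 is folded back into limb 0
 * multiplied by 19.
 */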
/* out = in * scalar */
DONNA_INLINE static void
curve25519_scalar_product(bignum25519 out, const bignum25519 in, const uint32_t scalar) {
	uint64_t a;
	uint32_t c;
	a = mul32x32_64(in[0], scalar);     out[0] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
	a = mul32x32_64(in[1], scalar) + c; out[1] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
	a = mul32x32_64(in[2], scalar) + c; out[2] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
	a = mul32x32_64(in[3], scalar) + c; out[3] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
	a = mul32x32_64(in[4], scalar) + c; out[4] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
	a = mul32x32_64(in[5], scalar) + c; out[5] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
	a = mul32x32_64(in[6], scalar) + c; out[6] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
	a = mul32x32_64(in[7], scalar) + c; out[7] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
	a = mul32x32_64(in[8], scalar) + c; out[8] = (uint32_t)a & reduce_mask_26; c = (uint32_t)(a >> 26);
	a = mul32x32_64(in[9], scalar) + c; out[9] = (uint32_t)a & reduce_mask_25; c = (uint32_t)(a >> 25);
	                                    out[0] += c * 19;
}

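/*
 * Schoolbook multiplication in the mixed 26/25-bit radix. The odd-indexed
 * limbs of b are doubled before the even-indexed product coefficients are
 * formed, because a product of two odd-position limbs sits half a bit low
 * in this radix. High limbs are then scaled by 19 (un-doubling r3/r5/r7
 * first, re-doubling them afterwards) so that terms wrapping past 2^255
 * fold into the low coefficients, and one carry chain reduces the result.
 */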
/* out = a * b */
DONNA_INLINE static void
curve25519_mul(bignum25519 out, const bignum25519 a, const bignum25519 b) {
	uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
	uint32_t s0,s1,s2,s3,s4,s5,s6,s7,s8,s9;
	uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
	uint32_t p;

	r0 = b[0];
	r1 = b[1];
	r2 = b[2];
	r3 = b[3];
	r4 = b[4];
	r5 = b[5];
	r6 = b[6];
	r7 = b[7];
	r8 = b[8];
	r9 = b[9];

	s0 = a[0];
	s1 = a[1];
	s2 = a[2];
	s3 = a[3];
	s4 = a[4];
	s5 = a[5];
	s6 = a[6];
	s7 = a[7];
	s8 = a[8];
	s9 = a[9];

	m1 = mul32x32_64(r0, s1) + mul32x32_64(r1, s0);
	m3 = mul32x32_64(r0, s3) + mul32x32_64(r1, s2) + mul32x32_64(r2, s1) + mul32x32_64(r3, s0);
	m5 = mul32x32_64(r0, s5) + mul32x32_64(r1, s4) + mul32x32_64(r2, s3) + mul32x32_64(r3, s2) + mul32x32_64(r4, s1) + mul32x32_64(r5, s0);
	m7 = mul32x32_64(r0, s7) + mul32x32_64(r1, s6) + mul32x32_64(r2, s5) + mul32x32_64(r3, s4) + mul32x32_64(r4, s3) + mul32x32_64(r5, s2) + mul32x32_64(r6, s1) + mul32x32_64(r7, s0);
	m9 = mul32x32_64(r0, s9) + mul32x32_64(r1, s8) + mul32x32_64(r2, s7) + mul32x32_64(r3, s6) + mul32x32_64(r4, s5) + mul32x32_64(r5, s4) + mul32x32_64(r6, s3) + mul32x32_64(r7, s2) + mul32x32_64(r8, s1) + mul32x32_64(r9, s0);

	r1 *= 2;
	r3 *= 2;
	r5 *= 2;
	r7 *= 2;

	m0 = mul32x32_64(r0, s0);
	m2 = mul32x32_64(r0, s2) + mul32x32_64(r1, s1) + mul32x32_64(r2, s0);
	m4 = mul32x32_64(r0, s4) + mul32x32_64(r1, s3) + mul32x32_64(r2, s2) + mul32x32_64(r3, s1) + mul32x32_64(r4, s0);
	m6 = mul32x32_64(r0, s6) + mul32x32_64(r1, s5) + mul32x32_64(r2, s4) + mul32x32_64(r3, s3) + mul32x32_64(r4, s2) + mul32x32_64(r5, s1) + mul32x32_64(r6, s0);
	m8 = mul32x32_64(r0, s8) + mul32x32_64(r1, s7) + mul32x32_64(r2, s6) + mul32x32_64(r3, s5) + mul32x32_64(r4, s4) + mul32x32_64(r5, s3) + mul32x32_64(r6, s2) + mul32x32_64(r7, s1) + mul32x32_64(r8, s0);

	r1 *= 19;
	r2 *= 19;
	r3 = (r3 / 2) * 19;
	r4 *= 19;
	r5 = (r5 / 2) * 19;
	r6 *= 19;
	r7 = (r7 / 2) * 19;
	r8 *= 19;
	r9 *= 19;

	m1 += (mul32x32_64(r9, s2) + mul32x32_64(r8, s3) + mul32x32_64(r7, s4) + mul32x32_64(r6, s5) + mul32x32_64(r5, s6) + mul32x32_64(r4, s7) + mul32x32_64(r3, s8) + mul32x32_64(r2, s9));
	m3 += (mul32x32_64(r9, s4) + mul32x32_64(r8, s5) + mul32x32_64(r7, s6) + mul32x32_64(r6, s7) + mul32x32_64(r5, s8) + mul32x32_64(r4, s9));
	m5 += (mul32x32_64(r9, s6) + mul32x32_64(r8, s7) + mul32x32_64(r7, s8) + mul32x32_64(r6, s9));
	m7 += (mul32x32_64(r9, s8) + mul32x32_64(r8, s9));

	r3 *= 2;
	r5 *= 2;
	r7 *= 2;
	r9 *= 2;

	m0 += (mul32x32_64(r9, s1) + mul32x32_64(r8, s2) + mul32x32_64(r7, s3) + mul32x32_64(r6, s4) + mul32x32_64(r5, s5) + mul32x32_64(r4, s6) + mul32x32_64(r3, s7) + mul32x32_64(r2, s8) + mul32x32_64(r1, s9));
	m2 += (mul32x32_64(r9, s3) + mul32x32_64(r8, s4) + mul32x32_64(r7, s5) + mul32x32_64(r6, s6) + mul32x32_64(r5, s7) + mul32x32_64(r4, s8) + mul32x32_64(r3, s9));
	m4 += (mul32x32_64(r9, s5) + mul32x32_64(r8, s6) + mul32x32_64(r7, s7) + mul32x32_64(r6, s8) + mul32x32_64(r5, s9));
	m6 += (mul32x32_64(r9, s7) + mul32x32_64(r8, s8) + mul32x32_64(r7, s9));
	m8 += (mul32x32_64(r9, s9));

	                             r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26);
	m1 += c;                     r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25);
	m2 += c;                     r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26);
	m3 += c;                     r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25);
	m4 += c;                     r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26);
	m5 += c;                     r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25);
	m6 += c;                     r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26);
	m7 += c;                     r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25);
	m8 += c;                     r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26);
	m9 += c;                     r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25);
	m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26);
	r1 += p;

	out[0] = r0;
	out[1] = r1;
	out[2] = r2;
	out[3] = r3;
	out[4] = r4;
	out[5] = r5;
	out[6] = r6;
	out[7] = r7;
	out[8] = r8;
	out[9] = r9;
}

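/*
 * Dedicated squaring: same radix and reduction as curve25519_mul, but the
 * symmetric cross terms r_i*r_j (i != j) are computed once and doubled,
 * roughly halving the number of 32x32->64 bit multiplications.
 */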
/* out = in * in */
DONNA_INLINE static void
curve25519_square(bignum25519 out, const bignum25519 in) {
	uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
	uint32_t d6,d7,d8,d9;
	uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
	uint32_t p;

	r0 = in[0];
	r1 = in[1];
	r2 = in[2];
	r3 = in[3];
	r4 = in[4];
	r5 = in[5];
	r6 = in[6];
	r7 = in[7];
	r8 = in[8];
	r9 = in[9];

	m0 = mul32x32_64(r0, r0);
	r0 *= 2;
	m1 = mul32x32_64(r0, r1);
	m2 = mul32x32_64(r0, r2) + mul32x32_64(r1, r1 * 2);
	r1 *= 2;
	m3 = mul32x32_64(r0, r3) + mul32x32_64(r1, r2    );
	m4 = mul32x32_64(r0, r4) + mul32x32_64(r1, r3 * 2) + mul32x32_64(r2, r2);
	r2 *= 2;
	m5 = mul32x32_64(r0, r5) + mul32x32_64(r1, r4    ) + mul32x32_64(r2, r3);
	m6 = mul32x32_64(r0, r6) + mul32x32_64(r1, r5 * 2) + mul32x32_64(r2, r4) + mul32x32_64(r3, r3 * 2);
	r3 *= 2;
	m7 = mul32x32_64(r0, r7) + mul32x32_64(r1, r6    ) + mul32x32_64(r2, r5) + mul32x32_64(r3, r4    );
	m8 = mul32x32_64(r0, r8) + mul32x32_64(r1, r7 * 2) + mul32x32_64(r2, r6) + mul32x32_64(r3, r5 * 2) + mul32x32_64(r4, r4    );
	m9 = mul32x32_64(r0, r9) + mul32x32_64(r1, r8    ) + mul32x32_64(r2, r7) + mul32x32_64(r3, r6    ) + mul32x32_64(r4, r5 * 2);

	d6 = r6 * 19;
	d7 = r7 * 2 * 19;
	d8 = r8 * 19;
	d9 = r9 * 2 * 19;

	m0 += (mul32x32_64(d9, r1    ) + mul32x32_64(d8, r2    ) + mul32x32_64(d7, r3    ) + mul32x32_64(d6, r4 * 2) + mul32x32_64(r5, r5 * 2 * 19));
	m1 += (mul32x32_64(d9, r2 / 2) + mul32x32_64(d8, r3    ) + mul32x32_64(d7, r4    ) + mul32x32_64(d6, r5 * 2));
	m2 += (mul32x32_64(d9, r3    ) + mul32x32_64(d8, r4 * 2) + mul32x32_64(d7, r5 * 2) + mul32x32_64(d6, r6    ));
	m3 += (mul32x32_64(d9, r4    ) + mul32x32_64(d8, r5 * 2) + mul32x32_64(d7, r6    ));
	m4 += (mul32x32_64(d9, r5 * 2) + mul32x32_64(d8, r6 * 2) + mul32x32_64(d7, r7    ));
	m5 += (mul32x32_64(d9, r6    ) + mul32x32_64(d8, r7 * 2));
	m6 += (mul32x32_64(d9, r7 * 2) + mul32x32_64(d8, r8    ));
	m7 += (mul32x32_64(d9, r8    ));
	m8 += (mul32x32_64(d9, r9    ));

	                             r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26);
	m1 += c;                     r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25);
	m2 += c;                     r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26);
	m3 += c;                     r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25);
	m4 += c;                     r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26);
	m5 += c;                     r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25);
	m6 += c;                     r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26);
	m7 += c;                     r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25);
	m8 += c;                     r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26);
	m9 += c;                     r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25);
	m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26);
	r1 += p;

	out[0] = r0;
	out[1] = r1;
	out[2] = r2;
	out[3] = r3;
	out[4] = r4;
	out[5] = r5;
	out[6] = r6;
	out[7] = r7;
	out[8] = r8;
	out[9] = r9;
}

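/*
 * Repeated in-register squaring for long runs of squarings (as used in
 * exponentiation chains); count must be at least 1, since the loop is a
 * do/while.
 */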
/* out = in^(2^count) */
static void
curve25519_square_times(bignum25519 out, const bignum25519 in, int count) {
	uint32_t r0,r1,r2,r3,r4,r5,r6,r7,r8,r9;
	uint32_t d6,d7,d8,d9;
	uint64_t m0,m1,m2,m3,m4,m5,m6,m7,m8,m9,c;
	uint32_t p;

	r0 = in[0];
	r1 = in[1];
	r2 = in[2];
	r3 = in[3];
	r4 = in[4];
	r5 = in[5];
	r6 = in[6];
	r7 = in[7];
	r8 = in[8];
	r9 = in[9];

	do {
		m0 = mul32x32_64(r0, r0);
		r0 *= 2;
		m1 = mul32x32_64(r0, r1);
		m2 = mul32x32_64(r0, r2) + mul32x32_64(r1, r1 * 2);
		r1 *= 2;
		m3 = mul32x32_64(r0, r3) + mul32x32_64(r1, r2    );
		m4 = mul32x32_64(r0, r4) + mul32x32_64(r1, r3 * 2) + mul32x32_64(r2, r2);
		r2 *= 2;
		m5 = mul32x32_64(r0, r5) + mul32x32_64(r1, r4    ) + mul32x32_64(r2, r3);
		m6 = mul32x32_64(r0, r6) + mul32x32_64(r1, r5 * 2) + mul32x32_64(r2, r4) + mul32x32_64(r3, r3 * 2);
		r3 *= 2;
		m7 = mul32x32_64(r0, r7) + mul32x32_64(r1, r6    ) + mul32x32_64(r2, r5) + mul32x32_64(r3, r4    );
		m8 = mul32x32_64(r0, r8) + mul32x32_64(r1, r7 * 2) + mul32x32_64(r2, r6) + mul32x32_64(r3, r5 * 2) + mul32x32_64(r4, r4    );
		m9 = mul32x32_64(r0, r9) + mul32x32_64(r1, r8    ) + mul32x32_64(r2, r7) + mul32x32_64(r3, r6    ) + mul32x32_64(r4, r5 * 2);

		d6 = r6 * 19;
		d7 = r7 * 2 * 19;
		d8 = r8 * 19;
		d9 = r9 * 2 * 19;

		m0 += (mul32x32_64(d9, r1    ) + mul32x32_64(d8, r2    ) + mul32x32_64(d7, r3    ) + mul32x32_64(d6, r4 * 2) + mul32x32_64(r5, r5 * 2 * 19));
		m1 += (mul32x32_64(d9, r2 / 2) + mul32x32_64(d8, r3    ) + mul32x32_64(d7, r4    ) + mul32x32_64(d6, r5 * 2));
		m2 += (mul32x32_64(d9, r3    ) + mul32x32_64(d8, r4 * 2) + mul32x32_64(d7, r5 * 2) + mul32x32_64(d6, r6    ));
		m3 += (mul32x32_64(d9, r4    ) + mul32x32_64(d8, r5 * 2) + mul32x32_64(d7, r6    ));
		m4 += (mul32x32_64(d9, r5 * 2) + mul32x32_64(d8, r6 * 2) + mul32x32_64(d7, r7    ));
		m5 += (mul32x32_64(d9, r6    ) + mul32x32_64(d8, r7 * 2));
		m6 += (mul32x32_64(d9, r7 * 2) + mul32x32_64(d8, r8    ));
		m7 += (mul32x32_64(d9, r8    ));
		m8 += (mul32x32_64(d9, r9    ));

		                             r0 = (uint32_t)m0 & reduce_mask_26; c = (m0 >> 26);
		m1 += c;                     r1 = (uint32_t)m1 & reduce_mask_25; c = (m1 >> 25);
		m2 += c;                     r2 = (uint32_t)m2 & reduce_mask_26; c = (m2 >> 26);
		m3 += c;                     r3 = (uint32_t)m3 & reduce_mask_25; c = (m3 >> 25);
		m4 += c;                     r4 = (uint32_t)m4 & reduce_mask_26; c = (m4 >> 26);
		m5 += c;                     r5 = (uint32_t)m5 & reduce_mask_25; c = (m5 >> 25);
		m6 += c;                     r6 = (uint32_t)m6 & reduce_mask_26; c = (m6 >> 26);
		m7 += c;                     r7 = (uint32_t)m7 & reduce_mask_25; c = (m7 >> 25);
		m8 += c;                     r8 = (uint32_t)m8 & reduce_mask_26; c = (m8 >> 26);
		m9 += c;                     r9 = (uint32_t)m9 & reduce_mask_25; p = (uint32_t)(m9 >> 25);
		m0 = r0 + mul32x32_64(p,19); r0 = (uint32_t)m0 & reduce_mask_26; p = (uint32_t)(m0 >> 26);
		r1 += p;
	} while (--count);

	out[0] = r0;
	out[1] = r1;
	out[2] = r2;
	out[3] = r3;
	out[4] = r4;
	out[5] = r5;
	out[6] = r6;
	out[7] = r7;
	out[8] = r8;
	out[9] = r9;
}

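/*
 * Unpacking splits the 256 input bits into the limb layout described at
 * the top of this file. Note that the little-endian fast path performs
 * type-punned, potentially unaligned 32-bit loads from the byte array.
 */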
/* Take a little-endian, 32-byte number and expand it into polynomial form */
static void
curve25519_expand(bignum25519 out, const unsigned char in[32]) {
	static const union { uint8_t b[2]; uint16_t s; } endian_check = {{1,0}};
	uint32_t x0,x1,x2,x3,x4,x5,x6,x7;

	if (endian_check.s == 1) {
		x0 = *(uint32_t *)(in + 0);
		x1 = *(uint32_t *)(in + 4);
		x2 = *(uint32_t *)(in + 8);
		x3 = *(uint32_t *)(in + 12);
		x4 = *(uint32_t *)(in + 16);
		x5 = *(uint32_t *)(in + 20);
		x6 = *(uint32_t *)(in + 24);
		x7 = *(uint32_t *)(in + 28);
	} else {
		#define F(s)                         \
			((((uint32_t)in[s + 0])      ) | \
			 (((uint32_t)in[s + 1]) <<  8) | \
			 (((uint32_t)in[s + 2]) << 16) | \
			 (((uint32_t)in[s + 3]) << 24))
		x0 = F(0);
		x1 = F(4);
		x2 = F(8);
		x3 = F(12);
		x4 = F(16);
		x5 = F(20);
		x6 = F(24);
		x7 = F(28);
		#undef F
	}

	out[0] = (                        x0       ) & reduce_mask_26;
	out[1] = ((((uint64_t)x1 << 32) | x0) >> 26) & reduce_mask_25;
	out[2] = ((((uint64_t)x2 << 32) | x1) >> 19) & reduce_mask_26;
	out[3] = ((((uint64_t)x3 << 32) | x2) >> 13) & reduce_mask_25;
	out[4] = ((                       x3) >>  6) & reduce_mask_26;
	out[5] = (                        x4       ) & reduce_mask_25;
	out[6] = ((((uint64_t)x5 << 32) | x4) >> 25) & reduce_mask_26;
	out[7] = ((((uint64_t)x6 << 32) | x5) >> 19) & reduce_mask_25;
	out[8] = ((((uint64_t)x7 << 32) | x6) >> 12) & reduce_mask_26;
	out[9] = ((                       x7) >>  6) & reduce_mask_25; /* ignore the top bit */
}

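/*
 * Contraction emits the unique canonical encoding: two full carry passes
 * bring the value below 2^255, then adding 19 followed by 2^255 - 19 (and
 * discarding bit 255) subtracts p exactly when the value was >= p. The
 * resulting limbs, each now at its nominal width, are shifted into byte
 * alignment and stored little-endian.
 */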
/* Take a fully reduced polynomial form number and contract it into a little-endian, 32-byte array */
static void
curve25519_contract(unsigned char out[32], const bignum25519 in) {
	bignum25519 f;
	curve25519_copy(f, in);

	#define carry_pass() \
		f[1] += f[0] >> 26; f[0] &= reduce_mask_26; \
		f[2] += f[1] >> 25; f[1] &= reduce_mask_25; \
		f[3] += f[2] >> 26; f[2] &= reduce_mask_26; \
		f[4] += f[3] >> 25; f[3] &= reduce_mask_25; \
		f[5] += f[4] >> 26; f[4] &= reduce_mask_26; \
		f[6] += f[5] >> 25; f[5] &= reduce_mask_25; \
		f[7] += f[6] >> 26; f[6] &= reduce_mask_26; \
		f[8] += f[7] >> 25; f[7] &= reduce_mask_25; \
		f[9] += f[8] >> 26; f[8] &= reduce_mask_26;

	#define carry_pass_full() \
		carry_pass() \
		f[0] += 19 * (f[9] >> 25); f[9] &= reduce_mask_25;

	#define carry_pass_final() \
		carry_pass() \
		f[9] &= reduce_mask_25;

	carry_pass_full()
	carry_pass_full()

	/* now f is between 0 and 2^255-1, properly carried. */
	/* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
	f[0] += 19;
	carry_pass_full()

	/* now between 19 and 2^255-1 in both cases, and offset by 19. */
	f[0] += (1 << 26) - 19;
	f[1] += (1 << 25) - 1;
	f[2] += (1 << 26) - 1;
	f[3] += (1 << 25) - 1;
	f[4] += (1 << 26) - 1;
	f[5] += (1 << 25) - 1;
	f[6] += (1 << 26) - 1;
	f[7] += (1 << 25) - 1;
	f[8] += (1 << 26) - 1;
	f[9] += (1 << 25) - 1;

	/* now between 2^255 and 2^256-20, and offset by 2^255. */
	carry_pass_final()

	#undef carry_pass
	#undef carry_pass_full
	#undef carry_pass_final

	f[1] <<= 2;
	f[2] <<= 3;
	f[3] <<= 5;
	f[4] <<= 6;
	f[6] <<= 1;
	f[7] <<= 3;
	f[8] <<= 4;
	f[9] <<= 6;

	#define F(i, s) \
		out[s+0] |= (unsigned char)(f[i] & 0xff); \
		out[s+1] = (unsigned char)((f[i] >> 8) & 0xff); \
		out[s+2] = (unsigned char)((f[i] >> 16) & 0xff); \
		out[s+3] = (unsigned char)((f[i] >> 24) & 0xff);

	out[0] = 0;
	out[16] = 0;
	F(0,0);
	F(1,3);
	F(2,6);
	F(3,9);
	F(4,12);
	F(5,16);
	F(6,19);
	F(7,22);
	F(8,25);
	F(9,28);
	#undef F
}

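/*
 * iswap (expected to be 0 or 1) is stretched into an all-zeroes or all-ones
 * mask so the same XOR sequence runs either way: a branch-free,
 * constant-time conditional swap.
 */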
/*
 * Swap the contents of [x] and [qpx] iff [iswap] is non-zero
 */
DONNA_INLINE static void
curve25519_swap_conditional(bignum25519 x, bignum25519 qpx, uint32_t iswap) {
	const uint32_t swap = (uint32_t)(-(int32_t)iswap);
	uint32_t x0,x1,x2,x3,x4,x5,x6,x7,x8,x9;

	x0 = swap & (x[0] ^ qpx[0]); x[0] ^= x0; qpx[0] ^= x0;
	x1 = swap & (x[1] ^ qpx[1]); x[1] ^= x1; qpx[1] ^= x1;
	x2 = swap & (x[2] ^ qpx[2]); x[2] ^= x2; qpx[2] ^= x2;
	x3 = swap & (x[3] ^ qpx[3]); x[3] ^= x3; qpx[3] ^= x3;
	x4 = swap & (x[4] ^ qpx[4]); x[4] ^= x4; qpx[4] ^= x4;
	x5 = swap & (x[5] ^ qpx[5]); x[5] ^= x5; qpx[5] ^= x5;
	x6 = swap & (x[6] ^ qpx[6]); x[6] ^= x6; qpx[6] ^= x6;
	x7 = swap & (x[7] ^ qpx[7]); x[7] ^= x7; qpx[7] ^= x7;
	x8 = swap & (x[8] ^ qpx[8]); x[8] ^= x8; qpx[8] ^= x8;
	x9 = swap & (x[9] ^ qpx[9]); x[9] ^= x9; qpx[9] ^= x9;
}
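/*
 * Illustrative sketch only (not part of the original file): combining the
 * helpers above to multiply two packed field elements would look roughly
 * like
 *
 *   bignum25519 a, b, r;
 *   unsigned char r_bytes[32];
 *   curve25519_expand(a, a_bytes);
 *   curve25519_expand(b, b_bytes);
 *   curve25519_mul(r, a, b);
 *   curve25519_contract(r_bytes, r);
 *
 * where a_bytes/b_bytes are assumed to be 32-byte little-endian encodings.
 */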