// -*- mode: rust; -*-
//
// This file is part of curve25519-dalek.
// Copyright (c) 2016-2019 Isis Lovecruft, Henry de Valence
// Portions Copyright 2017 Brian Smith
// See LICENSE for licensing information.
//
// Authors:
// - Isis Agora Lovecruft <isis@patternsinthevoid.net>
// - Henry de Valence <hdevalence@hdevalence.ca>
// - Brian Smith <brian@briansmith.org>

//! Arithmetic on scalars (integers mod the group order).
//!
//! Both the Ristretto group and the Ed25519 basepoint have prime order
//! \\( \ell = 2\^{252} + 27742317777372353535851937790883648493 \\).
//!
//! This code is intended to be useful with both the Ristretto group
//! (where everything is done modulo \\( \ell \\)), and the X/Ed25519
//! setting, which mandates specific bit-twiddles that are not
//! well-defined modulo \\( \ell \\).
//!
//! All arithmetic on `Scalars` is done modulo \\( \ell \\).
//!
//! # Constructing a scalar
//!
//! To create a [`Scalar`](struct.Scalar.html) from a supposedly canonical encoding, use
//! [`Scalar::from_canonical_bytes`](struct.Scalar.html#method.from_canonical_bytes).
//!
//! This function does input validation, ensuring that the input bytes
//! are the canonical encoding of a `Scalar`.
//! If they are, we'll get
//! `Some(Scalar)` in return:
//!
//! ```
//! use curve25519_dalek_ng::scalar::Scalar;
//!
//! let one_as_bytes: [u8; 32] = Scalar::one().to_bytes();
//! let a: Option<Scalar> = Scalar::from_canonical_bytes(one_as_bytes);
//!
//! assert!(a.is_some());
//! ```
//!
//! However, if we give it bytes representing a scalar larger than \\( \ell \\)
//! (in this case, \\( \ell + 2 \\)), we'll get `None` back:
//!
//! ```
//! use curve25519_dalek_ng::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Option<Scalar> = Scalar::from_canonical_bytes(l_plus_two_bytes);
//!
//! assert!(a.is_none());
//! ```
//!
//! Another way to create a `Scalar` is by reducing a \\(256\\)-bit integer mod
//! \\( \ell \\), for which one may use the
//! [`Scalar::from_bytes_mod_order`](struct.Scalar.html#method.from_bytes_mod_order)
//! method.  In the case of the second example above, this reduces the
//! encoded integer \\( \bmod \ell \\), producing \\( 2 \\):
//!
//! ```
//! use curve25519_dalek_ng::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Scalar = Scalar::from_bytes_mod_order(l_plus_two_bytes);
//!
//! let two: Scalar = Scalar::one() + Scalar::one();
//!
//! assert!(a == two);
//! ```
//!
//! There is also a constructor that reduces a \\(512\\)-bit integer,
//! [`Scalar::from_bytes_mod_order_wide`](struct.Scalar.html#method.from_bytes_mod_order_wide).
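//! For instance (a minimal check of this constructor), the 512-bit
//! little-endian encoding of \\( 42 \\) reduces to the same scalar as
//! `Scalar::from(42u64)`:
//!
//! ```
//! use curve25519_dalek_ng::scalar::Scalar;
//!
//! let mut wide = [0u8; 64];
//! wide[0] = 42;
//! assert_eq!(Scalar::from_bytes_mod_order_wide(&wide), Scalar::from(42u64));
//! ```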
//!
//! To construct a `Scalar` as the hash of some input data, use
//! [`Scalar::hash_from_bytes`](struct.Scalar.html#method.hash_from_bytes),
//! which takes a buffer, or
//! [`Scalar::from_hash`](struct.Scalar.html#method.from_hash),
//! which allows an incremental "init-update-finalize" (IUF) API.
//!
//! ```
//! # extern crate curve25519_dalek_ng;
//! # extern crate sha2;
//! #
//! # fn main() {
//! use sha2::{Digest, Sha512};
//! use curve25519_dalek_ng::scalar::Scalar;
//!
//! // Hashing a single byte slice
//! let a = Scalar::hash_from_bytes::<Sha512>(b"Abolish ICE");
//!
//! // Streaming data into a hash object
//! let mut hasher = Sha512::default();
//! hasher.update(b"Abolish ");
//! hasher.update(b"ICE");
//! let a2 = Scalar::from_hash(hasher);
//!
//! assert_eq!(a, a2);
//! # }
//! ```
//!
//! Finally, to create a `Scalar` with a specific bit-pattern
//! (e.g., for compatibility with X/Ed25519
//! ["clamping"](https://github.com/isislovecruft/ed25519-dalek/blob/f790bd2ce/src/ed25519.rs#L349)),
//! use [`Scalar::from_bits`](struct.Scalar.html#method.from_bits). This
//! constructs a scalar with exactly the bit pattern given, without any
//! assurances as to reduction modulo the group order:
//!
//! ```
//! use curve25519_dalek_ng::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Scalar = Scalar::from_bits(l_plus_two_bytes);
//!
//! let two: Scalar = Scalar::one() + Scalar::one();
//!
//! assert!(a != two);              // the scalar is not reduced (mod l)…
//! assert!(! a.is_canonical());    // …and therefore is not canonical.
//! assert!(a.reduce() == two);     // if we were to reduce it manually, it would be.
//! ```
//!
//! The resulting `Scalar` has exactly the specified bit pattern,
//! **except for the highest bit, which will be set to 0**.

use core::borrow::Borrow;
use core::cmp::{Eq, PartialEq};
use core::fmt::Debug;
use core::iter::{Product, Sum};
use core::ops::Index;
use core::ops::Neg;
use core::ops::{Add, AddAssign};
use core::ops::{Mul, MulAssign};
use core::ops::{Sub, SubAssign};

#[allow(unused_imports)]
use prelude::*;

use rand_core::{CryptoRng, RngCore};

use digest::generic_array::typenum::U64;
use digest::Digest;

use subtle::Choice;
use subtle::ConditionallySelectable;
use subtle::ConstantTimeEq;

use zeroize::Zeroize;

use backend;
use constants;

/// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
///
/// This is a type alias for one of the scalar types in the `backend`
/// module.
#[cfg(feature = "u64_backend")]
type UnpackedScalar = backend::serial::u64::scalar::Scalar52;

/// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
///
/// This is a type alias for one of the scalar types in the `backend`
/// module.
#[cfg(feature = "u32_backend")]
type UnpackedScalar = backend::serial::u32::scalar::Scalar29;


/// The `Scalar` struct holds an integer \\(s < 2\^{255} \\) which
/// represents an element of \\(\mathbb Z / \ell\\).
#[derive(Copy, Clone, Hash)]
pub struct Scalar {
    /// `bytes` is a little-endian byte encoding of an integer representing a scalar modulo the
    /// group order.
    ///
    /// # Invariant
    ///
    /// The integer representing this scalar must be bounded above by \\(2\^{255}\\), or
    /// equivalently the high bit of `bytes[31]` must be zero.
    ///
    /// This ensures that there is room for a carry bit when computing a NAF representation.
    //
    // XXX This is pub(crate) so we can write literal constants.  If const fns were stable, we could
    //     make the Scalar constructors const fns and use those instead.
    pub(crate) bytes: [u8; 32],
}

impl Scalar {
    /// Construct a `Scalar` by reducing a 256-bit little-endian integer
    /// modulo the group order \\( \ell \\).
    pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
        // Temporarily allow s_unreduced.bytes > 2^255 ...
        let s_unreduced = Scalar{bytes};

        // Then reduce mod the group order and return the reduced representative.
        let s = s_unreduced.reduce();
        debug_assert_eq!(0u8, s[31] >> 7);

        s
    }

    /// Construct a `Scalar` by reducing a 512-bit little-endian integer
    /// modulo the group order \\( \ell \\).
    pub fn from_bytes_mod_order_wide(input: &[u8; 64]) -> Scalar {
        UnpackedScalar::from_bytes_wide(input).pack()
    }

    /// Attempt to construct a `Scalar` from a canonical byte representation.
    ///
    /// # Return
    ///
    /// - `Some(s)`, where `s` is the `Scalar` corresponding to `bytes`,
    ///   if `bytes` is a canonical byte representation;
    /// - `None` if `bytes` is not a canonical byte representation.
    pub fn from_canonical_bytes(bytes: [u8; 32]) -> Option<Scalar> {
        // Check that the high bit is not set
        if (bytes[31] >> 7) != 0u8 { return None; }
        let candidate = Scalar::from_bits(bytes);

        if candidate.is_canonical() {
            Some(candidate)
        } else {
            None
        }
    }

    /// Construct a `Scalar` from the low 255 bits of a 256-bit integer.
    ///
    /// This function is intended for applications like X25519 which
    /// require specific bit-patterns when performing scalar
    /// multiplication.
    pub const fn from_bits(bytes: [u8; 32]) -> Scalar {
        let mut s = Scalar{bytes};
        // Ensure that s < 2^255 by masking the high bit
        s.bytes[31] &= 0b0111_1111;

        s
    }
}

impl Debug for Scalar {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        write!(f, "Scalar{{\n\tbytes: {:?},\n}}", &self.bytes)
    }
}

impl Eq for Scalar {}
impl PartialEq for Scalar {
    fn eq(&self, other: &Self) -> bool {
        self.ct_eq(other).unwrap_u8() == 1u8
    }
}

impl ConstantTimeEq for Scalar {
    fn ct_eq(&self, other: &Self) -> Choice {
        self.bytes.ct_eq(&other.bytes)
    }
}

impl Index<usize> for Scalar {
    type Output = u8;

    /// Index the bytes of the representative for this `Scalar`.  Mutation is not permitted.
    fn index(&self, _index: usize) -> &u8 {
        &(self.bytes[_index])
    }
}
impl<'b> MulAssign<&'b Scalar> for Scalar {
    fn mul_assign(&mut self, _rhs: &'b Scalar) {
        *self = UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack();
    }
}

define_mul_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a, 'b> Mul<&'b Scalar> for &'a Scalar {
    type Output = Scalar;
    fn mul(self, _rhs: &'b Scalar) -> Scalar {
        UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack()
    }
}

define_mul_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'b> AddAssign<&'b Scalar> for Scalar {
    fn add_assign(&mut self, _rhs: &'b Scalar) {
        *self = *self + _rhs;
    }
}

define_add_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a, 'b> Add<&'b Scalar> for &'a Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn add(self, _rhs: &'b Scalar) -> Scalar {
        // The UnpackedScalar::add function produces reduced outputs
        // if the inputs are reduced.  However, these inputs may not
        // be reduced -- they might come from Scalar::from_bits.  So
        // after computing the sum, we explicitly reduce it mod l
        // before repacking.
        let sum = UnpackedScalar::add(&self.unpack(), &_rhs.unpack());
        let sum_R = UnpackedScalar::mul_internal(&sum, &constants::R);
        let sum_mod_l = UnpackedScalar::montgomery_reduce(&sum_R);
        sum_mod_l.pack()
    }
}

define_add_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'b> SubAssign<&'b Scalar> for Scalar {
    fn sub_assign(&mut self, _rhs: &'b Scalar) {
        *self = *self - _rhs;
    }
}

define_sub_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a, 'b> Sub<&'b Scalar> for &'a Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn sub(self, rhs: &'b Scalar) -> Scalar {
        // The UnpackedScalar::sub function requires reduced inputs
        // and produces reduced output. However, these inputs may not
        // be reduced -- they might come from Scalar::from_bits.  So
        // we explicitly reduce the inputs.
        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
        let rhs_R = UnpackedScalar::mul_internal(&rhs.unpack(), &constants::R);
        let rhs_mod_l = UnpackedScalar::montgomery_reduce(&rhs_R);

        UnpackedScalar::sub(&self_mod_l, &rhs_mod_l).pack()
    }
}

define_sub_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'a> Neg for &'a Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn neg(self) -> Scalar {
        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
        UnpackedScalar::sub(&UnpackedScalar::zero(), &self_mod_l).pack()
    }
}

impl Neg for Scalar {
    type Output = Scalar;
    fn neg(self) -> Scalar {
        -&self
    }
}

impl ConditionallySelectable for Scalar {
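    /// Select `a` if `choice == 0`, or `b` if `choice == 1`, in constant time.
    ///
    /// A quick sketch of usage (via the `subtle` crate, which this crate
    /// already depends on):
    ///
    /// ```
    /// # extern crate curve25519_dalek_ng;
    /// # extern crate subtle;
    /// # use curve25519_dalek_ng::scalar::Scalar;
    /// # use subtle::{Choice, ConditionallySelectable};
    /// # fn main() {
    /// let a = Scalar::from(5u64);
    /// let b = Scalar::from(7u64);
    ///
    /// assert_eq!(Scalar::conditional_select(&a, &b, Choice::from(0)), a);
    /// assert_eq!(Scalar::conditional_select(&a, &b, Choice::from(1)), b);
    /// # }
    /// ```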
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        let mut bytes = [0u8; 32];
        for i in 0..32 {
            bytes[i] = u8::conditional_select(&a.bytes[i], &b.bytes[i], choice);
        }
        Scalar { bytes }
    }
}

#[cfg(feature = "serde")]
use serde::{self, Serialize, Deserialize, Serializer, Deserializer};
#[cfg(feature = "serde")]
use serde::de::Visitor;

#[cfg(feature = "serde")]
impl Serialize for Scalar {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
        where S: Serializer
    {
        use serde::ser::SerializeTuple;
        let mut tup = serializer.serialize_tuple(32)?;
        for byte in self.as_bytes().iter() {
            tup.serialize_element(byte)?;
        }
        tup.end()
    }
}

#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for Scalar {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
        where D: Deserializer<'de>
    {
        struct ScalarVisitor;

        impl<'de> Visitor<'de> for ScalarVisitor {
            type Value = Scalar;

            fn expecting(&self, formatter: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                formatter.write_str("a sequence of 32 bytes, the canonical encoding of a scalar")
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Scalar, A::Error>
                where A: serde::de::SeqAccess<'de>
            {
                let mut bytes = [0u8; 32];
                for i in 0..32 {
                    bytes[i] = seq.next_element()?
                        .ok_or(serde::de::Error::invalid_length(i, &"expected 32 bytes"))?;
                }
                Scalar::from_canonical_bytes(bytes)
                    .ok_or(serde::de::Error::custom(
                        &"scalar was not canonically encoded"
                    ))
            }
        }

        deserializer.deserialize_tuple(32, ScalarVisitor)
    }
}

impl<T> Product<T> for Scalar
where
    T: Borrow<Scalar>
{
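    /// Compute the product of the scalars yielded by an iterator; an
    /// empty iterator yields `Scalar::one()`.  A quick sketch of usage:
    ///
    /// ```
    /// use curve25519_dalek_ng::scalar::Scalar;
    ///
    /// let scalars = vec![Scalar::from(2u64), Scalar::from(3u64)];
    /// let product: Scalar = scalars.iter().product();
    ///
    /// assert_eq!(product, Scalar::from(6u64));
    /// ```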
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>
    {
        iter.fold(Scalar::one(), |acc, item| acc * item.borrow())
    }
}

impl<T> Sum<T> for Scalar
where
    T: Borrow<Scalar>
{
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>
    {
        iter.fold(Scalar::zero(), |acc, item| acc + item.borrow())
    }
}

impl Default for Scalar {
    fn default() -> Scalar {
        Scalar::zero()
    }
}

impl From<u8> for Scalar {
    fn from(x: u8) -> Scalar {
        let mut s_bytes = [0u8; 32];
        s_bytes[0] = x;
        Scalar{ bytes: s_bytes }
    }
}

impl From<u16> for Scalar {
    fn from(x: u16) -> Scalar {
        use byteorder::{ByteOrder, LittleEndian};
        let mut s_bytes = [0u8; 32];
        LittleEndian::write_u16(&mut s_bytes, x);
        Scalar{ bytes: s_bytes }
    }
}

impl From<u32> for Scalar {
    fn from(x: u32) -> Scalar {
        use byteorder::{ByteOrder, LittleEndian};
        let mut s_bytes = [0u8; 32];
        LittleEndian::write_u32(&mut s_bytes, x);
        Scalar{ bytes: s_bytes }
    }
}

impl From<u64> for Scalar {
    /// Construct a scalar from the given `u64`.
    ///
    /// # Inputs
    ///
    /// A `u64` to convert to a `Scalar`.
    ///
    /// # Returns
    ///
    /// A `Scalar` corresponding to the input `u64`.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek_ng::scalar::Scalar;
    ///
    /// let forty_two = Scalar::from(42u64);
    /// let six = Scalar::from(6u64);
    /// let seven = Scalar::from(7u64);
    ///
    /// assert!(forty_two == six * seven);
    /// ```
    fn from(x: u64) -> Scalar {
        use byteorder::{ByteOrder, LittleEndian};
        let mut s_bytes = [0u8; 32];
        LittleEndian::write_u64(&mut s_bytes, x);
        Scalar{ bytes: s_bytes }
    }
}

impl From<u128> for Scalar {
    fn from(x: u128) -> Scalar {
        use byteorder::{ByteOrder, LittleEndian};
        let mut s_bytes = [0u8; 32];
        LittleEndian::write_u128(&mut s_bytes, x);
        Scalar{ bytes: s_bytes }
    }
}

impl Zeroize for Scalar {
    fn zeroize(&mut self) {
        self.bytes.zeroize();
    }
}

impl Scalar {
    /// Return a `Scalar` chosen uniformly at random using a user-provided RNG.
    ///
    /// # Inputs
    ///
    /// * `rng`: any RNG which implements the `RngCore + CryptoRng` interface.
    ///
    /// # Returns
    ///
    /// A random scalar within ℤ/lℤ.
    ///
    /// # Example
    ///
    /// ```
    /// extern crate rand_core;
    /// # extern crate curve25519_dalek_ng;
    /// #
    /// # fn main() {
    /// use curve25519_dalek_ng::scalar::Scalar;
    ///
    /// use rand_core::OsRng;
    ///
    /// let mut csprng = OsRng;
    /// let a: Scalar = Scalar::random(&mut csprng);
    /// # }
    /// ```
    pub fn random<R: RngCore + CryptoRng>(rng: &mut R) -> Self {
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Scalar::from_bytes_mod_order_wide(&scalar_bytes)
    }

    /// Hash a slice of bytes into a scalar.
    ///
    /// Takes a type parameter `D`, which is any `Digest` producing 64
    /// bytes (512 bits) of output.
    ///
    /// Convenience wrapper around `from_hash`.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate curve25519_dalek_ng;
    /// # use curve25519_dalek_ng::scalar::Scalar;
    /// extern crate sha2;
    ///
    /// use sha2::Sha512;
    ///
    /// # // Need fn main() here in comment so the doctest compiles
    /// # // See https://doc.rust-lang.org/book/documentation.html#documentation-as-tests
    /// # fn main() {
    /// let msg = "To really appreciate architecture, you may even need to commit a murder";
    /// let s = Scalar::hash_from_bytes::<Sha512>(msg.as_bytes());
    /// # }
    /// ```
    pub fn hash_from_bytes<D>(input: &[u8]) -> Scalar
        where D: Digest<OutputSize = U64> + Default
    {
        let mut hash = D::default();
        hash.update(input);
        Scalar::from_hash(hash)
    }

    /// Construct a scalar from an existing `Digest` instance.
    ///
    /// Use this instead of `hash_from_bytes` if it is more convenient
    /// to stream data into the `Digest` than to pass a single byte
    /// slice.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate curve25519_dalek_ng;
    /// # use curve25519_dalek_ng::scalar::Scalar;
    /// extern crate sha2;
    ///
    /// use sha2::Digest;
    /// use sha2::Sha512;
    ///
    /// # fn main() {
    /// let mut h = Sha512::new()
    ///     .chain("To really appreciate architecture, you may even need to commit a murder.")
    ///     .chain("While the programs used for The Manhattan Transcripts are of the most extreme")
    ///     .chain("nature, they also parallel the most common formula plot: the archetype of")
    ///     .chain("murder. Other phantasms were occasionally used to underline the fact that")
    ///     .chain("perhaps all architecture, rather than being about functional standards, is")
    ///     .chain("about love and death.");
    ///
    /// let s = Scalar::from_hash(h);
    ///
    /// println!("{:?}", s.to_bytes());
    /// assert!(s == Scalar::from_bits([ 21,  88, 208, 252,  63, 122, 210, 152,
    ///                                 154,  38,  15,  23,  16, 167,  80, 150,
    ///                                 192, 221,  77, 226,  62,  25, 224, 148,
    ///                                 239,  48, 176,  10, 185,  69, 168,  11, ]));
    /// # }
    /// ```
    pub fn from_hash<D>(hash: D) -> Scalar
        where D: Digest<OutputSize = U64>
    {
        let mut output = [0u8; 64];
        output.copy_from_slice(hash.finalize().as_slice());
        Scalar::from_bytes_mod_order_wide(&output)
    }

    /// Convert this `Scalar` to its underlying sequence of bytes.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek_ng::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::zero();
    ///
    /// assert!(s.to_bytes() == [0u8; 32]);
    /// ```
    pub fn to_bytes(&self) -> [u8; 32] {
        self.bytes
    }

    /// View the little-endian byte encoding of the integer representing this Scalar.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek_ng::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::zero();
    ///
    /// assert!(s.as_bytes() == &[0u8; 32]);
    /// ```
    pub fn as_bytes(&self) -> &[u8; 32] {
        &self.bytes
    }

    /// Construct the scalar \\( 0 \\).
    pub fn zero() -> Self {
        Scalar { bytes: [0u8; 32]}
    }

    /// Construct the scalar \\( 1 \\).
    pub fn one() -> Self {
        Scalar {
            bytes: [
                1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            ],
        }
    }

    /// Given a nonzero `Scalar`, compute its multiplicative inverse.
    ///
    /// # Warning
    ///
    /// `self` **MUST** be nonzero.  If you cannot
    /// *prove* that this is the case, you **SHOULD NOT USE THIS
    /// FUNCTION**.
    ///
    /// # Returns
    ///
    /// The multiplicative inverse of this `Scalar`.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek_ng::scalar::Scalar;
    ///
    /// // x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
    /// let X: Scalar = Scalar::from_bytes_mod_order([
    ///         0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84,
    ///         0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2, 0x7d, 0x52,
    ///         0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44,
    ///         0xd4, 0x49, 0xf4, 0xa8, 0x79, 0xd9, 0xf2, 0x04,
    ///     ]);
    /// // 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
    /// let XINV: Scalar = Scalar::from_bytes_mod_order([
    ///         0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb,
    ///         0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01, 0x63, 0x47,
    ///         0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96,
    ///         0xd5, 0x0b, 0xcd, 0x7a, 0x3f, 0x96, 0x2a, 0x0f,
    ///     ]);
    ///
    /// let inv_X: Scalar = X.invert();
    /// assert!(XINV == inv_X);
    /// let should_be_one: Scalar = &inv_X * &X;
    /// assert!(should_be_one == Scalar::one());
    /// ```
    pub fn invert(&self) -> Scalar {
        self.unpack().invert().pack()
    }

    /// Given a slice of nonzero (possibly secret) `Scalar`s,
    /// compute their inverses in a batch.
    ///
    /// # Return
    ///
    /// Each element of `inputs` is replaced by its inverse.
    ///
    /// The product of all inverses is returned.
    ///
    /// # Warning
    ///
    /// All input `Scalars` **MUST** be nonzero.  If you cannot
    /// *prove* that this is the case, you **SHOULD NOT USE THIS
    /// FUNCTION**.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate curve25519_dalek_ng;
    /// # use curve25519_dalek_ng::scalar::Scalar;
    /// # fn main() {
    /// let mut scalars = [
    ///     Scalar::from(3u64),
    ///     Scalar::from(5u64),
    ///     Scalar::from(7u64),
    ///     Scalar::from(11u64),
    /// ];
    ///
    /// let allinv = Scalar::batch_invert(&mut scalars);
    ///
    /// assert_eq!(allinv, Scalar::from(3*5*7*11u64).invert());
    /// assert_eq!(scalars[0], Scalar::from(3u64).invert());
    /// assert_eq!(scalars[1], Scalar::from(5u64).invert());
    /// assert_eq!(scalars[2], Scalar::from(7u64).invert());
    /// assert_eq!(scalars[3], Scalar::from(11u64).invert());
    /// # }
    /// ```
    #[cfg(feature = "alloc")]
    pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
        // This code is essentially identical to the FieldElement
        // implementation, and is documented there.  Unfortunately,
        // it's not easy to write it generically, since here we want
        // to use `UnpackedScalar`s internally, and `Scalar`s
        // externally, but there's no corresponding distinction for
        // field elements.

        use zeroize::Zeroizing;

        let n = inputs.len();
        let one: UnpackedScalar = Scalar::one().unpack().to_montgomery();

        // Place scratch storage in a Zeroizing wrapper to wipe it when
        // it goes out of scope.
        let scratch_vec = vec![one; n];
        let mut scratch = Zeroizing::new(scratch_vec);

        // Keep an accumulator of all of the previous products
        let mut acc = Scalar::one().unpack().to_montgomery();

        // Pass through the input vector, recording the previous
        // products in the scratch space
        for (input, scratch) in inputs.iter_mut().zip(scratch.iter_mut()) {
            *scratch = acc;

            // Avoid unnecessary Montgomery multiplication in second pass by
            // keeping inputs in Montgomery form
            let tmp = input.unpack().to_montgomery();
            *input = tmp.pack();
            acc = UnpackedScalar::montgomery_mul(&acc, &tmp);
        }

        // acc is nonzero iff all inputs are nonzero
        debug_assert!(acc.pack() != Scalar::zero());

        // Compute the inverse of all products
        acc = acc.montgomery_invert().from_montgomery();

        // We need to return the product of all inverses later
        let ret = acc.pack();

        // Pass through the vector backwards to compute the inverses
        // in place
        for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter().rev()) {
            let tmp = UnpackedScalar::montgomery_mul(&acc, &input.unpack());
            *input = UnpackedScalar::montgomery_mul(&acc, &scratch).pack();
            acc = tmp;
        }

        ret
    }

    /// Get the bits of the scalar.
    pub(crate) fn bits(&self) -> [i8; 256] {
        let mut bits = [0i8; 256];
        for i in 0..256 {
            // As i runs from 0..256, the bottom 3 bits index the bit,
            // while the upper bits index the byte.
            bits[i] = ((self.bytes[i>>3] >> (i&7)) & 1u8) as i8;
        }
        bits
    }

    /// Compute a width-\\(w\\) "Non-Adjacent Form" of this scalar.
    ///
    /// A width-\\(w\\) NAF of a positive integer \\(k\\) is an expression
    /// $$
    /// k = \sum_{i=0}\^m n\_i 2\^i,
    /// $$
    /// where each nonzero
    /// coefficient \\(n\_i\\) is odd and bounded by \\(|n\_i| < 2\^{w-1}\\),
    /// \\(n\_{m-1}\\) is nonzero, and at most one of any \\(w\\) consecutive
    /// coefficients is nonzero.  (Hankerson, Menezes, Vanstone; def 3.32).
    ///
    /// The length of the NAF is at most one more than the length of
    /// the binary representation of \\(k\\).  This is why the
    /// `Scalar` type maintains an invariant that the top bit is
    /// \\(0\\), so that the NAF of a scalar has at most 256 digits.
    ///
    /// Intuitively, this is like a binary expansion, except that we
    /// allow some coefficients to grow in magnitude up to
    /// \\(2\^{w-1}\\) so that the nonzero coefficients are as sparse
    /// as possible.
    ///
    /// When doing scalar multiplication, we can then use a lookup
    /// table of precomputed multiples of a point to add the nonzero
    /// terms \\( k_i P \\).  Using signed digits cuts the table size
    /// in half, and using odd digits cuts the table size in half
    /// again.
    ///
    /// To compute a \\(w\\)-NAF, we use a modification of Algorithm 3.35 of HMV:
    ///
    /// 1. \\( i \gets 0 \\)
    /// 2. While \\( k \ge 1 \\):
    ///     1. If \\(k\\) is odd, \\( n_i \gets k \operatorname{mods} 2^w \\), \\( k \gets k - n_i \\).
    ///     2. If \\(k\\) is even, \\( n_i \gets 0 \\).
    ///     3. \\( k \gets k / 2 \\), \\( i \gets i + 1 \\).
    /// 3. Return \\( n_0, n_1, ... , \\)
    ///
    /// Here \\( \bar x = x \operatorname{mods} 2^w \\) means the
    /// \\( \bar x \\) with \\( \bar x \equiv x \pmod{2^w} \\) and
    /// \\( -2^{w-1} \leq \bar x < 2^{w-1} \\).
    ///
    /// We implement this by scanning across the bits of \\(k\\) from
    /// least-significant bit to most-significant-bit.
    /// Write the bits of \\(k\\) as
    /// $$
    /// k = \sum\_{i=0}\^m k\_i 2^i,
    /// $$
    /// and split the sum as
    /// $$
    /// k = \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
    /// $$
    /// where the first part is \\( k \mod 2^w \\).
    ///
    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w < 2^{w-1} \\), then we emit
    /// \\( n_0 = k \mod 2^w \\).  Instead of computing
    /// \\( k - n_0 \\), we just advance \\(w\\) bits and reindex.
    ///
    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w \ge 2^{w-1} \\), then
    /// \\( n_0 = k \operatorname{mods} 2^w = k \mod 2^w - 2^w \\).
    /// The quantity \\( k - n_0 \\) is
    /// $$
    /// \begin{aligned}
    /// k - n_0 &= \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
    ///          - \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \\\\
    /// &= 2^w + 2^w \sum\_{i=0} k\_{i+w} 2^i
    /// \end{aligned}
    /// $$
    /// so instead of computing the subtraction, we can set a carry
    /// bit, advance \\(w\\) bits, and reindex.
    ///
    /// If \\( k \mod 2^w\\) is even, we emit \\(0\\), advance 1 bit
    /// and reindex.  In fact, by setting all digits to \\(0\\)
    /// initially, we don't need to emit anything.
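    ///
    /// As a small worked example of the algorithm above (not a doctest,
    /// since this method is crate-internal): for \\( k = 13 \\) and
    /// \\( w = 3 \\), the digits are odd and lie in \\( [-4, 4) \\).
    ///
    /// ```text
    /// k = 13 (odd):  n_0 = 13 mods 8 = -3,  k <- (13 - (-3))/2 = 8
    /// k = 8, 4, 2 (even): n_1 = n_2 = n_3 = 0, halving each time
    /// k = 1 (odd):   n_4 = 1,  k <- 0
    ///
    /// digits = (-3, 0, 0, 0, 1):  -3 + 2^4 = 13, and the nonzero
    /// digits are at least w positions apart.
    /// ```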
    pub(crate) fn non_adjacent_form(&self, w: usize) -> [i8; 256] {
        // required by the NAF definition
        debug_assert!( w >= 2 );
        // required so that the NAF digits fit in i8
        debug_assert!( w <= 8 );

        use byteorder::{ByteOrder, LittleEndian};

        let mut naf = [0i8; 256];

        let mut x_u64 = [0u64; 5];
        LittleEndian::read_u64_into(&self.bytes, &mut x_u64[0..4]);

        let width = 1 << w;
        let window_mask = width - 1;

        let mut pos = 0;
        let mut carry = 0;
        while pos < 256 {
            // Construct a buffer of bits of the scalar, starting at bit `pos`
            let u64_idx = pos / 64;
            let bit_idx = pos % 64;
            let bit_buf: u64;
            if bit_idx < 64 - w {
                // This window's bits are contained in a single u64
                bit_buf = x_u64[u64_idx] >> bit_idx;
            } else {
                // Combine the current u64's bits with the bits from the next u64
                bit_buf = (x_u64[u64_idx] >> bit_idx) | (x_u64[1+u64_idx] << (64 - bit_idx));
            }

            // Add the carry into the current window
            let window = carry + (bit_buf & window_mask);

            if window & 1 == 0 {
                // If the window value is even, preserve the carry and continue.
                // Why is the carry preserved?
                // If carry == 0 and window & 1 == 0, then the next carry should be 0
                // If carry == 1 and window & 1 == 0, then bit_buf & 1 == 1 so the next carry should be 1
                pos += 1;
                continue;
            }

            if window < width/2 {
                carry = 0;
                naf[pos] = window as i8;
            } else {
                carry = 1;
                naf[pos] = (window as i8).wrapping_sub(width as i8);
            }

            pos += w;
        }

        naf
    }

    /// Write this scalar in radix 16, with coefficients in \\([-8,8)\\),
    /// i.e., compute \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 16\^1 + \cdots + a\_{63} 16\^{63},
    /// $$
    /// with \\(-8 \leq a\_i < 8\\) for \\(0 \leq i < 63\\) and \\(-8 \leq a\_{63} \leq 8\\).
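    ///
    /// For intuition, a one-byte sketch of the recentering step below
    /// (illustrative only; this method is crate-internal): the byte
    /// `0x1f` has nibbles \\( (15, 1) \\), and recentering yields
    ///
    /// ```text
    /// a_0 = 15 - 16 = -1   (carry 1 into the next nibble)
    /// a_1 = 1 + 1 = 2
    /// so 0x1f = 31 = -1 + 2 * 16.
    /// ```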
    pub(crate) fn to_radix_16(&self) -> [i8; 64] {
        debug_assert!(self[31] <= 127);
        let mut output = [0i8; 64];

        // Step 1: change radix.
        // Convert from radix 256 (bytes) to radix 16 (nibbles)
        #[inline(always)]
        fn bot_half(x: u8) -> u8 { (x >> 0) & 15 }
        #[inline(always)]
        fn top_half(x: u8) -> u8 { (x >> 4) & 15 }

        for i in 0..32 {
            output[2*i  ] = bot_half(self[i]) as i8;
            output[2*i+1] = top_half(self[i]) as i8;
        }
        // Precondition note: since self[31] <= 127, output[63] <= 7

        // Step 2: recenter coefficients from [0,16) to [-8,8)
        for i in 0..63 {
            let carry    = (output[i] + 8) >> 4;
            output[i  ] -= carry << 4;
            output[i+1] += carry;
        }
        // Precondition note: output[63] is not recentered.  It
        // increases by carry <= 1.  Thus output[63] <= 8.

        output
    }

    /// Returns a size hint indicating how many entries of the return
    /// value of `to_radix_2w` are nonzero.
    pub(crate) fn to_radix_2w_size_hint(w: usize) -> usize {
        debug_assert!(w >= 6);
        debug_assert!(w <= 8);

        let digits_count = match w {
            6 | 7 => (256 + w - 1) / w,
            // See comment in to_radix_2w on handling the terminal carry.
            8 => (256 + w - 1) / w + 1,
            _ => panic!("invalid radix parameter"),
        };

        debug_assert!(digits_count <= 43);
        digits_count
    }

    /// Creates a representation of a Scalar in radix 64, 128 or 256 for use with the Pippenger algorithm.
    /// For lower radix, use `to_radix_16`, which is used by the Straus multi-scalar multiplication.
    /// Higher radixes are not supported to save cache space.  Radix 256 is near-optimal even for very
    /// large inputs.
    ///
    /// Radix below 64 or above 256 is prohibited.
    /// This method returns digits in a fixed-sized array; excess digits are zeroes.
    /// The number of nonzero digits is given by `to_radix_2w_size_hint`.
    ///
    /// ## Scalar representation
    ///
    /// Radix \\(2\^w\\), with \\(n = \lceil 256/w \rceil\\) coefficients in \\([-(2\^w)/2,(2\^w)/2)\\),
    /// i.e., the scalar is represented using digits \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 2\^w + \cdots + a\_{n-1} 2\^{w(n-1)},
    /// $$
    /// with \\(-2\^w/2 \leq a\_i < 2\^w/2\\) for \\(0 \leq i < (n-1)\\) and \\(-2\^w/2 \leq a\_{n-1} \leq 2\^w/2\\).
    ///
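    /// As a worked sketch of the recentering below (illustrative only;
    /// this method is crate-internal), take \\( a = 100 \\) and
    /// \\( w = 6 \\), so digits lie in \\( [-32, 32) \\):
    ///
    /// ```text
    /// window 0: 100 mod 64 = 36 >= 32, so a_0 = 36 - 64 = -28, carry 1
    /// window 1: (100 >> 6) + 1 = 2 < 32, so a_1 = 2, carry 0
    /// check: -28 + 2 * 64 = 100.
    /// ```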
    pub(crate) fn to_radix_2w(&self, w: usize) -> [i8; 43] {
        debug_assert!(w >= 6);
        debug_assert!(w <= 8);

        use byteorder::{ByteOrder, LittleEndian};

        // Scalar formatted as four `u64`s with carry bit packed into the highest bit.
        let mut scalar64x4 = [0u64; 4];
        LittleEndian::read_u64_into(&self.bytes, &mut scalar64x4[0..4]);

        let radix: u64 = 1 << w;
        let window_mask: u64 = radix - 1;

        let mut carry = 0u64;
        let mut digits = [0i8; 43];
        let digits_count = (256 + w - 1) / w;
        for i in 0..digits_count {
            // Construct a buffer of bits of the scalar, starting at `bit_offset`.
            let bit_offset = i*w;
            let u64_idx = bit_offset / 64;
            let bit_idx = bit_offset % 64;

            // Read the bits from the scalar
            let bit_buf: u64;
            if bit_idx < 64 - w || u64_idx == 3 {
                // This window's bits are contained in a single u64,
                // or it's the last u64 anyway.
                bit_buf = scalar64x4[u64_idx] >> bit_idx;
            } else {
                // Combine the current u64's bits with the bits from the next u64
                bit_buf = (scalar64x4[u64_idx] >> bit_idx) | (scalar64x4[1+u64_idx] << (64 - bit_idx));
            }

            // Read the actual coefficient value from the window
            let coef = carry + (bit_buf & window_mask); // coef is in [0, 2^w]

            // Recenter coefficients from [0,2^w) to [-2^w/2, 2^w/2)
            carry = (coef + (radix/2) as u64) >> w;
            digits[i] = ((coef as i64) - (carry << w) as i64) as i8;
        }

        // When w < 8, we can fold the final carry onto the last digit d,
        // because d < 2^w/2 so d + carry*2^w = d + 1*2^w < 2^(w+1) < 2^8.
        //
        // When w = 8, we can't fit carry*2^w into an i8.  This should
        // not happen anyways, because the final carry will be 0 for
        // reduced scalars, but the Scalar invariant allows 255-bit scalars.
        // To handle this, we expand the size_hint by 1 when w=8,
        // and accumulate the final carry onto another digit.
        match w {
            8 => digits[digits_count] += carry as i8,
            _ => digits[digits_count-1] += (carry << w) as i8,
        }

        digits
    }

    /// Unpack this `Scalar` to an `UnpackedScalar` for faster arithmetic.
    pub(crate) fn unpack(&self) -> UnpackedScalar {
        UnpackedScalar::from_bytes(&self.bytes)
    }

    /// Reduce this `Scalar` modulo \\(\ell\\).
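    ///
    /// # Example
    ///
    /// A quick check (mirroring the `is_canonical` example below) that
    /// reducing an unreduced scalar produces the canonical representative:
    ///
    /// ```
    /// use curve25519_dalek_ng::scalar::Scalar;
    ///
    /// // 2^255 - 1, well above the group order
    /// let big = Scalar::from_bits([0xff; 32]);
    /// assert!(!big.is_canonical());
    /// assert!(big.reduce().is_canonical());
    /// ```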
    #[allow(non_snake_case)]
    pub fn reduce(&self) -> Scalar {
        let x = self.unpack();
        let xR = UnpackedScalar::mul_internal(&x, &constants::R);
        let x_mod_l = UnpackedScalar::montgomery_reduce(&xR);
        x_mod_l.pack()
    }

    /// Check whether this `Scalar` is the canonical representative mod \\(\ell\\).
    ///
    /// This is intended for uses like input validation, where variable-time code is acceptable.
    ///
    /// ```
    /// # extern crate curve25519_dalek_ng;
    /// # extern crate subtle;
    /// # use curve25519_dalek_ng::scalar::Scalar;
    /// # use subtle::ConditionallySelectable;
    /// # fn main() {
    /// // 2^255 - 1, since `from_bits` clears the high bit
    /// let _2_255_minus_1 = Scalar::from_bits([0xff;32]);
    /// assert!(!_2_255_minus_1.is_canonical());
    ///
    /// let reduced = _2_255_minus_1.reduce();
    /// assert!(reduced.is_canonical());
    /// # }
    /// ```
    pub fn is_canonical(&self) -> bool {
        *self == self.reduce()
    }
}

impl UnpackedScalar {
    /// Pack the limbs of this `UnpackedScalar` into a `Scalar`.
    fn pack(&self) -> Scalar {
        Scalar{ bytes: self.to_bytes() }
    }

    /// Inverts an UnpackedScalar in Montgomery form.
    pub fn montgomery_invert(&self) -> UnpackedScalar {
        // Uses the addition chain from
        // https://briansmith.org/ecc-inversion-addition-chains-01#curve25519_scalar_inversion
        let    _1 = self;
        let   _10 = _1.montgomery_square();
        let  _100 = _10.montgomery_square();
        let   _11 = UnpackedScalar::montgomery_mul(&_10,     &_1);
        let  _101 = UnpackedScalar::montgomery_mul(&_10,    &_11);
        let  _111 = UnpackedScalar::montgomery_mul(&_10,   &_101);
        let _1001 = UnpackedScalar::montgomery_mul(&_10,   &_111);
        let _1011 = UnpackedScalar::montgomery_mul(&_10,  &_1001);
        let _1111 = UnpackedScalar::montgomery_mul(&_100, &_1011);

        // _10000
        let mut y = UnpackedScalar::montgomery_mul(&_1111, &_1);

        #[inline]
        fn square_multiply(y: &mut UnpackedScalar, squarings: usize, x: &UnpackedScalar) {
            for _ in 0..squarings {
                *y = y.montgomery_square();
            }
            *y = UnpackedScalar::montgomery_mul(y, x);
        }

        square_multiply(&mut y, 123 + 3, &_101);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,       4, &_1001);
        square_multiply(&mut y,       2, &_11);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   1 + 3, &_101);
        square_multiply(&mut y,   3 + 3, &_101);
        square_multiply(&mut y,       3, &_111);
        square_multiply(&mut y,   1 + 4, &_1111);
        square_multiply(&mut y,   2 + 3, &_111);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1011);
        square_multiply(&mut y,   2 + 4, &_1011);
        square_multiply(&mut y,   6 + 4, &_1001);
        square_multiply(&mut y,   2 + 2, &_11);
        square_multiply(&mut y,   3 + 2, &_11);
        square_multiply(&mut y,   3 + 2, &_11);
        square_multiply(&mut y,   1 + 4, &_1001);
        square_multiply(&mut y,   1 + 3, &_111);
        square_multiply(&mut y,   2 + 4, &_1111);
        square_multiply(&mut y,   1 + 4, &_1011);
        square_multiply(&mut y,       3, &_101);
        square_multiply(&mut y,   2 + 4, &_1111);
        square_multiply(&mut y,       3, &_101);
        square_multiply(&mut y,   1 + 2, &_11);

        y
    }

    /// Inverts an UnpackedScalar not in Montgomery form.
    pub fn invert(&self) -> UnpackedScalar {
        self.to_montgomery().montgomery_invert().from_montgomery()
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use constants;

    /// x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
    pub static X: Scalar = Scalar{
        bytes: [
            0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84,
            0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2, 0x7d, 0x52,
            0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44,
            0xd4, 0x49, 0xf4, 0xa8, 0x79, 0xd9, 0xf2, 0x04,
        ],
    };
    /// 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
    pub static XINV: Scalar = Scalar{
        bytes: [
            0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb,
            0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01, 0x63, 0x47,
            0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96,
            0xd5, 0x0b, 0xcd, 0x7a, 0x3f, 0x96, 0x2a, 0x0f,
        ],
    };
    /// y = 2592331292931086675770238855846338635550719849568364935475441891787804997264
    pub static Y: Scalar = Scalar{
        bytes: [
            0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4,
            0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83, 0x86, 0xc3,
            0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d,
            0xe8, 0xef, 0x7a, 0xc3, 0x1f, 0x35, 0xbb, 0x05,
        ],
    };

    /// x*y = 5690045403673944803228348699031245560686958845067437804563560795922180092780
    static X_TIMES_Y: Scalar = Scalar{
        bytes: [
            0x6c, 0x33, 0x74, 0xa1, 0x89, 0x4f, 0x62, 0x21,
            0x0a, 0xaa, 0x2f, 0xe1, 0x86, 0xa6, 0xf9, 0x2c,
            0xe0, 0xaa, 0x75, 0xc2, 0x77, 0x95, 0x81, 0xc2,
            0x95, 0xfc, 0x08, 0x17, 0x9a, 0x73, 0x94, 0x0c,
        ],
    };

    /// sage: l = 2^252 + 27742317777372353535851937790883648493
    /// sage: big = 2^256 - 1
    /// sage: repr((big % l).digits(256))
    static CANONICAL_2_256_MINUS_1: Scalar = Scalar{
        bytes: [
              28, 149, 152, 141, 116,  49, 236, 214,
             112, 207, 125, 115, 244,  91, 239, 198,
             254, 255, 255, 255, 255, 255, 255, 255,
             255, 255, 255, 255, 255, 255, 255,  15,
        ],
    };

    static A_SCALAR: Scalar = Scalar{
        bytes: [
            0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d,
            0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8, 0x26, 0x4d,
            0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1,
            0x58, 0x9e, 0x7b, 0x7f, 0x23, 0x76, 0xef, 0x09,
        ],
    };

    static A_NAF: [i8; 256] =
        [0,13,0,0,0,0,0,0,0,7,0,0,0,0,0,0,-9,0,0,0,0,-11,0,0,0,0,3,0,0,0,0,1,
         0,0,0,0,9,0,0,0,0,-5,0,0,0,0,0,0,3,0,0,0,0,11,0,0,0,0,11,0,0,0,0,0,
         -9,0,0,0,0,0,-3,0,0,0,0,9,0,0,0,0,0,1,0,0,0,0,0,0,-1,0,0,0,0,0,9,0,
         0,0,0,-15,0,0,0,0,-7,0,0,0,0,-9,0,0,0,0,0,5,0,0,0,0,13,0,0,0,0,0,-3,0,
         0,0,0,-11,0,0,0,0,-7,0,0,0,0,-13,0,0,0,0,11,0,0,0,0,-9,0,0,0,0,0,1,0,0,
         0,0,0,-15,0,0,0,0,1,0,0,0,0,7,0,0,0,0,0,0,0,0,5,0,0,0,0,0,13,0,0,0,
         0,0,0,11,0,0,0,0,0,15,0,0,0,0,0,-9,0,0,0,0,0,0,0,-1,0,0,0,0,0,0,0,7,
         0,0,0,0,0,-15,0,0,0,0,0,15,0,0,0,0,15,0,0,0,0,15,0,0,0,0,0,1,0,0,0,0];

    static LARGEST_ED25519_S: Scalar = Scalar {
        bytes: [
            0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
        ],
    };

    static CANONICAL_LARGEST_ED25519_S_PLUS_ONE: Scalar = Scalar {
        bytes: [
            0x7e, 0x34, 0x47, 0x75, 0x47, 0x4a, 0x7f, 0x97,
            0x23, 0xb6, 0x3a, 0x8b, 0xe9, 0x2a, 0xe7, 0x6d,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
        ],
    };

    static CANONICAL_LARGEST_ED25519_S_MINUS_ONE: Scalar = Scalar {
        bytes: [
            0x7c, 0x34, 0x47, 0x75, 0x47, 0x4a, 0x7f, 0x97,
            0x23, 0xb6, 0x3a, 0x8b, 0xe9, 0x2a, 0xe7, 0x6d,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
        ],
    };

    #[test]
    fn fuzzer_testcase_reduction() {
        // LE bytes of 24519928653854221733733552434404946937899825954937634815
        let a_bytes = [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0];
        // LE bytes of 4975441334397345751130612518500927154628011511324180036903450236863266160640
        let b_bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 210, 210, 210, 255, 255, 255, 255, 10];
        // LE bytes of 6432735165214683820902750800207468552549813371247423777071615116673864412038
        let c_bytes = [134, 171, 119, 216, 180, 128, 178, 62, 171, 132, 32, 62, 34, 119, 104, 193, 47, 215, 181, 250, 14, 207, 172, 93, 75, 207, 211, 103, 144, 204, 56, 14];

        let a = Scalar::from_bytes_mod_order(a_bytes);
        let b = Scalar::from_bytes_mod_order(b_bytes);
        let c = Scalar::from_bytes_mod_order(c_bytes);

        let mut tmp = [0u8; 64];

        // also_a = (a mod l)
        tmp[0..32].copy_from_slice(&a_bytes[..]);
        let also_a = Scalar::from_bytes_mod_order_wide(&tmp);

        // also_b = (b mod l)
        tmp[0..32].copy_from_slice(&b_bytes[..]);
        let also_b = Scalar::from_bytes_mod_order_wide(&tmp);

        let expected_c = &a * &b;
        let also_expected_c = &also_a * &also_b;

        assert_eq!(c, expected_c);
        assert_eq!(c, also_expected_c);
    }

    #[test]
    fn non_adjacent_form_test_vector() {
        let naf = A_SCALAR.non_adjacent_form(5);
        for i in 0..256 {
            assert_eq!(naf[i], A_NAF[i]);
        }
    }

    fn non_adjacent_form_iter(w: usize, x: &Scalar) {
        let naf = x.non_adjacent_form(w);

        // Reconstruct the scalar from the computed NAF
        let mut y = Scalar::zero();
        for i in (0..256).rev() {
            y += y;
            let digit = if naf[i] < 0 {
                -Scalar::from((-naf[i]) as u64)
            } else {
                Scalar::from(naf[i] as u64)
            };
            y += digit;
        }

        assert_eq!(*x, y);
    }

    #[test]
    fn non_adjacent_form_random() {
        let mut rng = rand::thread_rng();
        for _ in 0..1_000 {
            let x = Scalar::random(&mut rng);
            for w in &[5, 6, 7, 8] {
                non_adjacent_form_iter(*w, &x);
            }
        }
    }

    #[test]
    fn from_u64() {
        let val: u64 = 0xdeadbeefdeadbeef;
        let s = Scalar::from(val);
        assert_eq!(s[7], 0xde);
        assert_eq!(s[6], 0xad);
        assert_eq!(s[5], 0xbe);
        assert_eq!(s[4], 0xef);
        assert_eq!(s[3], 0xde);
        assert_eq!(s[2], 0xad);
        assert_eq!(s[1], 0xbe);
        assert_eq!(s[0], 0xef);
    }

    #[test]
    fn scalar_mul_by_one() {
        let test_scalar = &X * &Scalar::one();
        for i in 0..32 {
            assert!(test_scalar[i] == X[i]);
        }
    }

    #[test]
    fn add_reduces() {
        // Check that the addition works
        assert_eq!(
            (LARGEST_ED25519_S + Scalar::one()).reduce(),
            CANONICAL_LARGEST_ED25519_S_PLUS_ONE
        );
        // Check that the addition reduces
        assert_eq!(
            LARGEST_ED25519_S + Scalar::one(),
            CANONICAL_LARGEST_ED25519_S_PLUS_ONE
        );
    }

    #[test]
    fn sub_reduces() {
        // Check that the subtraction works
        assert_eq!(
            (LARGEST_ED25519_S - Scalar::one()).reduce(),
            CANONICAL_LARGEST_ED25519_S_MINUS_ONE
        );
        // Check that the subtraction reduces
        assert_eq!(
            LARGEST_ED25519_S - Scalar::one(),
            CANONICAL_LARGEST_ED25519_S_MINUS_ONE
        );
    }
1404 
1405     #[test]
quarkslab_scalar_overflow_does_not_occur()1406     fn quarkslab_scalar_overflow_does_not_occur() {
1407         // Check that manually-constructing large Scalars with
1408         // from_bits cannot produce incorrect results.
1409         //
1410         // The from_bits function is required to implement X/Ed25519,
1411         // while all other methods of constructing a Scalar produce
1412         // reduced Scalars.  However, this "invariant loophole" allows
1413         // constructing large scalars which are not reduced mod l.
1414         //
1415         // This issue was discovered independently by both Jack
1416         // "str4d" Grigg (issue #238), who noted that reduction was
1417         // not performed on addition, and Laurent Grémy & Nicolas
1418         // Surbayrole of Quarkslab, who noted that it was possible to
1419         // cause an overflow and compute incorrect results.
1420         //
1421         // This test is adapted from the one suggested by Quarkslab.
1422 
1423         let large_bytes = [
1424             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1425             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1426             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1427             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
1428         ];
1429 
1430         let a = Scalar::from_bytes_mod_order(large_bytes);
1431         let b = Scalar::from_bits(large_bytes);
1432 
1433         assert_eq!(a, b.reduce());
1434 
1435         let a_3 = a + a + a;
1436         let b_3 = b + b + b;
1437 
1438         assert_eq!(a_3, b_3);
1439 
1440         let neg_a = -a;
1441         let neg_b = -b;
1442 
1443         assert_eq!(neg_a, neg_b);
1444 
1445         let minus_a_3 = Scalar::zero() - a - a - a;
1446         let minus_b_3 = Scalar::zero() - b - b - b;
1447 
1448         assert_eq!(minus_a_3, minus_b_3);
1449         assert_eq!(minus_a_3, -a_3);
1450         assert_eq!(minus_b_3, -b_3);
1451     }
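
    // A supplementary sketch: from_bits is expected to mask the high bit of
    // the top byte (so from_bits([0xff; 32]) loads 2^255 - 1, the "largest
    // unreduced scalar" used in the Pippenger radix test below).
    #[test]
    fn from_bits_masks_high_bit() {
        let s = Scalar::from_bits([0xff; 32]);
        assert_eq!(s.as_bytes()[31], 0x7f);
        // All lower bytes should pass through unchanged.
        for i in 0..31 {
            assert_eq!(s.as_bytes()[i], 0xff);
        }
    }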

    #[test]
    fn impl_add() {
        let two = Scalar::from(2u64);
        let one = Scalar::one();
        let should_be_two = &one + &one;
        assert_eq!(should_be_two, two);
    }

    #[allow(non_snake_case)]
    #[test]
    fn impl_mul() {
        let should_be_X_times_Y = &X * &Y;
        assert_eq!(should_be_X_times_Y, X_TIMES_Y);
    }

    #[allow(non_snake_case)]
    #[test]
    fn impl_product() {
        // Test that product works for non-empty iterators
        let X_Y_vector = vec![X, Y];
        let should_be_X_times_Y: Scalar = X_Y_vector.iter().product();
        assert_eq!(should_be_X_times_Y, X_TIMES_Y);

        // Test that product works for the empty iterator
        let one = Scalar::one();
        let empty_vector = vec![];
        let should_be_one: Scalar = empty_vector.iter().product();
        assert_eq!(should_be_one, one);

        // Test that product works for iterators where Item = Scalar
        let xs = [Scalar::from(2u64); 10];
        let ys = [Scalar::from(3u64); 10];
        // now zs is an iterator with Item = Scalar
        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x * y);

        let x_prod: Scalar = xs.iter().product();
        let y_prod: Scalar = ys.iter().product();
        let z_prod: Scalar = zs.product();

        assert_eq!(x_prod, Scalar::from(1024u64));
        assert_eq!(y_prod, Scalar::from(59049u64));
        assert_eq!(z_prod, Scalar::from(60466176u64));
        assert_eq!(x_prod * y_prod, z_prod);
    }

    #[test]
    fn impl_sum() {
        // Test that sum works for non-empty iterators
        let two = Scalar::from(2u64);
        let one_vector = vec![Scalar::one(), Scalar::one()];
        let should_be_two: Scalar = one_vector.iter().sum();
        assert_eq!(should_be_two, two);

        // Test that sum works for the empty iterator
        let zero = Scalar::zero();
        let empty_vector = vec![];
        let should_be_zero: Scalar = empty_vector.iter().sum();
        assert_eq!(should_be_zero, zero);

        // Test that sum works for iterators of owned Scalars (Item = Scalar)
        let xs = [Scalar::from(1u64); 10];
        let ys = [Scalar::from(2u64); 10];
        // now zs is an iterator with Item = Scalar
        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x + y);

        let x_sum: Scalar = xs.iter().sum();
        let y_sum: Scalar = ys.iter().sum();
        let z_sum: Scalar = zs.sum();

        assert_eq!(x_sum, Scalar::from(10u64));
        assert_eq!(y_sum, Scalar::from(20u64));
        assert_eq!(z_sum, Scalar::from(30u64));
        assert_eq!(x_sum + y_sum, z_sum);
    }

    #[test]
    fn square() {
        let expected = &X * &X;
        let actual = X.unpack().square().pack();
        for i in 0..32 {
            assert!(expected[i] == actual[i]);
        }
    }

    #[test]
    fn reduce() {
        let biggest = Scalar::from_bytes_mod_order([0xff; 32]);
        assert_eq!(biggest, CANONICAL_2_256_MINUS_1);
    }
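
    // A small supplementary sketch: from_bytes_mod_order always produces a
    // canonical Scalar, so a second reduction should be a no-op.
    #[test]
    fn reduce_is_idempotent() {
        let biggest = Scalar::from_bytes_mod_order([0xff; 32]);
        assert_eq!(biggest.reduce(), biggest);
    }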

    #[test]
    fn from_bytes_mod_order_wide() {
        let mut bignum = [0u8; 64];
        // set bignum = x + 2^256 * x
        for i in 0..32 {
            bignum[   i] = X[i];
            bignum[32+i] = X[i];
        }
        // 3958878930004874126169954872055634648693766179881526445624823978500314864344
        // = x + 2^256 * x (mod l)
        let reduced = Scalar {
            bytes: [
                216, 154, 179, 139, 210, 121,   2,  71,
                 69,  99, 158, 216,  23, 173,  63, 100,
                204,   0,  91,  50, 219, 153,  57, 249,
                 28,  82,  31, 197, 100, 165, 192,   8,
            ],
        };
        let test_red = Scalar::from_bytes_mod_order_wide(&bignum);
        for i in 0..32 {
            assert!(test_red[i] == reduced[i]);
        }
    }

    #[allow(non_snake_case)]
    #[test]
    fn invert() {
        let inv_X = X.invert();
        assert_eq!(inv_X, XINV);
        let should_be_one = &inv_X * &X;
        assert_eq!(should_be_one, Scalar::one());
    }
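
    // A supplementary sketch: inversion should round-trip for arbitrary
    // nonzero scalars, not just the X test vector above.
    #[test]
    fn invert_random_sketch() {
        let mut rng = rand::thread_rng();
        for _ in 0..10 {
            let x = Scalar::random(&mut rng);
            // Scalar::random is overwhelmingly unlikely to return zero,
            // but skip it to keep the nonzero precondition explicit.
            if x != Scalar::zero() {
                assert_eq!(x * x.invert(), Scalar::one());
            }
        }
    }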

    // Negating a scalar twice should result in the original scalar.
    #[allow(non_snake_case)]
    #[test]
    fn neg_twice_is_identity() {
        let negative_X = -&X;
        let should_be_X = -&negative_X;

        assert_eq!(should_be_X, X);
    }

    #[test]
    fn to_bytes_from_bytes_roundtrips() {
        let unpacked = X.unpack();
        let bytes = unpacked.to_bytes();
        let should_be_unpacked = UnpackedScalar::from_bytes(&bytes);

        assert_eq!(should_be_unpacked.0, unpacked.0);
    }

    #[test]
    fn montgomery_reduce_matches_from_bytes_mod_order_wide() {
        let mut bignum = [0u8; 64];

        // set bignum = x + 2^256 * x
        for i in 0..32 {
            bignum[   i] = X[i];
            bignum[32+i] = X[i];
        }
        // x + 2^256 * x (mod l)
        //   = 3958878930004874126169954872055634648693766179881526445624823978500314864344
        let expected = Scalar {
            bytes: [
                216, 154, 179, 139, 210, 121,   2,  71,
                 69,  99, 158, 216,  23, 173,  63, 100,
                204,   0,  91,  50, 219, 153,  57, 249,
                 28,  82,  31, 197, 100, 165, 192,   8,
            ],
        };
        let reduced = Scalar::from_bytes_mod_order_wide(&bignum);

        // The reduced scalar should match the expected value
        assert_eq!(reduced.bytes, expected.bytes);

        // Montgomery reduction divides by R, so first multiply by R
        // to compute (x + 2^256 * x) * R ...
        let interim = UnpackedScalar::mul_internal(&UnpackedScalar::from_bytes_wide(&bignum),
                                                   &constants::R);
        // ... then ((x + 2^256 * x) * R) / R  (mod l) recovers the plain value.
        let montgomery_reduced = UnpackedScalar::montgomery_reduce(&interim);

        // The Montgomery-reduced scalar should match the reduced one, as
        // well as the expected value
        assert_eq!(montgomery_reduced.0, reduced.unpack().0);
        assert_eq!(montgomery_reduced.0, expected.unpack().0);
    }

    #[test]
    fn canonical_decoding() {
        // canonical encoding of 1667457891
        let canonical_bytes = [
            99, 99, 99, 99, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
        ];

        // encoding of
        //   7265385991361016183439748078976496179028704920197054998554201349516117938192
        // = 28380414028753969466561515933501938171588560817147392552250411230663687203 (mod l)
        // non-canonical because it is not reduced mod l
        let non_canonical_bytes_because_unreduced = [16; 32];

        // encoding with the high bit set, to check that the parser isn't
        // pre-masking the high bit
        let non_canonical_bytes_because_highbit = [
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 128,
        ];

        assert!(Scalar::from_canonical_bytes(canonical_bytes).is_some());
        assert!(Scalar::from_canonical_bytes(non_canonical_bytes_because_unreduced).is_none());
        assert!(Scalar::from_canonical_bytes(non_canonical_bytes_because_highbit).is_none());
    }
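
    // A supplementary sketch: the canonical encoding of any reduced Scalar
    // should round-trip through from_canonical_bytes.
    #[test]
    fn from_canonical_bytes_roundtrip() {
        let mut rng = rand::thread_rng();
        for _ in 0..100 {
            let x = Scalar::random(&mut rng);
            assert_eq!(Scalar::from_canonical_bytes(x.to_bytes()), Some(x));
        }
    }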

    #[test]
    #[cfg(feature = "serde")]
    fn serde_bincode_scalar_roundtrip() {
        use bincode;
        let encoded = bincode::serialize(&X).unwrap();
        let parsed: Scalar = bincode::deserialize(&encoded).unwrap();
        assert_eq!(parsed, X);

        // Check that the encoding is 32 bytes exactly
        assert_eq!(encoded.len(), 32);

        // Check that the encoding itself matches the usual one
        assert_eq!(
            X,
            bincode::deserialize(X.as_bytes()).unwrap(),
        );
    }

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn batch_invert_with_a_zero_input_panics() {
        let mut xs = vec![Scalar::one(); 16];
        xs[3] = Scalar::zero();
        // This should panic in debug mode.
        Scalar::batch_invert(&mut xs);
    }

    #[test]
    fn batch_invert_empty() {
        assert_eq!(Scalar::one(), Scalar::batch_invert(&mut []));
    }

    // batch_invert computes all n inverses with a single inversion plus O(n)
    // multiplications (Montgomery's trick), and returns the inverse of the
    // product of the inputs; both properties are checked here.
    #[test]
    fn batch_invert_consistency() {
        let mut x = Scalar::from(1u64);
        let mut v1: Vec<_> = (0..16).map(|_| { let tmp = x; x = x + x; tmp }).collect();
        let v2 = v1.clone();

        let expected: Scalar = v1.iter().product();
        let expected = expected.invert();
        let ret = Scalar::batch_invert(&mut v1);
        assert_eq!(ret, expected);

        for (a, b) in v1.iter().zip(v2.iter()) {
            assert_eq!(a * b, Scalar::one());
        }
    }

    // Helper for the test below: reconstruct a scalar from its signed
    // radix-2^w digit representation by evaluating the digit polynomial
    // at 2^w, then compare against the input reduced mod l.
    fn test_pippenger_radix_iter(scalar: Scalar, w: usize) {
        let digits_count = Scalar::to_radix_2w_size_hint(w);
        let digits = scalar.to_radix_2w(w);

        let radix = Scalar::from((1 << w) as u64);
        let mut term = Scalar::one();
        let mut recovered_scalar = Scalar::zero();
        for digit in &digits[0..digits_count] {
            let digit = *digit;
            if digit != 0 {
                let sdigit = if digit < 0 {
                    -Scalar::from((-(digit as i64)) as u64)
                } else {
                    Scalar::from(digit as u64)
                };
                recovered_scalar += term * sdigit;
            }
            term *= radix;
        }
        // When the input is unreduced, we may only recover the scalar mod l.
        assert_eq!(recovered_scalar, scalar.reduce());
    }

    #[test]
    fn test_pippenger_radix() {
        use core::iter;
        // For each valid radix, check that a collection of structured
        // scalars can be recovered exactly from their radix-2^w
        // representations.
        let cases = (2..100)
            .map(|s| Scalar::from(s as u64).invert())
            // The largest unreduced scalar, s = 2^255 - 1
            .chain(iter::once(Scalar::from_bits([0xff; 32])));

        for scalar in cases {
            test_pippenger_radix_iter(scalar, 6);
            test_pippenger_radix_iter(scalar, 7);
            test_pippenger_radix_iter(scalar, 8);
        }
    }
}