// -*- mode: rust; -*-
//
// This file is part of curve25519-dalek.
// Copyright (c) 2016-2021 isis lovecruft
// Copyright (c) 2016-2019 Henry de Valence
// Portions Copyright 2017 Brian Smith
// See LICENSE for licensing information.
//
// Authors:
// - Isis Agora Lovecruft <isis@patternsinthevoid.net>
// - Henry de Valence <hdevalence@hdevalence.ca>
// - Brian Smith <brian@briansmith.org>

//! Arithmetic on scalars (integers mod the group order).
//!
//! Both the Ristretto group and the Ed25519 basepoint have prime order
//! \\( \ell = 2\^{252} + 27742317777372353535851937790883648493 \\).
//!
//! This code is intended to be useful with both the Ristretto group
//! (where everything is done modulo \\( \ell \\)), and the X/Ed25519
//! setting, which mandates specific bit-twiddles that are not
//! well-defined modulo \\( \ell \\).
//!
//! All arithmetic on `Scalars` is done modulo \\( \ell \\).
//!
//! # Constructing a scalar
//!
//! To create a [`Scalar`](struct.Scalar.html) from a supposedly canonical encoding, use
//! [`Scalar::from_canonical_bytes`](struct.Scalar.html#method.from_canonical_bytes).
//!
//! This function does input validation, ensuring that the input bytes
//! are the canonical encoding of a `Scalar`.
//! If they are, we'll get
//! `Some(Scalar)` in return:
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let one_as_bytes: [u8; 32] = Scalar::one().to_bytes();
//! let a: Option<Scalar> = Scalar::from_canonical_bytes(one_as_bytes);
//!
//! assert!(a.is_some());
//! ```
//!
//! However, if we give it bytes representing a scalar larger than \\( \ell \\)
//! (in this case, \\( \ell + 2 \\)), we'll get `None` back:
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Option<Scalar> = Scalar::from_canonical_bytes(l_plus_two_bytes);
//!
//! assert!(a.is_none());
//! ```
//!
//! Another way to create a `Scalar` is by reducing a \\(256\\)-bit integer mod
//! \\( \ell \\), for which one may use the
//! [`Scalar::from_bytes_mod_order`](struct.Scalar.html#method.from_bytes_mod_order)
//! method.  In the case of the second example above, this would reduce the
//! resultant scalar \\( \mod \ell \\), producing \\( 2 \\):
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Scalar = Scalar::from_bytes_mod_order(l_plus_two_bytes);
//!
//! let two: Scalar = Scalar::one() + Scalar::one();
//!
//! assert!(a == two);
//! ```
//!
//! There is also a constructor that reduces a \\(512\\)-bit integer,
//! [`Scalar::from_bytes_mod_order_wide`](struct.Scalar.html#method.from_bytes_mod_order_wide).
//!
//! To construct a `Scalar` as the hash of some input data, use
//! [`Scalar::hash_from_bytes`](struct.Scalar.html#method.hash_from_bytes),
//! which takes a buffer, or
//! [`Scalar::from_hash`](struct.Scalar.html#method.from_hash),
//! which provides an incremental (init-update-finalize) API.
//!
//! ```
//! # extern crate curve25519_dalek;
//! # extern crate sha2;
//! #
//! # fn main() {
//! use sha2::{Digest, Sha512};
//! use curve25519_dalek::scalar::Scalar;
//!
//! // Hashing a single byte slice
//! let a = Scalar::hash_from_bytes::<Sha512>(b"Abolish ICE");
//!
//! // Streaming data into a hash object
//! let mut hasher = Sha512::default();
//! hasher.update(b"Abolish ");
//! hasher.update(b"ICE");
//! let a2 = Scalar::from_hash(hasher);
//!
//! assert_eq!(a, a2);
//! # }
//! ```
//!
//! Finally, to create a `Scalar` with a specific bit-pattern
//! (e.g., for compatibility with X/Ed25519
//! ["clamping"](https://github.com/isislovecruft/ed25519-dalek/blob/f790bd2ce/src/ed25519.rs#L349)),
//! use [`Scalar::from_bits`](struct.Scalar.html#method.from_bits). This
//! constructs a scalar with exactly the bit pattern given, without any
//! assurances as to reduction modulo the group order:
//!
//! ```
//! use curve25519_dalek::scalar::Scalar;
//!
//! let l_plus_two_bytes: [u8; 32] = [
//!    0xef, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
//!    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
//!    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
//! ];
//! let a: Scalar = Scalar::from_bits(l_plus_two_bytes);
//!
//! let two: Scalar = Scalar::one() + Scalar::one();
//!
//! assert!(a != two);              // the scalar is not reduced (mod l)…
//! assert!(! a.is_canonical());    // …and therefore is not canonical.
//! assert!(a.reduce() == two);     // if we were to reduce it manually, it would be.
//! ```
//!
//! The resulting `Scalar` has exactly the specified bit pattern,
//! **except for the highest bit, which will be set to 0**.

use core::borrow::Borrow;
use core::cmp::{Eq, PartialEq};
use core::fmt::Debug;
use core::iter::{Product, Sum};
use core::ops::Index;
use core::ops::Neg;
use core::ops::{Add, AddAssign};
use core::ops::{Mul, MulAssign};
use core::ops::{Sub, SubAssign};

#[allow(unused_imports)]
use prelude::*;

use rand_core::{CryptoRng, RngCore};

use digest::generic_array::typenum::U64;
use digest::Digest;

use subtle::Choice;
use subtle::ConditionallySelectable;
use subtle::ConstantTimeEq;

use zeroize::Zeroize;

use backend;
use constants;

/// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
///
/// This is a type alias for one of the scalar types in the `backend`
/// module.
#[cfg(feature = "fiat_u32_backend")]
type UnpackedScalar = backend::serial::fiat_u32::scalar::Scalar29;
#[cfg(feature = "fiat_u64_backend")]
type UnpackedScalar = backend::serial::fiat_u64::scalar::Scalar52;

/// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
///
/// This is a type alias for one of the scalar types in the `backend`
/// module.
#[cfg(feature = "u64_backend")]
type UnpackedScalar = backend::serial::u64::scalar::Scalar52;

/// An `UnpackedScalar` represents an element of the field GF(l), optimized for speed.
///
/// This is a type alias for one of the scalar types in the `backend`
/// module.
#[cfg(feature = "u32_backend")]
type UnpackedScalar = backend::serial::u32::scalar::Scalar29;


/// The `Scalar` struct holds an integer \\(s < 2\^{255} \\) which
/// represents an element of \\(\mathbb Z / \ell\\).
#[derive(Copy, Clone, Hash)]
pub struct Scalar {
    /// `bytes` is a little-endian byte encoding of an integer representing a scalar modulo the
    /// group order.
    ///
    /// # Invariant
    ///
    /// The integer representing this scalar must be bounded above by \\(2\^{255}\\), or
    /// equivalently the high bit of `bytes[31]` must be zero.
    ///
    /// This ensures that there is room for a carry bit when computing a NAF representation.
    //
    // XXX This is pub(crate) so we can write literal constants.  If const fns were stable, we could
    //     make the Scalar constructors const fns and use those instead.
    pub(crate) bytes: [u8; 32],
}

impl Scalar {
    /// Construct a `Scalar` by reducing a 256-bit little-endian integer
    /// modulo the group order \\( \ell \\).
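    ///
    /// # Example
    ///
    /// Reducing the little-endian encoding of \\( \ell \\) itself (the byte
    /// string below is derived from the \\( \ell + 2 \\) bytes in the module
    /// docs) gives zero:
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let l_bytes: [u8; 32] = [
    ///    0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
    ///    0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
    ///    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    ///    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
    /// ];
    ///
    /// assert_eq!(Scalar::from_bytes_mod_order(l_bytes), Scalar::zero());
    /// ```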
    pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
        // Temporarily allow s_unreduced.bytes > 2^255 ...
        let s_unreduced = Scalar{bytes};

        // Then reduce mod the group order and return the reduced representative.
        let s = s_unreduced.reduce();
        debug_assert_eq!(0u8, s[31] >> 7);

        s
    }

    /// Construct a `Scalar` by reducing a 512-bit little-endian integer
    /// modulo the group order \\( \ell \\).
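    ///
    /// # Example
    ///
    /// A 512-bit input that is already below \\( \ell \\) is unchanged by
    /// the reduction:
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let mut wide = [0u8; 64];
    /// wide[0] = 42;
    ///
    /// assert_eq!(Scalar::from_bytes_mod_order_wide(&wide), Scalar::from(42u64));
    /// ```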
    pub fn from_bytes_mod_order_wide(input: &[u8; 64]) -> Scalar {
        UnpackedScalar::from_bytes_wide(input).pack()
    }

    /// Attempt to construct a `Scalar` from a canonical byte representation.
    ///
    /// # Return
    ///
    /// - `Some(s)`, where `s` is the `Scalar` corresponding to `bytes`,
    ///   if `bytes` is a canonical byte representation;
    /// - `None` if `bytes` is not a canonical byte representation.
    pub fn from_canonical_bytes(bytes: [u8; 32]) -> Option<Scalar> {
        // Check that the high bit is not set
        if (bytes[31] >> 7) != 0u8 { return None; }
        let candidate = Scalar::from_bits(bytes);

        if candidate.is_canonical() {
            Some(candidate)
        } else {
            None
        }
    }

    /// Construct a `Scalar` from the low 255 bits of a 256-bit integer.
    ///
    /// This function is intended for applications like X25519 which
    /// require specific bit-patterns when performing scalar
    /// multiplication.
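    ///
    /// # Example
    ///
    /// The high bit (bit 255) of the input is masked off, so an input with
    /// only that bit set yields the scalar \\( 0 \\):
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let mut bytes = [0u8; 32];
    /// bytes[31] = 0b1000_0000; // set only bit 255
    ///
    /// assert_eq!(Scalar::from_bits(bytes), Scalar::zero());
    /// ```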
    pub const fn from_bits(bytes: [u8; 32]) -> Scalar {
        let mut s = Scalar{bytes};
        // Ensure that s < 2^255 by masking the high bit
        s.bytes[31] &= 0b0111_1111;

        s
    }
}

impl Debug for Scalar {
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        write!(f, "Scalar{{\n\tbytes: {:?},\n}}", &self.bytes)
    }
}

impl Eq for Scalar {}
impl PartialEq for Scalar {
    fn eq(&self, other: &Self) -> bool {
        self.ct_eq(other).unwrap_u8() == 1u8
    }
}

impl ConstantTimeEq for Scalar {
    fn ct_eq(&self, other: &Self) -> Choice {
        self.bytes.ct_eq(&other.bytes)
    }
}

impl Index<usize> for Scalar {
    type Output = u8;

    /// Index the bytes of the representative for this `Scalar`.  Mutation is not permitted.
    fn index(&self, _index: usize) -> &u8 {
        &(self.bytes[_index])
    }
}

impl<'b> MulAssign<&'b Scalar> for Scalar {
    fn mul_assign(&mut self, _rhs: &'b Scalar) {
        *self = UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack();
    }
}

define_mul_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a, 'b> Mul<&'b Scalar> for &'a Scalar {
    type Output = Scalar;
    fn mul(self, _rhs: &'b Scalar) -> Scalar {
        UnpackedScalar::mul(&self.unpack(), &_rhs.unpack()).pack()
    }
}

define_mul_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'b> AddAssign<&'b Scalar> for Scalar {
    fn add_assign(&mut self, _rhs: &'b Scalar) {
        *self = *self + _rhs;
    }
}

define_add_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a, 'b> Add<&'b Scalar> for &'a Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn add(self, _rhs: &'b Scalar) -> Scalar {
        // The UnpackedScalar::add function produces reduced outputs
        // if the inputs are reduced.  However, these inputs may not
        // be reduced -- they might come from Scalar::from_bits.  So
        // after computing the sum, we explicitly reduce it mod l
        // before repacking.
        let sum = UnpackedScalar::add(&self.unpack(), &_rhs.unpack());
        let sum_R = UnpackedScalar::mul_internal(&sum, &constants::R);
        let sum_mod_l = UnpackedScalar::montgomery_reduce(&sum_R);
        sum_mod_l.pack()
    }
}

define_add_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'b> SubAssign<&'b Scalar> for Scalar {
    fn sub_assign(&mut self, _rhs: &'b Scalar) {
        *self = *self - _rhs;
    }
}

define_sub_assign_variants!(LHS = Scalar, RHS = Scalar);

impl<'a, 'b> Sub<&'b Scalar> for &'a Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn sub(self, rhs: &'b Scalar) -> Scalar {
        // The UnpackedScalar::sub function requires reduced inputs
        // and produces reduced output. However, these inputs may not
        // be reduced -- they might come from Scalar::from_bits.  So
        // we explicitly reduce the inputs.
        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
        let rhs_R = UnpackedScalar::mul_internal(&rhs.unpack(), &constants::R);
        let rhs_mod_l = UnpackedScalar::montgomery_reduce(&rhs_R);

        UnpackedScalar::sub(&self_mod_l, &rhs_mod_l).pack()
    }
}

define_sub_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);

impl<'a> Neg for &'a Scalar {
    type Output = Scalar;
    #[allow(non_snake_case)]
    fn neg(self) -> Scalar {
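        // As in Sub, the input may be unreduced (e.g. produced by
        // Scalar::from_bits), so reduce it mod l before negating.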
        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
        UnpackedScalar::sub(&UnpackedScalar::zero(), &self_mod_l).pack()
    }
}

impl<'a> Neg for Scalar {
    type Output = Scalar;
    fn neg(self) -> Scalar {
        -&self
    }
}

impl ConditionallySelectable for Scalar {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        let mut bytes = [0u8; 32];
        for i in 0..32 {
            bytes[i] = u8::conditional_select(&a.bytes[i], &b.bytes[i], choice);
        }
        Scalar { bytes }
    }
}

#[cfg(feature = "serde")]
use serde::{self, Serialize, Deserialize, Serializer, Deserializer};
#[cfg(feature = "serde")]
use serde::de::Visitor;

#[cfg(feature = "serde")]
impl Serialize for Scalar {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
        where S: Serializer
    {
        use serde::ser::SerializeTuple;
        let mut tup = serializer.serialize_tuple(32)?;
        for byte in self.as_bytes().iter() {
            tup.serialize_element(byte)?;
        }
        tup.end()
    }
}

#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for Scalar {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
        where D: Deserializer<'de>
    {
        struct ScalarVisitor;

        impl<'de> Visitor<'de> for ScalarVisitor {
            type Value = Scalar;

            fn expecting(&self, formatter: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
                formatter.write_str("a canonical 32-byte little-endian scalar")
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Scalar, A::Error>
                where A: serde::de::SeqAccess<'de>
            {
                let mut bytes = [0u8; 32];
                for i in 0..32 {
                    bytes[i] = seq.next_element()?
                        .ok_or(serde::de::Error::invalid_length(i, &"expected 32 bytes"))?;
                }
                Scalar::from_canonical_bytes(bytes)
                    .ok_or(serde::de::Error::custom(
                        &"scalar was not canonically encoded"
                    ))
            }
        }

        deserializer.deserialize_tuple(32, ScalarVisitor)
    }
}

impl<T> Product<T> for Scalar
where
    T: Borrow<Scalar>
{
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>
    {
        iter.fold(Scalar::one(), |acc, item| acc * item.borrow())
    }
}

impl<T> Sum<T> for Scalar
where
    T: Borrow<Scalar>
{
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>
    {
        iter.fold(Scalar::zero(), |acc, item| acc + item.borrow())
    }
}

impl Default for Scalar {
    fn default() -> Scalar {
        Scalar::zero()
    }
}

impl From<u8> for Scalar {
    fn from(x: u8) -> Scalar {
        let mut s_bytes = [0u8; 32];
        s_bytes[0] = x;
        Scalar{ bytes: s_bytes }
    }
}

impl From<u16> for Scalar {
    fn from(x: u16) -> Scalar {
        use byteorder::{ByteOrder, LittleEndian};
        let mut s_bytes = [0u8; 32];
        LittleEndian::write_u16(&mut s_bytes, x);
        Scalar{ bytes: s_bytes }
    }
}

impl From<u32> for Scalar {
    fn from(x: u32) -> Scalar {
        use byteorder::{ByteOrder, LittleEndian};
        let mut s_bytes = [0u8; 32];
        LittleEndian::write_u32(&mut s_bytes, x);
        Scalar{ bytes: s_bytes }
    }
}

impl From<u64> for Scalar {
    /// Construct a scalar from the given `u64`.
    ///
    /// # Inputs
    ///
    /// A `u64` to convert to a `Scalar`.
    ///
    /// # Returns
    ///
    /// A `Scalar` corresponding to the input `u64`.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let fourtytwo = Scalar::from(42u64);
    /// let six = Scalar::from(6u64);
    /// let seven = Scalar::from(7u64);
    ///
    /// assert!(fourtytwo == six * seven);
    /// ```
    fn from(x: u64) -> Scalar {
        use byteorder::{ByteOrder, LittleEndian};
        let mut s_bytes = [0u8; 32];
        LittleEndian::write_u64(&mut s_bytes, x);
        Scalar{ bytes: s_bytes }
    }
}

impl From<u128> for Scalar {
    fn from(x: u128) -> Scalar {
        use byteorder::{ByteOrder, LittleEndian};
        let mut s_bytes = [0u8; 32];
        LittleEndian::write_u128(&mut s_bytes, x);
        Scalar{ bytes: s_bytes }
    }
}

impl Zeroize for Scalar {
    fn zeroize(&mut self) {
        self.bytes.zeroize();
    }
}

impl Scalar {
    /// Return a `Scalar` chosen uniformly at random using a user-provided RNG.
    ///
    /// # Inputs
    ///
    /// * `rng`: any RNG which implements the `RngCore + CryptoRng` interface.
    ///
    /// # Returns
    ///
    /// A random scalar within ℤ/lℤ.
    ///
    /// # Example
    ///
    /// ```
    /// extern crate rand_core;
    /// # extern crate curve25519_dalek;
    /// #
    /// # fn main() {
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// use rand_core::OsRng;
    ///
    /// let mut csprng = OsRng;
    /// let a: Scalar = Scalar::random(&mut csprng);
    /// # }
    /// ```
    pub fn random<R: RngCore + CryptoRng>(rng: &mut R) -> Self {
        let mut scalar_bytes = [0u8; 64];
        rng.fill_bytes(&mut scalar_bytes);
        Scalar::from_bytes_mod_order_wide(&scalar_bytes)
    }

    /// Hash a slice of bytes into a scalar.
    ///
    /// Takes a type parameter `D`, which is any `Digest` producing 64
    /// bytes (512 bits) of output.
    ///
    /// Convenience wrapper around `from_hash`.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate curve25519_dalek;
    /// # use curve25519_dalek::scalar::Scalar;
    /// extern crate sha2;
    ///
    /// use sha2::Sha512;
    ///
    /// # // Need fn main() here in comment so the doctest compiles
    /// # // See https://doc.rust-lang.org/book/documentation.html#documentation-as-tests
    /// # fn main() {
    /// let msg = "To really appreciate architecture, you may even need to commit a murder";
    /// let s = Scalar::hash_from_bytes::<Sha512>(msg.as_bytes());
    /// # }
    /// ```
    pub fn hash_from_bytes<D>(input: &[u8]) -> Scalar
        where D: Digest<OutputSize = U64> + Default
    {
        let mut hash = D::default();
        hash.update(input);
        Scalar::from_hash(hash)
    }

    /// Construct a scalar from an existing `Digest` instance.
    ///
    /// Use this instead of `hash_from_bytes` if it is more convenient
    /// to stream data into the `Digest` than to pass a single byte
    /// slice.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate curve25519_dalek;
    /// # use curve25519_dalek::scalar::Scalar;
    /// extern crate sha2;
    ///
    /// use sha2::Digest;
    /// use sha2::Sha512;
    ///
    /// # fn main() {
    /// let mut h = Sha512::new()
    ///     .chain("To really appreciate architecture, you may even need to commit a murder.")
    ///     .chain("While the programs used for The Manhattan Transcripts are of the most extreme")
    ///     .chain("nature, they also parallel the most common formula plot: the archetype of")
    ///     .chain("murder. Other phantasms were occasionally used to underline the fact that")
    ///     .chain("perhaps all architecture, rather than being about functional standards, is")
    ///     .chain("about love and death.");
    ///
    /// let s = Scalar::from_hash(h);
    ///
    /// println!("{:?}", s.to_bytes());
    /// assert!(s == Scalar::from_bits([ 21,  88, 208, 252,  63, 122, 210, 152,
    ///                                 154,  38,  15,  23,  16, 167,  80, 150,
    ///                                 192, 221,  77, 226,  62,  25, 224, 148,
    ///                                 239,  48, 176,  10, 185,  69, 168,  11, ]));
    /// # }
    /// ```
    pub fn from_hash<D>(hash: D) -> Scalar
        where D: Digest<OutputSize = U64>
    {
        let mut output = [0u8; 64];
        output.copy_from_slice(hash.finalize().as_slice());
        Scalar::from_bytes_mod_order_wide(&output)
    }

    /// Convert this `Scalar` to its underlying sequence of bytes.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::zero();
    ///
    /// assert!(s.to_bytes() == [0u8; 32]);
    /// ```
    pub fn to_bytes(&self) -> [u8; 32] {
        self.bytes
    }

    /// View the little-endian byte encoding of the integer representing this Scalar.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// let s: Scalar = Scalar::zero();
    ///
    /// assert!(s.as_bytes() == &[0u8; 32]);
    /// ```
    pub fn as_bytes(&self) -> &[u8; 32] {
        &self.bytes
    }

    /// Construct the scalar \\( 0 \\).
    pub fn zero() -> Self {
        Scalar { bytes: [0u8; 32]}
    }

    /// Construct the scalar \\( 1 \\).
    pub fn one() -> Self {
        Scalar {
            bytes: [
                1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            ],
        }
    }

    /// Given a nonzero `Scalar`, compute its multiplicative inverse.
    ///
    /// # Warning
    ///
    /// `self` **MUST** be nonzero.  If you cannot
    /// *prove* that this is the case, you **SHOULD NOT USE THIS
    /// FUNCTION**.
    ///
    /// # Returns
    ///
    /// The multiplicative inverse of this `Scalar`.
    ///
    /// # Example
    ///
    /// ```
    /// use curve25519_dalek::scalar::Scalar;
    ///
    /// // x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
    /// let X: Scalar = Scalar::from_bytes_mod_order([
    ///         0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84,
    ///         0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2, 0x7d, 0x52,
    ///         0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44,
    ///         0xd4, 0x49, 0xf4, 0xa8, 0x79, 0xd9, 0xf2, 0x04,
    ///     ]);
    /// // 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
    /// let XINV: Scalar = Scalar::from_bytes_mod_order([
    ///         0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb,
    ///         0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01, 0x63, 0x47,
    ///         0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96,
    ///         0xd5, 0x0b, 0xcd, 0x7a, 0x3f, 0x96, 0x2a, 0x0f,
    ///     ]);
    ///
    /// let inv_X: Scalar = X.invert();
    /// assert!(XINV == inv_X);
    /// let should_be_one: Scalar = &inv_X * &X;
    /// assert!(should_be_one == Scalar::one());
    /// ```
    pub fn invert(&self) -> Scalar {
        self.unpack().invert().pack()
    }

    /// Given a slice of nonzero (possibly secret) `Scalar`s,
    /// compute their inverses in a batch.
    ///
    /// # Return
    ///
    /// Each element of `inputs` is replaced by its inverse.
    ///
    /// The product of all inverses is returned.
    ///
    /// # Warning
    ///
    /// All input `Scalars` **MUST** be nonzero.  If you cannot
    /// *prove* that this is the case, you **SHOULD NOT USE THIS
    /// FUNCTION**.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate curve25519_dalek;
    /// # use curve25519_dalek::scalar::Scalar;
    /// # fn main() {
    /// let mut scalars = [
    ///     Scalar::from(3u64),
    ///     Scalar::from(5u64),
    ///     Scalar::from(7u64),
    ///     Scalar::from(11u64),
    /// ];
    ///
    /// let allinv = Scalar::batch_invert(&mut scalars);
    ///
    /// assert_eq!(allinv, Scalar::from(3*5*7*11u64).invert());
    /// assert_eq!(scalars[0], Scalar::from(3u64).invert());
    /// assert_eq!(scalars[1], Scalar::from(5u64).invert());
    /// assert_eq!(scalars[2], Scalar::from(7u64).invert());
    /// assert_eq!(scalars[3], Scalar::from(11u64).invert());
    /// # }
    /// ```
    #[cfg(feature = "alloc")]
    pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar {
        // This code is essentially identical to the FieldElement
        // implementation, and is documented there.  Unfortunately,
        // it's not easy to write it generically, since here we want
        // to use `UnpackedScalar`s internally, and `Scalar`s
        // externally, but there's no corresponding distinction for
        // field elements.
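        //
        // The approach is Montgomery's trick: record the running
        // products x0, x0*x1, ... of the inputs, invert the full
        // product once, then sweep backwards, recovering each
        // individual inverse from the inverted accumulator and the
        // stored partial products.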

        use zeroize::Zeroizing;

        let n = inputs.len();
        let one: UnpackedScalar = Scalar::one().unpack().to_montgomery();

        // Place scratch storage in a Zeroizing wrapper to wipe it when
        // we pass out of scope.
        let scratch_vec = vec![one; n];
        let mut scratch = Zeroizing::new(scratch_vec);

        // Keep an accumulator of all of the previous products
        let mut acc = Scalar::one().unpack().to_montgomery();

        // Pass through the input vector, recording the previous
        // products in the scratch space
        for (input, scratch) in inputs.iter_mut().zip(scratch.iter_mut()) {
            *scratch = acc;

            // Avoid unnecessary Montgomery multiplication in second pass by
            // keeping inputs in Montgomery form
            let tmp = input.unpack().to_montgomery();
            *input = tmp.pack();
            acc = UnpackedScalar::montgomery_mul(&acc, &tmp);
        }

        // acc is nonzero iff all inputs are nonzero
        debug_assert!(acc.pack() != Scalar::zero());

        // Compute the inverse of all products
        acc = acc.montgomery_invert().from_montgomery();

        // We need to return the product of all inverses later
        let ret = acc.pack();

        // Pass through the vector backwards to compute the inverses
        // in place
        for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter().rev()) {
            let tmp = UnpackedScalar::montgomery_mul(&acc, &input.unpack());
            *input = UnpackedScalar::montgomery_mul(&acc, &scratch).pack();
            acc = tmp;
        }

        ret
    }

    /// Get the bits of the scalar.
    pub(crate) fn bits(&self) -> [i8; 256] {
        let mut bits = [0i8; 256];
        for i in 0..256 {
            // As i runs from 0..256, the bottom 3 bits index the bit,
            // while the upper bits index the byte.
            bits[i] = ((self.bytes[i>>3] >> (i&7)) & 1u8) as i8;
        }
        bits
    }

    /// Compute a width-\\(w\\) "Non-Adjacent Form" of this scalar.
    ///
    /// A width-\\(w\\) NAF of a positive integer \\(k\\) is an expression
    /// $$
    /// k = \sum_{i=0}\^m n\_i 2\^i,
    /// $$
    /// where each nonzero
    /// coefficient \\(n\_i\\) is odd and bounded by \\(|n\_i| < 2\^{w-1}\\),
    /// \\(n\_{m-1}\\) is nonzero, and at most one of any \\(w\\) consecutive
    /// coefficients is nonzero.  (Hankerson, Menezes, Vanstone; def 3.32).
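    ///
    /// For example, the width-\\(2\\) NAF of \\( k = 7 \\) is
    /// \\( (n_0, n_1, n_2, n_3) = (-1, 0, 0, 1) \\), since
    /// \\( 7 = -1 + 2\^3 \\).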
    ///
    /// The length of the NAF is at most one more than the length of
    /// the binary representation of \\(k\\).  This is why the
    /// `Scalar` type maintains an invariant that the top bit is
    /// \\(0\\), so that the NAF of a scalar has at most 256 digits.
    ///
    /// Intuitively, this is like a binary expansion, except that we
    /// allow some coefficients to grow in magnitude up to
    /// \\(2\^{w-1}\\) so that the nonzero coefficients are as sparse
    /// as possible.
    ///
    /// When doing scalar multiplication, we can then use a lookup
    /// table of precomputed multiples of a point to add the nonzero
    /// terms \\( k_i P \\).  Using signed digits cuts the table size
    /// in half, and using odd digits cuts the table size in half
    /// again.
    ///
    /// To compute a \\(w\\)-NAF, we use a modification of Algorithm 3.35 of HMV:
    ///
    /// 1. \\( i \gets 0 \\)
    /// 2. While \\( k \ge 1 \\):
    ///     1. If \\(k\\) is odd, \\( n_i \gets k \operatorname{mods} 2^w \\), \\( k \gets k - n_i \\).
    ///     2. If \\(k\\) is even, \\( n_i \gets 0 \\).
    ///     3. \\( k \gets k / 2 \\), \\( i \gets i + 1 \\).
    /// 3. Return \\( n_0, n_1, ... , \\)
    ///
    /// Here \\( \bar x = x \operatorname{mods} 2^w \\) means the
    /// \\( \bar x \\) with \\( \bar x \equiv x \pmod{2^w} \\) and
    /// \\( -2^{w-1} \leq \bar x < 2^{w-1} \\).
    ///
    /// We implement this by scanning across the bits of \\(k\\) from
    /// least-significant bit to most-significant-bit.
    /// Write the bits of \\(k\\) as
    /// $$
    /// k = \sum\_{i=0}\^m k\_i 2^i,
    /// $$
    /// and split the sum as
    /// $$
    /// k = \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
    /// $$
    /// where the first part is \\( k \mod 2^w \\).
    ///
    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w < 2^{w-1} \\), then we emit
    /// \\( n_0 = k \mod 2^w \\).  Instead of computing
    /// \\( k - n_0 \\), we just advance \\(w\\) bits and reindex.
    ///
    /// If \\( k \mod 2^w\\) is odd, and \\( k \mod 2^w \ge 2^{w-1} \\), then
    /// \\( n_0 = k \operatorname{mods} 2^w = k \mod 2^w - 2^w \\).
    /// The quantity \\( k - n_0 \\) is
    /// $$
    /// \begin{aligned}
    /// k - n_0 &= \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \sum\_{i=0} k\_{i+w} 2^i
    ///          - \sum\_{i=0}^{w-1} k\_i 2^i + 2^w \\\\
    /// &= 2^w + 2^w \sum\_{i=0} k\_{i+w} 2^i
    /// \end{aligned}
    /// $$
    /// so instead of computing the subtraction, we can set a carry
    /// bit, advance \\(w\\) bits, and reindex.
    ///
    /// If \\( k \mod 2^w\\) is even, we emit \\(0\\), advance 1 bit
    /// and reindex.  In fact, by setting all digits to \\(0\\)
    /// initially, we don't need to emit anything.
    pub(crate) fn non_adjacent_form(&self, w: usize) -> [i8; 256] {
        // required by the NAF definition
        debug_assert!( w >= 2 );
        // required so that the NAF digits fit in i8
        debug_assert!( w <= 8 );

        use byteorder::{ByteOrder, LittleEndian};

        let mut naf = [0i8; 256];

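        // The scalar fits in four u64s; the fifth limb is zero
        // padding, so that reading a window which straddles the last
        // real limb (below) stays in bounds.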
        let mut x_u64 = [0u64; 5];
        LittleEndian::read_u64_into(&self.bytes, &mut x_u64[0..4]);

        let width = 1 << w;
        let window_mask = width - 1;

        let mut pos = 0;
        let mut carry = 0;
        while pos < 256 {
            // Construct a buffer of bits of the scalar, starting at bit `pos`
            let u64_idx = pos / 64;
            let bit_idx = pos % 64;
            let bit_buf: u64;
            if bit_idx < 64 - w {
                // This window's bits are contained in a single u64
                bit_buf = x_u64[u64_idx] >> bit_idx;
            } else {
                // Combine the current u64's bits with the bits from the next u64
                bit_buf = (x_u64[u64_idx] >> bit_idx) | (x_u64[1+u64_idx] << (64 - bit_idx));
            }

            // Add the carry into the current window
            let window = carry + (bit_buf & window_mask);

            if window & 1 == 0 {
                // If the window value is even, preserve the carry and continue.
                // Why is the carry preserved?
                // If carry == 0 and window & 1 == 0, then the next carry should be 0
                // If carry == 1 and window & 1 == 0, then bit_buf & 1 == 1 so the next carry should be 1
                pos += 1;
                continue;
            }

            if window < width/2 {
                carry = 0;
                naf[pos] = window as i8;
            } else {
                carry = 1;
                naf[pos] = (window as i8).wrapping_sub(width as i8);
            }

            pos += w;
        }

        naf
    }

    /// Write this scalar in radix 16, with coefficients in \\([-8,8)\\),
    /// i.e., compute \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 16\^1 + \cdots + a_{63} 16\^{63},
    /// $$
    /// with \\(-8 \leq a_i < 8\\) for \\(0 \leq i < 63\\) and \\(-8 \leq a_{63} \leq 8\\).
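    ///
    /// For example, the scalar \\( 255 \\) becomes the digits
    /// \\( (a_0, a_1, a_2) = (-1, 0, 1) \\), since
    /// \\( 255 = -1 + 0 \cdot 16 + 1 \cdot 16\^2 \\).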
    pub(crate) fn to_radix_16(&self) -> [i8; 64] {
        debug_assert!(self[31] <= 127);
        let mut output = [0i8; 64];

        // Step 1: change radix.
        // Convert from radix 256 (bytes) to radix 16 (nibbles)
        #[inline(always)]
        fn bot_half(x: u8) -> u8 { (x >> 0) & 15 }
        #[inline(always)]
        fn top_half(x: u8) -> u8 { (x >> 4) & 15 }

        for i in 0..32 {
            output[2*i  ] = bot_half(self[i]) as i8;
            output[2*i+1] = top_half(self[i]) as i8;
        }
        // Precondition note: since self[31] <= 127, output[63] <= 7

        // Step 2: recenter coefficients from [0,16) to [-8,8)
        for i in 0..63 {
            let carry    = (output[i] + 8) >> 4;
            output[i  ] -= carry << 4;
            output[i+1] += carry;
        }
        // Precondition note: output[63] is not recentered.  It
        // increases by carry <= 1.  Thus output[63] <= 8.

        output
    }

    /// Returns a size hint indicating how many entries of the return
    /// value of `to_radix_2w` are nonzero.
    pub(crate) fn to_radix_2w_size_hint(w: usize) -> usize {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        let digits_count = match w {
            4 => (256 + w - 1)/w as usize,
            5 => (256 + w - 1)/w as usize,
            6 => (256 + w - 1)/w as usize,
            7 => (256 + w - 1)/w as usize,
            // See comment in to_radix_2w on handling the terminal carry.
            8 => (256 + w - 1)/w + 1 as usize,
            _ => panic!("invalid radix parameter"),
        };

        debug_assert!(digits_count <= 64);
        digits_count
    }

    /// Creates a representation of a Scalar in radix 32, 64, 128 or 256 for use with the Pippenger algorithm.
    /// For lower radix, use `to_radix_16`, which is used by the Straus multi-scalar multiplication.
    /// Higher radixes are not supported to save cache space. Radix 256 is near-optimal even for very
    /// large inputs.
    ///
    /// Radix below 32 or above 256 is prohibited.
    /// This method returns digits in a fixed-size array; excess digits are zero.
    ///
    /// ## Scalar representation
    ///
    /// Radix \\(2\^w\\), with \\(n = ceil(256/w)\\) coefficients in \\([-(2\^w)/2,(2\^w)/2)\\),
    /// i.e., scalar is represented using digits \\(a\_i\\) such that
    /// $$
    ///    a = a\_0 + a\_1 2\^w + \cdots + a\_{n-1} 2\^{w(n-1)},
    /// $$
    /// with \\(-2\^w/2 \leq a_i < 2\^w/2\\) for \\(0 \leq i < (n-1)\\) and \\(-2\^w/2 \leq a_{n-1} \leq 2\^w/2\\).
    ///
    pub(crate) fn to_radix_2w(&self, w: usize) -> [i8; 64] {
        debug_assert!(w >= 4);
        debug_assert!(w <= 8);

        if w == 4 {
            return self.to_radix_16();
        }

        use byteorder::{ByteOrder, LittleEndian};

        // Scalar formatted as four `u64`s with carry bit packed into the highest bit.
        let mut scalar64x4 = [0u64; 4];
        LittleEndian::read_u64_into(&self.bytes, &mut scalar64x4[0..4]);

        let radix: u64 = 1 << w;
        let window_mask: u64 = radix - 1;

        let mut carry = 0u64;
        let mut digits = [0i8; 64];
        let digits_count = (256 + w - 1)/w as usize;
        for i in 0..digits_count {
            // Construct a buffer of bits of the scalar, starting at `bit_offset`.
            let bit_offset = i*w;
            let u64_idx = bit_offset / 64;
            let bit_idx = bit_offset % 64;

            // Read the bits from the scalar
            let bit_buf: u64;
            if bit_idx < 64 - w  || u64_idx == 3 {
                // This window's bits are contained in a single u64,
                // or it's the last u64 anyway.
                bit_buf = scalar64x4[u64_idx] >> bit_idx;
            } else {
                // Combine the current u64's bits with the bits from the next u64
                bit_buf = (scalar64x4[u64_idx] >> bit_idx) | (scalar64x4[1+u64_idx] << (64 - bit_idx));
            }

            // Read the actual coefficient value from the window
            let coef = carry + (bit_buf & window_mask); // coef is in [0, 2^w]

            // Recenter coefficients from [0,2^w) to [-2^w/2, 2^w/2)
            carry = (coef + (radix/2) as u64) >> w;
            digits[i] = ((coef as i64) - (carry << w) as i64) as i8;
        }

        // When w < 8, we can fold the final carry onto the last digit d,
        // because d < 2^w/2 so d + carry*2^w = d + 1*2^w < 2^(w+1) < 2^8.
        //
        // When w = 8, we can't fit carry*2^w into an i8.  This should
        // not happen anyways, because the final carry will be 0 for
        // reduced scalars, but the Scalar invariant allows 255-bit scalars.
        // To handle this, we expand the size_hint by 1 when w=8,
        // and accumulate the final carry onto another digit.
        match w {
            8 => digits[digits_count] += carry as i8,
            _ => digits[digits_count-1] += (carry << w) as i8,
        }

        digits
    }

    /// Unpack this `Scalar` to an `UnpackedScalar` for faster arithmetic.
    pub(crate) fn unpack(&self) -> UnpackedScalar {
        UnpackedScalar::from_bytes(&self.bytes)
    }

    /// Reduce this `Scalar` modulo \\(\ell\\).
    #[allow(non_snake_case)]
    pub fn reduce(&self) -> Scalar {
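        // Multiply by R, then Montgomery-reduce; since Montgomery
        // reduction divides by R, the net effect is a plain reduction
        // of x mod l.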
        let x = self.unpack();
        let xR = UnpackedScalar::mul_internal(&x, &constants::R);
        let x_mod_l = UnpackedScalar::montgomery_reduce(&xR);
        x_mod_l.pack()
    }

    /// Check whether this `Scalar` is the canonical representative mod \\(\ell\\).
    ///
    /// This is intended for uses like input validation, where variable-time code is acceptable.
    ///
    /// ```
    /// # extern crate curve25519_dalek;
    /// # extern crate subtle;
    /// # use curve25519_dalek::scalar::Scalar;
    /// # use subtle::ConditionallySelectable;
    /// # fn main() {
    /// // 2^255 - 1, since `from_bits` clears the high bit
    /// let _2_255_minus_1 = Scalar::from_bits([0xff;32]);
    /// assert!(!_2_255_minus_1.is_canonical());
    ///
    /// let reduced = _2_255_minus_1.reduce();
    /// assert!(reduced.is_canonical());
    /// # }
    /// ```
    pub fn is_canonical(&self) -> bool {
        *self == self.reduce()
    }
}

impl UnpackedScalar {
    /// Pack the limbs of this `UnpackedScalar` into a `Scalar`.
    fn pack(&self) -> Scalar {
        Scalar{ bytes: self.to_bytes() }
    }

    /// Inverts an UnpackedScalar in Montgomery form.
    pub fn montgomery_invert(&self) -> UnpackedScalar {
        // Uses the addition chain from
        // https://briansmith.org/ecc-inversion-addition-chains-01#curve25519_scalar_inversion
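        //
        // Because l is prime, the inverse is self^(l-2) (mod l) by
        // Fermat's little theorem; the chain below computes that power
        // with a short sequence of squarings and multiplications.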
1144         let    _1 = self;
1145         let   _10 = _1.montgomery_square();
1146         let  _100 = _10.montgomery_square();
1147         let   _11 = UnpackedScalar::montgomery_mul(&_10,     &_1);
1148         let  _101 = UnpackedScalar::montgomery_mul(&_10,    &_11);
1149         let  _111 = UnpackedScalar::montgomery_mul(&_10,   &_101);
1150         let _1001 = UnpackedScalar::montgomery_mul(&_10,   &_111);
1151         let _1011 = UnpackedScalar::montgomery_mul(&_10,  &_1001);
1152         let _1111 = UnpackedScalar::montgomery_mul(&_100, &_1011);
1153 
1154         // _10000
1155         let mut y = UnpackedScalar::montgomery_mul(&_1111, &_1);
1156 
1157         #[inline]
1158         fn square_multiply(y: &mut UnpackedScalar, squarings: usize, x: &UnpackedScalar) {
1159             for _ in 0..squarings {
1160                 *y = y.montgomery_square();
1161             }
1162             *y = UnpackedScalar::montgomery_mul(y, x);
1163         }
1164 
1165         square_multiply(&mut y, 123 + 3, &_101);
1166         square_multiply(&mut y,   2 + 2, &_11);
1167         square_multiply(&mut y,   1 + 4, &_1111);
1168         square_multiply(&mut y,   1 + 4, &_1111);
1169         square_multiply(&mut y,       4, &_1001);
1170         square_multiply(&mut y,       2, &_11);
1171         square_multiply(&mut y,   1 + 4, &_1111);
1172         square_multiply(&mut y,   1 + 3, &_101);
1173         square_multiply(&mut y,   3 + 3, &_101);
1174         square_multiply(&mut y,       3, &_111);
1175         square_multiply(&mut y,   1 + 4, &_1111);
1176         square_multiply(&mut y,   2 + 3, &_111);
1177         square_multiply(&mut y,   2 + 2, &_11);
1178         square_multiply(&mut y,   1 + 4, &_1011);
1179         square_multiply(&mut y,   2 + 4, &_1011);
1180         square_multiply(&mut y,   6 + 4, &_1001);
1181         square_multiply(&mut y,   2 + 2, &_11);
1182         square_multiply(&mut y,   3 + 2, &_11);
1183         square_multiply(&mut y,   3 + 2, &_11);
1184         square_multiply(&mut y,   1 + 4, &_1001);
1185         square_multiply(&mut y,   1 + 3, &_111);
1186         square_multiply(&mut y,   2 + 4, &_1111);
1187         square_multiply(&mut y,   1 + 4, &_1011);
1188         square_multiply(&mut y,       3, &_101);
1189         square_multiply(&mut y,   2 + 4, &_1111);
1190         square_multiply(&mut y,       3, &_101);
1191         square_multiply(&mut y,   1 + 2, &_11);
1192 
1193         y
1194     }
1195 
1196     /// Inverts an UnpackedScalar not in Montgomery form.
invert(&self) -> UnpackedScalar1197     pub fn invert(&self) -> UnpackedScalar {
1198         self.to_montgomery().montgomery_invert().from_montgomery()
1199     }
1200 }
1201 
1202 #[cfg(test)]
1203 mod test {
1204     use super::*;
1205     use constants;
1206 
1207     /// x = 2238329342913194256032495932344128051776374960164957527413114840482143558222
1208     pub static X: Scalar = Scalar{
1209         bytes: [
1210             0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84,
1211             0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2, 0x7d, 0x52,
1212             0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44,
1213             0xd4, 0x49, 0xf4, 0xa8, 0x79, 0xd9, 0xf2, 0x04,
1214         ],
1215     };
1216     /// 1/x = 6859937278830797291664592131120606308688036382723378951768035303146619657244
1217     pub static XINV: Scalar = Scalar{
1218         bytes: [
1219             0x1c, 0xdc, 0x17, 0xfc, 0xe0, 0xe9, 0xa5, 0xbb,
1220             0xd9, 0x24, 0x7e, 0x56, 0xbb, 0x01, 0x63, 0x47,
1221             0xbb, 0xba, 0x31, 0xed, 0xd5, 0xa9, 0xbb, 0x96,
1222             0xd5, 0x0b, 0xcd, 0x7a, 0x3f, 0x96, 0x2a, 0x0f,
1223         ],
1224     };
1225     /// y = 2592331292931086675770238855846338635550719849568364935475441891787804997264
1226     pub static Y: Scalar = Scalar{
1227         bytes: [
1228             0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4,
1229             0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83, 0x86, 0xc3,
1230             0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d,
1231             0xe8, 0xef, 0x7a, 0xc3, 0x1f, 0x35, 0xbb, 0x05,
1232         ],
1233     };
1234 
1235     /// x*y = 5690045403673944803228348699031245560686958845067437804563560795922180092780
1236     static X_TIMES_Y: Scalar = Scalar{
1237         bytes: [
1238             0x6c, 0x33, 0x74, 0xa1, 0x89, 0x4f, 0x62, 0x21,
1239             0x0a, 0xaa, 0x2f, 0xe1, 0x86, 0xa6, 0xf9, 0x2c,
1240             0xe0, 0xaa, 0x75, 0xc2, 0x77, 0x95, 0x81, 0xc2,
1241             0x95, 0xfc, 0x08, 0x17, 0x9a, 0x73, 0x94, 0x0c,
1242         ],
1243     };
1244 
1245     /// sage: l = 2^252 + 27742317777372353535851937790883648493
1246     /// sage: big = 2^256 - 1
1247     /// sage: repr((big % l).digits(256))
1248     static CANONICAL_2_256_MINUS_1: Scalar = Scalar{
1249         bytes: [
1250               28, 149, 152, 141, 116,  49, 236, 214,
1251              112, 207, 125, 115, 244,  91, 239, 198,
1252              254, 255, 255, 255, 255, 255, 255, 255,
1253              255, 255, 255, 255, 255, 255, 255,  15,
1254         ],
1255     };
1256 
1257     static A_SCALAR: Scalar = Scalar{
1258         bytes: [
1259             0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d,
1260             0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8, 0x26, 0x4d,
1261             0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1,
1262             0x58, 0x9e, 0x7b, 0x7f, 0x23, 0x76, 0xef, 0x09,
1263         ],
1264     };
1265 
1266     static A_NAF: [i8; 256] =
1267         [0,13,0,0,0,0,0,0,0,7,0,0,0,0,0,0,-9,0,0,0,0,-11,0,0,0,0,3,0,0,0,0,1,
1268          0,0,0,0,9,0,0,0,0,-5,0,0,0,0,0,0,3,0,0,0,0,11,0,0,0,0,11,0,0,0,0,0,
1269          -9,0,0,0,0,0,-3,0,0,0,0,9,0,0,0,0,0,1,0,0,0,0,0,0,-1,0,0,0,0,0,9,0,
1270          0,0,0,-15,0,0,0,0,-7,0,0,0,0,-9,0,0,0,0,0,5,0,0,0,0,13,0,0,0,0,0,-3,0,
1271          0,0,0,-11,0,0,0,0,-7,0,0,0,0,-13,0,0,0,0,11,0,0,0,0,-9,0,0,0,0,0,1,0,0,
1272          0,0,0,-15,0,0,0,0,1,0,0,0,0,7,0,0,0,0,0,0,0,0,5,0,0,0,0,0,13,0,0,0,
1273          0,0,0,11,0,0,0,0,0,15,0,0,0,0,0,-9,0,0,0,0,0,0,0,-1,0,0,0,0,0,0,0,7,
1274          0,0,0,0,0,-15,0,0,0,0,0,15,0,0,0,0,15,0,0,0,0,15,0,0,0,0,0,1,0,0,0,0];
1275 
1276     static LARGEST_ED25519_S: Scalar = Scalar {
1277         bytes: [
1278             0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1279             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1280             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1281             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
1282         ],
1283     };
1284 
1285     static CANONICAL_LARGEST_ED25519_S_PLUS_ONE: Scalar = Scalar {
1286         bytes: [
1287             0x7e, 0x34, 0x47, 0x75, 0x47, 0x4a, 0x7f, 0x97,
1288             0x23, 0xb6, 0x3a, 0x8b, 0xe9, 0x2a, 0xe7, 0x6d,
1289             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1290             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
1291         ],
1292     };
1293 
1294     static CANONICAL_LARGEST_ED25519_S_MINUS_ONE: Scalar = Scalar {
1295         bytes: [
1296             0x7c, 0x34, 0x47, 0x75, 0x47, 0x4a, 0x7f, 0x97,
1297             0x23, 0xb6, 0x3a, 0x8b, 0xe9, 0x2a, 0xe7, 0x6d,
1298             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1299             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
1300         ],
1301     };
1302 
1303     #[test]
fuzzer_testcase_reduction()1304     fn fuzzer_testcase_reduction() {
1305         // LE bytes of 24519928653854221733733552434404946937899825954937634815
1306         let a_bytes = [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0];
1307         // LE bytes of 4975441334397345751130612518500927154628011511324180036903450236863266160640
1308         let b_bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 210, 210, 210, 255, 255, 255, 255, 10];
1309         // LE bytes of 6432735165214683820902750800207468552549813371247423777071615116673864412038
1310         let c_bytes = [134, 171, 119, 216, 180, 128, 178, 62, 171, 132, 32, 62, 34, 119, 104, 193, 47, 215, 181, 250, 14, 207, 172, 93, 75, 207, 211, 103, 144, 204, 56, 14];
1311 
1312         let a = Scalar::from_bytes_mod_order(a_bytes);
1313         let b = Scalar::from_bytes_mod_order(b_bytes);
1314         let c = Scalar::from_bytes_mod_order(c_bytes);
1315 
1316         let mut tmp = [0u8; 64];
1317 
1318         // also_a = (a mod l)
1319         tmp[0..32].copy_from_slice(&a_bytes[..]);
1320         let also_a = Scalar::from_bytes_mod_order_wide(&tmp);
1321 
1322         // also_b = (b mod l)
1323         tmp[0..32].copy_from_slice(&b_bytes[..]);
1324         let also_b = Scalar::from_bytes_mod_order_wide(&tmp);
1325 
1326         let expected_c = &a * &b;
1327         let also_expected_c = &also_a * &also_b;
1328 
1329         assert_eq!(c, expected_c);
1330         assert_eq!(c, also_expected_c);
1331     }
1332 
1333     #[test]
non_adjacent_form_test_vector()1334     fn non_adjacent_form_test_vector() {
1335         let naf = A_SCALAR.non_adjacent_form(5);
1336         for i in 0..256 {
1337             assert_eq!(naf[i], A_NAF[i]);
1338         }
1339     }
1340 
non_adjacent_form_iter(w: usize, x: &Scalar)1341     fn non_adjacent_form_iter(w: usize, x: &Scalar) {
1342         let naf = x.non_adjacent_form(w);
1343 
1344         // Reconstruct the scalar from the computed NAF
1345         let mut y = Scalar::zero();
1346         for i in (0..256).rev() {
1347             y += y;
1348             let digit = if naf[i] < 0 {
1349                 -Scalar::from((-naf[i]) as u64)
1350             } else {
1351                 Scalar::from(naf[i] as u64)
1352             };
1353             y += digit;
1354         }
1355 
1356         assert_eq!(*x, y);
1357     }
1358 
1359     #[test]
non_adjacent_form_random()1360     fn non_adjacent_form_random() {
1361         let mut rng = rand::thread_rng();
1362         for _ in 0..1_000 {
1363             let x = Scalar::random(&mut rng);
1364             for w in &[5, 6, 7, 8] {
1365                 non_adjacent_form_iter(*w, &x);
1366             }
1367         }
1368     }
1369 
1370     #[test]
from_u64()1371     fn from_u64() {
1372         let val: u64 = 0xdeadbeefdeadbeef;
1373         let s = Scalar::from(val);
1374         assert_eq!(s[7], 0xde);
1375         assert_eq!(s[6], 0xad);
1376         assert_eq!(s[5], 0xbe);
1377         assert_eq!(s[4], 0xef);
1378         assert_eq!(s[3], 0xde);
1379         assert_eq!(s[2], 0xad);
1380         assert_eq!(s[1], 0xbe);
1381         assert_eq!(s[0], 0xef);
1382     }
1383 
1384     #[test]
scalar_mul_by_one()1385     fn scalar_mul_by_one() {
1386         let test_scalar = &X * &Scalar::one();
1387         for i in 0..32 {
1388             assert!(test_scalar[i] == X[i]);
1389         }
1390     }
1391 
1392     #[test]
add_reduces()1393     fn add_reduces() {
1394         // Check that the addition works
1395         assert_eq!(
1396             (LARGEST_ED25519_S + Scalar::one()).reduce(),
1397             CANONICAL_LARGEST_ED25519_S_PLUS_ONE
1398         );
1399         // Check that the addition reduces
1400         assert_eq!(
1401             LARGEST_ED25519_S + Scalar::one(),
1402             CANONICAL_LARGEST_ED25519_S_PLUS_ONE
1403         );
1404     }

    #[test]
    fn sub_reduces() {
        // Check that subtraction followed by an explicit reduction gives
        // the canonical result
        assert_eq!(
            (LARGEST_ED25519_S - Scalar::one()).reduce(),
            CANONICAL_LARGEST_ED25519_S_MINUS_ONE
        );
        // Check that subtraction alone already produces the reduced,
        // canonical result
        assert_eq!(
            LARGEST_ED25519_S - Scalar::one(),
            CANONICAL_LARGEST_ED25519_S_MINUS_ONE
        );
    }

    #[test]
    fn quarkslab_scalar_overflow_does_not_occur() {
        // Check that manually constructing large Scalars with
        // from_bits cannot produce incorrect results.
        //
        // The from_bits function is required to implement X/Ed25519,
        // while all other methods of constructing a Scalar produce
        // reduced Scalars.  However, this "invariant loophole" allows
        // constructing large scalars which are not reduced mod l.
        //
        // This issue was discovered independently by both Jack
        // "str4d" Grigg (issue #238), who noted that reduction was
        // not performed on addition, and Laurent Grémy & Nicolas
        // Surbayrole of Quarkslab, who noted that it was possible to
        // cause an overflow and compute incorrect results.
        //
        // This test is adapted from the one suggested by Quarkslab.

        let large_bytes = [
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
        ];

        let a = Scalar::from_bytes_mod_order(large_bytes);
        let b = Scalar::from_bits(large_bytes);

        assert_eq!(a, b.reduce());

        let a_3 = a + a + a;
        let b_3 = b + b + b;

        assert_eq!(a_3, b_3);

        let neg_a = -a;
        let neg_b = -b;

        assert_eq!(neg_a, neg_b);

        let minus_a_3 = Scalar::zero() - a - a - a;
        let minus_b_3 = Scalar::zero() - b - b - b;

        assert_eq!(minus_a_3, minus_b_3);
        assert_eq!(minus_a_3, -a_3);
        assert_eq!(minus_b_3, -b_3);
    }
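
    // A hedged companion check (an addition, not part of the original
    // suite): from_bits is documented to clear the high bit, so an
    // all-ones input should yield 2^255 - 1 rather than 2^256 - 1, as the
    // Pippenger radix test below also assumes.
    #[test]
    fn from_bits_clears_high_bit() {
        let s = Scalar::from_bits([0xff; 32]);
        assert_eq!(s[31], 0x7f);
        for i in 0..31 {
            assert_eq!(s[i], 0xff);
        }
    }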

    #[test]
    fn impl_add() {
        let two = Scalar::from(2u64);
        let one = Scalar::one();
        let should_be_two = &one + &one;
        assert_eq!(should_be_two, two);
    }

    #[allow(non_snake_case)]
    #[test]
    fn impl_mul() {
        let should_be_X_times_Y = &X * &Y;
        assert_eq!(should_be_X_times_Y, X_TIMES_Y);
    }

    #[allow(non_snake_case)]
    #[test]
    fn impl_product() {
        // Test that product works for non-empty iterators
        let X_Y_vector = vec![X, Y];
        let should_be_X_times_Y: Scalar = X_Y_vector.iter().product();
        assert_eq!(should_be_X_times_Y, X_TIMES_Y);

        // Test that product works for the empty iterator
        let one = Scalar::one();
        let empty_vector = vec![];
        let should_be_one: Scalar = empty_vector.iter().product();
        assert_eq!(should_be_one, one);

        // Test that product works for iterators where Item = Scalar
        let xs = [Scalar::from(2u64); 10];
        let ys = [Scalar::from(3u64); 10];
        // now zs is an iterator with Item = Scalar
        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x * y);

        let x_prod: Scalar = xs.iter().product();
        let y_prod: Scalar = ys.iter().product();
        let z_prod: Scalar = zs.product();

        assert_eq!(x_prod, Scalar::from(1024u64));
        assert_eq!(y_prod, Scalar::from(59049u64));
        assert_eq!(z_prod, Scalar::from(60466176u64));
        assert_eq!(x_prod * y_prod, z_prod);
    }

    #[test]
    fn impl_sum() {
        // Test that sum works for non-empty iterators
        let two = Scalar::from(2u64);
        let one_vector = vec![Scalar::one(), Scalar::one()];
        let should_be_two: Scalar = one_vector.iter().sum();
        assert_eq!(should_be_two, two);

        // Test that sum works for the empty iterator
        let zero = Scalar::zero();
        let empty_vector = vec![];
        let should_be_zero: Scalar = empty_vector.iter().sum();
        assert_eq!(should_be_zero, zero);

        // Test that sum works for iterators where Item = Scalar
        let xs = [Scalar::from(1u64); 10];
        let ys = [Scalar::from(2u64); 10];
        // now zs is an iterator with Item = Scalar
        let zs = xs.iter().zip(ys.iter()).map(|(x, y)| x + y);

        let x_sum: Scalar = xs.iter().sum();
        let y_sum: Scalar = ys.iter().sum();
        let z_sum: Scalar = zs.sum();

        assert_eq!(x_sum, Scalar::from(10u64));
        assert_eq!(y_sum, Scalar::from(20u64));
        assert_eq!(z_sum, Scalar::from(30u64));
        assert_eq!(x_sum + y_sum, z_sum);
    }

    #[test]
    fn square() {
        let expected = &X * &X;
        let actual = X.unpack().square().pack();
        for i in 0..32 {
            assert_eq!(expected[i], actual[i]);
        }
    }

    #[test]
    fn reduce() {
        // 2^256 - 1 should reduce to the canonical encoding of 2^256 - 1 mod l
        let biggest = Scalar::from_bytes_mod_order([0xff; 32]);
        assert_eq!(biggest, CANONICAL_2_256_MINUS_1);
    }

    #[test]
    fn from_bytes_mod_order_wide() {
        let mut bignum = [0u8; 64];
        // set bignum = x + 2^256x
        for i in 0..32 {
            bignum[   i] = X[i];
            bignum[32+i] = X[i];
        }
        // 3958878930004874126169954872055634648693766179881526445624823978500314864344
        // = x + 2^256x (mod l)
        let reduced = Scalar{
            bytes: [
                216, 154, 179, 139, 210, 121,   2,  71,
                 69,  99, 158, 216,  23, 173,  63, 100,
                204,   0,  91,  50, 219, 153,  57, 249,
                 28,  82,  31, 197, 100, 165, 192,   8,
            ],
        };
        let test_red = Scalar::from_bytes_mod_order_wide(&bignum);
        for i in 0..32 {
            assert_eq!(test_red[i], reduced[i]);
        }
    }
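
    // A hedged cross-check (an addition, not part of the original suite):
    // since x + 2^256*x = x*(2^256 + 1), the wide reduction above can be
    // reproduced with ordinary scalar arithmetic, without hardcoded bytes.
    #[test]
    fn from_bytes_mod_order_wide_matches_factored_form() {
        // (2^256 + 1) mod l, from its 64-byte little-endian encoding
        let mut factor_bytes = [0u8; 64];
        factor_bytes[0] = 1;
        factor_bytes[32] = 1;
        let factor = Scalar::from_bytes_mod_order_wide(&factor_bytes);

        // x + 2^256*x as a 64-byte little-endian integer
        let mut bignum = [0u8; 64];
        bignum[0..32].copy_from_slice(&X.to_bytes());
        bignum[32..64].copy_from_slice(&X.to_bytes());

        assert_eq!(Scalar::from_bytes_mod_order_wide(&bignum), X * factor);
    }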

    #[allow(non_snake_case)]
    #[test]
    fn invert() {
        let inv_X = X.invert();
        assert_eq!(inv_X, XINV);
        let should_be_one = &inv_X * &X;
        assert_eq!(should_be_one, Scalar::one());
    }

    // Negating a scalar twice should result in the original scalar.
    #[allow(non_snake_case)]
    #[test]
    fn neg_twice_is_identity() {
        let negative_X = -&X;
        let should_be_X = -&negative_X;

        assert_eq!(should_be_X, X);
    }

    #[test]
    fn to_bytes_from_bytes_roundtrips() {
        let unpacked = X.unpack();
        let bytes = unpacked.to_bytes();
        let should_be_unpacked = UnpackedScalar::from_bytes(&bytes);

        assert_eq!(should_be_unpacked.0, unpacked.0);
    }

    #[test]
    fn montgomery_reduce_matches_from_bytes_mod_order_wide() {
        let mut bignum = [0u8; 64];

        // set bignum = x + 2^256x
        for i in 0..32 {
            bignum[   i] = X[i];
            bignum[32+i] = X[i];
        }
        // x + 2^256x (mod l)
        //         = 3958878930004874126169954872055634648693766179881526445624823978500314864344
        let expected = Scalar{
            bytes: [
                216, 154, 179, 139, 210, 121,   2,  71,
                 69,  99, 158, 216,  23, 173,  63, 100,
                204,   0,  91,  50, 219, 153,  57, 249,
                 28,  82,  31, 197, 100, 165, 192,   8,
            ],
        };
        let reduced = Scalar::from_bytes_mod_order_wide(&bignum);

        // The reduced scalar should match the expected
        assert_eq!(reduced.bytes, expected.bytes);

        //  (x + 2^256x) * R
        let interim = UnpackedScalar::mul_internal(&UnpackedScalar::from_bytes_wide(&bignum),
                                                   &constants::R);
        // ((x + 2^256x) * R) / R  (mod l)
        let montgomery_reduced = UnpackedScalar::montgomery_reduce(&interim);

        // The Montgomery-reduced scalar should match the reduced one, as well as the expected
        assert_eq!(montgomery_reduced.0, reduced.unpack().0);
        assert_eq!(montgomery_reduced.0, expected.unpack().0);
    }

    #[test]
    fn canonical_decoding() {
        // canonical encoding of 1667457891
        let canonical_bytes = [99, 99, 99, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];

        // encoding of
        //   7265385991361016183439748078976496179028704920197054998554201349516117938192
        // = 28380414028753969466561515933501938171588560817147392552250411230663687203 (mod l)
        // non-canonical because it is not reduced mod l
        let non_canonical_bytes_because_unreduced = [16; 32];

        // encoding with the high bit set, to check that the parser isn't pre-masking the high bit
        let non_canonical_bytes_because_highbit = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128];

        assert!( Scalar::from_canonical_bytes(canonical_bytes).is_some() );
        assert!( Scalar::from_canonical_bytes(non_canonical_bytes_because_unreduced).is_none() );
        assert!( Scalar::from_canonical_bytes(non_canonical_bytes_because_highbit).is_none() );
    }
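
    // A hedged companion check (an addition, not part of the original
    // suite): the high-bit encoding rejected above is still accepted by
    // from_bytes_mod_order, which reduces the full 256-bit value mod l;
    // the reduced result then re-encodes canonically.
    #[test]
    fn mod_order_decoding_accepts_what_canonical_decoding_rejects() {
        let mut high_bit_bytes = [0u8; 32];
        high_bit_bytes[31] = 128;

        assert!(Scalar::from_canonical_bytes(high_bit_bytes).is_none());

        let reduced = Scalar::from_bytes_mod_order(high_bit_bytes);
        assert!(Scalar::from_canonical_bytes(reduced.to_bytes()).is_some());
    }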

    #[test]
    #[cfg(feature = "serde")]
    fn serde_bincode_scalar_roundtrip() {
        use bincode;
        let encoded = bincode::serialize(&X).unwrap();
        let parsed: Scalar = bincode::deserialize(&encoded).unwrap();
        assert_eq!(parsed, X);

        // Check that the encoding is 32 bytes exactly
        assert_eq!(encoded.len(), 32);

        // Check that the encoding itself matches the usual one
        assert_eq!(
            X,
            bincode::deserialize(X.as_bytes()).unwrap(),
        );
    }

    #[cfg(debug_assertions)]
    #[test]
    #[should_panic]
    fn batch_invert_with_a_zero_input_panics() {
        let mut xs = vec![Scalar::one(); 16];
        xs[3] = Scalar::zero();
        // This should panic in debug mode.
        Scalar::batch_invert(&mut xs);
    }

    #[test]
    fn batch_invert_empty() {
        // The empty product of inverses is the multiplicative identity.
        assert_eq!(Scalar::one(), Scalar::batch_invert(&mut []));
    }

    #[test]
    fn batch_invert_consistency() {
        let mut x = Scalar::from(1u64);
        let mut v1: Vec<_> = (0..16).map(|_| { let tmp = x; x = x + x; tmp }).collect();
        let v2 = v1.clone();

        let expected: Scalar = v1.iter().product();
        let expected = expected.invert();
        let ret = Scalar::batch_invert(&mut v1);
        assert_eq!(ret, expected);

        for (a, b) in v1.iter().zip(v2.iter()) {
            assert_eq!(a * b, Scalar::one());
        }
    }
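
    // A hedged illustration (an addition, not part of the original suite)
    // of the Montgomery trick that batch inversion relies on: one shared
    // inversion of the product recovers every individual inverse, e.g.
    // inv(a) = b * inv(a*b) and inv(b) = a * inv(a*b).
    #[test]
    fn batch_invert_matches_montgomery_trick_by_hand() {
        let a = Scalar::from(3u64);
        let b = Scalar::from(5u64);
        let ab_inv = (a * b).invert();

        let mut v = [a, b];
        let ret = Scalar::batch_invert(&mut v);

        // batch_invert returns the inverse of the product of its inputs...
        assert_eq!(ret, ab_inv);
        // ...and replaces each input with its inverse.
        assert_eq!(v[0], b * ab_inv);
        assert_eq!(v[1], a * ab_inv);
    }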

    fn test_pippenger_radix_iter(scalar: Scalar, w: usize) {
        let digits_count = Scalar::to_radix_2w_size_hint(w);
        let digits = scalar.to_radix_2w(w);

        // Reconstruct the scalar as sum(digit_i * 2^(w*i))
        let radix = Scalar::from((1<<w) as u64);
        let mut term = Scalar::one();
        let mut recovered_scalar = Scalar::zero();
        for digit in &digits[0..digits_count] {
            let digit = *digit;
            if digit != 0 {
                let sdigit = if digit < 0 {
                    -Scalar::from((-(digit as i64)) as u64)
                } else {
                    Scalar::from(digit as u64)
                };
                recovered_scalar += term * sdigit;
            }
            term *= radix;
        }
        // When the input is unreduced, we may only recover the scalar mod l.
        assert_eq!(recovered_scalar, scalar.reduce());
    }

    #[test]
    fn test_pippenger_radix() {
        use core::iter;
        // For each supported radix (w = 6, 7, 8), check that a sample of
        // structured scalars (the inverses of 2..100), plus the largest
        // unreduced scalar, can be recovered (mod l) from their
        // radix-2^w representations.
        let cases = (2..100)
            .map(|s| Scalar::from(s as u64).invert())
            // The largest unreduced scalar, s = 2^255-1
            .chain(iter::once(Scalar::from_bits([0xff; 32])));

        for scalar in cases {
            test_pippenger_radix_iter(scalar, 6);
            test_pippenger_radix_iter(scalar, 7);
            test_pippenger_radix_iter(scalar, 8);
        }
    }
}