1 //! The official Rust implementation of the [BLAKE3] cryptographic hash
2 //! function.
3 //!
4 //! # Examples
5 //!
6 //! ```
7 //! # fn main() -> Result<(), Box<dyn std::error::Error>> {
8 //! // Hash an input all at once.
9 //! let hash1 = blake3::hash(b"foobarbaz");
10 //!
11 //! // Hash an input incrementally.
12 //! let mut hasher = blake3::Hasher::new();
13 //! hasher.update(b"foo");
14 //! hasher.update(b"bar");
15 //! hasher.update(b"baz");
16 //! let hash2 = hasher.finalize();
17 //! assert_eq!(hash1, hash2);
18 //!
19 //! // Extended output. OutputReader also implements Read and Seek.
20 //! # #[cfg(feature = "std")] {
21 //! let mut output = [0; 1000];
22 //! let mut output_reader = hasher.finalize_xof();
23 //! output_reader.fill(&mut output);
24 //! assert_eq!(&output[..32], hash1.as_bytes());
25 //! # }
26 //!
27 //! // Print a hash as hex.
28 //! println!("{}", hash1);
29 //! # Ok(())
30 //! # }
31 //! ```
32 //!
33 //! # Cargo Features
34 //!
35 //! The `std` feature (the only feature enabled by default) is required for
36 //! implementations of the [`Write`] and [`Seek`] traits, and also for runtime
37 //! CPU feature detection. If this feature is disabled, the only way to use the
38 //! SIMD implementations in this crate is to enable the corresponding
39 //! instruction sets globally, with e.g. `RUSTFLAGS="-C target-cpu=native"`. The
40 //! resulting binary will not be portable to other machines.
41 //!
42 //! The `rayon` feature (disabled by default, but enabled for [docs.rs]) adds
43 //! the [`Hasher::update_rayon`] method, for multithreaded hashing. However,
44 //! even if this feature is enabled, all other APIs remain single-threaded.
45 //!
46 //! The `neon` feature enables ARM NEON support. Currently there is no runtime
47 //! CPU feature detection for NEON, so you must only enable this feature for
48 //! targets that are known to have NEON support. In particular, some ARMv7
49 //! targets support NEON, and some don't.
50 //!
51 //! The `traits-preview` feature enables implementations of traits from the
52 //! RustCrypto [`digest`] and [`crypto-mac`] crates, and re-exports those crates
53 //! as `traits::digest` and `traits::crypto_mac`. However, the traits aren't
54 //! stable, and they're expected to change in incompatible ways before those
55 //! crates reach 1.0. For that reason, this crate makes no SemVer guarantees for
56 //! this feature, and callers who use it should expect breaking changes between
57 //! patch versions. (The "-preview" feature name follows the conventions of the
58 //! RustCrypto [`signature`] crate.)
59 //!
60 //! [`Hasher::update_rayon`]: struct.Hasher.html#method.update_rayon
61 //! [BLAKE3]: https://blake3.io
62 //! [Rayon]: https://github.com/rayon-rs/rayon
63 //! [docs.rs]: https://docs.rs/
64 //! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
65 //! [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
66 //! [`digest`]: https://crates.io/crates/digest
67 //! [`crypto-mac`]: https://crates.io/crates/crypto-mac
68 //! [`signature`]: https://crates.io/crates/signature
69 
70 #![cfg_attr(not(feature = "std"), no_std)]
71 
72 #[cfg(test)]
73 mod test;
74 
75 // The guts module is for incremental use cases like the `bao` crate that need
76 // to explicitly compute chunk and parent chaining values. It is semi-stable
77 // and likely to keep working, but largely undocumented and not intended for
78 // widespread use.
79 #[doc(hidden)]
80 pub mod guts;
81 
82 /// Undocumented and unstable, for benchmarks only.
83 #[doc(hidden)]
84 pub mod platform;
85 
86 // Platform-specific implementations of the compression function. These
87 // BLAKE3-specific cfg flags are set in build.rs.
88 #[cfg(blake3_avx2_rust)]
89 #[path = "rust_avx2.rs"]
90 mod avx2;
91 #[cfg(blake3_avx2_ffi)]
92 #[path = "ffi_avx2.rs"]
93 mod avx2;
94 #[cfg(blake3_avx512_ffi)]
95 #[path = "ffi_avx512.rs"]
96 mod avx512;
97 #[cfg(feature = "neon")]
98 #[path = "ffi_neon.rs"]
99 mod neon;
100 mod portable;
101 #[cfg(blake3_sse2_rust)]
102 #[path = "rust_sse2.rs"]
103 mod sse2;
104 #[cfg(blake3_sse2_ffi)]
105 #[path = "ffi_sse2.rs"]
106 mod sse2;
107 #[cfg(blake3_sse41_rust)]
108 #[path = "rust_sse41.rs"]
109 mod sse41;
110 #[cfg(blake3_sse41_ffi)]
111 #[path = "ffi_sse41.rs"]
112 mod sse41;
113 
114 #[cfg(feature = "traits-preview")]
115 pub mod traits;
116 
117 mod join;
118 
119 use arrayref::{array_mut_ref, array_ref};
120 use arrayvec::{ArrayString, ArrayVec};
121 use core::cmp;
122 use core::fmt;
123 use platform::{Platform, MAX_SIMD_DEGREE, MAX_SIMD_DEGREE_OR_2};
124 
125 /// The number of bytes in a [`Hash`](struct.Hash.html), 32.
126 pub const OUT_LEN: usize = 32;
127 
128 /// The number of bytes in a key, 32.
129 pub const KEY_LEN: usize = 32;
130 
131 const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64
132 use guts::{BLOCK_LEN, CHUNK_LEN};
133 
134 // While iterating the compression function within a chunk, the CV is
135 // represented as words, to avoid doing two extra endianness conversions for
136 // each compression in the portable implementation. But the hash_many interface
137 // needs to hash both input bytes and parent nodes, so it's better for its
138 // output CVs to be represented as bytes.
139 type CVWords = [u32; 8];
140 type CVBytes = [u8; 32]; // little-endian
141 
142 const IV: &CVWords = &[
143     0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
144 ];
145 
146 const MSG_SCHEDULE: [[usize; 16]; 7] = [
147     [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
148     [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8],
149     [3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1],
150     [10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6],
151     [12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4],
152     [9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7],
153     [11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13],
154 ];
155 
156 // These are the internal flags that we use to domain separate root/non-root,
157 // chunk/parent, and chunk beginning/middle/end. They occupy the low bits of
158 // the block flags word in the compression function, so their values start at
159 // 1 and go up by powers of two.
160 const CHUNK_START: u8 = 1 << 0;
161 const CHUNK_END: u8 = 1 << 1;
162 const PARENT: u8 = 1 << 2;
163 const ROOT: u8 = 1 << 3;
164 const KEYED_HASH: u8 = 1 << 4;
165 const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
166 const DERIVE_KEY_MATERIAL: u8 = 1 << 6;
167 
168 #[inline]
169 fn counter_low(counter: u64) -> u32 {
170     counter as u32
171 }
172 
173 #[inline]
174 fn counter_high(counter: u64) -> u32 {
175     (counter >> 32) as u32
176 }
177 
178 /// An output of the default size, 32 bytes, which provides constant-time
179 /// equality checking.
180 ///
181 /// `Hash` implements [`From`] and [`Into`] for `[u8; 32]`, and it provides an
182 /// explicit [`as_bytes`] method returning `&[u8; 32]`. However, byte arrays
183 /// and slices don't provide constant-time equality checking, which is often a
184 /// security requirement in software that handles private data. `Hash` doesn't
185 /// implement [`Deref`] or [`AsRef`], to avoid situations where a type
186 /// conversion happens implicitly and the constant-time property is
187 /// accidentally lost.
188 ///
189 /// `Hash` provides the [`to_hex`] and [`from_hex`] methods for converting to
190 /// and from hexadecimal. It also implements [`Display`] and [`FromStr`].
191 ///
192 /// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
193 /// [`Into`]: https://doc.rust-lang.org/std/convert/trait.Into.html
194 /// [`as_bytes`]: #method.as_bytes
195 /// [`Deref`]: https://doc.rust-lang.org/stable/std/ops/trait.Deref.html
196 /// [`AsRef`]: https://doc.rust-lang.org/std/convert/trait.AsRef.html
197 /// [`to_hex`]: #method.to_hex
198 /// [`from_hex`]: #method.from_hex
199 /// [`Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html
200 /// [`FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html
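/// # Examples
///
/// A short sketch of the conversions described above (the error-handling
/// boilerplate follows the crate-level example):
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let hash = blake3::hash(b"foo");
///
/// // Constant-time comparison against a plain byte array.
/// assert_eq!(hash, *hash.as_bytes());
///
/// // Hex round trip through `to_hex`/`Display` and `FromStr`.
/// let hex = hash.to_hex();
/// let parsed: blake3::Hash = hex.parse()?;
/// assert_eq!(hash, parsed);
/// # Ok(())
/// # }
/// ```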
201 #[derive(Clone, Copy, Hash)]
202 pub struct Hash([u8; OUT_LEN]);
203 
204 impl Hash {
205     /// The raw bytes of the `Hash`. Note that byte arrays don't provide
206     /// constant-time equality checking, so if you need to compare hashes,
207     /// prefer the `Hash` type.
208     #[inline]
209     pub fn as_bytes(&self) -> &[u8; OUT_LEN] {
210         &self.0
211     }
212 
213     /// Encode a `Hash` in lowercase hexadecimal.
214     ///
215     /// The returned [`ArrayString`] is a fixed size and doesn't allocate memory
216     /// on the heap. Note that [`ArrayString`] doesn't provide constant-time
217     /// equality checking, so if you need to compare hashes, prefer the `Hash`
218     /// type.
219     ///
220     /// [`ArrayString`]: https://docs.rs/arrayvec/0.5.1/arrayvec/struct.ArrayString.html
221     pub fn to_hex(&self) -> ArrayString<{ 2 * OUT_LEN }> {
222         let mut s = ArrayString::new();
223         let table = b"0123456789abcdef";
224         for &b in self.0.iter() {
225             s.push(table[(b >> 4) as usize] as char);
226             s.push(table[(b & 0xf) as usize] as char);
227         }
228         s
229     }
230 
231     /// Decode a `Hash` from hexadecimal. Both uppercase and lowercase ASCII
232     /// bytes are supported.
233     ///
234     /// Any byte outside the ranges `'0'...'9'`, `'a'...'f'`, and `'A'...'F'`
235     /// results in an error. An input length other than 64 also results in an
236     /// error.
237     ///
238     /// Note that `Hash` also implements `FromStr`, so `Hash::from_hex("...")`
239     /// is equivalent to `"...".parse()`.
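    ///
    /// A short illustration (the inputs are arbitrary example values):
    ///
    /// ```
    /// // 63 characters is one short of the required 64.
    /// assert!(blake3::Hash::from_hex("0".repeat(63)).is_err());
    /// // 'g' is not a hex character.
    /// assert!(blake3::Hash::from_hex("g".repeat(64)).is_err());
    /// // Round trip through to_hex().
    /// let hash = blake3::hash(b"foo");
    /// assert_eq!(blake3::Hash::from_hex(hash.to_hex().as_str()).unwrap(), hash);
    /// ```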
240     pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, HexError> {
241         fn hex_val(byte: u8) -> Result<u8, HexError> {
242             match byte {
243                 b'A'..=b'F' => Ok(byte - b'A' + 10),
244                 b'a'..=b'f' => Ok(byte - b'a' + 10),
245                 b'0'..=b'9' => Ok(byte - b'0'),
246                 _ => Err(HexError(HexErrorInner::InvalidByte(byte))),
247             }
248         }
249         let hex_bytes: &[u8] = hex.as_ref();
250         if hex_bytes.len() != OUT_LEN * 2 {
251             return Err(HexError(HexErrorInner::InvalidLen(hex_bytes.len())));
252         }
253         let mut hash_bytes: [u8; OUT_LEN] = [0; OUT_LEN];
254         for i in 0..OUT_LEN {
255             hash_bytes[i] = 16 * hex_val(hex_bytes[2 * i])? + hex_val(hex_bytes[2 * i + 1])?;
256         }
257         Ok(Hash::from(hash_bytes))
258     }
259 }
260 
261 impl From<[u8; OUT_LEN]> for Hash {
262     #[inline]
263     fn from(bytes: [u8; OUT_LEN]) -> Self {
264         Self(bytes)
265     }
266 }
267 
268 impl From<Hash> for [u8; OUT_LEN] {
269     #[inline]
270     fn from(hash: Hash) -> Self {
271         hash.0
272     }
273 }
274 
275 impl core::str::FromStr for Hash {
276     type Err = HexError;
277 
278     fn from_str(s: &str) -> Result<Self, Self::Err> {
279         Hash::from_hex(s)
280     }
281 }
282 
283 /// This implementation is constant-time.
284 impl PartialEq for Hash {
285     #[inline]
286     fn eq(&self, other: &Hash) -> bool {
287         constant_time_eq::constant_time_eq_32(&self.0, &other.0)
288     }
289 }
290 
291 /// This implementation is constant-time.
292 impl PartialEq<[u8; OUT_LEN]> for Hash {
293     #[inline]
294     fn eq(&self, other: &[u8; OUT_LEN]) -> bool {
295         constant_time_eq::constant_time_eq_32(&self.0, other)
296     }
297 }
298 
299 /// This implementation is constant-time if the target is 32 bytes long.
300 impl PartialEq<[u8]> for Hash {
301     #[inline]
302     fn eq(&self, other: &[u8]) -> bool {
303         constant_time_eq::constant_time_eq(&self.0, other)
304     }
305 }
306 
307 impl Eq for Hash {}
308 
309 impl fmt::Display for Hash {
310     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
311         // Formatting field as `&str` to reduce code size since the `Debug`
312         // dynamic dispatch table for `&str` is likely needed elsewhere already,
313         // but that for `ArrayString<[u8; 64]>` is not.
314         let hex = self.to_hex();
315         let hex: &str = hex.as_str();
316 
317         f.write_str(hex)
318     }
319 }
320 
321 impl fmt::Debug for Hash {
322     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
323         // Formatting field as `&str` to reduce code size since the `Debug`
324         // dynamic dispatch table for `&str` is likely needed elsewhere already,
325         // but that for `ArrayString<[u8; 64]>` is not.
326         let hex = self.to_hex();
327         let hex: &str = hex.as_str();
328 
329         f.debug_tuple("Hash").field(&hex).finish()
330     }
331 }
332 
333 /// The error type for [`Hash::from_hex`].
334 ///
335 /// The `.to_string()` representation of this error currently distinguishes between bad length
336 /// errors and bad character errors. This is to help with logging and debugging, but it isn't a
337 /// stable API detail, and it may change at any time.
338 #[derive(Clone, Debug)]
339 pub struct HexError(HexErrorInner);
340 
341 #[derive(Clone, Debug)]
342 enum HexErrorInner {
343     InvalidByte(u8),
344     InvalidLen(usize),
345 }
346 
347 impl fmt::Display for HexError {
348     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
349         match self.0 {
350             HexErrorInner::InvalidByte(byte) => {
351                 if byte < 128 {
352                     write!(f, "invalid hex character: {:?}", byte as char)
353                 } else {
354                     write!(f, "invalid hex character: 0x{:x}", byte)
355                 }
356             }
357             HexErrorInner::InvalidLen(len) => {
358                 write!(f, "expected 64 hex bytes, received {}", len)
359             }
360         }
361     }
362 }
363 
364 #[cfg(feature = "std")]
365 impl std::error::Error for HexError {}
366 
367 // Each chunk or parent node can produce either a 32-byte chaining value or, by
368 // setting the ROOT flag, any number of final output bytes. The Output struct
369 // captures the state just prior to choosing between those two possibilities.
370 #[derive(Clone)]
371 struct Output {
372     input_chaining_value: CVWords,
373     block: [u8; 64],
374     block_len: u8,
375     counter: u64,
376     flags: u8,
377     platform: Platform,
378 }
379 
380 impl Output {
381     fn chaining_value(&self) -> CVBytes {
382         let mut cv = self.input_chaining_value;
383         self.platform.compress_in_place(
384             &mut cv,
385             &self.block,
386             self.block_len,
387             self.counter,
388             self.flags,
389         );
390         platform::le_bytes_from_words_32(&cv)
391     }
392 
393     fn root_hash(&self) -> Hash {
394         debug_assert_eq!(self.counter, 0);
395         let mut cv = self.input_chaining_value;
396         self.platform
397             .compress_in_place(&mut cv, &self.block, self.block_len, 0, self.flags | ROOT);
398         Hash(platform::le_bytes_from_words_32(&cv))
399     }
400 
401     fn root_output_block(&self) -> [u8; 2 * OUT_LEN] {
402         self.platform.compress_xof(
403             &self.input_chaining_value,
404             &self.block,
405             self.block_len,
406             self.counter,
407             self.flags | ROOT,
408         )
409     }
410 }
411 
412 #[derive(Clone)]
413 struct ChunkState {
414     cv: CVWords,
415     chunk_counter: u64,
416     buf: [u8; BLOCK_LEN],
417     buf_len: u8,
418     blocks_compressed: u8,
419     flags: u8,
420     platform: Platform,
421 }
422 
423 impl ChunkState {
424     fn new(key: &CVWords, chunk_counter: u64, flags: u8, platform: Platform) -> Self {
425         Self {
426             cv: *key,
427             chunk_counter,
428             buf: [0; BLOCK_LEN],
429             buf_len: 0,
430             blocks_compressed: 0,
431             flags,
432             platform,
433         }
434     }
435 
436     fn len(&self) -> usize {
437         BLOCK_LEN * self.blocks_compressed as usize + self.buf_len as usize
438     }
439 
440     fn fill_buf(&mut self, input: &mut &[u8]) {
441         let want = BLOCK_LEN - self.buf_len as usize;
442         let take = cmp::min(want, input.len());
443         self.buf[self.buf_len as usize..][..take].copy_from_slice(&input[..take]);
444         self.buf_len += take as u8;
445         *input = &input[take..];
446     }
447 
448     fn start_flag(&self) -> u8 {
449         if self.blocks_compressed == 0 {
450             CHUNK_START
451         } else {
452             0
453         }
454     }
455 
456     // Try to avoid buffering as much as possible, by compressing directly from
457     // the input slice when full blocks are available.
458     fn update(&mut self, mut input: &[u8]) -> &mut Self {
459         if self.buf_len > 0 {
460             self.fill_buf(&mut input);
461             if !input.is_empty() {
462                 debug_assert_eq!(self.buf_len as usize, BLOCK_LEN);
463                 let block_flags = self.flags | self.start_flag(); // borrowck
464                 self.platform.compress_in_place(
465                     &mut self.cv,
466                     &self.buf,
467                     BLOCK_LEN as u8,
468                     self.chunk_counter,
469                     block_flags,
470                 );
471                 self.buf_len = 0;
472                 self.buf = [0; BLOCK_LEN];
473                 self.blocks_compressed += 1;
474             }
475         }
476 
477         while input.len() > BLOCK_LEN {
478             debug_assert_eq!(self.buf_len, 0);
479             let block_flags = self.flags | self.start_flag(); // borrowck
480             self.platform.compress_in_place(
481                 &mut self.cv,
482                 array_ref!(input, 0, BLOCK_LEN),
483                 BLOCK_LEN as u8,
484                 self.chunk_counter,
485                 block_flags,
486             );
487             self.blocks_compressed += 1;
488             input = &input[BLOCK_LEN..];
489         }
490 
491         self.fill_buf(&mut input);
492         debug_assert!(input.is_empty());
493         debug_assert!(self.len() <= CHUNK_LEN);
494         self
495     }
496 
497     fn output(&self) -> Output {
498         let block_flags = self.flags | self.start_flag() | CHUNK_END;
499         Output {
500             input_chaining_value: self.cv,
501             block: self.buf,
502             block_len: self.buf_len,
503             counter: self.chunk_counter,
504             flags: block_flags,
505             platform: self.platform,
506         }
507     }
508 }
509 
510 // Don't derive(Debug), because the state may be secret.
511 impl fmt::Debug for ChunkState {
512     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
513         f.debug_struct("ChunkState")
514             .field("len", &self.len())
515             .field("chunk_counter", &self.chunk_counter)
516             .field("flags", &self.flags)
517             .field("platform", &self.platform)
518             .finish()
519     }
520 }
521 
522 // IMPLEMENTATION NOTE
523 // ===================
524 // The recursive function compress_subtree_wide(), implemented below, is the
525 // basis of high-performance BLAKE3. We use it both for all-at-once hashing,
526 // and for the incremental input with Hasher (though we have to be careful with
527 // subtree boundaries in the incremental case). compress_subtree_wide() applies
528 // several optimizations at the same time:
529 // - Multithreading with Rayon.
530 // - Parallel chunk hashing with SIMD.
531 // - Parallel parent hashing with SIMD. Note that while SIMD chunk hashing
532 //   maxes out at MAX_SIMD_DEGREE*CHUNK_LEN, parallel parent hashing continues
533 //   to benefit from larger inputs, because more levels of the tree can use
534 //   full-width SIMD vectors for parent hashing. Without parallel parent
535 //   hashing, we lose about 10% of overall throughput on AVX2 and AVX-512.
536 
537 /// Undocumented and unstable, for benchmarks only.
538 #[doc(hidden)]
539 #[derive(Clone, Copy)]
540 pub enum IncrementCounter {
541     Yes,
542     No,
543 }
544 
545 impl IncrementCounter {
546     #[inline]
547     fn yes(&self) -> bool {
548         match self {
549             IncrementCounter::Yes => true,
550             IncrementCounter::No => false,
551         }
552     }
553 }
554 
555 // The largest power of two less than or equal to `n`, used for left_len()
556 // immediately below, and also directly in Hasher::update().
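// For example: largest_power_of_two_leq(3) == 2, largest_power_of_two_leq(4) == 4,
// and largest_power_of_two_leq(1025) == 1024.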
557 fn largest_power_of_two_leq(n: usize) -> usize {
558     ((n / 2) + 1).next_power_of_two()
559 }
560 
561 // Given some input larger than one chunk, return the number of bytes that
562 // should go in the left subtree. This is the largest power-of-2 number of
563 // chunks that leaves at least 1 byte for the right subtree.
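// For example, with content_len = 4097 (four full chunks plus one byte),
// full_chunks is 4, so the left subtree gets 4 * CHUNK_LEN = 4096 bytes and the
// right subtree gets the remaining 1 byte.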
564 fn left_len(content_len: usize) -> usize {
565     debug_assert!(content_len > CHUNK_LEN);
566     // Subtract 1 to reserve at least one byte for the right side.
567     let full_chunks = (content_len - 1) / CHUNK_LEN;
568     largest_power_of_two_leq(full_chunks) * CHUNK_LEN
569 }
570 
571 // Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
572 // on a single thread. Write out the chunk chaining values and return the
573 // number of chunks hashed. These chunks are never the root and never empty;
574 // those cases use a different codepath.
575 fn compress_chunks_parallel(
576     input: &[u8],
577     key: &CVWords,
578     chunk_counter: u64,
579     flags: u8,
580     platform: Platform,
581     out: &mut [u8],
582 ) -> usize {
583     debug_assert!(!input.is_empty(), "empty chunks below the root");
584     debug_assert!(input.len() <= MAX_SIMD_DEGREE * CHUNK_LEN);
585 
586     let mut chunks_exact = input.chunks_exact(CHUNK_LEN);
587     let mut chunks_array = ArrayVec::<&[u8; CHUNK_LEN], MAX_SIMD_DEGREE>::new();
588     for chunk in &mut chunks_exact {
589         chunks_array.push(array_ref!(chunk, 0, CHUNK_LEN));
590     }
591     platform.hash_many(
592         &chunks_array,
593         key,
594         chunk_counter,
595         IncrementCounter::Yes,
596         flags,
597         CHUNK_START,
598         CHUNK_END,
599         out,
600     );
601 
602     // Hash the remaining partial chunk, if there is one. Note that the empty
603     // chunk (meaning the empty message) is a different codepath.
604     let chunks_so_far = chunks_array.len();
605     if !chunks_exact.remainder().is_empty() {
606         let counter = chunk_counter + chunks_so_far as u64;
607         let mut chunk_state = ChunkState::new(key, counter, flags, platform);
608         chunk_state.update(chunks_exact.remainder());
609         *array_mut_ref!(out, chunks_so_far * OUT_LEN, OUT_LEN) =
610             chunk_state.output().chaining_value();
611         chunks_so_far + 1
612     } else {
613         chunks_so_far
614     }
615 }
616 
617 // Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
618 // on a single thread. Write out the parent chaining values and return the
619 // number of parents hashed. (If there's an odd input chaining value left over,
620 // return it as an additional output.) These parents are never the root and
621 // never empty; those cases use a different codepath.
622 fn compress_parents_parallel(
623     child_chaining_values: &[u8],
624     key: &CVWords,
625     flags: u8,
626     platform: Platform,
627     out: &mut [u8],
628 ) -> usize {
629     debug_assert_eq!(child_chaining_values.len() % OUT_LEN, 0, "wacky hash bytes");
630     let num_children = child_chaining_values.len() / OUT_LEN;
631     debug_assert!(num_children >= 2, "not enough children");
632     debug_assert!(num_children <= 2 * MAX_SIMD_DEGREE_OR_2, "too many");
633 
634     let mut parents_exact = child_chaining_values.chunks_exact(BLOCK_LEN);
635     // Use MAX_SIMD_DEGREE_OR_2 rather than MAX_SIMD_DEGREE here, because of
636     // the requirements of compress_subtree_wide().
637     let mut parents_array = ArrayVec::<&[u8; BLOCK_LEN], MAX_SIMD_DEGREE_OR_2>::new();
638     for parent in &mut parents_exact {
639         parents_array.push(array_ref!(parent, 0, BLOCK_LEN));
640     }
641     platform.hash_many(
642         &parents_array,
643         key,
644         0, // Parents always use counter 0.
645         IncrementCounter::No,
646         flags | PARENT,
647         0, // Parents have no start flags.
648         0, // Parents have no end flags.
649         out,
650     );
651 
652     // If there's an odd child left over, it becomes an output.
653     let parents_so_far = parents_array.len();
654     if !parents_exact.remainder().is_empty() {
655         out[parents_so_far * OUT_LEN..][..OUT_LEN].copy_from_slice(parents_exact.remainder());
656         parents_so_far + 1
657     } else {
658         parents_so_far
659     }
660 }
661 
662 // The wide helper function returns (writes out) an array of chaining values
663 // and returns the length of that array. The number of chaining values returned
664 // is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
665 // if the input is shorter than that many chunks. The reason for maintaining a
666 // wide array of chaining values going back up the tree is to allow the
667 // implementation to hash as many parents in parallel as possible.
668 //
669 // As a special case when the SIMD degree is 1, this function will still return
670 // at least 2 outputs. This guarantees that this function doesn't perform the
671 // root compression. (If it did, it would use the wrong flags, and also we
672 // wouldn't be able to implement extendable output.) Note that this function is
673 // not used when the whole input is only 1 chunk long; that's a different
674 // codepath.
675 //
676 // Why not just have the caller split the input on the first update(), instead
677 // of implementing this special rule? Because we don't want to limit SIMD or
678 // multithreading parallelism for that update().
679 fn compress_subtree_wide<J: join::Join>(
680     input: &[u8],
681     key: &CVWords,
682     chunk_counter: u64,
683     flags: u8,
684     platform: Platform,
685     out: &mut [u8],
686 ) -> usize {
687     // Note that the single chunk case does *not* bump the SIMD degree up to 2
688     // when it is 1. This allows Rayon the option of multithreading even the
689     // 2-chunk case, which can help performance on smaller platforms.
690     if input.len() <= platform.simd_degree() * CHUNK_LEN {
691         return compress_chunks_parallel(input, key, chunk_counter, flags, platform, out);
692     }
693 
694     // With more than simd_degree chunks, we need to recurse. Start by dividing
695     // the input into left and right subtrees. (Note that this is only optimal
696     // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
697     // of 3 or something, we'll need a more complicated strategy.)
698     debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
699     let (left, right) = input.split_at(left_len(input.len()));
700     let right_chunk_counter = chunk_counter + (left.len() / CHUNK_LEN) as u64;
701 
702     // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
703     // account for the special case of returning 2 outputs when the SIMD degree
704     // is 1.
705     let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
706     let degree = if left.len() == CHUNK_LEN {
707         // The "simd_degree=1 and we're at the leaf nodes" case.
708         debug_assert_eq!(platform.simd_degree(), 1);
709         1
710     } else {
711         cmp::max(platform.simd_degree(), 2)
712     };
713     let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);
714 
715     // Recurse! For update_rayon(), this is where we take advantage of RayonJoin and use multiple
716     // threads.
717     let (left_n, right_n) = J::join(
718         || compress_subtree_wide::<J>(left, key, chunk_counter, flags, platform, left_out),
719         || compress_subtree_wide::<J>(right, key, right_chunk_counter, flags, platform, right_out),
720     );
721 
722     // The special case again. If simd_degree=1, then we'll have left_n=1 and
723     // right_n=1. Rather than compressing them into a single output, return
724     // them directly, to make sure we always have at least two outputs.
725     debug_assert_eq!(left_n, degree);
726     debug_assert!(right_n >= 1 && right_n <= left_n);
727     if left_n == 1 {
728         out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
729         return 2;
730     }
731 
732     // Otherwise, do one layer of parent node compression.
733     let num_children = left_n + right_n;
734     compress_parents_parallel(
735         &cv_array[..num_children * OUT_LEN],
736         key,
737         flags,
738         platform,
739         out,
740     )
741 }
742 
743 // Hash a subtree with compress_subtree_wide(), and then condense the resulting
744 // list of chaining values down to a single parent node. Don't compress that
745 // last parent node, however. Instead, return its message bytes (the
746 // concatenated chaining values of its children). This is necessary when the
747 // first call to update() supplies a complete subtree, because the topmost
748 // parent node of that subtree could end up being the root. It's also necessary
749 // for extended output in the general case.
750 //
751 // As with compress_subtree_wide(), this function is not used on inputs of 1
752 // chunk or less. That's a different codepath.
753 fn compress_subtree_to_parent_node<J: join::Join>(
754     input: &[u8],
755     key: &CVWords,
756     chunk_counter: u64,
757     flags: u8,
758     platform: Platform,
759 ) -> [u8; BLOCK_LEN] {
760     debug_assert!(input.len() > CHUNK_LEN);
761     let mut cv_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
762     let mut num_cvs =
763         compress_subtree_wide::<J>(input, &key, chunk_counter, flags, platform, &mut cv_array);
764     debug_assert!(num_cvs >= 2);
765 
766     // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
767     // compress_subtree_wide() returns more than 2 chaining values. Condense
768     // them into 2 by forming parent nodes repeatedly.
769     let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
770     while num_cvs > 2 {
771         let cv_slice = &cv_array[..num_cvs * OUT_LEN];
772         num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
773         cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
774     }
775     *array_ref!(cv_array, 0, 2 * OUT_LEN)
776 }
777 
778 // Hash a complete input all at once. Unlike compress_subtree_wide() and
779 // compress_subtree_to_parent_node(), this function handles the 1 chunk case.
780 // Note that we use SerialJoin here, so this is always single-threaded.
781 fn hash_all_at_once<J: join::Join>(input: &[u8], key: &CVWords, flags: u8) -> Output {
782     let platform = Platform::detect();
783 
784     // If the whole subtree is one chunk, hash it directly with a ChunkState.
785     if input.len() <= CHUNK_LEN {
786         return ChunkState::new(key, 0, flags, platform)
787             .update(input)
788             .output();
789     }
790 
791     // Otherwise construct an Output object from the parent node returned by
792     // compress_subtree_to_parent_node().
793     Output {
794         input_chaining_value: *key,
795         block: compress_subtree_to_parent_node::<J>(input, key, 0, flags, platform),
796         block_len: BLOCK_LEN as u8,
797         counter: 0,
798         flags: flags | PARENT,
799         platform,
800     }
801 }
802 
803 /// The default hash function.
804 ///
805 /// For an incremental version that accepts multiple writes, see
806 /// [`Hasher::update`].
807 ///
808 /// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`] and
809 /// [`OutputReader`].
810 ///
811 /// This function is always single-threaded. For multithreading support, see
812 /// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
813 pub fn hash(input: &[u8]) -> Hash {
814     hash_all_at_once::<join::SerialJoin>(input, IV, 0).root_hash()
815 }
816 
817 /// The keyed hash function.
818 ///
819 /// This is suitable for use as a message authentication code, for example to
820 /// replace an HMAC instance. In that use case, the constant-time equality
821 /// checking provided by [`Hash`](struct.Hash.html) is almost always a security
822 /// requirement, and callers need to be careful not to compare MACs as raw
823 /// bytes.
824 ///
825 /// For output sizes other than 32 bytes, see [`Hasher::new_keyed`],
826 /// [`Hasher::finalize_xof`], and [`OutputReader`].
827 ///
828 /// This function is always single-threaded. For multithreading support, see
829 /// [`Hasher::new_keyed`] and
830 /// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
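///
/// # Examples
///
/// A minimal MAC-style sketch (the key below is a placeholder; a real key must
/// be uniformly random and kept secret):
///
/// ```
/// let key = [0x42; blake3::KEY_LEN];
/// let tag = blake3::keyed_hash(&key, b"a message");
/// // `Hash`'s `PartialEq` is constant-time, so compare tags as `Hash` values
/// // rather than as raw bytes.
/// assert_eq!(tag, blake3::keyed_hash(&key, b"a message"));
/// ```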
831 pub fn keyed_hash(key: &[u8; KEY_LEN], input: &[u8]) -> Hash {
832     let key_words = platform::words_from_le_bytes_32(key);
833     hash_all_at_once::<join::SerialJoin>(input, &key_words, KEYED_HASH).root_hash()
834 }
835 
836 /// The key derivation function.
837 ///
838 /// Given cryptographic key material of any length and a context string of any
839 /// length, this function outputs a 32-byte derived subkey. **The context string
840 /// should be hardcoded, globally unique, and application-specific.** A good
841 /// default format for such strings is `"[application] [commit timestamp]
842 /// [purpose]"`, e.g., `"example.com 2019-12-25 16:18:03 session tokens v1"`.
843 ///
844 /// Key derivation is important when you want to use the same key in multiple
845 /// algorithms or use cases. Using the same key with different cryptographic
846 /// algorithms is generally forbidden, and deriving a separate subkey for each
847 /// use case protects you from bad interactions. Derived keys also mitigate the
848 /// damage from one part of your application accidentally leaking its key.
849 ///
850 /// As a rare exception to that general rule, however, it is possible to use
851 /// `derive_key` itself with key material that you are already using with
852 /// another algorithm. You might need to do this if you're adding features to
853 /// an existing application that does not yet use key derivation internally.
854 /// However, you still must not share key material with algorithms that forbid
855 /// key reuse entirely, like a one-time pad.
856 ///
857 /// Note that BLAKE3 is not a password hash, and **`derive_key` should never be
858 /// used with passwords.** Instead, use a dedicated password hash like
859 /// [Argon2]. Password hashes are entirely different from generic hash
860 /// functions, with opposite design requirements.
861 ///
862 /// For output sizes other than 32 bytes, see [`Hasher::new_derive_key`],
863 /// [`Hasher::finalize_xof`], and [`OutputReader`].
864 ///
865 /// This function is always single-threaded. For multithreading support, see
866 /// [`Hasher::new_derive_key`] and
867 /// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
868 ///
869 /// [Argon2]: https://en.wikipedia.org/wiki/Argon2
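///
/// # Examples
///
/// A short sketch (the context string and key material below are illustrative
/// placeholders):
///
/// ```
/// const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
/// let key_material = [0x07u8; 32];
/// let session_key = blake3::derive_key(CONTEXT, &key_material);
/// // A different context string yields an unrelated subkey.
/// let backup_key =
///     blake3::derive_key("example.com 2019-12-25 16:18:03 backups v1", &key_material);
/// assert_ne!(session_key, backup_key);
/// ```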
870 pub fn derive_key(context: &str, key_material: &[u8]) -> [u8; OUT_LEN] {
871     let context_key =
872         hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
873             .root_hash();
874     let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
875     hash_all_at_once::<join::SerialJoin>(key_material, &context_key_words, DERIVE_KEY_MATERIAL)
876         .root_hash()
877         .0
878 }
879 
880 fn parent_node_output(
881     left_child: &CVBytes,
882     right_child: &CVBytes,
883     key: &CVWords,
884     flags: u8,
885     platform: Platform,
886 ) -> Output {
887     let mut block = [0; BLOCK_LEN];
888     block[..32].copy_from_slice(left_child);
889     block[32..].copy_from_slice(right_child);
890     Output {
891         input_chaining_value: *key,
892         block,
893         block_len: BLOCK_LEN as u8,
894         counter: 0,
895         flags: flags | PARENT,
896         platform,
897     }
898 }
899 
900 /// An incremental hash state that can accept any number of writes.
901 ///
902 /// When the `traits-preview` Cargo feature is enabled, this type implements
903 /// several commonly used traits from the
904 /// [`digest`](https://crates.io/crates/digest) and
905 /// [`crypto_mac`](https://crates.io/crates/crypto-mac) crates. However, those
906 /// traits aren't stable, and they're expected to change in incompatible ways
907 /// before those crates reach 1.0. For that reason, this crate makes no SemVer
908 /// guarantees for this feature, and callers who use it should expect breaking
909 /// changes between patch versions.
910 ///
911 /// When the `rayon` Cargo feature is enabled, the
912 /// [`update_rayon`](#method.update_rayon) method is available for multithreaded
913 /// hashing.
914 ///
915 /// **Performance note:** The [`update`](#method.update) method can't take full
916 /// advantage of SIMD optimizations if its input buffer is too small or oddly
917 /// sized. Using a 16 KiB buffer, or any multiple of that, enables all currently
918 /// supported SIMD instruction sets.
919 ///
920 /// # Examples
921 ///
922 /// ```
923 /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
924 /// // Hash an input incrementally.
925 /// let mut hasher = blake3::Hasher::new();
926 /// hasher.update(b"foo");
927 /// hasher.update(b"bar");
928 /// hasher.update(b"baz");
929 /// assert_eq!(hasher.finalize(), blake3::hash(b"foobarbaz"));
930 ///
931 /// // Extended output. OutputReader also implements Read and Seek.
932 /// # #[cfg(feature = "std")] {
933 /// let mut output = [0; 1000];
934 /// let mut output_reader = hasher.finalize_xof();
935 /// output_reader.fill(&mut output);
936 /// assert_eq!(&output[..32], blake3::hash(b"foobarbaz").as_bytes());
937 /// # }
938 /// # Ok(())
939 /// # }
940 /// ```
941 #[derive(Clone)]
942 pub struct Hasher {
943     key: CVWords,
944     chunk_state: ChunkState,
945     // The stack size is MAX_DEPTH + 1 because we do lazy merging. For example,
946     // with 7 chunks, we have 3 entries in the stack. Adding an 8th chunk
947     // requires a 4th entry, rather than merging everything down to 1, because
948     // we don't know whether more input is coming. This is different from how
949     // the reference implementation does things.
950     cv_stack: ArrayVec<CVBytes, { MAX_DEPTH + 1 }>,
951 }
952 
953 impl Hasher {
954     fn new_internal(key: &CVWords, flags: u8) -> Self {
955         Self {
956             key: *key,
957             chunk_state: ChunkState::new(key, 0, flags, Platform::detect()),
958             cv_stack: ArrayVec::new(),
959         }
960     }
961 
962     /// Construct a new `Hasher` for the regular hash function.
963     pub fn new() -> Self {
964         Self::new_internal(IV, 0)
965     }
966 
967     /// Construct a new `Hasher` for the keyed hash function. See
968     /// [`keyed_hash`].
969     ///
970     /// [`keyed_hash`]: fn.keyed_hash.html
971     pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
972         let key_words = platform::words_from_le_bytes_32(key);
973         Self::new_internal(&key_words, KEYED_HASH)
974     }
975 
976     /// Construct a new `Hasher` for the key derivation function. See
977     /// [`derive_key`]. The context string should be hardcoded, globally
978     /// unique, and application-specific.
979     ///
980     /// [`derive_key`]: fn.derive_key.html
981     pub fn new_derive_key(context: &str) -> Self {
982         let context_key =
983             hash_all_at_once::<join::SerialJoin>(context.as_bytes(), IV, DERIVE_KEY_CONTEXT)
984                 .root_hash();
985         let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
986         Self::new_internal(&context_key_words, DERIVE_KEY_MATERIAL)
987     }
988 
989     /// Reset the `Hasher` to its initial state.
990     ///
991     /// This is functionally the same as overwriting the `Hasher` with a new
992     /// one, using the same key or context string if any.
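    ///
    /// # Examples
    ///
    /// A small example of reusing one `Hasher` for two unrelated inputs:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"foo");
    /// hasher.reset();
    /// hasher.update(b"bar");
    /// // Only the input added after reset() contributes to the result.
    /// assert_eq!(hasher.finalize(), blake3::hash(b"bar"));
    /// ```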
993     pub fn reset(&mut self) -> &mut Self {
994         self.chunk_state = ChunkState::new(
995             &self.key,
996             0,
997             self.chunk_state.flags,
998             self.chunk_state.platform,
999         );
1000         self.cv_stack.clear();
1001         self
1002     }
1003 
1004     // As described in push_cv() below, we do "lazy merging", delaying merges
1005     // until right before the next CV is about to be added. This is different
1006     // from the reference implementation. Another difference is that we aren't
1007     // always merging 1 chunk at a time. Instead, each CV might represent any
1008     // power-of-two number of chunks, as long as the smaller-above-larger stack
1009     // order is maintained. Instead of the "count the trailing 0-bits"
1010     // algorithm described in the spec, we use a "count the total number of
1011     // 1-bits" variant that doesn't require us to retain the subtree size of
1012     // the CV on top of the stack. The principle is the same: each CV that
1013     // should remain in the stack is represented by a 1-bit in the total number
1014     // of chunks (or bytes) so far.
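    // For example, after 6 total chunks (binary 110), the stack holds two CVs:
    // one covering a 4-chunk subtree and one covering a 2-chunk subtree.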
1015     fn merge_cv_stack(&mut self, total_len: u64) {
1016         let post_merge_stack_len = total_len.count_ones() as usize;
1017         while self.cv_stack.len() > post_merge_stack_len {
1018             let right_child = self.cv_stack.pop().unwrap();
1019             let left_child = self.cv_stack.pop().unwrap();
1020             let parent_output = parent_node_output(
1021                 &left_child,
1022                 &right_child,
1023                 &self.key,
1024                 self.chunk_state.flags,
1025                 self.chunk_state.platform,
1026             );
1027             self.cv_stack.push(parent_output.chaining_value());
1028         }
1029     }
1030 
1031     // In reference_impl.rs, we merge the new CV with existing CVs from the
1032     // stack before pushing it. We can do that because we know more input is
1033     // coming, so we know none of the merges are root.
1034     //
1035     // This setting is different. We want to feed as much input as possible to
1036     // compress_subtree_wide(), without setting aside anything for the
1037     // chunk_state. If the user gives us 64 KiB, we want to parallelize over
1038     // all 64 KiB at once as a single subtree, if at all possible.
1039     //
1040     // This leads to two problems:
1041     // 1) This 64 KiB input might be the only call that ever gets made to
1042     //    update. In this case, the root node of the 64 KiB subtree would be
1043     //    the root node of the whole tree, and it would need to be ROOT
1044     //    finalized. We can't compress it until we know.
1045     // 2) This 64 KiB input might complete a larger tree, whose root node is
1046     //    similarly going to be the root of the whole tree. For example,
1047     //    maybe we have 192 KiB (that is, 128 + 64) hashed so far. We can't
1048     //    compress the node at the root of the 256 KiB subtree until we know
1049     //    how to finalize it.
1050     //
1051     // The second problem is solved with "lazy merging". That is, when we're
1052     // about to add a CV to the stack, we don't merge it with anything first,
1053     // as the reference impl does. Instead we do merges using the *previous* CV
1054     // that was added, which is sitting on top of the stack, and we put the new
1055     // CV (unmerged) on top of the stack afterwards. This guarantees that we
1056     // never merge the root node until finalize().
1057     //
1058     // Solving the first problem requires an additional tool,
1059     // compress_subtree_to_parent_node(). That function always returns the top
1060     // *two* chaining values of the subtree it's compressing. We then do lazy
1061     // merging with each of them separately, so that the second CV will always
1062     // remain unmerged. (That also helps us support extendable output when
1063     // we're hashing an input all-at-once.)
1064     fn push_cv(&mut self, new_cv: &CVBytes, chunk_counter: u64) {
1065         self.merge_cv_stack(chunk_counter);
1066         self.cv_stack.push(*new_cv);
1067     }
1068 
1069     /// Add input bytes to the hash state. You can call this any number of
1070     /// times.
1071     ///
1072     /// This method is always single-threaded. For multithreading support, see
1073     /// [`update_rayon`](#method.update_rayon) below (enabled with the `rayon`
1074     /// Cargo feature).
1075     ///
1076     /// Note that the degree of SIMD parallelism that `update` can use is
1077     /// limited by the size of this input buffer. The 8 KiB buffer currently
1078     /// used by [`std::io::copy`] is enough to leverage AVX2, for example, but
1079     /// not enough to leverage AVX-512. A 16 KiB buffer is large enough to
1080     /// leverage all currently supported SIMD instruction sets.
1081     ///
1082     /// [`std::io::copy`]: https://doc.rust-lang.org/std/io/fn.copy.html
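    ///
    /// For example, here is a sketch of buffered hashing from any `Read`
    /// source, using the 16 KiB guideline above (the helper name is
    /// illustrative, and `std` is assumed for the IO types):
    ///
    /// ```
    /// # #[cfg(feature = "std")] {
    /// use std::io::Read;
    ///
    /// fn hash_reader(mut reader: impl Read) -> std::io::Result<blake3::Hash> {
    ///     let mut hasher = blake3::Hasher::new();
    ///     // 16 KiB per update() call is enough for all current SIMD widths.
    ///     let mut buf = [0u8; 16 * 1024];
    ///     loop {
    ///         let n = reader.read(&mut buf)?;
    ///         if n == 0 {
    ///             break;
    ///         }
    ///         hasher.update(&buf[..n]);
    ///     }
    ///     Ok(hasher.finalize())
    /// }
    /// # }
    /// ```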
1083     pub fn update(&mut self, input: &[u8]) -> &mut Self {
1084         self.update_with_join::<join::SerialJoin>(input)
1085     }
1086 
1087     /// Identical to [`update`](Hasher::update), but using Rayon-based
1088     /// multithreading internally.
1089     ///
1090     /// This method is gated by the `rayon` Cargo feature, which is disabled by
1091     /// default but enabled on [docs.rs](https://docs.rs).
1092     ///
1093     /// To get any performance benefit from multithreading, the input buffer
1094     /// needs to be large. As a rule of thumb on x86_64, `update_rayon` is
1095     /// _slower_ than `update` for inputs under 128 KiB. That threshold varies
1096     /// quite a lot across different processors, and it's important to benchmark
1097     /// your specific use case.
1098     ///
1099     /// Memory mapping an entire input file is a simple way to take advantage of
1100     /// multithreading without needing to carefully tune your buffer size or
1101     /// offload IO. However, on spinning disks where random access is expensive,
1102     /// that approach can lead to disk thrashing and terrible IO performance.
1103     /// Note that OS page caching can mask this problem, in which case it might
1104     /// only appear for files larger than available RAM. Again, benchmarking
1105     /// your specific use case is important.
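    ///
    /// # Examples
    ///
    /// A sketch, assuming the `rayon` feature is enabled (the 1 MiB input size
    /// is only illustrative; see the discussion of thresholds above):
    ///
    /// ```
    /// # #[cfg(feature = "rayon")] {
    /// let input = vec![0u8; 1 << 20];
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_rayon(&input);
    /// assert_eq!(hasher.finalize(), blake3::hash(&input));
    /// # }
    /// ```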
1106     #[cfg(feature = "rayon")]
1107     pub fn update_rayon(&mut self, input: &[u8]) -> &mut Self {
1108         self.update_with_join::<join::RayonJoin>(input)
1109     }
1110 
1111     fn update_with_join<J: join::Join>(&mut self, mut input: &[u8]) -> &mut Self {
1112         // If we have some partial chunk bytes in the internal chunk_state, we
1113         // need to finish that chunk first.
1114         if self.chunk_state.len() > 0 {
1115             let want = CHUNK_LEN - self.chunk_state.len();
1116             let take = cmp::min(want, input.len());
1117             self.chunk_state.update(&input[..take]);
1118             input = &input[take..];
1119             if !input.is_empty() {
1120                 // We've filled the current chunk, and there's more input
1121                 // coming, so we know it's not the root and we can finalize it.
1122                 // Then we'll proceed to hashing whole chunks below.
1123                 debug_assert_eq!(self.chunk_state.len(), CHUNK_LEN);
1124                 let chunk_cv = self.chunk_state.output().chaining_value();
1125                 self.push_cv(&chunk_cv, self.chunk_state.chunk_counter);
1126                 self.chunk_state = ChunkState::new(
1127                     &self.key,
1128                     self.chunk_state.chunk_counter + 1,
1129                     self.chunk_state.flags,
1130                     self.chunk_state.platform,
1131                 );
1132             } else {
1133                 return self;
1134             }
1135         }
1136 
1137         // Now the chunk_state is clear, and we have more input. If there's
1138         // more than a single chunk (so, definitely not the root chunk), hash
1139         // the largest whole subtree we can, with the full benefits of SIMD and
1140         // multithreading parallelism. Two restrictions:
1141         // - The subtree has to be a power-of-2 number of chunks. Only subtrees
1142         //   along the right edge can be incomplete, and we don't know where
1143         //   the right edge is going to be until we get to finalize().
1144         // - The subtree must evenly divide the total number of chunks up until
1145         //   this point (if total is not 0). If the current incomplete subtree
1146         //   is only waiting for 1 more chunk, we can't hash a subtree of 4
1147         //   chunks. We have to complete the current subtree first.
1148         // Because we might need to break up the input to form powers of 2, or
1149         // to evenly divide what we already have, this part runs in a loop.
1150         while input.len() > CHUNK_LEN {
1151             debug_assert_eq!(self.chunk_state.len(), 0, "no partial chunk data");
1152             debug_assert_eq!(CHUNK_LEN.count_ones(), 1, "power of 2 chunk len");
1153             let mut subtree_len = largest_power_of_two_leq(input.len());
1154             let count_so_far = self.chunk_state.chunk_counter * CHUNK_LEN as u64;
1155             // Shrink the subtree_len until it evenly divides the count so far.
1156             // We know that subtree_len itself is a power of 2, so we can use a
1157             // bitmasking trick instead of an actual remainder operation. (Note
1158             // that if the caller consistently passes power-of-2 inputs of the
1159             // same size, as is hopefully typical, this loop condition will
1160             // always fail, and subtree_len will always be the full length of
1161             // the input.)
1162             //
1163             // An aside: We don't have to shrink subtree_len quite this much.
1164             // For example, if count_so_far is 1, we could pass 2 chunks to
1165             // compress_subtree_to_parent_node. Since we'll get 2 CVs back,
1166             // we'll still get the right answer in the end, and we might get to
1167             // use 2-way SIMD parallelism. The problem with this optimization,
1168             // is that it gets us stuck always hashing 2 chunks. The total
1169             // number of chunks will remain odd, and we'll never graduate to
1170             // higher degrees of parallelism. See
1171             // https://github.com/BLAKE3-team/BLAKE3/issues/69.
1172             while (subtree_len - 1) as u64 & count_so_far != 0 {
1173                 subtree_len /= 2;
1174             }
1175             // The shrunken subtree_len might now be 1 chunk long. If so, hash
1176             // that one chunk by itself. Otherwise, compress the subtree into a
1177             // pair of CVs.
1178             let subtree_chunks = (subtree_len / CHUNK_LEN) as u64;
1179             if subtree_len <= CHUNK_LEN {
1180                 debug_assert_eq!(subtree_len, CHUNK_LEN);
1181                 self.push_cv(
1182                     &ChunkState::new(
1183                         &self.key,
1184                         self.chunk_state.chunk_counter,
1185                         self.chunk_state.flags,
1186                         self.chunk_state.platform,
1187                     )
1188                     .update(&input[..subtree_len])
1189                     .output()
1190                     .chaining_value(),
1191                     self.chunk_state.chunk_counter,
1192                 );
1193             } else {
1194                 // This is the high-performance happy path, though getting here
1195                 // depends on the caller giving us a long enough input.
1196                 let cv_pair = compress_subtree_to_parent_node::<J>(
1197                     &input[..subtree_len],
1198                     &self.key,
1199                     self.chunk_state.chunk_counter,
1200                     self.chunk_state.flags,
1201                     self.chunk_state.platform,
1202                 );
1203                 let left_cv = array_ref!(cv_pair, 0, 32);
1204                 let right_cv = array_ref!(cv_pair, 32, 32);
1205                 // Push the two CVs we received into the CV stack in order. Because
1206                 // the stack merges lazily, this guarantees we aren't merging the
1207                 // root.
1208                 self.push_cv(left_cv, self.chunk_state.chunk_counter);
1209                 self.push_cv(
1210                     right_cv,
1211                     self.chunk_state.chunk_counter + (subtree_chunks / 2),
1212                 );
1213             }
1214             self.chunk_state.chunk_counter += subtree_chunks;
1215             input = &input[subtree_len..];
1216         }
1217 
1218         // What remains is 1 chunk or less. Add it to the chunk state.
1219         debug_assert!(input.len() <= CHUNK_LEN);
1220         if !input.is_empty() {
1221             self.chunk_state.update(input);
1222             // Having added some input to the chunk_state, we know what's in
1223             // the CV stack won't become the root node, and we can do an extra
1224             // merge. This simplifies finalize().
1225             self.merge_cv_stack(self.chunk_state.chunk_counter);
1226         }
1227 
1228         self
1229     }
1230 
1231     fn final_output(&self) -> Output {
1232         // If the current chunk is the only chunk, that makes it the root node
1233         // also. Convert it directly into an Output. Otherwise, we need to
1234         // merge subtrees below.
1235         if self.cv_stack.is_empty() {
1236             debug_assert_eq!(self.chunk_state.chunk_counter, 0);
1237             return self.chunk_state.output();
1238         }
1239 
1240         // If there are any bytes in the ChunkState, finalize that chunk and
1241         // merge its CV with everything in the CV stack. In that case, the work
1242         // we did at the end of update() above guarantees that the stack
1243         // doesn't contain any unmerged subtrees that need to be merged first.
1244         // (This is important, because if there were two chunk hashes sitting
1245         // on top of the stack, they would need to merge with each other, and
1246         // merging a new chunk hash into them would be incorrect.)
1247         //
1248         // If there are no bytes in the ChunkState, we'll merge what's already
1249         // in the stack. In this case it's fine if there are unmerged chunks on
1250         // top, because we'll merge them with each other. Note that the case of
1251         // the empty chunk is taken care of above.
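        //
        // As a concrete illustration (added here as a worked example): after 6
        // complete chunks (chunk_counter = 0b110) with some bytes in the
        // ChunkState, the stack holds two CVs, one covering chunks 0..4 and
        // one covering chunks 4..6. The partial chunk's output merges with the
        // chunks-4..6 CV first, then that parent merges with the chunks-0..4
        // CV, and the final parent node is the root.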
        let mut output: Output;
        let mut num_cvs_remaining = self.cv_stack.len();
        if self.chunk_state.len() > 0 {
            debug_assert_eq!(
                self.cv_stack.len(),
                self.chunk_state.chunk_counter.count_ones() as usize,
                "cv stack does not need a merge"
            );
            output = self.chunk_state.output();
        } else {
            debug_assert!(self.cv_stack.len() >= 2);
            output = parent_node_output(
                &self.cv_stack[num_cvs_remaining - 2],
                &self.cv_stack[num_cvs_remaining - 1],
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            num_cvs_remaining -= 2;
        }
        while num_cvs_remaining > 0 {
            output = parent_node_output(
                &self.cv_stack[num_cvs_remaining - 1],
                &output.chaining_value(),
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            num_cvs_remaining -= 1;
        }
        output
    }

    /// Finalize the hash state and return the [`Hash`](struct.Hash.html) of
    /// the input.
    ///
    /// This method is idempotent. Calling it twice will give the same result.
    /// You can also add more input and finalize again.
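    ///
    /// A quick sketch of the idempotent behavior described above:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"some input");
    /// let hash1 = hasher.finalize();
    /// // Finalizing again gives the same result.
    /// assert_eq!(hash1, hasher.finalize());
    /// // Adding more input changes the result.
    /// hasher.update(b"more input");
    /// assert_ne!(hash1, hasher.finalize());
    /// ```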
    pub fn finalize(&self) -> Hash {
        self.final_output().root_hash()
    }

    /// Finalize the hash state and return an [`OutputReader`], which can
    /// supply any number of output bytes.
    ///
    /// This method is idempotent. Calling it twice will give the same result.
    /// You can also add more input and finalize again.
    ///
    /// [`OutputReader`]: struct.OutputReader.html
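    ///
    /// A quick sketch of reading extended output, split across two `fill`
    /// calls and compared against a second finalization:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"some input");
    /// let mut reader = hasher.finalize_xof();
    /// let mut first_half = [0u8; 50];
    /// let mut second_half = [0u8; 50];
    /// reader.fill(&mut first_half);
    /// reader.fill(&mut second_half);
    /// // Finalizing again produces the same output stream.
    /// let mut all = [0u8; 100];
    /// hasher.finalize_xof().fill(&mut all);
    /// assert_eq!(&all[..50], &first_half[..]);
    /// assert_eq!(&all[50..], &second_half[..]);
    /// ```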
    pub fn finalize_xof(&self) -> OutputReader {
        OutputReader::new(self.final_output())
    }
}

// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for Hasher {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Hasher")
            .field("flags", &self.chunk_state.flags)
            .field("platform", &self.chunk_state.platform)
            .finish()
    }
}

impl Default for Hasher {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "std")]
impl std::io::Write for Hasher {
    /// This is equivalent to [`update`](#method.update).
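    ///
    /// A quick sketch: because `Hasher` implements `Write`, any
    /// `std::io::Read` source can be hashed with `std::io::copy`.
    ///
    /// ```
    /// # fn main() -> std::io::Result<()> {
    /// let mut hasher = blake3::Hasher::new();
    /// // A byte slice is a simple `Read` source.
    /// std::io::copy(&mut &b"some input"[..], &mut hasher)?;
    /// assert_eq!(hasher.finalize(), blake3::hash(b"some input"));
    /// # Ok(())
    /// # }
    /// ```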
    #[inline]
    fn write(&mut self, input: &[u8]) -> std::io::Result<usize> {
        self.update(input);
        Ok(input.len())
    }

    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

/// An incremental reader for extended output, returned by
/// [`Hasher::finalize_xof`](struct.Hasher.html#method.finalize_xof).
///
/// Outputs shorter than the default length of 32 bytes (256 bits)
/// provide less security. An N-bit BLAKE3 output is intended to provide
/// N bits of first and second preimage resistance and N/2 bits of
/// collision resistance, for any N up to 256. Longer outputs don't
/// provide any additional security.
///
/// Shorter BLAKE3 outputs are prefixes of longer ones. Explicitly
/// requesting a short output is equivalent to truncating the
/// default-length output. (Note that this differs from BLAKE2, where the
/// requested output length is a parameter of the hash, so outputs of
/// different lengths are unrelated.)
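///
/// A quick sketch of the prefix property:
///
/// ```
/// let mut hasher = blake3::Hasher::new();
/// hasher.update(b"some input");
/// let mut long = [0u8; 64];
/// hasher.finalize_xof().fill(&mut long);
/// let mut short = [0u8; 16];
/// hasher.finalize_xof().fill(&mut short);
/// // The 16-byte output is a prefix of the 64-byte output.
/// assert_eq!(&long[..16], &short[..]);
/// ```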
#[derive(Clone)]
pub struct OutputReader {
    inner: Output,
    position_within_block: u8,
}

impl OutputReader {
    fn new(inner: Output) -> Self {
        Self {
            inner,
            position_within_block: 0,
        }
    }

    /// Fill a buffer with output bytes and advance the position of the
    /// `OutputReader`. This is equivalent to [`Read::read`], except that it
    /// doesn't return a `Result`. Both methods always fill the entire buffer.
    ///
    /// Note that `OutputReader` doesn't buffer output bytes internally, so
    /// calling `fill` repeatedly with a short-length or odd-length slice will
    /// end up performing the same compression multiple times. If you're
    /// reading output in a loop, prefer a slice length that's a multiple of
    /// 64.
    ///
    /// The maximum output size of BLAKE3 is 2<sup>64</sup>-1 bytes. If you try
    /// to extract more than that, for example by seeking near the end and
    /// reading further, the behavior is unspecified.
    ///
    /// [`Read::read`]: #method.read
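    ///
    /// A quick sketch of reading output in a loop with a block-sized
    /// (64-byte) buffer, as recommended above:
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().finalize_xof();
    /// let mut output = Vec::new();
    /// let mut block = [0u8; 64];
    /// for _ in 0..3 {
    ///     reader.fill(&mut block);
    ///     output.extend_from_slice(&block);
    /// }
    /// assert_eq!(output.len(), 192);
    /// // The first 32 bytes match the default-length hash of the (empty) input.
    /// assert_eq!(&output[..32], blake3::hash(b"").as_bytes());
    /// ```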
    pub fn fill(&mut self, mut buf: &mut [u8]) {
        while !buf.is_empty() {
            let block: [u8; BLOCK_LEN] = self.inner.root_output_block();
            let output_bytes = &block[self.position_within_block as usize..];
            let take = cmp::min(buf.len(), output_bytes.len());
            buf[..take].copy_from_slice(&output_bytes[..take]);
            buf = &mut buf[take..];
            self.position_within_block += take as u8;
            if self.position_within_block == BLOCK_LEN as u8 {
                self.inner.counter += 1;
                self.position_within_block = 0;
            }
        }
    }

    /// Return the current read position in the output stream. The position of
    /// a new `OutputReader` starts at 0, and each call to [`fill`] or
    /// [`Read::read`] moves the position forward by the number of bytes read.
    ///
    /// [`fill`]: #method.fill
    /// [`Read::read`]: #method.read
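    ///
    /// A quick sketch:
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().finalize_xof();
    /// assert_eq!(reader.position(), 0);
    /// let mut buf = [0u8; 100];
    /// reader.fill(&mut buf);
    /// assert_eq!(reader.position(), 100);
    /// ```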
    pub fn position(&self) -> u64 {
        self.inner.counter * BLOCK_LEN as u64 + self.position_within_block as u64
    }

    /// Seek to a new read position in the output stream. This is equivalent to
    /// calling [`Seek::seek`] with [`SeekFrom::Start`], except that it doesn't
    /// return a `Result`.
    ///
    /// [`Seek::seek`]: #method.seek
    /// [`SeekFrom::Start`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
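    ///
    /// A quick sketch: seeking forward and reading gives the same bytes as
    /// reading from the start and discarding the prefix.
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().finalize_xof();
    /// let mut from_start = [0u8; 200];
    /// reader.fill(&mut from_start);
    /// reader.set_position(100);
    /// let mut from_100 = [0u8; 100];
    /// reader.fill(&mut from_100);
    /// assert_eq!(&from_start[100..], &from_100[..]);
    /// ```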
    pub fn set_position(&mut self, position: u64) {
        self.position_within_block = (position % BLOCK_LEN as u64) as u8;
        self.inner.counter = position / BLOCK_LEN as u64;
    }
}

// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for OutputReader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("OutputReader")
            .field("position", &self.position())
            .finish()
    }
}

#[cfg(feature = "std")]
impl std::io::Read for OutputReader {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.fill(buf);
        Ok(buf.len())
    }
}

#[cfg(feature = "std")]
impl std::io::Seek for OutputReader {
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
        let max_position = u64::max_value() as i128;
        let target_position: i128 = match pos {
            std::io::SeekFrom::Start(x) => x as i128,
            std::io::SeekFrom::Current(x) => self.position() as i128 + x as i128,
            std::io::SeekFrom::End(_) => {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidInput,
                    "seek from end not supported",
                ));
            }
        };
        if target_position < 0 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "seek before start",
            ));
        }
        self.set_position(cmp::min(target_position, max_position) as u64);
        Ok(self.position())
    }
}