1 //! The official Rust implementation of the [BLAKE3] cryptographic hash
2 //! function.
3 //!
4 //! # Examples
5 //!
6 //! ```
7 //! # fn main() -> Result<(), Box<dyn std::error::Error>> {
8 //! // Hash an input all at once.
9 //! let hash1 = blake3::hash(b"foobarbaz");
10 //!
11 //! // Hash an input incrementally.
12 //! let mut hasher = blake3::Hasher::new();
13 //! hasher.update(b"foo");
14 //! hasher.update(b"bar");
15 //! hasher.update(b"baz");
16 //! let hash2 = hasher.finalize();
17 //! assert_eq!(hash1, hash2);
18 //!
19 //! // Extended output. OutputReader also implements Read and Seek.
20 //! # #[cfg(feature = "std")] {
21 //! let mut output = [0; 1000];
22 //! let mut output_reader = hasher.finalize_xof();
23 //! output_reader.fill(&mut output);
24 //! assert_eq!(&output[..32], hash1.as_bytes());
25 //! # }
26 //!
27 //! // Print a hash as hex.
28 //! println!("{}", hash1.to_hex());
29 //! # Ok(())
30 //! # }
31 //! ```
32 //!
33 //! # Cargo Features
34 //!
35 //! The `rayon` feature provides [Rayon]-based multi-threading, in particular
36 //! the [`join::RayonJoin`] type for use with [`Hasher::update_with_join`]. It
37 //! is disabled by default, but enabled for [docs.rs].
38 //!
39 //! The `neon` feature enables ARM NEON support. Currently there is no runtime
40 //! CPU feature detection for NEON, so you must only enable this feature for
41 //! targets that are known to have NEON support. In particular, some ARMv7
42 //! targets support NEON, and some don't.
43 //!
44 //! The `std` feature (enabled by default) is required for implementations of
45 //! the [`Write`] and [`Seek`] traits, and also for runtime CPU feature
46 //! detection. If this feature is disabled, the only way to use the SIMD
47 //! implementations in this crate is to enable the corresponding instruction
48 //! sets statically for the entire build, with e.g. `RUSTFLAGS="-C
49 //! target-cpu=native"`. The resulting binary will not be portable to other
50 //! machines.
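//!
//! For example, with `std` enabled, a `Hasher` can be used as a [`Write`]
//! destination. A minimal sketch of that pattern:
//!
//! ```
//! # #[cfg(feature = "std")] {
//! use std::io::Write;
//!
//! let mut hasher = blake3::Hasher::new();
//! hasher.write_all(b"some input").unwrap();
//! assert_eq!(hasher.finalize(), blake3::hash(b"some input"));
//! # }
//! ```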
51 //!
52 //! [BLAKE3]: https://blake3.io
53 //! [Rayon]: https://github.com/rayon-rs/rayon
54 //! [`join::RayonJoin`]: join/enum.RayonJoin.html
55 //! [`Hasher::update_with_join`]: struct.Hasher.html#method.update_with_join
56 //! [docs.rs]: https://docs.rs/
57 //! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
58 //! [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
59 
60 #![cfg_attr(not(feature = "std"), no_std)]
61 
62 #[cfg(test)]
63 mod test;
64 
65 // The guts module is for incremental use cases like the `bao` crate that need
66 // to explicitly compute chunk and parent chaining values. It is semi-stable
67 // and likely to keep working, but largely undocumented and not intended for
68 // widespread use.
69 #[doc(hidden)]
70 pub mod guts;
71 
72 // The platform module is pub for benchmarks only. It is not stable.
73 #[doc(hidden)]
74 pub mod platform;
75 
76 // Platform-specific implementations of the compression function. These
77 // BLAKE3-specific cfg flags are set in build.rs.
78 #[cfg(blake3_avx2_rust)]
79 #[path = "rust_avx2.rs"]
80 mod avx2;
81 #[cfg(blake3_avx2_ffi)]
82 #[path = "ffi_avx2.rs"]
83 mod avx2;
84 #[cfg(blake3_avx512_ffi)]
85 #[path = "ffi_avx512.rs"]
86 mod avx512;
87 #[cfg(feature = "neon")]
88 #[path = "ffi_neon.rs"]
89 mod neon;
90 mod portable;
91 #[cfg(blake3_sse2_rust)]
92 #[path = "rust_sse2.rs"]
93 mod sse2;
94 #[cfg(blake3_sse2_ffi)]
95 #[path = "ffi_sse2.rs"]
96 mod sse2;
97 #[cfg(blake3_sse41_rust)]
98 #[path = "rust_sse41.rs"]
99 mod sse41;
100 #[cfg(blake3_sse41_ffi)]
101 #[path = "ffi_sse41.rs"]
102 mod sse41;
103 
104 pub mod traits;
105 
106 pub mod join;
107 
108 use arrayref::{array_mut_ref, array_ref};
109 use arrayvec::{ArrayString, ArrayVec};
110 use core::cmp;
111 use core::fmt;
112 use join::{Join, SerialJoin};
113 use platform::{Platform, MAX_SIMD_DEGREE, MAX_SIMD_DEGREE_OR_2};
114 
115 /// The number of bytes in a [`Hash`](struct.Hash.html), 32.
116 pub const OUT_LEN: usize = 32;
117 
118 /// The number of bytes in a key, 32.
119 pub const KEY_LEN: usize = 32;
120 
121 // These constants are pub for incremental use cases like `bao`, as well as
122 // tests and benchmarks. Most callers should not need them.
123 #[doc(hidden)]
124 pub const BLOCK_LEN: usize = 64;
125 #[doc(hidden)]
126 pub const CHUNK_LEN: usize = 1024;
127 #[doc(hidden)]
128 pub const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64
129 
// While iterating the compression function within a chunk, the CV is
// represented as words, to avoid doing two extra endianness conversions for
// each compression in the portable implementation. But the hash_many interface
// needs to hash both input bytes and parent nodes, so it's better for its
// output CVs to be represented as bytes.
135 type CVWords = [u32; 8];
136 type CVBytes = [u8; 32]; // little-endian
137 
138 const IV: &CVWords = &[
139     0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
140 ];
141 
142 const MSG_SCHEDULE: [[usize; 16]; 7] = [
143     [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
144     [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8],
145     [3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1],
146     [10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6],
147     [12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4],
148     [9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7],
149     [11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13],
150 ];
151 
// These are the internal flags that we use to domain separate root/non-root,
// chunk/parent, and chunk beginning/middle/end. They occupy the low bits of
// the block flags word in the compression function, starting at 1 and going up
// by powers of two.
156 const CHUNK_START: u8 = 1 << 0;
157 const CHUNK_END: u8 = 1 << 1;
158 const PARENT: u8 = 1 << 2;
159 const ROOT: u8 = 1 << 3;
160 const KEYED_HASH: u8 = 1 << 4;
161 const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
162 const DERIVE_KEY_MATERIAL: u8 = 1 << 6;
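
// As an illustration of how these combine (a sketch, not used directly below):
// an input that fits in a single 64-byte block of a single chunk is compressed
// with CHUNK_START | CHUNK_END | ROOT, while a parent node that turns out to
// be the root of the tree uses PARENT | ROOT. The KEYED_HASH and DERIVE_KEY_*
// flags are additionally set in the keyed and key-derivation modes.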
163 
164 #[inline]
fn counter_low(counter: u64) -> u32 {
166     counter as u32
167 }
168 
169 #[inline]
fn counter_high(counter: u64) -> u32 {
171     (counter >> 32) as u32
172 }
173 
174 /// An output of the default size, 32 bytes, which provides constant-time
175 /// equality checking.
176 ///
177 /// `Hash` implements [`From`] and [`Into`] for `[u8; 32]`, and it provides an
178 /// explicit [`as_bytes`] method returning `&[u8; 32]`. However, byte arrays
179 /// and slices don't provide constant-time equality checking, which is often a
180 /// security requirement in software that handles private data. `Hash` doesn't
181 /// implement [`Deref`] or [`AsRef`], to avoid situations where a type
182 /// conversion happens implicitly and the constant-time property is
183 /// accidentally lost.
184 ///
185 /// `Hash` provides the [`to_hex`] method for converting to hexadecimal. It
186 /// doesn't directly support converting from hexadecimal, but here's an example
187 /// of doing that with the [`hex`] crate:
188 ///
189 /// ```
190 /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
191 /// use std::convert::TryInto;
192 ///
193 /// let hash_hex = "d74981efa70a0c880b8d8c1985d075dbcbf679b99a5f9914e5aaf96b831a9e24";
194 /// let hash_bytes = hex::decode(hash_hex)?;
195 /// let hash_array: [u8; blake3::OUT_LEN] = hash_bytes[..].try_into()?;
196 /// let hash: blake3::Hash = hash_array.into();
197 /// # Ok(())
198 /// # }
199 /// ```
200 ///
201 /// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
202 /// [`Into`]: https://doc.rust-lang.org/std/convert/trait.Into.html
203 /// [`as_bytes`]: #method.as_bytes
204 /// [`Deref`]: https://doc.rust-lang.org/stable/std/ops/trait.Deref.html
205 /// [`AsRef`]: https://doc.rust-lang.org/std/convert/trait.AsRef.html
206 /// [`to_hex`]: #method.to_hex
207 /// [`hex`]: https://crates.io/crates/hex
208 #[derive(Clone, Copy, Hash)]
209 pub struct Hash([u8; OUT_LEN]);
210 
211 impl Hash {
    /// The bytes of the `Hash`. Note that byte arrays don't provide
    /// constant-time equality checking, so if you need to compare hashes,
    /// prefer the `Hash` type.
215     #[inline]
    pub fn as_bytes(&self) -> &[u8; OUT_LEN] {
217         &self.0
218     }
219 
220     /// The hexadecimal encoding of the `Hash`. The returned [`ArrayString`] is
221     /// a fixed size and doesn't allocate memory on the heap. Note that
222     /// [`ArrayString`] doesn't provide constant-time equality checking, so if
223     /// you need to compare hashes, prefer the `Hash` type.
224     ///
225     /// [`ArrayString`]: https://docs.rs/arrayvec/0.5.1/arrayvec/struct.ArrayString.html
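    ///
    /// For example, the hex string is always 64 characters long:
    ///
    /// ```
    /// let hex = blake3::hash(b"foo").to_hex();
    /// assert_eq!(hex.len(), 2 * blake3::OUT_LEN);
    /// ```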
    pub fn to_hex(&self) -> ArrayString<[u8; 2 * OUT_LEN]> {
227         let mut s = ArrayString::new();
228         let table = b"0123456789abcdef";
229         for &b in self.0.iter() {
230             s.push(table[(b >> 4) as usize] as char);
231             s.push(table[(b & 0xf) as usize] as char);
232         }
233         s
234     }
235 }
236 
237 impl From<[u8; OUT_LEN]> for Hash {
238     #[inline]
    fn from(bytes: [u8; OUT_LEN]) -> Self {
240         Self(bytes)
241     }
242 }
243 
244 impl From<Hash> for [u8; OUT_LEN] {
245     #[inline]
    fn from(hash: Hash) -> Self {
247         hash.0
248     }
249 }
250 
251 /// This implementation is constant-time.
252 impl PartialEq for Hash {
253     #[inline]
    fn eq(&self, other: &Hash) -> bool {
255         constant_time_eq::constant_time_eq_32(&self.0, &other.0)
256     }
257 }
258 
259 /// This implementation is constant-time.
260 impl PartialEq<[u8; OUT_LEN]> for Hash {
261     #[inline]
    fn eq(&self, other: &[u8; OUT_LEN]) -> bool {
263         constant_time_eq::constant_time_eq_32(&self.0, other)
264     }
265 }
266 
267 impl Eq for Hash {}
268 
269 impl fmt::Debug for Hash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
271         // Formatting field as `&str` to reduce code size since the `Debug`
272         // dynamic dispatch table for `&str` is likely needed elsewhere already,
273         // but that for `ArrayString<[u8; 64]>` is not.
274         let hex = self.to_hex();
275         let hex: &str = hex.as_str();
276 
277         f.debug_tuple("Hash").field(&hex).finish()
278     }
279 }
280 
281 // Each chunk or parent node can produce either a 32-byte chaining value or, by
282 // setting the ROOT flag, any number of final output bytes. The Output struct
283 // captures the state just prior to choosing between those two possibilities.
284 #[derive(Clone)]
285 struct Output {
286     input_chaining_value: CVWords,
287     block: [u8; 64],
288     block_len: u8,
289     counter: u64,
290     flags: u8,
291     platform: Platform,
292 }
293 
294 impl Output {
    fn chaining_value(&self) -> CVBytes {
296         let mut cv = self.input_chaining_value;
297         self.platform.compress_in_place(
298             &mut cv,
299             &self.block,
300             self.block_len,
301             self.counter,
302             self.flags,
303         );
304         platform::le_bytes_from_words_32(&cv)
305     }
306 
    fn root_hash(&self) -> Hash {
308         debug_assert_eq!(self.counter, 0);
309         let mut cv = self.input_chaining_value;
310         self.platform
311             .compress_in_place(&mut cv, &self.block, self.block_len, 0, self.flags | ROOT);
312         Hash(platform::le_bytes_from_words_32(&cv))
313     }
314 
    fn root_output_block(&self) -> [u8; 2 * OUT_LEN] {
316         self.platform.compress_xof(
317             &self.input_chaining_value,
318             &self.block,
319             self.block_len,
320             self.counter,
321             self.flags | ROOT,
322         )
323     }
324 }
325 
326 #[derive(Clone)]
327 struct ChunkState {
328     cv: CVWords,
329     chunk_counter: u64,
330     buf: [u8; BLOCK_LEN],
331     buf_len: u8,
332     blocks_compressed: u8,
333     flags: u8,
334     platform: Platform,
335 }
336 
337 impl ChunkState {
    fn new(key: &CVWords, chunk_counter: u64, flags: u8, platform: Platform) -> Self {
339         Self {
340             cv: *key,
341             chunk_counter,
342             buf: [0; BLOCK_LEN],
343             buf_len: 0,
344             blocks_compressed: 0,
345             flags,
346             platform,
347         }
348     }
349 
    fn len(&self) -> usize {
351         BLOCK_LEN * self.blocks_compressed as usize + self.buf_len as usize
352     }
353 
    fn fill_buf(&mut self, input: &mut &[u8]) {
355         let want = BLOCK_LEN - self.buf_len as usize;
356         let take = cmp::min(want, input.len());
357         self.buf[self.buf_len as usize..][..take].copy_from_slice(&input[..take]);
358         self.buf_len += take as u8;
359         *input = &input[take..];
360     }
361 
    fn start_flag(&self) -> u8 {
363         if self.blocks_compressed == 0 {
364             CHUNK_START
365         } else {
366             0
367         }
368     }
369 
370     // Try to avoid buffering as much as possible, by compressing directly from
371     // the input slice when full blocks are available.
    fn update(&mut self, mut input: &[u8]) -> &mut Self {
373         if self.buf_len > 0 {
374             self.fill_buf(&mut input);
375             if !input.is_empty() {
376                 debug_assert_eq!(self.buf_len as usize, BLOCK_LEN);
377                 let block_flags = self.flags | self.start_flag(); // borrowck
378                 self.platform.compress_in_place(
379                     &mut self.cv,
380                     &self.buf,
381                     BLOCK_LEN as u8,
382                     self.chunk_counter,
383                     block_flags,
384                 );
385                 self.buf_len = 0;
386                 self.buf = [0; BLOCK_LEN];
387                 self.blocks_compressed += 1;
388             }
389         }
390 
391         while input.len() > BLOCK_LEN {
392             debug_assert_eq!(self.buf_len, 0);
393             let block_flags = self.flags | self.start_flag(); // borrowck
394             self.platform.compress_in_place(
395                 &mut self.cv,
396                 array_ref!(input, 0, BLOCK_LEN),
397                 BLOCK_LEN as u8,
398                 self.chunk_counter,
399                 block_flags,
400             );
401             self.blocks_compressed += 1;
402             input = &input[BLOCK_LEN..];
403         }
404 
405         self.fill_buf(&mut input);
406         debug_assert!(input.is_empty());
407         debug_assert!(self.len() <= CHUNK_LEN);
408         self
409     }
410 
    fn output(&self) -> Output {
412         let block_flags = self.flags | self.start_flag() | CHUNK_END;
413         Output {
414             input_chaining_value: self.cv,
415             block: self.buf,
416             block_len: self.buf_len,
417             counter: self.chunk_counter,
418             flags: block_flags,
419             platform: self.platform,
420         }
421     }
422 }
423 
424 // Don't derive(Debug), because the state may be secret.
425 impl fmt::Debug for ChunkState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
427         f.debug_struct("ChunkState")
428             .field("len", &self.len())
429             .field("chunk_counter", &self.chunk_counter)
430             .field("flags", &self.flags)
431             .field("platform", &self.platform)
432             .finish()
433     }
434 }
435 
436 // IMPLEMENTATION NOTE
437 // ===================
438 // The recursive function compress_subtree_wide(), implemented below, is the
439 // basis of high-performance BLAKE3. We use it both for all-at-once hashing,
440 // and for the incremental input with Hasher (though we have to be careful with
441 // subtree boundaries in the incremental case). compress_subtree_wide() applies
442 // several optimizations at the same time:
443 // - Multi-threading with Rayon.
444 // - Parallel chunk hashing with SIMD.
// - Parallel parent hashing with SIMD. Note that while SIMD chunk hashing
//   maxes out at MAX_SIMD_DEGREE*CHUNK_LEN, parallel parent hashing continues
//   to benefit from larger inputs, because more levels of the tree can use
//   full-width SIMD vectors for parent hashing. Without parallel parent
//   hashing, we lose about 10% of overall throughput on AVX2 and AVX-512.
450 
451 // pub for benchmarks
452 #[doc(hidden)]
453 #[derive(Clone, Copy)]
454 pub enum IncrementCounter {
455     Yes,
456     No,
457 }
458 
459 impl IncrementCounter {
460     #[inline]
    fn yes(&self) -> bool {
462         match self {
463             IncrementCounter::Yes => true,
464             IncrementCounter::No => false,
465         }
466     }
467 }
468 
469 // The largest power of two less than or equal to `n`, used for left_len()
470 // immediately below, and also directly in Hasher::update().
fn largest_power_of_two_leq(n: usize) -> usize {
472     ((n / 2) + 1).next_power_of_two()
473 }
474 
475 // Given some input larger than one chunk, return the number of bytes that
476 // should go in the left subtree. This is the largest power-of-2 number of
477 // chunks that leaves at least 1 byte for the right subtree.
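// For example (illustration only): with content_len = 6 * CHUNK_LEN + 1,
// full_chunks is 6, largest_power_of_two_leq(6) is 4, and the left subtree
// gets 4 * CHUNK_LEN bytes, leaving 2 full chunks plus 1 byte for the right.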
fn left_len(content_len: usize) -> usize {
479     debug_assert!(content_len > CHUNK_LEN);
480     // Subtract 1 to reserve at least one byte for the right side.
481     let full_chunks = (content_len - 1) / CHUNK_LEN;
482     largest_power_of_two_leq(full_chunks) * CHUNK_LEN
483 }
484 
485 // Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
486 // on a single thread. Write out the chunk chaining values and return the
487 // number of chunks hashed. These chunks are never the root and never empty;
488 // those cases use a different codepath.
fn compress_chunks_parallel(
490     input: &[u8],
491     key: &CVWords,
492     chunk_counter: u64,
493     flags: u8,
494     platform: Platform,
495     out: &mut [u8],
496 ) -> usize {
497     debug_assert!(!input.is_empty(), "empty chunks below the root");
498     debug_assert!(input.len() <= MAX_SIMD_DEGREE * CHUNK_LEN);
499 
500     let mut chunks_exact = input.chunks_exact(CHUNK_LEN);
501     let mut chunks_array = ArrayVec::<[&[u8; CHUNK_LEN]; MAX_SIMD_DEGREE]>::new();
502     for chunk in &mut chunks_exact {
503         chunks_array.push(array_ref!(chunk, 0, CHUNK_LEN));
504     }
505     platform.hash_many(
506         &chunks_array,
507         key,
508         chunk_counter,
509         IncrementCounter::Yes,
510         flags,
511         CHUNK_START,
512         CHUNK_END,
513         out,
514     );
515 
516     // Hash the remaining partial chunk, if there is one. Note that the empty
517     // chunk (meaning the empty message) is a different codepath.
518     let chunks_so_far = chunks_array.len();
519     if !chunks_exact.remainder().is_empty() {
520         let counter = chunk_counter + chunks_so_far as u64;
521         let mut chunk_state = ChunkState::new(key, counter, flags, platform);
522         chunk_state.update(chunks_exact.remainder());
523         *array_mut_ref!(out, chunks_so_far * OUT_LEN, OUT_LEN) =
524             chunk_state.output().chaining_value();
525         chunks_so_far + 1
526     } else {
527         chunks_so_far
528     }
529 }
530 
531 // Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
532 // on a single thread. Write out the parent chaining values and return the
533 // number of parents hashed. (If there's an odd input chaining value left over,
534 // return it as an additional output.) These parents are never the root and
535 // never empty; those cases use a different codepath.
fn compress_parents_parallel(
537     child_chaining_values: &[u8],
538     key: &CVWords,
539     flags: u8,
540     platform: Platform,
541     out: &mut [u8],
542 ) -> usize {
543     debug_assert_eq!(child_chaining_values.len() % OUT_LEN, 0, "wacky hash bytes");
544     let num_children = child_chaining_values.len() / OUT_LEN;
545     debug_assert!(num_children >= 2, "not enough children");
546     debug_assert!(num_children <= 2 * MAX_SIMD_DEGREE_OR_2, "too many");
547 
548     let mut parents_exact = child_chaining_values.chunks_exact(BLOCK_LEN);
549     // Use MAX_SIMD_DEGREE_OR_2 rather than MAX_SIMD_DEGREE here, because of
550     // the requirements of compress_subtree_wide().
551     let mut parents_array = ArrayVec::<[&[u8; BLOCK_LEN]; MAX_SIMD_DEGREE_OR_2]>::new();
552     for parent in &mut parents_exact {
553         parents_array.push(array_ref!(parent, 0, BLOCK_LEN));
554     }
555     platform.hash_many(
556         &parents_array,
557         key,
558         0, // Parents always use counter 0.
559         IncrementCounter::No,
560         flags | PARENT,
561         0, // Parents have no start flags.
562         0, // Parents have no end flags.
563         out,
564     );
565 
566     // If there's an odd child left over, it becomes an output.
567     let parents_so_far = parents_array.len();
568     if !parents_exact.remainder().is_empty() {
569         out[parents_so_far * OUT_LEN..][..OUT_LEN].copy_from_slice(parents_exact.remainder());
570         parents_so_far + 1
571     } else {
572         parents_so_far
573     }
574 }
575 
// The wide helper function returns (writes out) an array of chaining values
// and returns the length of that array. The number of chaining values returned
// is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
// if the input is shorter than that many chunks. The reason for maintaining a
// wide array of chaining values going back up the tree is to allow the
// implementation to hash as many parents in parallel as possible.
//
// As a special case when the SIMD degree is 1, this function will still return
// at least 2 outputs. This guarantees that this function doesn't perform the
// root compression. (If it did, it would use the wrong flags, and also we
// wouldn't be able to implement extendable output.) Note that this function is
// not used when the whole input is only 1 chunk long; that's a different
// codepath.
589 //
590 // Why not just have the caller split the input on the first update(), instead
591 // of implementing this special rule? Because we don't want to limit SIMD or
592 // multi-threading parallelism for that update().
fn compress_subtree_wide<J: Join>(
594     input: &[u8],
595     key: &CVWords,
596     chunk_counter: u64,
597     flags: u8,
598     platform: Platform,
599     out: &mut [u8],
600 ) -> usize {
601     // Note that the single chunk case does *not* bump the SIMD degree up to 2
602     // when it is 1. This allows Rayon the option of multi-threading even the
603     // 2-chunk case, which can help performance on smaller platforms.
604     if input.len() <= platform.simd_degree() * CHUNK_LEN {
605         return compress_chunks_parallel(input, key, chunk_counter, flags, platform, out);
606     }
607 
608     // With more than simd_degree chunks, we need to recurse. Start by dividing
609     // the input into left and right subtrees. (Note that this is only optimal
610     // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
611     // of 3 or something, we'll need a more complicated strategy.)
612     debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
613     let (left, right) = input.split_at(left_len(input.len()));
614     let right_chunk_counter = chunk_counter + (left.len() / CHUNK_LEN) as u64;
615 
616     // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
617     // account for the special case of returning 2 outputs when the SIMD degree
618     // is 1.
619     let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
620     let degree = if left.len() == CHUNK_LEN {
621         // The "simd_degree=1 and we're at the leaf nodes" case.
622         debug_assert_eq!(platform.simd_degree(), 1);
623         1
624     } else {
625         cmp::max(platform.simd_degree(), 2)
626     };
627     let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);
628 
629     // Recurse! This uses multiple threads if the "rayon" feature is enabled.
630     let (left_n, right_n) = J::join(
631         || compress_subtree_wide::<J>(left, key, chunk_counter, flags, platform, left_out),
632         || compress_subtree_wide::<J>(right, key, right_chunk_counter, flags, platform, right_out),
633         left.len(),
634         right.len(),
635     );
636 
637     // The special case again. If simd_degree=1, then we'll have left_n=1 and
638     // right_n=1. Rather than compressing them into a single output, return
639     // them directly, to make sure we always have at least two outputs.
640     debug_assert_eq!(left_n, degree);
641     debug_assert!(right_n >= 1 && right_n <= left_n);
642     if left_n == 1 {
643         out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
644         return 2;
645     }
646 
647     // Otherwise, do one layer of parent node compression.
648     let num_children = left_n + right_n;
649     compress_parents_parallel(
650         &cv_array[..num_children * OUT_LEN],
651         key,
652         flags,
653         platform,
654         out,
655     )
656 }
657 
658 // Hash a subtree with compress_subtree_wide(), and then condense the resulting
659 // list of chaining values down to a single parent node. Don't compress that
660 // last parent node, however. Instead, return its message bytes (the
661 // concatenated chaining values of its children). This is necessary when the
662 // first call to update() supplies a complete subtree, because the topmost
663 // parent node of that subtree could end up being the root. It's also necessary
664 // for extended output in the general case.
665 //
666 // As with compress_subtree_wide(), this function is not used on inputs of 1
667 // chunk or less. That's a different codepath.
fn compress_subtree_to_parent_node<J: Join>(
669     input: &[u8],
670     key: &CVWords,
671     chunk_counter: u64,
672     flags: u8,
673     platform: Platform,
674 ) -> [u8; BLOCK_LEN] {
675     debug_assert!(input.len() > CHUNK_LEN);
676     let mut cv_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
677     let mut num_cvs =
678         compress_subtree_wide::<J>(input, &key, chunk_counter, flags, platform, &mut cv_array);
679     debug_assert!(num_cvs >= 2);
680 
681     // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
682     // compress_subtree_wide() returns more than 2 chaining values. Condense
683     // them into 2 by forming parent nodes repeatedly.
684     let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
685     while num_cvs > 2 {
686         let cv_slice = &cv_array[..num_cvs * OUT_LEN];
687         num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
688         cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
689     }
690     *array_ref!(cv_array, 0, 2 * OUT_LEN)
691 }
692 
// Hash a complete input all at once. Unlike compress_subtree_wide() and
// compress_subtree_to_parent_node(), this function handles the 1 chunk case.
// Note that we use SerialJoin here, so this is always single-threaded.
fn hash_all_at_once(input: &[u8], key: &CVWords, flags: u8) -> Output {
697     let platform = Platform::detect();
698 
699     // If the whole subtree is one chunk, hash it directly with a ChunkState.
700     if input.len() <= CHUNK_LEN {
701         return ChunkState::new(key, 0, flags, platform)
702             .update(input)
703             .output();
704     }
705 
706     // Otherwise construct an Output object from the parent node returned by
707     // compress_subtree_to_parent_node().
708     Output {
709         input_chaining_value: *key,
710         block: compress_subtree_to_parent_node::<SerialJoin>(input, key, 0, flags, platform),
711         block_len: BLOCK_LEN as u8,
712         counter: 0,
713         flags: flags | PARENT,
714         platform,
715     }
716 }
717 
718 /// The default hash function.
719 ///
720 /// For an incremental version that accepts multiple writes, see [`Hasher::update`].
721 ///
722 /// This function is always single-threaded. For multi-threading support, see
723 /// [`Hasher::update_with_join`].
724 ///
725 /// [`Hasher::update`]: struct.Hasher.html#method.update
726 /// [`Hasher::update_with_join`]: struct.Hasher.html#method.update_with_join
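///
/// A quick example; the result matches the incremental `Hasher` API:
///
/// ```
/// let hash = blake3::hash(b"hello");
/// assert_eq!(hash, blake3::Hasher::new().update(b"hello").finalize());
/// ```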
pub fn hash(input: &[u8]) -> Hash {
728     hash_all_at_once(input, IV, 0).root_hash()
729 }
730 
731 /// The keyed hash function.
732 ///
/// This is suitable for use as a message authentication code, for example to
/// replace an HMAC instance. In that use case, the constant-time equality
/// checking provided by [`Hash`](struct.Hash.html) is almost always a security
/// requirement, and callers need to be careful not to compare MACs as raw
/// bytes.
738 ///
739 /// This function is always single-threaded. For multi-threading support, see
740 /// [`Hasher::update_with_join`].
741 ///
742 /// [`Hasher::update_with_join`]: struct.Hasher.html#method.update_with_join
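///
/// A short example; the hardcoded key here is for illustration only, and a
/// real key should come from a secure random source:
///
/// ```
/// let key = [42u8; blake3::KEY_LEN];
/// let mac1 = blake3::keyed_hash(&key, b"message");
/// let mac2 = blake3::Hasher::new_keyed(&key).update(b"message").finalize();
/// assert_eq!(mac1, mac2);
/// ```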
pub fn keyed_hash(key: &[u8; KEY_LEN], input: &[u8]) -> Hash {
744     let key_words = platform::words_from_le_bytes_32(key);
745     hash_all_at_once(input, &key_words, KEYED_HASH).root_hash()
746 }
747 
748 /// The key derivation function.
749 ///
750 /// Given cryptographic key material of any length and a context string of any
751 /// length, this function outputs a derived subkey of any length. **The context
752 /// string should be hardcoded, globally unique, and application-specific.** A
753 /// good default format for such strings is `"[application] [commit timestamp]
754 /// [purpose]"`, e.g., `"example.com 2019-12-25 16:18:03 session tokens v1"`.
755 ///
756 /// Key derivation is important when you want to use the same key in multiple
757 /// algorithms or use cases. Using the same key with different cryptographic
758 /// algorithms is generally forbidden, and deriving a separate subkey for each
759 /// use case protects you from bad interactions. Derived keys also mitigate the
760 /// damage from one part of your application accidentally leaking its key.
761 ///
762 /// As a rare exception to that general rule, however, it is possible to use
763 /// `derive_key` itself with key material that you are already using with
764 /// another algorithm. You might need to do this if you're adding features to
765 /// an existing application, which does not yet use key derivation internally.
766 /// However, you still must not share key material with algorithms that forbid
767 /// key reuse entirely, like a one-time pad.
768 ///
769 /// Note that BLAKE3 is not a password hash, and **`derive_key` should never be
770 /// used with passwords.** Instead, use a dedicated password hash like
771 /// [Argon2]. Password hashes are entirely different from generic hash
772 /// functions, with opposite design requirements.
773 ///
774 /// This function is always single-threaded. For multi-threading support, see
775 /// [`Hasher::update_with_join`].
776 ///
777 /// [`Hasher::new_derive_key`]: struct.Hasher.html#method.new_derive_key
778 /// [`Hasher::finalize_xof`]: struct.Hasher.html#method.finalize_xof
779 /// [Argon2]: https://en.wikipedia.org/wiki/Argon2
780 /// [`Hasher::update_with_join`]: struct.Hasher.html#method.update_with_join
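///
/// A usage sketch; the context string below is the example format from above,
/// and the key material is a placeholder:
///
/// ```
/// const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
///
/// let key_material = b"some uniformly random input keying material";
/// let mut session_key = [0u8; 32];
/// blake3::derive_key(CONTEXT, key_material, &mut session_key);
/// ```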
pub fn derive_key(context: &str, key_material: &[u8], output: &mut [u8]) {
782     let context_key = hash_all_at_once(context.as_bytes(), IV, DERIVE_KEY_CONTEXT).root_hash();
783     let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
784     let inner_output = hash_all_at_once(key_material, &context_key_words, DERIVE_KEY_MATERIAL);
785     OutputReader::new(inner_output).fill(output);
786 }
787 
fn parent_node_output(
789     left_child: &CVBytes,
790     right_child: &CVBytes,
791     key: &CVWords,
792     flags: u8,
793     platform: Platform,
794 ) -> Output {
795     let mut block = [0; BLOCK_LEN];
796     block[..32].copy_from_slice(left_child);
797     block[32..].copy_from_slice(right_child);
798     Output {
799         input_chaining_value: *key,
800         block,
801         block_len: BLOCK_LEN as u8,
802         counter: 0,
803         flags: flags | PARENT,
804         platform,
805     }
806 }
807 
808 /// An incremental hash state that can accept any number of writes.
809 ///
810 /// In addition to its inherent methods, this type implements several commonly
811 /// used traits from the [`digest`](https://crates.io/crates/digest) and
812 /// [`crypto_mac`](https://crates.io/crates/crypto-mac) crates.
813 ///
814 /// **Performance note:** The [`update`] and [`update_with_join`] methods
815 /// perform poorly when the caller's input buffer is small. See their method
816 /// docs below. A 16 KiB buffer is large enough to leverage all currently
817 /// supported SIMD instruction sets.
818 ///
819 /// # Examples
820 ///
821 /// ```
822 /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
823 /// // Hash an input incrementally.
824 /// let mut hasher = blake3::Hasher::new();
825 /// hasher.update(b"foo");
826 /// hasher.update(b"bar");
827 /// hasher.update(b"baz");
828 /// assert_eq!(hasher.finalize(), blake3::hash(b"foobarbaz"));
829 ///
830 /// // Extended output. OutputReader also implements Read and Seek.
831 /// # #[cfg(feature = "std")] {
832 /// let mut output = [0; 1000];
833 /// let mut output_reader = hasher.finalize_xof();
834 /// output_reader.fill(&mut output);
835 /// assert_eq!(&output[..32], blake3::hash(b"foobarbaz").as_bytes());
836 /// # }
837 /// # Ok(())
838 /// # }
839 /// ```
840 ///
841 /// [`update`]: #method.update
842 /// [`update_with_join`]: #method.update_with_join
843 #[derive(Clone)]
844 pub struct Hasher {
845     key: CVWords,
846     chunk_state: ChunkState,
847     // The stack size is MAX_DEPTH + 1 because we do lazy merging. For example,
848     // with 7 chunks, we have 3 entries in the stack. Adding an 8th chunk
849     // requires a 4th entry, rather than merging everything down to 1, because
850     // we don't know whether more input is coming. This is different from how
851     // the reference implementation does things.
852     cv_stack: ArrayVec<[CVBytes; MAX_DEPTH + 1]>,
853 }
854 
855 impl Hasher {
    fn new_internal(key: &CVWords, flags: u8) -> Self {
857         Self {
858             key: *key,
859             chunk_state: ChunkState::new(key, 0, flags, Platform::detect()),
860             cv_stack: ArrayVec::new(),
861         }
862     }
863 
864     /// Construct a new `Hasher` for the regular hash function.
    pub fn new() -> Self {
866         Self::new_internal(IV, 0)
867     }
868 
869     /// Construct a new `Hasher` for the keyed hash function. See
870     /// [`keyed_hash`].
871     ///
872     /// [`keyed_hash`]: fn.keyed_hash.html
    pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
874         let key_words = platform::words_from_le_bytes_32(key);
875         Self::new_internal(&key_words, KEYED_HASH)
876     }
877 
878     /// Construct a new `Hasher` for the key derivation function. See
879     /// [`derive_key`]. The context string should be hardcoded, globally
880     /// unique, and application-specific.
881     ///
882     /// [`derive_key`]: fn.derive_key.html
    pub fn new_derive_key(context: &str) -> Self {
884         let context_key = hash_all_at_once(context.as_bytes(), IV, DERIVE_KEY_CONTEXT).root_hash();
885         let context_key_words = platform::words_from_le_bytes_32(context_key.as_bytes());
886         Self::new_internal(&context_key_words, DERIVE_KEY_MATERIAL)
887     }
888 
889     /// Reset the `Hasher` to its initial state.
890     ///
891     /// This is functionally the same as overwriting the `Hasher` with a new
892     /// one, using the same key or context string if any. However, depending on
    /// how much inlining the optimizer does, moving a `Hasher` might copy its
    /// entire CV stack, most of which is useless uninitialized bytes. This
    /// method avoids that copy.
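    ///
    /// A small example of reuse:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"foo");
    /// hasher.reset();
    /// hasher.update(b"bar");
    /// assert_eq!(hasher.finalize(), blake3::hash(b"bar"));
    /// ```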
    pub fn reset(&mut self) -> &mut Self {
897         self.chunk_state = ChunkState::new(
898             &self.key,
899             0,
900             self.chunk_state.flags,
901             self.chunk_state.platform,
902         );
903         self.cv_stack.clear();
904         self
905     }
906 
907     // As described in push_cv() below, we do "lazy merging", delaying merges
908     // until right before the next CV is about to be added. This is different
909     // from the reference implementation. Another difference is that we aren't
910     // always merging 1 chunk at a time. Instead, each CV might represent any
911     // power-of-two number of chunks, as long as the smaller-above-larger stack
912     // order is maintained. Instead of the "count the trailing 0-bits"
913     // algorithm described in the spec, we use a "count the total number of
914     // 1-bits" variant that doesn't require us to retain the subtree size of
915     // the CV on top of the stack. The principle is the same: each CV that
916     // should remain in the stack is represented by a 1-bit in the total number
917     // of chunks (or bytes) so far.
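    //
    // For example (illustration only): with 6 total chunks (0b110) the stack
    // holds two CVs, one covering a 4-chunk subtree and one covering a 2-chunk
    // subtree; at 7 chunks (0b111) it holds three; and at 8 chunks (0b1000)
    // everything merges down to a single CV.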
    fn merge_cv_stack(&mut self, total_len: u64) {
919         let post_merge_stack_len = total_len.count_ones() as usize;
920         while self.cv_stack.len() > post_merge_stack_len {
921             let right_child = self.cv_stack.pop().unwrap();
922             let left_child = self.cv_stack.pop().unwrap();
923             let parent_output = parent_node_output(
924                 &left_child,
925                 &right_child,
926                 &self.key,
927                 self.chunk_state.flags,
928                 self.chunk_state.platform,
929             );
930             self.cv_stack.push(parent_output.chaining_value());
931         }
932     }
933 
934     // In reference_impl.rs, we merge the new CV with existing CVs from the
935     // stack before pushing it. We can do that because we know more input is
936     // coming, so we know none of the merges are root.
937     //
938     // This setting is different. We want to feed as much input as possible to
939     // compress_subtree_wide(), without setting aside anything for the
940     // chunk_state. If the user gives us 64 KiB, we want to parallelize over
941     // all 64 KiB at once as a single subtree, if at all possible.
942     //
943     // This leads to two problems:
944     // 1) This 64 KiB input might be the only call that ever gets made to
945     //    update. In this case, the root node of the 64 KiB subtree would be
946     //    the root node of the whole tree, and it would need to be ROOT
947     //    finalized. We can't compress it until we know.
    // 2) This 64 KiB input might complete a larger tree, whose root node is
    //    similarly going to be the root of the whole tree. For example, maybe
    //    we have 192 KiB (that is, 128 + 64) hashed so far. We can't compress
    //    the node at the root of the 256 KiB subtree until we know how to
    //    finalize it.
953     //
954     // The second problem is solved with "lazy merging". That is, when we're
955     // about to add a CV to the stack, we don't merge it with anything first,
956     // as the reference impl does. Instead we do merges using the *previous* CV
957     // that was added, which is sitting on top of the stack, and we put the new
958     // CV (unmerged) on top of the stack afterwards. This guarantees that we
959     // never merge the root node until finalize().
960     //
961     // Solving the first problem requires an additional tool,
962     // compress_subtree_to_parent_node(). That function always returns the top
963     // *two* chaining values of the subtree it's compressing. We then do lazy
964     // merging with each of them separately, so that the second CV will always
965     // remain unmerged. (That also helps us support extendable output when
966     // we're hashing an input all-at-once.)
    fn push_cv(&mut self, new_cv: &CVBytes, chunk_counter: u64) {
968         self.merge_cv_stack(chunk_counter);
969         self.cv_stack.push(*new_cv);
970     }
971 
972     /// Add input bytes to the hash state. You can call this any number of
973     /// times.
974     ///
975     /// This method is always single-threaded. For multi-threading support, see
976     /// `update_with_join` below.
977     ///
978     /// Note that the degree of SIMD parallelism that `update` can use is
979     /// limited by the size of this input buffer. The 8 KiB buffer currently
980     /// used by [`std::io::copy`] is enough to leverage AVX2, for example, but
981     /// not enough to leverage AVX-512. A 16 KiB buffer is large enough to
982     /// leverage all currently supported SIMD instruction sets.
983     ///
984     /// [`std::io::copy`]: https://doc.rust-lang.org/std/io/fn.copy.html
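    ///
    /// For example, when feeding a `Hasher` from a reader, a buffer along
    /// these lines keeps the SIMD backends busy (the 16 KiB size is just the
    /// guidance above, not a constant defined by this crate):
    ///
    /// ```
    /// # #[cfg(feature = "std")] {
    /// use std::io::Read;
    ///
    /// // Any reader works here; a byte slice keeps the example self-contained.
    /// let mut reader: &[u8] = b"some input bytes";
    /// let mut hasher = blake3::Hasher::new();
    /// let mut buf = [0u8; 16 * 1024];
    /// loop {
    ///     let n = reader.read(&mut buf).unwrap();
    ///     if n == 0 {
    ///         break;
    ///     }
    ///     hasher.update(&buf[..n]);
    /// }
    /// assert_eq!(hasher.finalize(), blake3::hash(b"some input bytes"));
    /// # }
    /// ```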
    pub fn update(&mut self, input: &[u8]) -> &mut Self {
986         self.update_with_join::<SerialJoin>(input)
987     }
988 
989     /// Add input bytes to the hash state, as with `update`, but potentially
990     /// using multi-threading. See the example below, and the
991     /// [`join`](join/index.html) module for a more detailed explanation.
992     ///
993     /// To get any performance benefit from multi-threading, the input buffer
994     /// size needs to be very large. As a rule of thumb on x86_64, there is no
995     /// benefit to multi-threading inputs less than 128 KiB. Other platforms
996     /// have different thresholds, and in general you need to benchmark your
997     /// specific use case. Where possible, memory mapping an entire input file
998     /// is recommended, to take maximum advantage of multi-threading without
999     /// needing to tune a specific buffer size. Where memory mapping is not
1000     /// possible, good multi-threading performance requires doing IO on a
1001     /// background thread, to avoid sleeping all your worker threads while the
1002     /// input buffer is (serially) refilled. This is quite complicated compared
1003     /// to memory mapping.
1004     ///
1005     /// # Example
1006     ///
1007     /// ```
1008     /// // Hash a large input using multi-threading. Note that multi-threading
1009     /// // comes with some overhead, and it can actually hurt performance for small
1010     /// // inputs. The meaning of "small" varies, however, depending on the
1011     /// // platform and the number of threads. (On x86_64, the cutoff tends to be
1012     /// // around 128 KiB.) You should benchmark your own use case to see whether
1013     /// // multi-threading helps.
1014     /// # #[cfg(feature = "rayon")]
1015     /// # {
1016     /// # fn some_large_input() -> &'static [u8] { b"foo" }
1017     /// let input: &[u8] = some_large_input();
1018     /// let mut hasher = blake3::Hasher::new();
1019     /// hasher.update_with_join::<blake3::join::RayonJoin>(input);
1020     /// let hash = hasher.finalize();
1021     /// # }
1022     /// ```
    pub fn update_with_join<J: Join>(&mut self, mut input: &[u8]) -> &mut Self {
1024         // If we have some partial chunk bytes in the internal chunk_state, we
1025         // need to finish that chunk first.
1026         if self.chunk_state.len() > 0 {
1027             let want = CHUNK_LEN - self.chunk_state.len();
1028             let take = cmp::min(want, input.len());
1029             self.chunk_state.update(&input[..take]);
1030             input = &input[take..];
1031             if !input.is_empty() {
1032                 // We've filled the current chunk, and there's more input
1033                 // coming, so we know it's not the root and we can finalize it.
1034                 // Then we'll proceed to hashing whole chunks below.
1035                 debug_assert_eq!(self.chunk_state.len(), CHUNK_LEN);
1036                 let chunk_cv = self.chunk_state.output().chaining_value();
1037                 self.push_cv(&chunk_cv, self.chunk_state.chunk_counter);
1038                 self.chunk_state = ChunkState::new(
1039                     &self.key,
1040                     self.chunk_state.chunk_counter + 1,
1041                     self.chunk_state.flags,
1042                     self.chunk_state.platform,
1043                 );
1044             } else {
1045                 return self;
1046             }
1047         }
1048 
1049         // Now the chunk_state is clear, and we have more input. If there's
1050         // more than a single chunk (so, definitely not the root chunk), hash
1051         // the largest whole subtree we can, with the full benefits of SIMD and
1052         // multi-threading parallelism. Two restrictions:
1053         // - The subtree has to be a power-of-2 number of chunks. Only subtrees
1054         //   along the right edge can be incomplete, and we don't know where
1055         //   the right edge is going to be until we get to finalize().
1056         // - The subtree must evenly divide the total number of chunks up until
1057         //   this point (if total is not 0). If the current incomplete subtree
1058         //   is only waiting for 1 more chunk, we can't hash a subtree of 4
1059         //   chunks. We have to complete the current subtree first.
1060         // Because we might need to break up the input to form powers of 2, or
1061         // to evenly divide what we already have, this part runs in a loop.
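        //
        // For example (illustration only): with 3 chunks already hashed and 64
        // chunks of new input, successive passes below hash subtrees of 1, 4,
        // 8, 16, and 32 chunks (each completing a larger power-of-2 boundary),
        // then 2 chunks, and the final chunk is left for the chunk_state.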
1062         while input.len() > CHUNK_LEN {
1063             debug_assert_eq!(self.chunk_state.len(), 0, "no partial chunk data");
1064             debug_assert_eq!(CHUNK_LEN.count_ones(), 1, "power of 2 chunk len");
1065             let mut subtree_len = largest_power_of_two_leq(input.len());
1066             let count_so_far = self.chunk_state.chunk_counter * CHUNK_LEN as u64;
1067             // Shrink the subtree_len until it evenly divides the count so far.
1068             // We know that subtree_len itself is a power of 2, so we can use a
1069             // bitmasking trick instead of an actual remainder operation. (Note
1070             // that if the caller consistently passes power-of-2 inputs of the
1071             // same size, as is hopefully typical, this loop condition will
1072             // always fail, and subtree_len will always be the full length of
1073             // the input.)
1074             //
1075             // An aside: We don't have to shrink subtree_len quite this much.
1076             // For example, if count_so_far is 1, we could pass 2 chunks to
1077             // compress_subtree_to_parent_node. Since we'll get 2 CVs back,
1078             // we'll still get the right answer in the end, and we might get to
1079             // use 2-way SIMD parallelism. The problem with this optimization,
1080             // is that it gets us stuck always hashing 2 chunks. The total
1081             // number of chunks will remain odd, and we'll never graduate to
1082             // higher degrees of parallelism. See
1083             // https://github.com/BLAKE3-team/BLAKE3/issues/69.
1084             while (subtree_len - 1) as u64 & count_so_far != 0 {
1085                 subtree_len /= 2;
1086             }
1087             // The shrunken subtree_len might now be 1 chunk long. If so, hash
1088             // that one chunk by itself. Otherwise, compress the subtree into a
1089             // pair of CVs.
1090             let subtree_chunks = (subtree_len / CHUNK_LEN) as u64;
1091             if subtree_len <= CHUNK_LEN {
1092                 debug_assert_eq!(subtree_len, CHUNK_LEN);
1093                 self.push_cv(
1094                     &ChunkState::new(
1095                         &self.key,
1096                         self.chunk_state.chunk_counter,
1097                         self.chunk_state.flags,
1098                         self.chunk_state.platform,
1099                     )
1100                     .update(&input[..subtree_len])
1101                     .output()
1102                     .chaining_value(),
1103                     self.chunk_state.chunk_counter,
1104                 );
1105             } else {
1106                 // This is the high-performance happy path, though getting here
1107                 // depends on the caller giving us a long enough input.
1108                 let cv_pair = compress_subtree_to_parent_node::<J>(
1109                     &input[..subtree_len],
1110                     &self.key,
1111                     self.chunk_state.chunk_counter,
1112                     self.chunk_state.flags,
1113                     self.chunk_state.platform,
1114                 );
1115                 let left_cv = array_ref!(cv_pair, 0, 32);
1116                 let right_cv = array_ref!(cv_pair, 32, 32);
1117                 // Push the two CVs we received into the CV stack in order. Because
1118                 // the stack merges lazily, this guarantees we aren't merging the
1119                 // root.
1120                 self.push_cv(left_cv, self.chunk_state.chunk_counter);
1121                 self.push_cv(
1122                     right_cv,
1123                     self.chunk_state.chunk_counter + (subtree_chunks / 2),
1124                 );
1125             }
1126             self.chunk_state.chunk_counter += subtree_chunks;
1127             input = &input[subtree_len..];
1128         }
1129 
1130         // What remains is 1 chunk or less. Add it to the chunk state.
1131         debug_assert!(input.len() <= CHUNK_LEN);
1132         if !input.is_empty() {
1133             self.chunk_state.update(input);
1134             // Having added some input to the chunk_state, we know what's in
1135             // the CV stack won't become the root node, and we can do an extra
1136             // merge. This simplifies finalize().
1137             self.merge_cv_stack(self.chunk_state.chunk_counter);
1138         }
1139 
1140         self
1141     }
1142 
    fn final_output(&self) -> Output {
1144         // If the current chunk is the only chunk, that makes it the root node
1145         // also. Convert it directly into an Output. Otherwise, we need to
1146         // merge subtrees below.
1147         if self.cv_stack.is_empty() {
1148             debug_assert_eq!(self.chunk_state.chunk_counter, 0);
1149             return self.chunk_state.output();
1150         }
1151 
1152         // If there are any bytes in the ChunkState, finalize that chunk and
1153         // merge its CV with everything in the CV stack. In that case, the work
1154         // we did at the end of update() above guarantees that the stack
1155         // doesn't contain any unmerged subtrees that need to be merged first.
1156         // (This is important, because if there were two chunk hashes sitting
1157         // on top of the stack, they would need to merge with each other, and
1158         // merging a new chunk hash into them would be incorrect.)
1159         //
1160         // If there are no bytes in the ChunkState, we'll merge what's already
1161         // in the stack. In this case it's fine if there are unmerged chunks on
1162         // top, because we'll merge them with each other. Note that the case of
1163         // the empty chunk is taken care of above.
1164         let mut output: Output;
1165         let mut num_cvs_remaining = self.cv_stack.len();
1166         if self.chunk_state.len() > 0 {
1167             debug_assert_eq!(
1168                 self.cv_stack.len(),
1169                 self.chunk_state.chunk_counter.count_ones() as usize,
1170                 "cv stack does not need a merge"
1171             );
1172             output = self.chunk_state.output();
1173         } else {
1174             debug_assert!(self.cv_stack.len() >= 2);
1175             output = parent_node_output(
1176                 &self.cv_stack[num_cvs_remaining - 2],
1177                 &self.cv_stack[num_cvs_remaining - 1],
1178                 &self.key,
1179                 self.chunk_state.flags,
1180                 self.chunk_state.platform,
1181             );
1182             num_cvs_remaining -= 2;
1183         }
1184         while num_cvs_remaining > 0 {
1185             output = parent_node_output(
1186                 &self.cv_stack[num_cvs_remaining - 1],
1187                 &output.chaining_value(),
1188                 &self.key,
1189                 self.chunk_state.flags,
1190                 self.chunk_state.platform,
1191             );
1192             num_cvs_remaining -= 1;
1193         }
1194         output
1195     }
1196 
1197     /// Finalize the hash state and return the [`Hash`](struct.Hash.html) of
1198     /// the input.
1199     ///
1200     /// This method is idempotent. Calling it twice will give the same result.
1201     /// You can also add more input and finalize again.
    pub fn finalize(&self) -> Hash {
1203         self.final_output().root_hash()
1204     }
1205 
1206     /// Finalize the hash state and return an [`OutputReader`], which can
1207     /// supply any number of output bytes.
1208     ///
1209     /// This method is idempotent. Calling it twice will give the same result.
1210     /// You can also add more input and finalize again.
1211     ///
1212     /// [`OutputReader`]: struct.OutputReader.html
    pub fn finalize_xof(&self) -> OutputReader {
1214         OutputReader::new(self.final_output())
1215     }
1216 }
1217 
1218 // Don't derive(Debug), because the state may be secret.
1219 impl fmt::Debug for Hasher {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1221         f.debug_struct("Hasher")
1222             .field("flags", &self.chunk_state.flags)
1223             .field("platform", &self.chunk_state.platform)
1224             .finish()
1225     }
1226 }
1227 
1228 impl Default for Hasher {
1229     #[inline]
    fn default() -> Self {
1231         Self::new()
1232     }
1233 }
1234 
1235 #[cfg(feature = "std")]
1236 impl std::io::Write for Hasher {
1237     /// This is equivalent to [`update`](#method.update).
1238     #[inline]
    fn write(&mut self, input: &[u8]) -> std::io::Result<usize> {
1240         self.update(input);
1241         Ok(input.len())
1242     }
1243 
1244     #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
1246         Ok(())
1247     }
1248 }
1249 
1250 /// An incremental reader for extended output, returned by
1251 /// [`Hasher::finalize_xof`](struct.Hasher.html#method.finalize_xof).
1252 #[derive(Clone)]
1253 pub struct OutputReader {
1254     inner: Output,
1255     position_within_block: u8,
1256 }
1257 
1258 impl OutputReader {
    fn new(inner: Output) -> Self {
1260         Self {
1261             inner,
1262             position_within_block: 0,
1263         }
1264     }
1265 
1266     /// Fill a buffer with output bytes and advance the position of the
1267     /// `OutputReader`. This is equivalent to [`Read::read`], except that it
1268     /// doesn't return a `Result`. Both methods always fill the entire buffer.
1269     ///
1270     /// Note that `OutputReader` doesn't buffer output bytes internally, so
1271     /// calling `fill` repeatedly with a short-length or odd-length slice will
1272     /// end up performing the same compression multiple times. If you're
1273     /// reading output in a loop, prefer a slice length that's a multiple of
1274     /// 64.
1275     ///
1276     /// The maximum output size of BLAKE3 is 2<sup>64</sup>-1 bytes. If you try
1277     /// to extract more than that, for example by seeking near the end and
1278     /// reading further, the behavior is unspecified.
1279     ///
1280     /// [`Read::read`]: #method.read
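    ///
    /// A short example reading one full 64-byte block:
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().update(b"abc").finalize_xof();
    /// let mut block = [0u8; 64];
    /// reader.fill(&mut block);
    /// assert_eq!(&block[..32], blake3::hash(b"abc").as_bytes());
    /// ```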
    pub fn fill(&mut self, mut buf: &mut [u8]) {
1282         while !buf.is_empty() {
1283             let block: [u8; BLOCK_LEN] = self.inner.root_output_block();
1284             let output_bytes = &block[self.position_within_block as usize..];
1285             let take = cmp::min(buf.len(), output_bytes.len());
1286             buf[..take].copy_from_slice(&output_bytes[..take]);
1287             buf = &mut buf[take..];
1288             self.position_within_block += take as u8;
1289             if self.position_within_block == BLOCK_LEN as u8 {
1290                 self.inner.counter += 1;
1291                 self.position_within_block = 0;
1292             }
1293         }
1294     }
1295 
1296     /// Return the current read position in the output stream. The position of
1297     /// a new `OutputReader` starts at 0, and each call to [`fill`] or
1298     /// [`Read::read`] moves the position forward by the number of bytes read.
1299     ///
1300     /// [`fill`]: #method.fill
1301     /// [`Read::read`]: #method.read
    pub fn position(&self) -> u64 {
1303         self.inner.counter * BLOCK_LEN as u64 + self.position_within_block as u64
1304     }
1305 
1306     /// Seek to a new read position in the output stream. This is equivalent to
1307     /// calling [`Seek::seek`] with [`SeekFrom::Start`], except that it doesn't
1308     /// return a `Result`.
1309     ///
1310     /// [`Seek::seek`]: #method.seek
1311     /// [`SeekFrom::Start`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
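    ///
    /// For example:
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().finalize_xof();
    /// reader.set_position(1000);
    /// assert_eq!(reader.position(), 1000);
    /// ```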
    pub fn set_position(&mut self, position: u64) {
1313         self.position_within_block = (position % BLOCK_LEN as u64) as u8;
1314         self.inner.counter = position / BLOCK_LEN as u64;
1315     }
1316 }
1317 
1318 // Don't derive(Debug), because the state may be secret.
1319 impl fmt::Debug for OutputReader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1321         f.debug_struct("OutputReader")
1322             .field("position", &self.position())
1323             .finish()
1324     }
1325 }
1326 
1327 #[cfg(feature = "std")]
1328 impl std::io::Read for OutputReader {
1329     #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
1331         self.fill(buf);
1332         Ok(buf.len())
1333     }
1334 }
1335 
1336 #[cfg(feature = "std")]
1337 impl std::io::Seek for OutputReader {
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
1339         let max_position = u64::max_value() as i128;
1340         let target_position: i128 = match pos {
1341             std::io::SeekFrom::Start(x) => x as i128,
1342             std::io::SeekFrom::Current(x) => self.position() as i128 + x as i128,
1343             std::io::SeekFrom::End(_) => {
1344                 return Err(std::io::Error::new(
1345                     std::io::ErrorKind::InvalidInput,
1346                     "seek from end not supported",
1347                 ));
1348             }
1349         };
1350         if target_position < 0 {
1351             return Err(std::io::Error::new(
1352                 std::io::ErrorKind::InvalidInput,
1353                 "seek before start",
1354             ));
1355         }
1356         self.set_position(cmp::min(target_position, max_position) as u64);
1357         Ok(self.position())
1358     }
1359 }
1360