1 #![no_std]
2 pub use generic_array;
3 #[cfg(feature = "block-padding")]
4 pub use block_padding;
5
6 use core::{slice, convert::TryInto};
7 use generic_array::{GenericArray, ArrayLength};
8 #[cfg(feature = "block-padding")]
9 use block_padding::{Padding, PadError};
10
/// Buffer for block processing of data
#[derive(Clone, Default)]
pub struct BlockBuffer<BlockSize: ArrayLength<u8>> {
    /// Block-sized internal buffer holding bytes that have not yet been
    /// processed as part of a complete block.
    buffer: GenericArray<u8, BlockSize>,
    /// Cursor position: number of valid bytes currently stored in `buffer`.
    /// Invariant: `pos <= BlockSize` (equality only possible via `input_lazy`).
    pos: usize,
}
17
impl<BlockSize: ArrayLength<u8>> BlockBuffer<BlockSize> {
    /// Process data in `input` in blocks of size `BlockSize` using function `f`.
    ///
    /// Incoming bytes first top up a partially filled internal buffer; every
    /// completed block is passed to `f`, and any trailing partial block is
    /// stored in the buffer for the next call.
    #[inline]
    pub fn input_block(
        &mut self, mut input: &[u8], mut f: impl FnMut(&GenericArray<u8, BlockSize>),
    ) {
        let r = self.remaining();
        // Not enough data to complete a block: buffer it and return early.
        if input.len() < r {
            let n = input.len();
            self.buffer[self.pos..self.pos + n].copy_from_slice(input);
            self.pos += n;
            return;
        }
        // Buffer holds a partial block: fill it from `input` and process it.
        // Note: `input.len() >= r` is always true here thanks to the early
        // return above; the check is kept for clarity/symmetry.
        if self.pos != 0 && input.len() >= r {
            let (l, r) = input.split_at(r);
            input = r;
            self.buffer[self.pos..].copy_from_slice(l);
            f(&self.buffer);
        }

        // Process full blocks directly from `input` without copying them into
        // the buffer. `chunks_exact` guarantees every chunk is exactly
        // `BlockSize` bytes long, so `try_into` cannot fail.
        let mut chunks_iter = input.chunks_exact(self.size());
        for chunk in &mut chunks_iter {
            f(chunk.try_into().unwrap());
        }
        let rem = chunks_iter.remainder();

        // Copy any remaining data into the buffer.
        // `pos` is unconditionally reset here, which also discharges the
        // flushed-buffer state left by the branch above.
        self.buffer[..rem.len()].copy_from_slice(rem);
        self.pos = rem.len();
    }

    /// Process data in `input` in blocks of size `BlockSize` using function `f`,
    /// which accepts a slice of blocks.
    ///
    /// Behaves like [`input_block`], but hands all full blocks to `f` in a
    /// single call, allowing batched processing.
    #[inline]
    pub fn input_blocks(
        &mut self, mut input: &[u8], mut f: impl FnMut(&[GenericArray<u8, BlockSize>]),
    ) {
        let r = self.remaining();
        // Not enough data to complete a block: buffer it and return early.
        if input.len() < r {
            let n = input.len();
            self.buffer[self.pos..self.pos + n].copy_from_slice(input);
            self.pos += n;
            return;
        }
        // Buffer holds a partial block: fill it from `input` and process it
        // as a one-element slice of blocks.
        // Note: `input.len() >= r` is always true here (see early return).
        if self.pos != 0 && input.len() >= r {
            let (l, r) = input.split_at(r);
            input = r;
            self.buffer[self.pos..].copy_from_slice(l);
            self.pos = 0;
            f(slice::from_ref(&self.buffer));
        }

        // While we have at least a full buffer-size chunk's worth of data,
        // process its data without copying into the buffer
        let n_blocks = input.len()/self.size();
        let (left, right) = input.split_at(n_blocks*self.size());
        // SAFETY: we guarantee that `blocks` does not point outside of `input`:
        // `left` contains exactly `n_blocks * BlockSize` bytes, and
        // `GenericArray<u8, BlockSize>` is layout-compatible with
        // `[u8; BlockSize]`.
        let blocks = unsafe {
            slice::from_raw_parts(
                left.as_ptr() as *const GenericArray<u8, BlockSize>,
                n_blocks,
            )
        };
        f(blocks);

        // Copy remaining data into the buffer.
        let n = right.len();
        self.buffer[..n].copy_from_slice(right);
        self.pos = n;
    }

    /// Variant that doesn't flush the buffer until there's additional
    /// data to be processed. Suitable for tweakable block ciphers
    /// like Threefish that need to know whether a block is the *last*
    /// data block before processing it.
    ///
    /// As a consequence, after this call `pos` may equal `size()` (a full,
    /// unflushed buffer); `digest_pad` accounts for that state.
    #[inline]
    pub fn input_lazy(
        &mut self, mut input: &[u8], mut f: impl FnMut(&GenericArray<u8, BlockSize>),
    ) {
        let r = self.remaining();
        // `<=` (not `<`) — an exactly-full buffer is deliberately retained
        // here instead of being flushed.
        if input.len() <= r {
            let n = input.len();
            self.buffer[self.pos..self.pos + n].copy_from_slice(input);
            self.pos += n;
            return;
        }
        // Buffer holds buffered data and more input follows: complete the
        // block and process it, since it cannot be the last one.
        if self.pos != 0 && input.len() > r {
            let (l, r) = input.split_at(r);
            input = r;
            self.buffer[self.pos..].copy_from_slice(l);
            f(&self.buffer);
        }

        // Process full blocks, but only while strictly more than one block
        // remains — the final block (full or partial) is kept in the buffer.
        while input.len() > self.size() {
            let (block, r) = input.split_at(self.size());
            input = r;
            f(block.try_into().unwrap());
        }

        // Stash the trailing data; `pos` may legitimately equal `size()` here.
        self.buffer[..input.len()].copy_from_slice(input);
        self.pos = input.len();
    }

    /// Pad the buffer with the `0x80` marker byte and make sure that the
    /// internal buffer has at least `up_to` free bytes. All remaining bytes
    /// get zeroed-out.
    #[inline]
    fn digest_pad(
        &mut self, up_to: usize, mut f: impl FnMut(&GenericArray<u8, BlockSize>),
    ) {
        // A completely full buffer can only be left behind by `input_lazy`;
        // flush it first so there is room for the padding byte.
        if self.pos == self.size() {
            f(&self.buffer);
            self.pos = 0;
        }
        self.buffer[self.pos] = 0x80;
        self.pos += 1;

        // Zero everything after the padding byte.
        set_zero(&mut self.buffer[self.pos..]);

        // Not enough room left for the `up_to`-byte length suffix: process
        // this block and start a fresh all-zero one (only the first `pos`
        // bytes need re-zeroing; the tail is already zero).
        if self.remaining() < up_to {
            f(&self.buffer);
            set_zero(&mut self.buffer[..self.pos]);
        }
    }

    /// Pad message with 0x80, zeros and 64-bit message length
    /// using big-endian byte order
    #[inline]
    pub fn len64_padding_be(
        &mut self, data_len: u64, mut f: impl FnMut(&GenericArray<u8, BlockSize>),
    ) {
        self.digest_pad(8, &mut f);
        // Write the length into the last 8 bytes of the block, process it,
        // and reset the buffer.
        let b = data_len.to_be_bytes();
        let n = self.buffer.len() - b.len();
        self.buffer[n..].copy_from_slice(&b);
        f(&self.buffer);
        self.pos = 0;
    }

    /// Pad message with 0x80, zeros and 64-bit message length
    /// using little-endian byte order
    #[inline]
    pub fn len64_padding_le(
        &mut self, data_len: u64, mut f: impl FnMut(&GenericArray<u8, BlockSize>),
    ) {
        self.digest_pad(8, &mut f);
        // Write the length into the last 8 bytes of the block, process it,
        // and reset the buffer.
        let b = data_len.to_le_bytes();
        let n = self.buffer.len() - b.len();
        self.buffer[n..].copy_from_slice(&b);
        f(&self.buffer);
        self.pos = 0;
    }

    /// Pad message with 0x80, zeros and 128-bit message length
    /// using big-endian byte order
    #[inline]
    pub fn len128_padding_be(
        &mut self, data_len: u128, mut f: impl FnMut(&GenericArray<u8, BlockSize>),
    ) {
        self.digest_pad(16, &mut f);
        // Write the length into the last 16 bytes of the block, process it,
        // and reset the buffer.
        let b = data_len.to_be_bytes();
        let n = self.buffer.len() - b.len();
        self.buffer[n..].copy_from_slice(&b);
        f(&self.buffer);
        self.pos = 0;
    }

    /// Pad message with a given padding `P`
    ///
    /// Returns `PadError` if the internal buffer is full, which can only
    /// happen if `input_lazy` was used.
    #[cfg(feature = "block-padding")]
    #[inline]
    pub fn pad_with<P: Padding>(&mut self)
        -> Result<&mut GenericArray<u8, BlockSize>, PadError>
    {
        P::pad_block(&mut self.buffer[..], self.pos)?;
        self.pos = 0;
        Ok(&mut self.buffer)
    }

    /// Return size of the internal buffer in bytes
    #[inline]
    pub fn size(&self) -> usize {
        BlockSize::to_usize()
    }

    /// Return current cursor position
    #[inline]
    pub fn position(&self) -> usize {
        self.pos
    }

    /// Return number of remaining bytes in the internal buffer
    #[inline]
    pub fn remaining(&self) -> usize {
        self.size() - self.pos
    }

    /// Reset buffer by setting cursor position to zero
    ///
    /// Note: buffered bytes are not zeroed, only logically discarded.
    #[inline]
    pub fn reset(&mut self) {
        self.pos = 0
    }
}
223
/// Sets all bytes in `dst` to zero
#[inline(always)]
fn set_zero(dst: &mut [u8]) {
    let ptr = dst.as_mut_ptr();
    let len = dst.len();
    // SAFETY: `ptr` points to `len` initialized bytes uniquely borrowed
    // through `dst`, so overwriting them with zeros is sound. `write_bytes`
    // (a memset) is used instead of a loop because a loop produces an
    // unnecessary branch which tests for zero-length slices.
    unsafe {
        core::ptr::write_bytes(ptr, 0, len);
    }
}
234