use byteorder::{BigEndian, ByteOrder};
use {Config, STANDARD};

///Encode arbitrary octets as base64.
///Returns a String.
///Convenience for `encode_config(input, base64::STANDARD);`.
///
///# Example
///
///```rust
///extern crate base64;
///
///fn main() {
///    let b64 = base64::encode(b"hello world");
///    println!("{}", b64);
///}
///```
pub fn encode<T: ?Sized + AsRef<[u8]>>(input: &T) -> String {
    encode_config(input, STANDARD)
}

///Encode arbitrary octets as base64.
///Returns a String.
///
///# Example
///
///```rust
///extern crate base64;
///
///fn main() {
///    let b64 = base64::encode_config(b"hello world~", base64::STANDARD);
///    println!("{}", b64);
///
///    let b64_url = base64::encode_config(b"hello internet~", base64::URL_SAFE);
///    println!("{}", b64_url);
///}
///```
pub fn encode_config<T: ?Sized + AsRef<[u8]>>(input: &T, config: Config) -> String {
    let mut buf = match encoded_size(input.as_ref().len(), config) {
        Some(n) => vec![0; n],
        None => panic!("integer overflow when calculating buffer size"),
    };

    let encoded_len = encode_config_slice(input.as_ref(), config, &mut buf[..]);
    debug_assert_eq!(encoded_len, buf.len());

    String::from_utf8(buf).expect("Invalid UTF8")
}

///Encode arbitrary octets as base64.
///Writes into the supplied output buffer, growing it if needed.
///
///# Example
///
///```rust
///extern crate base64;
///
///fn main() {
///    let mut buf = String::new();
///    base64::encode_config_buf(b"hello world~", base64::STANDARD, &mut buf);
///    println!("{}", buf);
///
///    buf.clear();
///    base64::encode_config_buf(b"hello internet~", base64::URL_SAFE, &mut buf);
///    println!("{}", buf);
///}
///```
pub fn encode_config_buf<T: ?Sized + AsRef<[u8]>>(input: &T, config: Config, buf: &mut String) {
    let input_bytes = input.as_ref();

    {
        let mut sink = ::chunked_encoder::StringSink::new(buf);
        let encoder = ::chunked_encoder::ChunkedEncoder::new(config);

        encoder
            .encode(input_bytes, &mut sink)
            .expect("Writing to a String shouldn't fail")
    }
}

/// Encode arbitrary octets as base64.
/// Writes into the supplied output buffer.
///
/// This is useful if you wish to avoid allocation entirely (e.g. encoding into a stack-resident
/// or statically-allocated buffer).
///
/// # Panics
///
/// If `output` is too small to hold the encoded version of `input`, a panic will result.
///
/// # Example
///
/// ```rust
/// extern crate base64;
///
/// fn main() {
///     let s = b"hello internet!";
///     let mut buf = Vec::new();
///     // make sure we'll have a slice big enough for base64 + padding
///     buf.resize(s.len() * 4 / 3 + 4, 0);
///
///     let bytes_written = base64::encode_config_slice(s,
///                             base64::STANDARD, &mut buf);
///
///     // shorten our vec down to just what was written
///     buf.resize(bytes_written, 0);
///
///     assert_eq!(s, base64::decode(&buf).unwrap().as_slice());
/// }
/// ```
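///
/// To avoid allocation entirely, a fixed-size stack buffer works as well (a minimal sketch; the
/// buffer size here is simply chosen to be comfortably larger than the encoded output):
///
/// ```rust
/// extern crate base64;
///
/// fn main() {
///     let s = b"hello internet!";
///     // 15 input bytes encode to 20 base64 bytes, so 32 is plenty
///     let mut buf = [0u8; 32];
///
///     let bytes_written = base64::encode_config_slice(s, base64::STANDARD, &mut buf);
///
///     assert_eq!(s, base64::decode(&buf[..bytes_written]).unwrap().as_slice());
/// }
/// ```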
pub fn encode_config_slice<T: ?Sized + AsRef<[u8]>>(
    input: &T,
    config: Config,
    output: &mut [u8],
) -> usize {
    let input_bytes = input.as_ref();

    let encoded_size = encoded_size(input_bytes.len(), config)
        .expect("usize overflow when calculating buffer size");

    let mut b64_output = &mut output[0..encoded_size];

    encode_with_padding(&input_bytes, config, encoded_size, &mut b64_output);

    encoded_size
}

/// B64-encode and pad (if configured).
///
/// This helper exists to avoid recalculating encoded_size, which is relatively expensive on short
/// inputs.
///
/// `encoded_size` is the encoded size calculated for `input`.
///
/// `output` must be of size `encoded_size`.
///
/// All bytes in `output` will be written to, since it is exactly the size of the encoded data.
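///
/// For example (a worked case, not taken from a test): a 4-byte input with padding enabled has
/// an `encoded_size` of 8; `encode_to_slice` writes 6 base64 bytes for it and `add_padding` then
/// writes 2 `=` bytes, so exactly 8 bytes are written.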
fn encode_with_padding(input: &[u8], config: Config, encoded_size: usize, output: &mut [u8]) {
    debug_assert_eq!(encoded_size, output.len());

    let b64_bytes_written = encode_to_slice(input, output, config.char_set.encode_table());

    let padding_bytes = if config.pad {
        add_padding(input.len(), &mut output[b64_bytes_written..])
    } else {
        0
    };

    let encoded_bytes = b64_bytes_written
        .checked_add(padding_bytes)
        .expect("usize overflow when calculating b64 length");

    debug_assert_eq!(encoded_size, encoded_bytes);
}

/// Encode input bytes to utf8 base64 bytes. Does not pad.
/// `output` must be long enough to hold the encoded `input` without padding.
/// Returns the number of bytes written.
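///
/// For example (illustrative arithmetic only): a 7-byte input yields a return value of 10, since
/// two full 3-byte chunks become 8 characters and the final byte becomes 2 more. Any padding is
/// the caller's responsibility (see `add_padding`).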
#[inline]
pub fn encode_to_slice(input: &[u8], output: &mut [u8], encode_table: &[u8; 64]) -> usize {
    let mut input_index: usize = 0;

    const BLOCKS_PER_FAST_LOOP: usize = 4;
    const LOW_SIX_BITS: u64 = 0x3F;

    // we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need
    // 2 trailing bytes to be available to read.
    let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2);
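    // Illustrative numbers (not in the original source): for a 100-byte input, last_fast_index
    // is 100 - 26 = 74, so the fast loop below runs for input_index values 0, 24, 48 and 72
    // (each iteration reads 26 bytes but consumes 24), and the slower loops pick up at
    // input_index == 96.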
    let mut output_index = 0;

    if last_fast_index > 0 {
        while input_index <= last_fast_index {
            // Major performance wins from letting the optimizer do the bounds check once, mostly
            // on the output side
            let input_chunk = &input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))];
            let output_chunk = &mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)];

            // Hand-unrolling for 32 vs 16 or 8 bytes yields performance about equivalent
            // to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for
            // large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect
            // that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte
            // SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once.
            // Plus, single-digit percentage performance differences might well be quite different
            // on different hardware.

            let input_u64 = BigEndian::read_u64(&input_chunk[0..]);

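            // (Note added for clarity.) Each output byte takes a 6-bit group from the top of the
            // big-endian u64: bits 63..58, then 57..52, and so on down to 21..16. Only the top 6
            // of the 8 bytes read are consumed; the low 16 bits are re-read as part of the next
            // block.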
            output_chunk[0] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
            output_chunk[1] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
            output_chunk[2] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
            output_chunk[3] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
            output_chunk[4] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
            output_chunk[5] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
            output_chunk[6] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
            output_chunk[7] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

            let input_u64 = BigEndian::read_u64(&input_chunk[6..]);

            output_chunk[8] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
            output_chunk[9] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
            output_chunk[10] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
            output_chunk[11] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
            output_chunk[12] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
            output_chunk[13] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
            output_chunk[14] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
            output_chunk[15] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

            let input_u64 = BigEndian::read_u64(&input_chunk[12..]);

            output_chunk[16] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
            output_chunk[17] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
            output_chunk[18] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
            output_chunk[19] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
            output_chunk[20] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
            output_chunk[21] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
            output_chunk[22] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
            output_chunk[23] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

            let input_u64 = BigEndian::read_u64(&input_chunk[18..]);

            output_chunk[24] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
            output_chunk[25] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
            output_chunk[26] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
            output_chunk[27] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
            output_chunk[28] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
            output_chunk[29] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
            output_chunk[30] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
            output_chunk[31] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];

            output_index += BLOCKS_PER_FAST_LOOP * 8;
            input_index += BLOCKS_PER_FAST_LOOP * 6;
        }
    }

    // Encode what's left after the fast loop.

    const LOW_SIX_BITS_U8: u8 = 0x3F;

    let rem = input.len() % 3;
    let start_of_rem = input.len() - rem;

    // start at the first index not handled by fast loop, which may be 0.

    while input_index < start_of_rem {
        let input_chunk = &input[input_index..(input_index + 3)];
        let output_chunk = &mut output[output_index..(output_index + 4)];

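        // (Note added for clarity.) Standard 3-byte-to-4-char split: the high 6 bits of byte 0,
        // then the low 2 bits of byte 0 with the high 4 bits of byte 1, the low 4 bits of byte 1
        // with the high 2 bits of byte 2, and finally the low 6 bits of byte 2.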
        output_chunk[0] = encode_table[(input_chunk[0] >> 2) as usize];
        output_chunk[1] =
            encode_table[((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize];
        output_chunk[2] =
            encode_table[((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize];
        output_chunk[3] = encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize];

        input_index += 3;
        output_index += 4;
    }

    if rem == 2 {
        output[output_index] = encode_table[(input[start_of_rem] >> 2) as usize];
        output[output_index + 1] =
            encode_table[((input[start_of_rem] << 4 | input[start_of_rem + 1] >> 4)
                             & LOW_SIX_BITS_U8) as usize];
        output[output_index + 2] =
            encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize];
        output_index += 3;
    } else if rem == 1 {
        output[output_index] = encode_table[(input[start_of_rem] >> 2) as usize];
        output[output_index + 1] =
            encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize];
        output_index += 2;
    }

    output_index
}

/// calculate the base64 encoded string size, including padding if appropriate
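///
/// For example, 5 input bytes form one complete 3-byte chunk plus a 2-byte remainder, so this
/// returns `Some(8)` for a padded config like `STANDARD` and `Some(7)` for an unpadded one like
/// `URL_SAFE_NO_PAD` (values match the tests below).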
pub fn encoded_size(bytes_len: usize, config: Config) -> Option<usize> {
    let rem = bytes_len % 3;

    let complete_input_chunks = bytes_len / 3;
    let complete_chunk_output = complete_input_chunks.checked_mul(4);

    if rem > 0 {
        if config.pad {
            complete_chunk_output.and_then(|c| c.checked_add(4))
        } else {
            let encoded_rem = match rem {
                1 => 2,
                2 => 3,
                _ => unreachable!("Impossible remainder"),
            };
            complete_chunk_output.and_then(|c| c.checked_add(encoded_rem))
        }
    } else {
        complete_chunk_output
    }
}

/// Write padding characters.
/// `output` is the slice where padding should be written, of length at least 2.
///
/// Returns the number of padding bytes written.
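///
/// The count follows from `input_len % 3`: a remainder of 1 produces two `=` bytes, a remainder
/// of 2 produces one, and a multiple of 3 produces none.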
pub fn add_padding(input_len: usize, output: &mut [u8]) -> usize {
    let rem = input_len % 3;
    let mut bytes_written = 0;
    for _ in 0..((3 - rem) % 3) {
        output[bytes_written] = b'=';
        bytes_written += 1;
    }

    bytes_written
}

#[cfg(test)]
mod tests {
    extern crate rand;

    use super::*;
    use decode::decode_config_buf;
    use tests::{assert_encode_sanity, random_config};
    use {Config, STANDARD, URL_SAFE_NO_PAD};

    use self::rand::distributions::{Distribution, Uniform};
    use self::rand::{FromEntropy, Rng};
    use std;
    use std::str;

    #[test]
    fn encoded_size_correct_standard() {
        assert_encoded_length(0, 0, STANDARD);

        assert_encoded_length(1, 4, STANDARD);
        assert_encoded_length(2, 4, STANDARD);
        assert_encoded_length(3, 4, STANDARD);

        assert_encoded_length(4, 8, STANDARD);
        assert_encoded_length(5, 8, STANDARD);
        assert_encoded_length(6, 8, STANDARD);

        assert_encoded_length(7, 12, STANDARD);
        assert_encoded_length(8, 12, STANDARD);
        assert_encoded_length(9, 12, STANDARD);

        assert_encoded_length(54, 72, STANDARD);

        assert_encoded_length(55, 76, STANDARD);
        assert_encoded_length(56, 76, STANDARD);
        assert_encoded_length(57, 76, STANDARD);

        assert_encoded_length(58, 80, STANDARD);
    }

    #[test]
    fn encoded_size_correct_no_pad() {
        assert_encoded_length(0, 0, URL_SAFE_NO_PAD);

        assert_encoded_length(1, 2, URL_SAFE_NO_PAD);
        assert_encoded_length(2, 3, URL_SAFE_NO_PAD);
        assert_encoded_length(3, 4, URL_SAFE_NO_PAD);

        assert_encoded_length(4, 6, URL_SAFE_NO_PAD);
        assert_encoded_length(5, 7, URL_SAFE_NO_PAD);
        assert_encoded_length(6, 8, URL_SAFE_NO_PAD);

        assert_encoded_length(7, 10, URL_SAFE_NO_PAD);
        assert_encoded_length(8, 11, URL_SAFE_NO_PAD);
        assert_encoded_length(9, 12, URL_SAFE_NO_PAD);

        assert_encoded_length(54, 72, URL_SAFE_NO_PAD);

        assert_encoded_length(55, 74, URL_SAFE_NO_PAD);
        assert_encoded_length(56, 75, URL_SAFE_NO_PAD);
        assert_encoded_length(57, 76, URL_SAFE_NO_PAD);

        assert_encoded_length(58, 78, URL_SAFE_NO_PAD);
    }

    #[test]
    fn encoded_size_overflow() {
        assert_eq!(None, encoded_size(std::usize::MAX, STANDARD));
    }

    #[test]
    fn encode_config_buf_into_nonempty_buffer_doesnt_clobber_prefix() {
        let mut orig_data = Vec::new();
        let mut prefix = String::new();
        let mut encoded_data_no_prefix = String::new();
        let mut encoded_data_with_prefix = String::new();
        let mut decoded = Vec::new();

        let prefix_len_range = Uniform::new(0, 1000);
        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            orig_data.clear();
            prefix.clear();
            encoded_data_no_prefix.clear();
            encoded_data_with_prefix.clear();
            decoded.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                orig_data.push(rng.gen());
            }

            let prefix_len = prefix_len_range.sample(&mut rng);
            for _ in 0..prefix_len {
                // getting convenient random single-byte printable chars that aren't base64 is
                // annoying
                prefix.push('#');
            }
            encoded_data_with_prefix.push_str(&prefix);

            let config = random_config(&mut rng);
            encode_config_buf(&orig_data, config, &mut encoded_data_no_prefix);
            encode_config_buf(&orig_data, config, &mut encoded_data_with_prefix);

            assert_eq!(
                encoded_data_no_prefix.len() + prefix_len,
                encoded_data_with_prefix.len()
            );
            assert_encode_sanity(&encoded_data_no_prefix, config, input_len);
            assert_encode_sanity(&encoded_data_with_prefix[prefix_len..], config, input_len);

            // append plain encode onto prefix
            prefix.push_str(&encoded_data_no_prefix);

            assert_eq!(prefix, encoded_data_with_prefix);

            decode_config_buf(&encoded_data_no_prefix, config, &mut decoded).unwrap();
            assert_eq!(orig_data, decoded);
        }
    }

    #[test]
    fn encode_config_slice_into_nonempty_buffer_doesnt_clobber_suffix() {
        let mut orig_data = Vec::new();
        let mut encoded_data = Vec::new();
        let mut encoded_data_original_state = Vec::new();
        let mut decoded = Vec::new();

        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            orig_data.clear();
            encoded_data.clear();
            encoded_data_original_state.clear();
            decoded.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                orig_data.push(rng.gen());
            }

            // plenty of existing garbage in the encoded buffer
            for _ in 0..10 * input_len {
                encoded_data.push(rng.gen());
            }

            encoded_data_original_state.extend_from_slice(&encoded_data);

            let config = random_config(&mut rng);

            let encoded_size = encoded_size(input_len, config).unwrap();

            assert_eq!(
                encoded_size,
                encode_config_slice(&orig_data, config, &mut encoded_data)
            );

            assert_encode_sanity(
                std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
                config,
                input_len,
            );

            assert_eq!(
                &encoded_data[encoded_size..],
                &encoded_data_original_state[encoded_size..]
            );

            decode_config_buf(&encoded_data[0..encoded_size], config, &mut decoded).unwrap();
            assert_eq!(orig_data, decoded);
        }
    }

    #[test]
    fn encode_config_slice_fits_into_precisely_sized_slice() {
        let mut orig_data = Vec::new();
        let mut encoded_data = Vec::new();
        let mut decoded = Vec::new();

        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            orig_data.clear();
            encoded_data.clear();
            decoded.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                orig_data.push(rng.gen());
            }

            let config = random_config(&mut rng);

            let encoded_size = encoded_size(input_len, config).unwrap();

            encoded_data.resize(encoded_size, 0);

            assert_eq!(
                encoded_size,
                encode_config_slice(&orig_data, config, &mut encoded_data)
            );

            assert_encode_sanity(
                std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
                config,
                input_len,
            );

            decode_config_buf(&encoded_data[0..encoded_size], config, &mut decoded).unwrap();
            assert_eq!(orig_data, decoded);
        }
    }

    #[test]
    fn encode_to_slice_random_valid_utf8() {
        let mut input = Vec::new();
        let mut output = Vec::new();

        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            input.clear();
            output.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                input.push(rng.gen());
            }

            let config = random_config(&mut rng);

            // fill up the output buffer with garbage
            let encoded_size = encoded_size(input_len, config).unwrap();
            for _ in 0..encoded_size {
                output.push(rng.gen());
            }

            let orig_output_buf = output.to_vec();

            let bytes_written =
                encode_to_slice(&input, &mut output, config.char_set.encode_table());

            // make sure the part beyond bytes_written is the same garbage it was before
            assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);

            // make sure the encoded bytes are UTF-8
            let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
        }
    }

    #[test]
    fn encode_with_padding_random_valid_utf8() {
        let mut input = Vec::new();
        let mut output = Vec::new();

        let input_len_range = Uniform::new(0, 1000);

        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..10_000 {
            input.clear();
            output.clear();

            let input_len = input_len_range.sample(&mut rng);

            for _ in 0..input_len {
                input.push(rng.gen());
            }

            let config = random_config(&mut rng);

            // fill up the output buffer with garbage
            let encoded_size = encoded_size(input_len, config).unwrap();
            for _ in 0..encoded_size + 1000 {
                output.push(rng.gen());
            }

            let orig_output_buf = output.to_vec();

            encode_with_padding(&input, config, encoded_size, &mut output[0..encoded_size]);

            // make sure the part beyond b64 is the same garbage it was before
            assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]);

            // make sure the encoded bytes are UTF-8
            let _ = str::from_utf8(&output[0..encoded_size]).unwrap();
        }
    }

    #[test]
    fn add_padding_random_valid_utf8() {
        let mut output = Vec::new();

        let mut rng = rand::rngs::SmallRng::from_entropy();

        // cover our bases for length % 3
        for input_len in 0..10 {
            output.clear();

            // fill output with random
            for _ in 0..10 {
                output.push(rng.gen());
            }

            let orig_output_buf = output.to_vec();

            let bytes_written = add_padding(input_len, &mut output);

            // make sure the part beyond bytes_written is the same garbage it was before
            assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);

            // make sure the encoded bytes are UTF-8
            let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
        }
    }

    fn assert_encoded_length(input_len: usize, encoded_len: usize, config: Config) {
        assert_eq!(encoded_len, encoded_size(input_len, config).unwrap());

        let mut bytes: Vec<u8> = Vec::new();
        let mut rng = rand::rngs::SmallRng::from_entropy();

        for _ in 0..input_len {
            bytes.push(rng.gen());
        }

        let encoded = encode_config(&bytes, config);
        assert_encode_sanity(&encoded, config, input_len);

        assert_eq!(encoded_len, encoded.len());
    }

}