use std::cmp;
use std::collections::VecDeque;
use std::io;
use std::io::Read;

/// This is a byte buffer that is built from a vector
/// of byte vectors. This avoids extra copies when
/// appending a new byte vector, at the expense of
/// more complexity when reading out.
pub struct ChunkVecBuffer {
    // Pending chunks, oldest at the front; empty vectors are never stored.
    chunks: VecDeque<Vec<u8>>,
    // Maximum number of bytes this object may store; zero means "no limit".
    limit: usize,
}

impl Default for ChunkVecBuffer {
    fn default() -> Self {
        Self::new()
    }
}

impl ChunkVecBuffer {
    /// Makes a new, empty buffer with no storage limit.
    pub fn new() -> Self {
        Self {
            chunks: VecDeque::new(),
            limit: 0,
        }
    }

    /// Sets the upper limit on how many bytes this
    /// object can store.
    ///
    /// Setting a lower limit than the currently stored
    /// data is not an error.
    ///
    /// A zero limit is interpreted as no limit.
    pub fn set_limit(&mut self, new_limit: usize) {
        self.limit = new_limit;
    }

    /// If we're empty
    pub fn is_empty(&self) -> bool {
        self.chunks.is_empty()
    }

    /// How many bytes we're storing
    pub fn len(&self) -> usize {
        self.chunks.iter().map(|ch| ch.len()).sum()
    }

    /// For a proposed append of `len` bytes, how many
    /// bytes should we actually append to adhere to the
    /// currently set `limit`?
    pub fn apply_limit(&self, len: usize) -> usize {
        if self.limit == 0 {
            len
        } else {
            // saturating_sub: the limit may have been lowered below the
            // amount already stored, in which case there is no space left.
            let space = self.limit.saturating_sub(self.len());
            cmp::min(len, space)
        }
    }

    /// Append a copy of `bytes`, perhaps a prefix if
    /// we're near the limit.  Returns how many bytes
    /// were actually appended.
    pub fn append_limited_copy(&mut self, bytes: &[u8]) -> usize {
        let take = self.apply_limit(bytes.len());
        self.append(bytes[..take].to_vec());
        take
    }

    /// Take and append the given `bytes`.  The limit is not
    /// applied here; returns the length of `bytes`.
    pub fn append(&mut self, bytes: Vec<u8>) -> usize {
        let len = bytes.len();

        // Never store empty chunks: is_empty() must mean "no data".
        if !bytes.is_empty() {
            self.chunks.push_back(bytes);
        }

        len
    }

    /// Take one of the chunks from this object. This
    /// function panics if the object `is_empty`.
    pub fn take_one(&mut self) -> Vec<u8> {
        self.chunks.pop_front().unwrap()
    }

    /// Read data out of this object, writing it into `buf`
    /// and returning how many bytes were written there.
    pub fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let mut offs = 0;

        while offs < buf.len() && !self.is_empty() {
            // Reading from a byte slice cannot actually fail, but keep
            // the io::Result plumbing for interface compatibility.
            let used = self.chunks[0]
                .as_slice()
                .read(&mut buf[offs..])?;

            self.consume(used);
            offs += used;
        }

        Ok(offs)
    }

    /// Discard the first `used` stored bytes, dropping whole
    /// chunks where possible.
    fn consume(&mut self, mut used: usize) {
        while used > 0 && !self.is_empty() {
            if used >= self.chunks[0].len() {
                used -= self.chunks[0].len();
                self.take_one();
            } else {
                // Remove the consumed prefix in place.  `drain` shifts the
                // remainder down without allocating; the previous
                // `split_off` approach allocated a fresh Vec and copied
                // the tail on every partial consume.
                self.chunks[0].drain(..used);
                used = 0;
            }
        }
    }

    /// Read data out of this object, passing it to `wr`.
    /// All pending chunks are offered in a single vectored write.
    pub fn write_to(&mut self, wr: &mut dyn io::Write) -> io::Result<usize> {
        if self.is_empty() {
            return Ok(0);
        }

        let used = wr.write_vectored(
            &self
                .chunks
                .iter()
                .map(|ch| io::IoSlice::new(ch))
                .collect::<Vec<io::IoSlice>>(),
        )?;
        // write_vectored may perform a short write; discard only what
        // was actually written.
        self.consume(used);
        Ok(used)
    }
}

#[cfg(test)]
mod test {
    use super::ChunkVecBuffer;

    #[test]
    fn short_append_copy_with_limit() {
        let mut cvb = ChunkVecBuffer::new();
        cvb.set_limit(12);
        assert_eq!(cvb.append_limited_copy(b"hello"), 5);
        assert_eq!(cvb.append_limited_copy(b"world"), 5);
        assert_eq!(cvb.append_limited_copy(b"hello"), 2);
        assert_eq!(cvb.append_limited_copy(b"world"), 0);

        let mut buf = [0u8; 12];
        assert_eq!(cvb.read(&mut buf).unwrap(), 12);
        assert_eq!(buf.to_vec(), b"helloworldhe".to_vec());
    }

    #[test]
    fn partial_read_consumes_prefix_in_place() {
        let mut cvb = ChunkVecBuffer::new();
        assert_eq!(cvb.append(b"abcdef".to_vec()), 6);

        let mut small = [0u8; 4];
        assert_eq!(cvb.read(&mut small).unwrap(), 4);
        assert_eq!(&small, b"abcd");
        assert_eq!(cvb.len(), 2);

        let mut rest = [0u8; 4];
        assert_eq!(cvb.read(&mut rest).unwrap(), 2);
        assert_eq!(&rest[..2], b"ef");
        assert!(cvb.is_empty());
    }
}