/*! I/O trait implementations.

The standard library defines byte-based I/O protocols that form the basis of
exchanging memory buffers with I/O controllers. As `BitSlice` is designed to be
used with I/O buffers, it makes sense for it to implement these protocols.

This module builds on the `field` module, because it relies on the `BitField`
trait’s ability to map a `BitSlice` onto a value-storage region. The I/O
protocols `Read` and `Write` are strictly byte-based and cannot be altered to
be bit-based, so they are implemented only on types that have a `BitField`
implementation.

Calling `BitField` methods in a loop imposes a non-trivial, and irremovable,
per-loop overhead cost. Using `bitvec` data structures directly, rather than
their underlying buffers, therefore carries a performance penalty.
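
The implementations can be driven through the ordinary `std::io` machinery. A
brief, illustrative sketch (it uses the `Msb0` ordering over `u8` elements so
that each transferred byte stays within a single storage element and the
result does not depend on the target’s endianness):

```rust
use bitvec::prelude::*;
use std::io;

let data = [0x12u8, 0x34, 0x56];
let mut reader = data.view_bits::<Msb0>();
let mut writer = BitVec::<Msb0, u8>::new();

// `io::copy` drives the `Read` and `Write` implementations in this module,
// moving one whole byte at a time.
assert_eq!(io::copy(&mut reader, &mut writer).unwrap(), 3);
assert_eq!(writer, data.view_bits::<Msb0>());
```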
!*/

#![cfg(feature = "std")]

use crate::{
	field::BitField,
	order::BitOrder,
	slice::BitSlice,
	store::BitStore,
	vec::BitVec,
};

use core::mem;

use std::io::{
	self,
	Read,
	Write,
};

/** Mirrors the implementation on `[u8]` (found [here]).

The implementation loads bytes out of the `&BitSlice` reference until
exhaustion of either the source `BitSlice` or destination `[u8]`. When
`.read()` returns, `self` will have been updated to no longer include the
leading segment copied out as bytes of `buf`. Only whole bytes are transferred:
a trailing segment of fewer than eight bits is never read, and remains in
`self`.

[here]: https://doc.rust-lang.org/std/primitive.slice.html#impl-Read
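
A short, illustrative sketch (it uses `Msb0` ordering over `u8` storage so that
each eight-bit chunk lies within a single element and the bytes produced do not
depend on the target’s endianness):

```rust
use bitvec::prelude::*;
use std::io::Read;

let data = [0xA5u8, 0x3C];
let mut bits = data.view_bits::<Msb0>();

let mut buf = [0u8; 2];
// Each call moves whole bytes only; a trailing chunk of fewer than eight
// bits would be left behind in `bits`.
assert_eq!(bits.read(&mut buf).unwrap(), 2);
assert_eq!(buf, [0xA5u8, 0x3C]);
assert!(bits.is_empty());
```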
**/
impl<'a, O, T> Read for &'a BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
	BitSlice<O, T>: BitField,
{
	#[inline]
	fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
		let mut idx = 0;
		for (byte, slot) in self.chunks_exact(8).zip(buf.iter_mut()) {
			*slot = byte.load();
			idx += 1;
		}
		*self = unsafe { self.get_unchecked(idx * 8 ..) };
		Ok(idx)
	}
}

/** Mirrors the implementation on `[u8]` (found [here]).

The implementation copies bytes into the `&mut BitSlice` reference until
exhaustion of either the source `[u8]` or destination `BitSlice`. When
`.write()` returns, `self` will have been updated to no longer include the
leading segment containing bytes copied in from `buf`. Only whole bytes are
transferred: a trailing segment of `self` shorter than eight bits is skipped
rather than partially filled.

[here]: https://doc.rust-lang.org/std/primitive.slice.html#impl-Write
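
A short, illustrative sketch (again with `Msb0` ordering over `u8` storage so
that each stored byte lies within one element):

```rust
use bitvec::prelude::*;
use std::io::Write;

let mut data = [0u8; 2];
let mut bits = data.view_bits_mut::<Msb0>();

// Whole bytes only: a trailing chunk of `bits` shorter than eight bits
// would be skipped, not partially filled.
assert_eq!(bits.write(&[0xA5u8, 0x3C]).unwrap(), 2);
assert!(bits.is_empty());
assert_eq!(data, [0xA5u8, 0x3C]);
```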
**/
impl<'a, O, T> Write for &'a mut BitSlice<O, T>
where
	O: BitOrder,
	T: BitStore,
	BitSlice<O, T::Alias>: BitField,
{
	#[inline]
	fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
		let mut idx = 0;
		for (slot, byte) in self.chunks_exact_mut(8).zip(buf.iter().copied()) {
			slot.store(byte);
			idx += 1;
		}
		*self = unsafe { mem::take(self).get_unchecked_mut(idx * 8 ..) };
		Ok(idx)
	}

	#[inline(always)]
	#[cfg(not(tarpaulin_include))]
	fn flush(&mut self) -> io::Result<()> {
		Ok(())
	}
}

/** Mirrors the implementation on `Vec<u8>` (found [here]).

The implementation copies bytes from `buf` into the tail end of `self`. The
performance characteristics of this operation depend on the type parameters of
the `BitVec` and on the position of its tail.

[here]: https://doc.rust-lang.org/std/vec/struct.Vec.html#impl-Write
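
A short, illustrative sketch (using `usize` storage so that the appended byte
lands within a single element, keeping the example target-independent):

```rust
use bitvec::prelude::*;
use std::io::Write;

let mut bv = bitvec![Msb0, usize; 0, 0, 0, 0];

// The vector grows by eight bits per byte written; the existing bits are
// left untouched.
assert_eq!(bv.write(&[0xF0u8]).unwrap(), 1);
assert_eq!(bv.len(), 12);
assert_eq!(bv[.. 4], bits![0, 0, 0, 0][..]);
assert_eq!(bv[4 ..], bits![1, 1, 1, 1, 0, 0, 0, 0][..]);
```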
**/
impl<O, T> Write for BitVec<O, T>
where
	O: BitOrder,
	T: BitStore,
	BitSlice<O, T::Alias>: BitField,
{
	#[inline]
	fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
		let len = self.len();
		self.resize(len + buf.len() * 8, false);
		unsafe { self.get_unchecked_mut(len ..) }.write(buf)
	}

	#[inline(always)]
	#[cfg(not(tarpaulin_include))]
	fn flush(&mut self) -> io::Result<()> {
		Ok(())
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use crate::prelude::*;

	#[test]
	fn read_bits() {
		let data = [0x136Cu16, 0x8C63];
		let mut bits = &data.view_bits::<Msb0>()[4 ..];
		assert_eq!(bits.len(), 28);

		let mut transfer = [0u8; 4];
		let last_ptr = &mut transfer[3] as *mut _;
		let mut transfer_handle = &mut transfer[..];

		assert_eq!(io::copy(&mut bits, &mut transfer_handle).unwrap(), 3);

		//  Once a bitslice cannot produce a byte, it stops `Read`ing
		assert_eq!(bits, data.view_bits::<Msb0>()[28 ..]);
		//  So the destination slice does not fill up.
		assert_eq!(transfer_handle.as_mut_ptr() as *mut _, last_ptr);

		if cfg!(target_endian = "little") {
			assert_eq!(transfer[.. 3], [0x36, 0x8C, 0xC6][..]);
			/* note the backwards nibbles here! ^^

			When crossing element boundaries, `.load_le()` assumes that the
			lesser memory address is less significant, and the greater memory
			address is more significant. The last nibble of the first element
			is therefore assumed to be numerically less significant than the
			first nibble of the second word.

			If this behavior surprises users, then an iterative copy may be more
			appropriate than a `BitField`-based load/store behavior. A bitwise
			crawl is slower, which is why `BitField` was chosen as the
			implementation. But “quickly wrong” is worse than “slowly right”.
			*/
		}
	}

	#[test]
	fn write_bits() {
		let mut bv = bitvec![Msb0, usize; 0, 0, 0, 0];
		assert_eq!(
			3,
			io::copy(&mut &[0xC3u8, 0xF0, 0x69][..], &mut bv).unwrap()
		);

		assert_eq!(bv, bits![
			0, 0, 0, 0, // the original priming bits
			1, 1, 0, 0, 0, 0, 1, 1, // 0xC3
			1, 1, 1, 1, 0, 0, 0, 0, // 0xF0
			0, 1, 1, 0, 1, 0, 0, 1, // 0x69
		]);
	}
}