/*! Parallel bitfield access.

This module provides parallel, multiple-bit access to a `BitSlice`. This
functionality permits the use of `BitSlice` as a library-level implementation of
the bitfield language feature found in C and C++.

The `BitField` trait is not sealed against client implementation, as there is no
useful way to automatically use a `BitOrder` implementation to provide a
universal behavior. As such, the trait has some requirements that the compiler
cannot enforce for client implementations.

# Batch Behavior

The purpose of this trait is to provide access to arbitrary bit regions as if
they were an ordinary memory location. As such, it is important for
implementations of this trait to provide shift/mask register transfer behavior
where possible, for as wide a span as possible in each action. Implementations
of this trait should *not* use bit-by-bit iteration.

# Register Bit Order Preservation

As a default assumption – user orderings *may* violate this, but *should* not –
each element of slice memory used to store part of a value should not reorder
the value bits. Transfer between slice memory and a CPU register should solely
be an ordinary value load or store between memory and the register, and a
shift/mask operation to select the part of the value that is live.

# Endianness

The `_le` and `_be` methods of `BitField` refer to the order in which
`T: BitStore` elements of the slice are assigned significance when containing
fragments of a stored data value. Within any `T` element, the order of its
constituent bytes is *not* governed by the `BitField` trait method.

The provided `BitOrder` implementors `Lsb0` and `Msb0` use the local machine’s
byte ordering. Other cursors *may* implement ordering of bytes within `T`
elements differently, for instance by calling `to_be_bytes` before a store and
`from_be_bytes` after a load.
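
# Examples

A round-trip sketch, using the `Lsb0` implementation provided by this module:

```rust
use bitvec::prelude::*;

let mut data = [0u8; 2];
let bits = data.view_bits_mut::<Lsb0>();

//  Store an eight-bit value into a region spanning two elements,
bits[4 .. 12].store_le(0xA5u8);
//  then read it back out.
assert_eq!(bits[4 .. 12].load_le::<u8>(), 0xA5);
```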
!*/

use crate::{
	access::BitAccess,
	array::BitArray,
	devel as dvl,
	domain::{
		Domain,
		DomainMut,
	},
	index::BitMask,
	mem::BitMemory,
	order::{
		BitOrder,
		Lsb0,
		Msb0,
	},
	slice::BitSlice,
	store::BitStore,
	view::BitView,
};

use core::{
	mem,
	ops::{
		Shl,
		Shr,
	},
	ptr,
};

use tap::pipe::Pipe;

#[cfg(feature = "alloc")]
use crate::{
	boxed::BitBox,
	vec::BitVec,
};

/** Performs C-style bitfield access through a `BitSlice`.

Bit orderings that permit batched access to regions of memory are enabled to
load data from, and store data to, a `BitStore` with faster behavior than the
default bit-by-bit traversal.

This trait transfers data between a `BitSlice` and a local element. The trait
functions always place the live bit region of the slice against the least
significant bit edge of the local element (return value of `load`, argument of
`store`).

Implementations are encouraged to preserve in-memory bit ordering within a
memory element, so that call sites can provide a value pattern that the user can
clearly see matches what they expect for memory ordering. These methods should
only move data between locations, without modifying the data itself.

Methods should be called as `bits[start .. end].load_or_store()`, where the
range subslice selects no more than `M::BITS` bits for transfer.
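
For example, with the `Lsb0` implementation provided below:

```rust
use bitvec::prelude::*;

let mut data = 0u16;
let bits = data.view_bits_mut::<Lsb0>();

//  Store into, then load from, the six least significant bits.
bits[.. 6].store(42u8);
assert_eq!(bits[.. 6].load::<u8>(), 42);
```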
**/
pub trait BitField {
	/// Loads the bits in the `self` region into a local value.
	///
	/// This can load into any of the unsigned integers which implement
	/// `BitMemory`. Any further transformation must be done by the user.
	///
	/// The default implementation of this function calls [`load_le`] on
	/// little-endian byte-ordered CPUs, and [`load_be`] on big-endian
	/// byte-ordered CPUs.
	///
	/// # Parameters
	///
	/// - `&self`: A read reference to some bits in memory. This slice must be
	///   trimmed to have a width no more than the `M::BITS` width of the type
	///   being loaded. This can be accomplished with range indexing on a larger
	///   slice.
	///
	/// # Returns
	///
	/// A value `M` whose least `self.len()` significant bits are filled with
	/// the bits of `self`.
	///
	/// # Panics
	///
	/// This method is encouraged to panic if `self` is empty, or wider than a
	/// single element `M`.
	///
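	/// # Examples
	///
	/// A sketch of a single-element load; within one element, both byte
	/// orderings agree, so the result does not depend on the target.
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = 0b0110_1010u8;
	/// let bits = data.view_bits::<Lsb0>();
	/// assert_eq!(bits[2 .. 6].load::<u8>(), 0b1010);
	/// ```
	///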
	/// [`load_be`]: #tymethod.load_be
	/// [`load_le`]: #tymethod.load_le
	#[inline(always)]
	#[cfg(not(tarpaulin_include))]
	fn load<M>(&self) -> M
	where M: BitMemory {
		#[cfg(target_endian = "little")]
		return self.load_le::<M>();

		#[cfg(target_endian = "big")]
		return self.load_be::<M>();
	}

	/// Stores a sequence of bits from the user into the domain of `self`.
	///
	/// This can store any of the unsigned integers which implement
	/// `BitMemory`. Any other types must first be transformed by the user.
	///
	/// The default implementation of this function calls [`store_le`] on
	/// little-endian byte-ordered CPUs, and [`store_be`] on big-endian
	/// byte-ordered CPUs.
	///
	/// # Parameters
	///
	/// - `&mut self`: A write reference to some bits in memory. This slice must
	///   be trimmed to have a width no more than the `M::BITS` width of the
	///   type being stored. This can be accomplished with range indexing on a
	///   larger slice.
	/// - `value`: A value, whose `self.len()` least significant bits will be
	///   stored into `self`.
	///
	/// # Behavior
	///
	/// The `self.len()` least significant bits of `value` are written into the
	/// domain of `self`.
	///
	/// # Panics
	///
	/// This method is encouraged to panic if `self` is empty, or wider than a
	/// single element `M`.
	///
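	/// # Examples
	///
	/// A sketch of a single-element store; within one element, both byte
	/// orderings agree, so the written pattern does not depend on the target.
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut data = 0u8;
	/// data.view_bits_mut::<Lsb0>()[2 .. 6].store(0b1010u8);
	/// assert_eq!(data, 0b0010_1000);
	/// ```
	///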
	/// [`store_be`]: #tymethod.store_be
	/// [`store_le`]: #tymethod.store_le
	#[inline(always)]
	#[cfg(not(tarpaulin_include))]
	fn store<M>(&mut self, value: M)
	where M: BitMemory {
		#[cfg(target_endian = "little")]
		self.store_le(value);

		#[cfg(target_endian = "big")]
		self.store_be(value);
	}

	/// Loads from `self`, using little-endian element `T` ordering.
	///
	/// This function interprets a multi-element slice as having its least
	/// significant chunk in the low memory address, and its most significant
	/// chunk in the high memory address. Each element `T` is still interpreted
	/// from individual bytes according to the local CPU ordering.
	///
	/// # Parameters
	///
	/// - `&self`: A read reference to some bits in memory. This slice must be
	///   trimmed to have a width no more than the `M::BITS` width of the type
	///   being loaded. This can be accomplished with range indexing on a larger
	///   slice.
	///
	/// # Returns
	///
	/// A value `M` whose least `self.len()` significant bits are filled with
	/// the bits of `self`. If `self` spans multiple elements `T`, then the
	/// lowest-address `T` is interpreted as containing the least significant
	/// bits of the return value `M`, and the highest-address `T` is interpreted
	/// as containing its most significant bits.
	///
	/// # Panics
	///
	/// This method is encouraged to panic if `self` is empty, or wider than a
	/// single element `M`.
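	///
	/// # Examples
	///
	/// A sketch of the element significance ordering, shown with `Lsb0`:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = [0x34u8, 0x12];
	/// let bits = data.view_bits::<Lsb0>();
	/// //  The lower address holds the less significant chunk.
	/// assert_eq!(bits.load_le::<u16>(), 0x1234);
	/// ```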
	fn load_le<M>(&self) -> M
	where M: BitMemory;

	/// Loads from `self`, using big-endian element `T` ordering.
	///
	/// This function interprets a multi-element slice as having its most
	/// significant chunk in the low memory address, and its least significant
	/// chunk in the high memory address. Each element `T` is still interpreted
	/// from individual bytes according to the local CPU ordering.
	///
	/// # Parameters
	///
	/// - `&self`: A read reference to some bits in memory. This slice must be
	///   trimmed to have a width no more than the `M::BITS` width of the type
	///   being loaded. This can be accomplished with range indexing on a larger
	///   slice.
	///
	/// # Returns
	///
	/// A value `M` whose least `self.len()` significant bits are filled with
	/// the bits of `self`. If `self` spans multiple elements `T`, then the
	/// lowest-address `T` is interpreted as containing the most significant
	/// bits of the return value `M`, and the highest-address `T` is interpreted
	/// as containing its least significant bits.
	///
	/// # Panics
	///
	/// This method is encouraged to panic if `self` is empty, or wider than a
	/// single element `M`.
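	///
	/// # Examples
	///
	/// A sketch of the element significance ordering, shown with `Lsb0`:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let data = [0x12u8, 0x34];
	/// let bits = data.view_bits::<Lsb0>();
	/// //  The lower address holds the more significant chunk.
	/// assert_eq!(bits.load_be::<u16>(), 0x1234);
	/// ```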
	fn load_be<M>(&self) -> M
	where M: BitMemory;

	/// Stores into `self`, using little-endian element ordering.
	///
	/// This function interprets a multi-element slice as having its least
	/// significant chunk in the low memory address, and its most significant
	/// chunk in the high memory address. Each element `T` is still interpreted
	/// from individual bytes according to the local CPU ordering.
	///
	/// # Parameters
	///
	/// - `&mut self`: A write reference to some bits in memory. This slice must
	///   be trimmed to have a width no more than the `M::BITS` width of the
	///   type being stored. This can be accomplished with range indexing on a
	///   larger slice.
	/// - `value`: A value, whose `self.len()` least significant bits will be
	///   stored into `self`.
	///
	/// # Behavior
	///
	/// The `self.len()` least significant bits of `value` are written into the
	/// domain of `self`. If `self` spans multiple elements `T`, then the
	/// lowest-address `T` receives the least significant bits of `value`, and
	/// the highest-address `T` receives its most significant bits.
	///
	/// # Panics
	///
	/// This method is encouraged to panic if `self` is empty, or wider than a
	/// single element `M`.
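	///
	/// # Examples
	///
	/// A sketch of the element significance ordering, shown with `Lsb0`:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut data = [0u8; 2];
	/// data.view_bits_mut::<Lsb0>().store_le(0x1234u16);
	/// //  The lower address received the less significant chunk.
	/// assert_eq!(data, [0x34, 0x12]);
	/// ```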
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory;

	/// Stores into `self`, using big-endian element ordering.
	///
	/// This function interprets a multi-element slice as having its most
	/// significant chunk in the low memory address, and its least significant
	/// chunk in the high memory address. Each element `T` is still interpreted
	/// from individual bytes according to the local CPU ordering.
	///
	/// # Parameters
	///
	/// - `&mut self`: A write reference to some bits in memory. This slice must
	///   be trimmed to have a width no more than the `M::BITS` width of the
	///   type being stored. This can be accomplished with range indexing on a
	///   larger slice.
	/// - `value`: A value, whose `self.len()` least significant bits will be
	///   stored into `self`.
	///
	/// # Behavior
	///
	/// The `self.len()` least significant bits of `value` are written into the
	/// domain of `self`. If `self` spans multiple elements `T`, then the
	/// lowest-address `T` receives the most significant bits of `value`, and
	/// the highest-address `T` receives its least significant bits.
	///
	/// # Panics
	///
	/// This method is encouraged to panic if `self` is empty, or wider than a
	/// single element `M`.
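	///
	/// # Examples
	///
	/// A sketch of the element significance ordering, shown with `Lsb0`:
	///
	/// ```rust
	/// use bitvec::prelude::*;
	///
	/// let mut data = [0u8; 2];
	/// data.view_bits_mut::<Lsb0>().store_be(0x1234u16);
	/// //  The lower address received the more significant chunk.
	/// assert_eq!(data, [0x12, 0x34]);
	/// ```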
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory;
}

impl<T> BitField for BitSlice<Lsb0, T>
where T: BitStore
{
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		let len = self.len();
		check("load", len, M::BITS);

		match self.domain() {
			//  In Lsb0, a `head` index counts distance from LSedge, and a
			//  `tail` index counts element width minus distance from MSedge.
			Domain::Enclave { head, elem, tail } => {
				get::<T, M>(elem, Lsb0::mask(head, tail), head.value())
			},
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;

				/* For multi-`T::Mem` domains, the most significant chunk is
				stored in the highest memory address, the tail. Each
				successively lower memory address holds a chunk of decreasing
				significance, until the least significant chunk is stored in
				the lowest memory address, the head.
				*/

				if let Some((elem, tail)) = tail {
					accum = get::<T, M>(elem, Lsb0::mask(None, tail), 0);
				}

				for elem in body.iter().rev().copied() {
					/* Rust does not allow a shift by exactly the width of a
					type, which would otherwise be needed here to clear the
					accumulator. This loop only enters when `M` is not narrower
					than `T::Mem`, and the shift is only needed when `M`
					occupies *more than one* `T::Mem` slot. When `M` is exactly
					as wide as `T::Mem`, this loop either does not run (head
					and tail only), or runs once (single element), and thus the
					shift is unnecessary.

					As a const-expression, this branch folds at compile-time to
					conditionally remove or retain the instruction.
					*/
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}

				if let Some((head, elem)) = head {
					let shamt = head.value();
					accum <<= T::Mem::BITS - shamt;
					accum |= get::<T, M>(elem, Lsb0::mask(head, None), shamt);
				}

				accum
			},
		}
	}

	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		let len = self.len();
		check("load", len, M::BITS);

		match self.domain() {
			Domain::Enclave { head, elem, tail } => {
				get::<T, M>(elem, Lsb0::mask(head, tail), head.value())
			},
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;

				if let Some((head, elem)) = head {
					accum =
						get::<T, M>(elem, Lsb0::mask(head, None), head.value());
				}

				for elem in body.iter().copied() {
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}

				if let Some((elem, tail)) = tail {
					accum <<= tail.value();
					accum |= get::<T, M>(elem, Lsb0::mask(None, tail), 0);
				}

				accum
			},
		}
	}

	#[inline]
	fn store_le<M>(&mut self, mut value: M)
	where M: BitMemory {
		let len = self.len();
		check("store", len, M::BITS);

		match self.domain_mut() {
			DomainMut::Enclave { head, elem, tail } => {
				set::<T, M>(elem, value, Lsb0::mask(head, tail), head.value())
			},
			DomainMut::Region { head, body, tail } => {
				if let Some((head, elem)) = head {
					let shamt = head.value();
					set::<T, M>(elem, value, Lsb0::mask(head, None), shamt);
					value >>= T::Mem::BITS - shamt;
				}

				for elem in body {
					*elem = resize(value);
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}

				if let Some((elem, tail)) = tail {
					set::<T, M>(elem, value, Lsb0::mask(None, tail), 0);
				}
			},
		}
	}

	#[inline]
	fn store_be<M>(&mut self, mut value: M)
	where M: BitMemory {
		let len = self.len();
		check("store", len, M::BITS);

		match self.domain_mut() {
			DomainMut::Enclave { head, elem, tail } => {
				set::<T, M>(elem, value, Lsb0::mask(head, tail), head.value())
			},
			DomainMut::Region { head, body, tail } => {
				if let Some((elem, tail)) = tail {
					set::<T, M>(elem, value, Lsb0::mask(None, tail), 0);
					value >>= tail.value();
				}

				for elem in body.iter_mut().rev() {
					*elem = resize(value);
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}

				if let Some((head, elem)) = head {
					set::<T, M>(
						elem,
						value,
						Lsb0::mask(head, None),
						head.value(),
					);
				}
			},
		}
	}
}

impl<T> BitField for BitSlice<Msb0, T>
where T: BitStore
{
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		let len = self.len();
		check("load", len, M::BITS);

		match self.domain() {
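			//  In Msb0, `head` and `tail` indices count distance from the
			//  MSedge, so a fragment ending at `tail` sits
			//  `T::Mem::BITS - tail` bits above the LSedge.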
			Domain::Enclave { head, elem, tail } => get::<T, M>(
				elem,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;

				if let Some((elem, tail)) = tail {
					accum = get::<T, M>(
						elem,
						Msb0::mask(None, tail),
						T::Mem::BITS - tail.value(),
					);
				}

				for elem in body.iter().rev().copied() {
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}

				if let Some((head, elem)) = head {
					accum <<= T::Mem::BITS - head.value();
					accum |= get::<T, M>(elem, Msb0::mask(head, None), 0);
				}

				accum
			},
		}
	}

	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		let len = self.len();
		check("load", len, M::BITS);

		match self.domain() {
			Domain::Enclave { head, elem, tail } => get::<T, M>(
				elem,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			Domain::Region { head, body, tail } => {
				let mut accum = M::ZERO;

				if let Some((head, elem)) = head {
					accum = get::<T, M>(elem, Msb0::mask(head, None), 0);
				}

				for elem in body.iter().copied() {
					if M::BITS > T::Mem::BITS {
						accum <<= T::Mem::BITS;
					}
					accum |= resize::<T::Mem, M>(elem);
				}

				if let Some((elem, tail)) = tail {
					let width = tail.value();
					accum <<= width;
					accum |= get::<T, M>(
						elem,
						Msb0::mask(None, tail),
						T::Mem::BITS - width,
					);
				}

				accum
			},
		}
	}

	#[inline]
	fn store_le<M>(&mut self, mut value: M)
	where M: BitMemory {
		let len = self.len();
		check("store", len, M::BITS);

		match self.domain_mut() {
			DomainMut::Enclave { head, elem, tail } => set::<T, M>(
				elem,
				value,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			DomainMut::Region { head, body, tail } => {
				if let Some((head, elem)) = head {
					set::<T, M>(elem, value, Msb0::mask(head, None), 0);
					value >>= T::Mem::BITS - head.value();
				}

				for elem in body.iter_mut() {
					*elem = resize(value);
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}

				if let Some((elem, tail)) = tail {
					set::<T, M>(
						elem,
						value,
						Msb0::mask(None, tail),
						T::Mem::BITS - tail.value(),
					);
				}
			},
		}
	}

	#[inline]
	fn store_be<M>(&mut self, mut value: M)
	where M: BitMemory {
		let len = self.len();
		check("store", len, M::BITS);

		match self.domain_mut() {
			DomainMut::Enclave { head, elem, tail } => set::<T, M>(
				elem,
				value,
				Msb0::mask(head, tail),
				T::Mem::BITS - tail.value(),
			),
			DomainMut::Region { head, body, tail } => {
				if let Some((elem, tail)) = tail {
					set::<T, M>(
						elem,
						value,
						Msb0::mask(None, tail),
						T::Mem::BITS - tail.value(),
					);
					value >>= tail.value();
				}

				for elem in body.iter_mut().rev() {
					*elem = resize(value);
					if M::BITS > T::Mem::BITS {
						value >>= T::Mem::BITS;
					}
				}

				if let Some((head, elem)) = head {
					set::<T, M>(elem, value, Msb0::mask(head, None), 0);
				}
			},
		}
	}
}

#[cfg(not(tarpaulin_include))]
impl<O, V> BitField for BitArray<O, V>
where
	O: BitOrder,
	V: BitView,
	BitSlice<O, V::Store>: BitField,
{
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		self.as_bitslice().load_le()
	}

	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		self.as_bitslice().load_be()
	}

	#[inline]
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory {
		self.as_mut_bitslice().store_le(value)
	}

	#[inline]
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory {
		self.as_mut_bitslice().store_be(value)
	}
}

#[cfg(feature = "alloc")]
#[cfg(not(tarpaulin_include))]
impl<O, T> BitField for BitBox<O, T>
where
	O: BitOrder,
	T: BitStore,
	BitSlice<O, T>: BitField,
{
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		self.as_bitslice().load_le()
	}

	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		self.as_bitslice().load_be()
	}

	#[inline]
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory {
		self.as_mut_bitslice().store_le(value)
	}

	#[inline]
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory {
		self.as_mut_bitslice().store_be(value)
	}
}

#[cfg(feature = "alloc")]
#[cfg(not(tarpaulin_include))]
impl<O, T> BitField for BitVec<O, T>
where
	O: BitOrder,
	T: BitStore,
	BitSlice<O, T>: BitField,
{
	#[inline]
	fn load_le<M>(&self) -> M
	where M: BitMemory {
		self.as_bitslice().load_le()
	}

	#[inline]
	fn load_be<M>(&self) -> M
	where M: BitMemory {
		self.as_bitslice().load_be()
	}

	#[inline]
	fn store_le<M>(&mut self, value: M)
	where M: BitMemory {
		self.as_mut_bitslice().store_le(value)
	}

	#[inline]
	fn store_be<M>(&mut self, value: M)
	where M: BitMemory {
		self.as_mut_bitslice().store_be(value)
	}
}

/// Asserts that a slice length is within a memory element width.
#[inline]
fn check(action: &'static str, len: usize, width: u8) {
	if !(1 ..= width as usize).contains(&len) {
		panic!("Cannot {} {} bits from a {}-bit region", action, width, len);
	}
}

/** Reads a value out of a section of a memory element.

This function is used to extract a portion of an `M` value from a portion of a
`T` value. The `BitField` implementations call it as they assemble a complete
`M`. It performs the following steps:

1. the referent value of the `elem` pointer is copied into local memory,
2. `mask`ed to discard the portions of `*elem` that are not live,
3. shifted to the LSedge of the `T::Mem` temporary,
4. then `resize`d into an `M` value.

This is the exact inverse of `set`.

# Type Parameters

- `T`: The `BitStore` type of a `BitSlice` that is the source of a read event.
- `M`: The local type of the data contained in that `BitSlice`.

# Parameters

- `elem`: An aliased reference to a single element of a `BitSlice` storage. This
  is required to remain aliased, as other write-capable references to the
  location may exist.
- `mask`: A `BitMask` of the live region of the value at `*elem` to be used as
  the contents of the returned value.
- `shamt`: The distance of the least significant bit of the mask region from the
  least significant edge of the `T::Mem` fetched value.

# Returns

`resize((*elem & mask) >> shamt)`
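
As a worked example: with `*elem = 0b0110_1100u8`, `mask = 0b0011_1100`, and
`shamt = 2`, the masked value is `0b0010_1100`, the shift produces `0b1011`,
and the function returns `resize::<u8, M>(0b1011)`.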
**/
#[inline]
fn get<T, M>(elem: &T, mask: BitMask<T::Mem>, shamt: u8) -> M
where
	T: BitStore,
	M: BitMemory,
{
	elem.load_value()
		.pipe(|val| mask & val)
		.value()
		.pipe(|val| Shr::<u8>::shr(val, shamt))
		.pipe(resize::<T::Mem, M>)
}

/** Writes a value into a section of a memory element.

This function is used to emplace a portion of an `M` value into a portion of a
`T` value. The `BitField` implementations call it as they disassemble a complete
`M`. It performs the following steps:

1. the provided `value` is `resize`d from `M` to `T::Mem`,
2. then shifted from the LSedge of the `T::Mem` temporary by `shamt`,
3. `mask`ed to discard the portions of `value` that are not live,
4. then written into the `mask`ed portion of `*elem`.

This is the exact inverse of `get`.

# Type Parameters

- `T`: The `BitStore` type of a `BitSlice` that is the sink of a write event.
- `M`: The local type of the data being written into that `BitSlice`.

# Parameters

- `elem`: An aliased reference to a single element of a `BitSlice` storage.
- `value`: The value whose least significant bits will be written into the
  subsection of `*elem` covered by `mask`.
- `mask`: A `BitMask` of the live region of the value at `*elem` to be used as
  a filter on the provided value.
- `shamt`: The distance of the least significant bit of the mask region from the
  least significant edge of the `T::Mem` destination value.

# Effects

`*elem &= !mask; *elem |= (resize(value) << shamt) & mask;`
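
As a worked example: writing `value = 0b1011` with `mask = 0b0011_1100` and
`shamt = 2` into `*elem = 0b1100_0011` clears the mask region, then deposits
`(0b1011 << 2) & mask == 0b0010_1100`, leaving `*elem == 0b1110_1111`.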
**/
#[inline]
fn set<T, M>(elem: &T::Alias, value: M, mask: BitMask<T::Mem>, shamt: u8)
where
	T: BitStore,
	M: BitMemory,
{
	//  Convert the aliasing reference into its accessing type.
	let elem = dvl::accessor(elem);
	//  Mark the mask as aliased, to fit into the accessor reference.
	let mask = dvl::alias_mask::<T>(mask);
	//  Modify `value` to fit the accessor reference, by:
	let value = value
		//  resizing from `M` to `T::Mem`,
		.pipe(resize::<M, T::Mem>)
		//  marking it as `T::Alias::Mem`,
		.pipe(dvl::alias_mem::<T>)
		//  shifting it left by `shamt` into the mask region,
		.pipe(|val| Shl::<u8>::shl(val, shamt))
		//  and then masking it.
		.pipe(|val| mask & val);

	elem.clear_bits(mask);
	elem.set_bits(value);
}

/** Resizes a value from one register width to another.

This zero-extends or truncates its source value in order to fit in the target
type.

# Type Parameters

- `T`: The initial register type of the value to resize.
- `U`: The final register type of the resized value.

# Parameters

- `value`: Any register value.

# Returns

`value`, either zero-extended if `U` is wider than `T` or truncated if `U` is
narrower than `T`.
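
Truncation keeps the least significant bytes on both little- and big-endian
targets: for example, `resize::<u16, u8>(0x1234)` produces `0x34`, while
`resize::<u8, u16>(0xFF)` zero-extends to `0x00FF`.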
**/
#[inline]
fn resize<T, U>(value: T) -> U
where
	T: BitMemory,
	U: BitMemory,
{
	let mut out = U::ZERO;
	let size_t = mem::size_of::<T>();
	let size_u = mem::size_of::<U>();

	unsafe {
		resize_inner::<T, U>(&value, &mut out, size_t, size_u);
	}

	out
}

/// Performs little-endian byte-order register resizing.
#[inline(always)]
#[cfg(target_endian = "little")]
#[cfg(not(tarpaulin_include))]
unsafe fn resize_inner<T, U>(
	src: &T,
	dst: &mut U,
	size_t: usize,
	size_u: usize,
)
{
	//  In LE, the least significant byte is at the base address, so resizing
	//  is just a memcpy into a zeroed slot, taking only the smaller width.
	ptr::copy_nonoverlapping(
		src as *const T as *const u8,
		dst as *mut U as *mut u8,
		core::cmp::min(size_t, size_u),
	);
}

/// Performs big-endian byte-order register resizing.
#[inline(always)]
#[cfg(target_endian = "big")]
#[cfg(not(tarpaulin_include))]
unsafe fn resize_inner<T, U>(
	src: &T,
	dst: &mut U,
	size_t: usize,
	size_u: usize,
)
{
	let src = src as *const T as *const u8;
	let dst = dst as *mut U as *mut u8;

	//  In BE, shrinking a value requires moving the source base pointer up,
	if size_t > size_u {
		ptr::copy_nonoverlapping(src.add(size_t - size_u), dst, size_u);
	}
	//  while expanding a value requires moving the destination base pointer up.
	else {
		ptr::copy_nonoverlapping(src, dst.add(size_u - size_t), size_t);
	}
}

#[cfg(not(any(target_endian = "big", target_endian = "little")))]
compile_error!(concat!(
	"This architecture is currently not supported. File an issue at ",
	env!("CARGO_PKG_REPOSITORY")
));

#[cfg(feature = "std")]
mod io;

#[cfg(test)]
mod tests;

// These tests are purely mathematical, and do not need to run more than once.
#[cfg(all(test, feature = "std", not(miri), not(tarpaulin)))]
mod permutation_tests;