1 //! An interpreter for MIR used in CTFE and by miri.
2 
/// Constructs an `InterpError::Unsupported` from the given `UnsupportedOpInfo`
/// variant (the variant name and its arguments are passed through as tokens).
#[macro_export]
macro_rules! err_unsup {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::Unsupported(
            $crate::mir::interpret::UnsupportedOpInfo::$($tt)*
        )
    };
}
11 
/// Constructs `err_unsup!(Unsupported(..))` from a format string.
#[macro_export]
macro_rules! err_unsup_format {
    ($($tt:tt)*) => { err_unsup!(Unsupported(format!($($tt)*))) };
}
16 
/// Constructs an `InterpError::InvalidProgram` from the given
/// `InvalidProgramInfo` variant.
#[macro_export]
macro_rules! err_inval {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::InvalidProgram(
            $crate::mir::interpret::InvalidProgramInfo::$($tt)*
        )
    };
}
25 
/// Constructs an `InterpError::UndefinedBehavior` from the given
/// `UndefinedBehaviorInfo` variant.
#[macro_export]
macro_rules! err_ub {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::UndefinedBehavior(
            $crate::mir::interpret::UndefinedBehaviorInfo::$($tt)*
        )
    };
}
34 
/// Constructs `err_ub!(Ub(..))` from a format string.
#[macro_export]
macro_rules! err_ub_format {
    ($($tt:tt)*) => { err_ub!(Ub(format!($($tt)*))) };
}
39 
/// Constructs an `InterpError::ResourceExhaustion` from the given
/// `ResourceExhaustionInfo` variant.
#[macro_export]
macro_rules! err_exhaust {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::ResourceExhaustion(
            $crate::mir::interpret::ResourceExhaustionInfo::$($tt)*
        )
    };
}
48 
/// Constructs an `InterpError::MachineStop`, boxing the given
/// machine-defined payload.
#[macro_export]
macro_rules! err_machine_stop {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::MachineStop(Box::new($($tt)*))
    };
}
55 
// In the `throw_*` macros, avoid `return` to make them work with `try {}`.
/// Throws (diverges with) an `err_unsup!` error. `Err::<!, _>(..)?` is used
/// instead of `return Err(..)` so the macro also works inside `try {}` blocks.
#[macro_export]
macro_rules! throw_unsup {
    ($($tt:tt)*) => { Err::<!, _>(err_unsup!($($tt)*))? };
}
61 
/// Throws `UnsupportedOpInfo::Unsupported` built from a format string.
#[macro_export]
macro_rules! throw_unsup_format {
    ($($tt:tt)*) => { throw_unsup!(Unsupported(format!($($tt)*))) };
}
66 
/// Throws (diverges with) an `err_inval!` error; `try {}`-compatible like the
/// other `throw_*` macros.
#[macro_export]
macro_rules! throw_inval {
    ($($tt:tt)*) => { Err::<!, _>(err_inval!($($tt)*))? };
}
71 
/// Throws (diverges with) an `err_ub!` error; `try {}`-compatible like the
/// other `throw_*` macros.
#[macro_export]
macro_rules! throw_ub {
    ($($tt:tt)*) => { Err::<!, _>(err_ub!($($tt)*))? };
}
76 
/// Throws `UndefinedBehaviorInfo::Ub` built from a format string.
#[macro_export]
macro_rules! throw_ub_format {
    ($($tt:tt)*) => { throw_ub!(Ub(format!($($tt)*))) };
}
81 
/// Throws (diverges with) an `err_exhaust!` error; `try {}`-compatible like
/// the other `throw_*` macros.
#[macro_export]
macro_rules! throw_exhaust {
    ($($tt:tt)*) => { Err::<!, _>(err_exhaust!($($tt)*))? };
}
86 
/// Throws (diverges with) an `err_machine_stop!` error; `try {}`-compatible
/// like the other `throw_*` macros.
#[macro_export]
macro_rules! throw_machine_stop {
    ($($tt:tt)*) => { Err::<!, _>(err_machine_stop!($($tt)*))? };
}
91 
92 mod allocation;
93 mod error;
94 mod pointer;
95 mod queries;
96 mod value;
97 
98 use std::convert::TryFrom;
99 use std::fmt;
100 use std::io;
101 use std::io::{Read, Write};
102 use std::num::{NonZeroU32, NonZeroU64};
103 use std::sync::atomic::{AtomicU32, Ordering};
104 
105 use rustc_ast::LitKind;
106 use rustc_data_structures::fx::FxHashMap;
107 use rustc_data_structures::sync::{HashMapExt, Lock};
108 use rustc_data_structures::tiny_list::TinyList;
109 use rustc_hir::def_id::DefId;
110 use rustc_macros::HashStable;
111 use rustc_middle::ty::print::with_no_trimmed_paths;
112 use rustc_serialize::{Decodable, Encodable};
113 use rustc_target::abi::Endian;
114 
115 use crate::mir;
116 use crate::ty::codec::{TyDecoder, TyEncoder};
117 use crate::ty::subst::GenericArgKind;
118 use crate::ty::{self, Instance, Ty, TyCtxt};
119 
120 pub use self::error::{
121     struct_error, CheckInAllocMsg, ErrorHandled, EvalToAllocationRawResult, EvalToConstValueResult,
122     InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType,
123     ResourceExhaustionInfo, UndefinedBehaviorInfo, UninitBytesAccess, UnsupportedOpInfo,
124 };
125 
126 pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};
127 
128 pub use self::allocation::{
129     alloc_range, AllocRange, Allocation, InitChunk, InitChunkIter, InitMask, Relocations,
130 };
131 
132 pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
133 
/// Uniquely identifies one of the following:
/// - A constant
/// - A static
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable, Lift)]
pub struct GlobalId<'tcx> {
    /// For a constant or static, the `Instance` of the item itself.
    /// For a promoted global, the `Instance` of the function they belong to.
    pub instance: ty::Instance<'tcx>,

    /// The index for promoted globals within their function's `mir::Body`.
    /// `None` for a plain constant or static.
    pub promoted: Option<mir::Promoted>,
}
147 
148 impl GlobalId<'tcx> {
display(self, tcx: TyCtxt<'tcx>) -> String149     pub fn display(self, tcx: TyCtxt<'tcx>) -> String {
150         let instance_name = with_no_trimmed_paths(|| tcx.def_path_str(self.instance.def.def_id()));
151         if let Some(promoted) = self.promoted {
152             format!("{}::{:?}", instance_name, promoted)
153         } else {
154             instance_name
155         }
156     }
157 }
158 
/// Input argument for `tcx.lit_to_const`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, HashStable)]
pub struct LitToConstInput<'tcx> {
    /// The absolute value of the resultant constant (the sign is carried
    /// separately in `neg`).
    pub lit: &'tcx LitKind,
    /// The type of the constant.
    pub ty: Ty<'tcx>,
    /// If the constant is negative.
    pub neg: bool,
}
169 
/// Error type for `tcx.lit_to_const`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)]
pub enum LitToConstError {
    /// The literal's inferred type did not match the expected `ty` in the input.
    /// This is used for graceful error handling (`delay_span_bug`) in
    /// type checking (`Const::from_anon_const`).
    TypeError,
    /// Conversion failed; a diagnostic is presumed to have been emitted
    /// already (NOTE(review): confirm against callers).
    Reported,
}
179 
/// An opaque identifier for an interpreter allocation. Wraps `NonZeroU64` so
/// that `Option<AllocId>` is the same size as `AllocId` itself.
#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct AllocId(pub NonZeroU64);
182 
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
impl fmt::Debug for AllocId {
    /// `{:?}` prints `alloc<N>`; the alternate form `{:#?}` abbreviates to `a<N>`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if f.alternate() { write!(f, "a{}", self.0) } else { write!(f, "alloc{}", self.0) }
    }
}
190 
impl fmt::Display for AllocId {
    // Delegate to `Debug` via `Debug::fmt` (rather than `write!("{:?}")`) so
    // the formatter's flags -- notably the alternate flag -- are preserved.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
196 
/// Tag written before each allocation in the serialized stream, telling the
/// decoder which kind of `GlobalAlloc` payload follows.
#[derive(TyDecodable, TyEncodable)]
enum AllocDiscriminant {
    /// An in-stream `Allocation` follows.
    Alloc,
    /// A function `Instance` follows.
    Fn,
    /// A static's `DefId` follows.
    Static,
}
203 
/// Encodes the `GlobalAlloc` behind `alloc_id` into `encoder`, prefixed with
/// an `AllocDiscriminant` tag so `decode_alloc_id` knows what follows.
pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<'tcx>>(
    encoder: &mut E,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
) -> Result<(), E::Error> {
    match tcx.global_alloc(alloc_id) {
        GlobalAlloc::Memory(alloc) => {
            trace!("encoding {:?} with {:#?}", alloc_id, alloc);
            AllocDiscriminant::Alloc.encode(encoder)?;
            alloc.encode(encoder)?;
        }
        GlobalAlloc::Function(fn_instance) => {
            trace!("encoding {:?} with {:#?}", alloc_id, fn_instance);
            AllocDiscriminant::Fn.encode(encoder)?;
            fn_instance.encode(encoder)?;
        }
        GlobalAlloc::Static(did) => {
            assert!(!tcx.is_thread_local_static(did));
            // References to statics don't need to know about their allocations,
            // just about the static's `DefId`.
            AllocDiscriminant::Static.encode(encoder)?;
            did.encode(encoder)?;
        }
    }
    Ok(())
}
230 
// Used to avoid infinite recursion when decoding cyclic allocations.
// One ID is handed out per `AllocDecodingState::new_decoding_session` call.
type DecodingSessionId = NonZeroU32;
233 
// Per-allocation decoding state, shared between concurrent decoding sessions.
#[derive(Clone)]
enum State {
    /// Not decoded by any session yet.
    Empty,
    /// A fn/static currently being decoded by the listed sessions; no
    /// `AllocId` is reserved up front (it is produced by interning when done).
    InProgressNonAlloc(TinyList<DecodingSessionId>),
    /// A memory allocation being decoded by the listed sessions; its
    /// `AllocId` was reserved eagerly so cyclic references can resolve to it.
    InProgress(TinyList<DecodingSessionId>, AllocId),
    /// Fully decoded; every session can reuse this `AllocId`.
    Done(AllocId),
}
241 
/// Shared, per-stream state used to decode `AllocId`s without decoding any
/// allocation more than once per session.
pub struct AllocDecodingState {
    // For each `AllocId`, we keep track of which decoding state it's currently in.
    decoding_state: Vec<Lock<State>>,
    // The offsets of each allocation in the data stream.
    data_offsets: Vec<u32>,
}
248 
249 impl AllocDecodingState {
250     #[inline]
new_decoding_session(&self) -> AllocDecodingSession<'_>251     pub fn new_decoding_session(&self) -> AllocDecodingSession<'_> {
252         static DECODER_SESSION_ID: AtomicU32 = AtomicU32::new(0);
253         let counter = DECODER_SESSION_ID.fetch_add(1, Ordering::SeqCst);
254 
255         // Make sure this is never zero.
256         let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF) + 1).unwrap();
257 
258         AllocDecodingSession { state: self, session_id }
259     }
260 
new(data_offsets: Vec<u32>) -> Self261     pub fn new(data_offsets: Vec<u32>) -> Self {
262         let decoding_state = vec![Lock::new(State::Empty); data_offsets.len()];
263 
264         Self { decoding_state, data_offsets }
265     }
266 }
267 
/// A lightweight, copyable handle for one decoding session; created by
/// `AllocDecodingState::new_decoding_session`.
#[derive(Copy, Clone)]
pub struct AllocDecodingSession<'s> {
    state: &'s AllocDecodingState,
    session_id: DecodingSessionId,
}
273 
impl<'s> AllocDecodingSession<'s> {
    /// Decodes an `AllocId` in a thread-safe way.
    ///
    /// Handles cyclic allocation graphs: for memory allocations the `AllocId`
    /// is reserved *before* the allocation body is decoded, so a recursive
    /// reference encountered mid-decode resolves to the reserved ID instead of
    /// recursing forever.
    pub fn decode_alloc_id<D>(&self, decoder: &mut D) -> Result<AllocId, D::Error>
    where
        D: TyDecoder<'tcx>,
    {
        // Read the index of the allocation.
        let idx = usize::try_from(decoder.read_u32()?).unwrap();
        let pos = usize::try_from(self.state.data_offsets[idx]).unwrap();

        // Decode the `AllocDiscriminant` now so that we know if we have to reserve an
        // `AllocId`.
        let (alloc_kind, pos) = decoder.with_position(pos, |decoder| {
            let alloc_kind = AllocDiscriminant::decode(decoder)?;
            Ok((alloc_kind, decoder.position()))
        })?;

        // Check the decoding state to see if it's already decoded or if we should
        // decode it here.
        let alloc_id = {
            let mut entry = self.state.decoding_state[idx].lock();

            match *entry {
                State::Done(alloc_id) => {
                    return Ok(alloc_id);
                }
                ref mut entry @ State::Empty => {
                    // We are allowed to decode.
                    match alloc_kind {
                        AllocDiscriminant::Alloc => {
                            // If this is an allocation, we need to reserve an
                            // `AllocId` so we can decode cyclic graphs.
                            let alloc_id = decoder.tcx().reserve_alloc_id();
                            *entry =
                                State::InProgress(TinyList::new_single(self.session_id), alloc_id);
                            Some(alloc_id)
                        }
                        AllocDiscriminant::Fn | AllocDiscriminant::Static => {
                            // Fns and statics cannot be cyclic, and their `AllocId`
                            // is determined later by interning.
                            *entry =
                                State::InProgressNonAlloc(TinyList::new_single(self.session_id));
                            None
                        }
                    }
                }
                State::InProgressNonAlloc(ref mut sessions) => {
                    if sessions.contains(&self.session_id) {
                        // Fns and statics cannot be cyclic (see above), so
                        // re-entering from the same session should be impossible.
                        bug!("this should be unreachable");
                    } else {
                        // Start decoding concurrently.
                        sessions.insert(self.session_id);
                        None
                    }
                }
                State::InProgress(ref mut sessions, alloc_id) => {
                    if sessions.contains(&self.session_id) {
                        // Don't recurse: we already reserved an ID for this
                        // allocation, so a cyclic reference can just use it.
                        return Ok(alloc_id);
                    } else {
                        // Start decoding concurrently.
                        sessions.insert(self.session_id);
                        Some(alloc_id)
                    }
                }
            }
        };

        // Now decode the actual data.
        let alloc_id = decoder.with_position(pos, |decoder| {
            match alloc_kind {
                AllocDiscriminant::Alloc => {
                    let alloc = <&'tcx Allocation as Decodable<_>>::decode(decoder)?;
                    // We already have a reserved `AllocId`.
                    let alloc_id = alloc_id.unwrap();
                    trace!("decoded alloc {:?}: {:#?}", alloc_id, alloc);
                    decoder.tcx().set_alloc_id_same_memory(alloc_id, alloc);
                    Ok(alloc_id)
                }
                AllocDiscriminant::Fn => {
                    assert!(alloc_id.is_none());
                    trace!("creating fn alloc ID");
                    let instance = ty::Instance::decode(decoder)?;
                    trace!("decoded fn alloc instance: {:?}", instance);
                    let alloc_id = decoder.tcx().create_fn_alloc(instance);
                    Ok(alloc_id)
                }
                AllocDiscriminant::Static => {
                    assert!(alloc_id.is_none());
                    trace!("creating extern static alloc ID");
                    let did = <DefId as Decodable<D>>::decode(decoder)?;
                    trace!("decoded static def-ID: {:?}", did);
                    let alloc_id = decoder.tcx().create_static_alloc(did);
                    Ok(alloc_id)
                }
            }
        })?;

        // Mark this allocation as fully decoded for all later lookups.
        self.state.decoding_state[idx].with_lock(|entry| {
            *entry = State::Done(alloc_id);
        });

        Ok(alloc_id)
    }
}
379 
/// An allocation in the global (tcx-managed) memory can be either a function pointer,
/// a static, or a "real" allocation with some data in it.
/// Stored in the tcx's `AllocMap`, keyed by `AllocId`.
#[derive(Debug, Clone, Eq, PartialEq, Hash, TyDecodable, TyEncodable, HashStable)]
pub enum GlobalAlloc<'tcx> {
    /// The alloc ID is used as a function pointer.
    Function(Instance<'tcx>),
    /// The alloc ID points to a "lazy" static variable that did not get computed (yet).
    /// This is also used to break the cycle in recursive statics.
    Static(DefId),
    /// The alloc ID points to memory.
    Memory(&'tcx Allocation),
}
392 
393 impl GlobalAlloc<'tcx> {
394     /// Panics if the `GlobalAlloc` does not refer to an `GlobalAlloc::Memory`
395     #[track_caller]
396     #[inline]
unwrap_memory(&self) -> &'tcx Allocation397     pub fn unwrap_memory(&self) -> &'tcx Allocation {
398         match *self {
399             GlobalAlloc::Memory(mem) => mem,
400             _ => bug!("expected memory, got {:?}", self),
401         }
402     }
403 
404     /// Panics if the `GlobalAlloc` is not `GlobalAlloc::Function`
405     #[track_caller]
406     #[inline]
unwrap_fn(&self) -> Instance<'tcx>407     pub fn unwrap_fn(&self) -> Instance<'tcx> {
408         match *self {
409             GlobalAlloc::Function(instance) => instance,
410             _ => bug!("expected function, got {:?}", self),
411         }
412     }
413 }
414 
/// The tcx-owned table behind `AllocId`s: hands out fresh IDs and maps each
/// ID to its `GlobalAlloc`.
crate struct AllocMap<'tcx> {
    /// Maps `AllocId`s to their corresponding allocations.
    alloc_map: FxHashMap<AllocId, GlobalAlloc<'tcx>>,

    /// Used to ensure that statics and functions only get one associated `AllocId`.
    /// Should never contain a `GlobalAlloc::Memory`!
    //
    // FIXME: Should we just have two separate dedup maps for statics and functions each?
    dedup: FxHashMap<GlobalAlloc<'tcx>, AllocId>,

    /// The `AllocId` to assign to the next requested ID.
    /// Always incremented; never gets smaller.
    next_id: AllocId,
}
429 
430 impl<'tcx> AllocMap<'tcx> {
new() -> Self431     crate fn new() -> Self {
432         AllocMap {
433             alloc_map: Default::default(),
434             dedup: Default::default(),
435             next_id: AllocId(NonZeroU64::new(1).unwrap()),
436         }
437     }
reserve(&mut self) -> AllocId438     fn reserve(&mut self) -> AllocId {
439         let next = self.next_id;
440         self.next_id.0 = self.next_id.0.checked_add(1).expect(
441             "You overflowed a u64 by incrementing by 1... \
442              You've just earned yourself a free drink if we ever meet. \
443              Seriously, how did you do that?!",
444         );
445         next
446     }
447 }
448 
impl<'tcx> TyCtxt<'tcx> {
    /// Obtains a new allocation ID that can be referenced but does not
    /// yet have an allocation backing it.
    ///
    /// Make sure to call `set_alloc_id_memory` or `set_alloc_id_same_memory` before returning such
    /// an `AllocId` from a query.
    pub fn reserve_alloc_id(self) -> AllocId {
        self.alloc_map.lock().reserve()
    }

    /// Reserves a new ID *if* this allocation has not been dedup-reserved before.
    /// Should only be used for function pointers and statics, we don't want
    /// to dedup IDs for "real" memory!
    fn reserve_and_set_dedup(self, alloc: GlobalAlloc<'tcx>) -> AllocId {
        let mut alloc_map = self.alloc_map.lock();
        // Only functions and statics may be deduplicated; real memory must
        // keep unique IDs (see doc comment above).
        match alloc {
            GlobalAlloc::Function(..) | GlobalAlloc::Static(..) => {}
            GlobalAlloc::Memory(..) => bug!("Trying to dedup-reserve memory with real data!"),
        }
        // Reuse the cached ID if this exact alloc was reserved before.
        if let Some(&alloc_id) = alloc_map.dedup.get(&alloc) {
            return alloc_id;
        }
        let id = alloc_map.reserve();
        debug!("creating alloc {:?} with id {}", alloc, id);
        // Record the mapping in both directions: id -> alloc for lookup,
        // alloc -> id for future dedup.
        alloc_map.alloc_map.insert(id, alloc.clone());
        alloc_map.dedup.insert(alloc, id);
        id
    }

    /// Generates an `AllocId` for a static or return a cached one in case this function has been
    /// called on the same static before.
    pub fn create_static_alloc(self, static_id: DefId) -> AllocId {
        self.reserve_and_set_dedup(GlobalAlloc::Static(static_id))
    }

    /// Generates an `AllocId` for a function.  Depending on the function type,
    /// this might get deduplicated or assigned a new ID each time.
    pub fn create_fn_alloc(self, instance: Instance<'tcx>) -> AllocId {
        // Functions cannot be identified by pointers, as asm-equal functions can get deduplicated
        // by the linker (we set the "unnamed_addr" attribute for LLVM) and functions can be
        // duplicated across crates.
        // We thus generate a new `AllocId` for every mention of a function. This means that
        // `main as fn() == main as fn()` is false, while `let x = main as fn(); x == x` is true.
        // However, formatting code relies on function identity (see #58320), so we only do
        // this for generic functions.  Lifetime parameters are ignored.
        let is_generic = instance
            .substs
            .into_iter()
            .any(|kind| !matches!(kind.unpack(), GenericArgKind::Lifetime(_)));
        if is_generic {
            // Get a fresh ID.
            let mut alloc_map = self.alloc_map.lock();
            let id = alloc_map.reserve();
            alloc_map.alloc_map.insert(id, GlobalAlloc::Function(instance));
            id
        } else {
            // Deduplicate.
            self.reserve_and_set_dedup(GlobalAlloc::Function(instance))
        }
    }

    /// Interns the `Allocation` and return a new `AllocId`, even if there's already an identical
    /// `Allocation` with a different `AllocId`.
    /// Statics with identical content will still point to the same `Allocation`, i.e.,
    /// their data will be deduplicated through `Allocation` interning -- but they
    /// are different places in memory and as such need different IDs.
    pub fn create_memory_alloc(self, mem: &'tcx Allocation) -> AllocId {
        let id = self.reserve_alloc_id();
        self.set_alloc_id_memory(id, mem);
        id
    }

    /// Returns `None` in case the `AllocId` is dangling. An `InterpretCx` can still have a
    /// local `Allocation` for that `AllocId`, but having such an `AllocId` in a constant is
    /// illegal and will likely ICE.
    /// This function exists to allow const eval to detect the difference between evaluation-
    /// local dangling pointers and allocations in constants/statics.
    #[inline]
    pub fn get_global_alloc(self, id: AllocId) -> Option<GlobalAlloc<'tcx>> {
        self.alloc_map.lock().alloc_map.get(&id).cloned()
    }

    #[inline]
    #[track_caller]
    /// Panics in case the `AllocId` is dangling. Since that is impossible for `AllocId`s in
    /// constants (as all constants must pass interning and validation that check for dangling
    /// ids), this function is frequently used throughout rustc, but should not be used within
    /// the miri engine.
    pub fn global_alloc(self, id: AllocId) -> GlobalAlloc<'tcx> {
        match self.get_global_alloc(id) {
            Some(alloc) => alloc,
            None => bug!("could not find allocation for {}", id),
        }
    }

    /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. Trying to
    /// call this function twice, even with the same `Allocation` will ICE the compiler.
    pub fn set_alloc_id_memory(self, id: AllocId, mem: &'tcx Allocation) {
        if let Some(old) = self.alloc_map.lock().alloc_map.insert(id, GlobalAlloc::Memory(mem)) {
            bug!("tried to set allocation ID {}, but it was already existing as {:#?}", id, old);
        }
    }

    /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. May be called
    /// twice for the same `(AllocId, Allocation)` pair.
    fn set_alloc_id_same_memory(self, id: AllocId, mem: &'tcx Allocation) {
        self.alloc_map.lock().alloc_map.insert_same(id, GlobalAlloc::Memory(mem));
    }
}
558 
559 ////////////////////////////////////////////////////////////////////////////////
560 // Methods to access integers in the target endianness
561 ////////////////////////////////////////////////////////////////////////////////
562 
563 #[inline]
write_target_uint( endianness: Endian, mut target: &mut [u8], data: u128, ) -> Result<(), io::Error>564 pub fn write_target_uint(
565     endianness: Endian,
566     mut target: &mut [u8],
567     data: u128,
568 ) -> Result<(), io::Error> {
569     // This u128 holds an "any-size uint" (since smaller uints can fits in it)
570     // So we do not write all bytes of the u128, just the "payload".
571     match endianness {
572         Endian::Little => target.write(&data.to_le_bytes())?,
573         Endian::Big => target.write(&data.to_be_bytes()[16 - target.len()..])?,
574     };
575     debug_assert!(target.len() == 0); // We should have filled the target buffer.
576     Ok(())
577 }
578 
579 #[inline]
read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error>580 pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> {
581     // This u128 holds an "any-size uint" (since smaller uints can fits in it)
582     let mut buf = [0u8; std::mem::size_of::<u128>()];
583     // So we do not read exactly 16 bytes into the u128, just the "payload".
584     let uint = match endianness {
585         Endian::Little => {
586             source.read(&mut buf)?;
587             Ok(u128::from_le_bytes(buf))
588         }
589         Endian::Big => {
590             source.read(&mut buf[16 - source.len()..])?;
591             Ok(u128::from_be_bytes(buf))
592         }
593     };
594     debug_assert!(source.len() == 0); // We should have consumed the source buffer.
595     uint
596 }
597