//! Intrinsics and other functions that the miri engine executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.

use std::convert::TryFrom;

use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
    self,
    interpret::{ConstValue, GlobalId, InterpResult, Scalar},
    BinOp,
};
use rustc_middle::ty;
use rustc_middle::ty::layout::LayoutOf as _;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, Align, Primitive, Size};

use super::{
    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
    Pointer,
};

mod caller_location;
mod type_name;

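/// A worked example of the shifting below (illustrative): for `ctlz` on the `u8`
/// value `0b0000_0100`, the value sits in the low bits of the `u128`, so
/// `leading_zeros()` returns 125 and subtracting `extra = 128 - 8 = 120` yields the
/// correct answer 5. `cttz`/`bswap`/`bitreverse` instead shift the value into the
/// high bits first, so that, e.g., `cttz(0u8)` comes out as 8 rather than 128.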
fn numeric_intrinsic<Tag>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Tag> {
    let size = match kind {
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, bits),
    };
    let extra = 128 - u128::from(size.bits());
    let bits_out = match name {
        sym::ctpop => u128::from(bits.count_ones()),
        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
        sym::bswap => (bits << extra).swap_bytes(),
        sym::bitreverse => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
    };
    Scalar::from_uint(bits_out, size)
}

/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
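/// For example, `variant_count::<Option<u8>>()` evaluates to `2`, and
/// `needs_drop::<u32>()` to `false`.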
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
        }
        sym::needs_drop => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
        }
        sym::pref_align_of => {
            // Correctly handles non-monomorphic calls, so there is no need for `ensure_monomorphic_enough`.
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx)
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        sym::variant_count => match tp_ty.kind() {
            // Correctly handles non-monomorphic calls, so there is no need for `ensure_monomorphic_enough`.
            ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
            ty::Projection(_)
            | ty::Opaque(_, _)
            | ty::Param(_)
            | ty::Bound(_, _)
            | ty::Placeholder(_)
            | ty::Infer(_) => throw_inval!(TooGeneric),
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(_)
            | ty::Dynamic(_, _)
            | ty::Closure(_, _)
            | ty::Generator(_, _, _)
            | ty::GeneratorWitness(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
    /// intrinsic handling.
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let (dest, ret) = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };

        // Keep the patterns in this match ordered the same as the list in
        // `src/librustc_middle/ty/constness.rs`.
        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_immediate(location.to_ref(self), dest)?;
            }

            sym::min_align_of_val | sym::size_of_val => {
                // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
                // dereferenceable!
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_mplace(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
            }

            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!("already checked for nullary intrinsics"),
                };
                let val =
                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let bits = val.to_bits(layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(scalar) => scalar.value,
                    _ => span_bug!(
                        self.cur_span(),
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
                self.write_scalar(out_val, dest)?;
            }
            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
                let lhs = self.read_immediate(&args[0])?;
                let rhs = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::add_with_overflow => BinOp::Add,
                    sym::sub_with_overflow => BinOp::Sub,
                    sym::mul_with_overflow => BinOp::Mul,
                    _ => bug!("Already checked for int ops"),
                };
                self.binop_with_overflow(bin_op, &lhs, &rhs, dest)?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let is_add = intrinsic_name == sym::saturating_add;
                let (val, overflowed, _ty) = self.overflowing_binary_op(
                    if is_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
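                // Worked example (illustrative): for `i8`, `100 + 100` overflows and the
                // first term is positive, so the result saturates to `i8::MAX`, i.e. 127.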
                let val = if overflowed {
                    let size = l.layout.size;
                    let num_bits = size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints, the saturated value depends on the sign of the first
                        // term, since the sign of the second term can be inferred from this and
                        // the fact that the operation has overflowed (if either term is 0, no
                        // overflow can occur).
                        let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                        if first_term_positive {
                            // Negative overflow is not possible, since the positive first term
                            // can only increase an (in-range) negative term for addition,
                            // or the correspondingly negated positive term for subtraction.
                            Scalar::from_uint(
                                (1u128 << (num_bits - 1)) - 1, // max positive
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // Positive overflow is not possible, for a similar reason.
                            // max negative
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else {
                        // unsigned
                        if is_add {
                            // max unsigned
                            Scalar::from_uint(size.unsigned_int_max(), Size::from_bits(num_bits))
                        } else {
                            // underflow to 0
                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
                        }
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_operand(&args[0])?;
                let discr_val = self.read_discriminant(&place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!("Already checked for int ops"),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = r.to_scalar()?.to_bits(layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
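                // For example, `rotate_left` on the `u8` value `0b1000_0001` by 1 yields
                // `0b0000_0011`: the top bit wraps around to the bottom.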
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let val_bits = val.to_bits(layout.size)?;
                let raw_shift = self.read_scalar(&args[1])?.check_init()?;
                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from => {
                let a = self.read_immediate(&args[0])?.to_scalar()?;
                let b = self.read_immediate(&args[1])?.to_scalar()?;

                // Special case: if both scalars are *equal integers*
                // and not null, we pretend there is an allocation of size 0 right there,
                // and their offset is 0. (There's never a valid object at null, making it an
                // exception from the exception.)
                // This is the dual to the special exception for offset-by-0
                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
                //
                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
                let done = if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
                    let a = a.try_to_machine_usize(*self.tcx).unwrap();
                    let b = b.try_to_machine_usize(*self.tcx).unwrap();
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
                    } else {
                        false
                    }
                } else {
                    false
                };

                if !done {
                    // General case: we need two pointers.
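                    // Example: for `*const u16` pointers at byte offsets 16 and 4 within
                    // the same allocation, the result is `(16 - 4) / size_of::<u16>() = 6`
                    // elements.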
                    let a = self.scalar_to_ptr(a);
                    let b = self.scalar_to_ptr(b);
                    let (a_alloc_id, a_offset, _) = self.memory.ptr_get_alloc(a)?;
                    let (b_alloc_id, b_offset, _) = self.memory.ptr_get_alloc(b)?;
                    if a_alloc_id != b_alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations.",
                        );
                    }
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
                    let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
                    let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
                    let val = ImmTy::from_scalar(val, isize_layout);
                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
                    self.exact_div(&val, &size, dest)?;
                }
            }

            sym::transmute => {
                self.copy_op_transmute(&args[0], dest)?;
            }
            sym::assert_inhabited => {
                let ty = instance.substs.type_at(0);
                let layout = self.layout_of(ty)?;

                if layout.abi.is_uninhabited() {
                    // The run-time intrinsic panics just to get a good backtrace; here we abort
                    // since there is no problem showing a backtrace even for aborts.
                    M::abort(
                        self,
                        format!(
                            "aborted execution: attempted to instantiate uninhabited type `{}`",
                            ty
                        ),
                    )?;
                }
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let elem = &args[2];
                let (input, input_len) = self.operand_to_simd(&args[0])?;
                let (dest, dest_len) = self.place_to_simd(dest)?;
                assert_eq!(input_len, dest_len, "return vector length must match input length");
                assert!(
                    index < dest_len,
                    "index `{}` must be in bounds of vector with length `{}`",
                    index,
                    dest_len
                );

                for i in 0..dest_len {
                    let place = self.mplace_index(&dest, i)?;
                    let value =
                        if i == index { *elem } else { self.mplace_index(&input, i)?.into() };
                    self.copy_op(&value, &place.into())?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let (input, input_len) = self.operand_to_simd(&args[0])?;
                assert!(
                    index < input_len,
                    "index `{}` must be in bounds of vector with length `{}`",
                    index,
                    input_len
                );
                self.copy_op(&self.mplace_index(&input, index)?.into(), dest)?;
            }
            sym::likely | sym::unlikely | sym::black_box => {
                // These just return their argument.
                self.copy_op(&args[0], dest)?;
            }
            sym::assume => {
                let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }
            _ => return Ok(false),
        }

        trace!("{:?}", self.dump_place(**dest));
        self.go_to_block(ret);
        Ok(true)
    }

    pub fn exact_div(
        &mut self,
        a: &ImmTy<'tcx, M::PointerTag>,
        b: &ImmTy<'tcx, M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Performs an exact division, resulting in undefined behavior where
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
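        // For example, `exact_div(10, 3)` is UB (remainder 1), while
        // `exact_div(10, 5)` evaluates to 2.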
        // First, check x % y != 0 (or if that computation overflows).
        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
        if overflow || res.assert_bits(a.layout.size) != 0 {
            // Then, check if `b` is -1, which is the "MIN / -1" case.
            let minus1 = Scalar::from_int(-1, dest.layout.size);
            let b_scalar = b.to_scalar().unwrap();
            if b_scalar == minus1 {
                throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
            } else {
                throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b)
            }
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
    }

    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
    /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
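    /// For example, offsetting a `*const u32` by `offset_count = 3` requires all
    /// `3 * 4 = 12` bytes between the old and the new pointer to be inside the same
    /// allocation.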
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        // We cannot overflow i64 as a type's size must be <= isize::MAX.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, cannot overflow an isize.
        let offset_bytes =
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
        let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
        // memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
        let size = offset_bytes.unsigned_abs();
        // This call handles checking for integer/null pointers.
        self.memory.check_ptr_access_align(
            min_ptr,
            Size::from_bytes(size),
            Align::ONE,
            CheckInAllocMsg::PointerArithmeticTest,
        )?;
        Ok(offset_ptr)
    }

    /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
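    /// For example, copying `count = 4` values of type `u16` moves `4 * 2 = 8` bytes;
    /// both pointers are checked against the element type's ABI alignment (2 for `u16`).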
    pub(crate) fn copy_intrinsic(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
        let (size, align) = (layout.size, layout.align.abi);
        let size = size.checked_mul(count, self).ok_or_else(|| {
            err_ub_format!(
                "overflow computing total size of `{}`",
                if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        let src = self.read_pointer(&src)?;
        let dst = self.read_pointer(&dst)?;

        self.memory.copy(src, align, dst, align, size, nonoverlapping)
    }

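    /// Implements the `raw_eq` intrinsic: reads `size_of::<T>()` bytes from each
    /// pointer and compares them for byte-wise equality.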
    pub(crate) fn raw_eq_intrinsic(
        &mut self,
        lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
    ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
        assert!(!layout.is_unsized());

        let lhs = self.read_pointer(lhs)?;
        let rhs = self.read_pointer(rhs)?;
        let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?;
        let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?;
        Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
    }
}