1 //! Computations on places -- field projections, going from mir::Place, and writing 2 //! into a place. 3 //! All high-level functions to write to memory work on places as destinations. 4 5 use std::convert::TryFrom; 6 use std::hash::Hash; 7 8 use rustc_ast::Mutability; 9 use rustc_macros::HashStable; 10 use rustc_middle::mir; 11 use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout}; 12 use rustc_middle::ty::{self, Ty}; 13 use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding}; 14 use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants}; 15 16 use super::{ 17 alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, 18 ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, 19 Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit, 20 }; 21 22 #[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)] 23 /// Information required for the sound usage of a `MemPlace`. 24 pub enum MemPlaceMeta<Tag: Provenance = AllocId> { 25 /// The unsized payload (e.g. length for slices or vtable pointer for trait objects). 26 Meta(Scalar<Tag>), 27 /// `Sized` types or unsized `extern type` 28 None, 29 /// The address of this place may not be taken. This protects the `MemPlace` from coming from 30 /// a ZST Operand without a backing allocation and being converted to an integer address. This 31 /// should be impossible, because you can't take the address of an operand, but this is a second 32 /// protection layer ensuring that we don't mess up. 33 Poison, 34 } 35 36 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] 37 rustc_data_structures::static_assert_size!(MemPlaceMeta, 24); 38 39 impl<Tag: Provenance> MemPlaceMeta<Tag> { unwrap_meta(self) -> Scalar<Tag>40 pub fn unwrap_meta(self) -> Scalar<Tag> { 41 match self { 42 Self::Meta(s) => s, 43 Self::None | Self::Poison => { 44 bug!("expected wide pointer extra data (e.g. 
slice length or trait object vtable)") 45 } 46 } 47 } has_meta(self) -> bool48 fn has_meta(self) -> bool { 49 match self { 50 Self::Meta(_) => true, 51 Self::None | Self::Poison => false, 52 } 53 } 54 } 55 56 #[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)] 57 pub struct MemPlace<Tag: Provenance = AllocId> { 58 /// The pointer can be a pure integer, with the `None` tag. 59 pub ptr: Pointer<Option<Tag>>, 60 pub align: Align, 61 /// Metadata for unsized places. Interpretation is up to the type. 62 /// Must not be present for sized types, but can be missing for unsized types 63 /// (e.g., `extern type`). 64 pub meta: MemPlaceMeta<Tag>, 65 } 66 67 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] 68 rustc_data_structures::static_assert_size!(MemPlace, 48); 69 70 #[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)] 71 pub enum Place<Tag: Provenance = AllocId> { 72 /// A place referring to a value allocated in the `Memory` system. 73 Ptr(MemPlace<Tag>), 74 75 /// To support alloc-free locals, we are able to write directly to a local. 76 /// (Without that optimization, we'd just always be a `MemPlace`.) 77 Local { frame: usize, local: mir::Local }, 78 } 79 80 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] 81 rustc_data_structures::static_assert_size!(Place, 56); 82 83 #[derive(Copy, Clone, Debug)] 84 pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> { 85 place: Place<Tag>, // Keep this private; it helps enforce invariants. 86 pub layout: TyAndLayout<'tcx>, 87 } 88 89 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] 90 rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72); 91 92 impl<'tcx, Tag: Provenance> std::ops::Deref for PlaceTy<'tcx, Tag> { 93 type Target = Place<Tag>; 94 #[inline(always)] deref(&self) -> &Place<Tag>95 fn deref(&self) -> &Place<Tag> { 96 &self.place 97 } 98 } 99 100 /// A MemPlace with its layout. Constructing it is only possible in this module. 
101 #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] 102 pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> { 103 mplace: MemPlace<Tag>, 104 pub layout: TyAndLayout<'tcx>, 105 } 106 107 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] 108 rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64); 109 110 impl<'tcx, Tag: Provenance> std::ops::Deref for MPlaceTy<'tcx, Tag> { 111 type Target = MemPlace<Tag>; 112 #[inline(always)] deref(&self) -> &MemPlace<Tag>113 fn deref(&self) -> &MemPlace<Tag> { 114 &self.mplace 115 } 116 } 117 118 impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> { 119 #[inline(always)] from(mplace: MPlaceTy<'tcx, Tag>) -> Self120 fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self { 121 PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout } 122 } 123 } 124 125 impl<Tag: Provenance> MemPlace<Tag> { 126 #[inline(always)] from_ptr(ptr: Pointer<Option<Tag>>, align: Align) -> Self127 pub fn from_ptr(ptr: Pointer<Option<Tag>>, align: Align) -> Self { 128 MemPlace { ptr, align, meta: MemPlaceMeta::None } 129 } 130 131 /// Adjust the provenance of the main pointer (metadata is unaffected). map_provenance(self, f: impl FnOnce(Option<Tag>) -> Option<Tag>) -> Self132 pub fn map_provenance(self, f: impl FnOnce(Option<Tag>) -> Option<Tag>) -> Self { 133 MemPlace { ptr: self.ptr.map_provenance(f), ..self } 134 } 135 136 /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space. 137 /// This is the inverse of `ref_to_mplace`. 
138 #[inline(always)] to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag>139 pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag> { 140 match self.meta { 141 MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)), 142 MemPlaceMeta::Meta(meta) => { 143 Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into()) 144 } 145 MemPlaceMeta::Poison => bug!( 146 "MPlaceTy::dangling may never be used to produce a \ 147 place that will have the address of its pointee taken" 148 ), 149 } 150 } 151 152 #[inline] offset( self, offset: Size, meta: MemPlaceMeta<Tag>, cx: &impl HasDataLayout, ) -> InterpResult<'tcx, Self>153 pub fn offset( 154 self, 155 offset: Size, 156 meta: MemPlaceMeta<Tag>, 157 cx: &impl HasDataLayout, 158 ) -> InterpResult<'tcx, Self> { 159 Ok(MemPlace { 160 ptr: self.ptr.offset(offset, cx)?, 161 align: self.align.restrict_for_offset(offset), 162 meta, 163 }) 164 } 165 } 166 167 impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> { 168 /// Produces a MemPlace that works for ZST but nothing else 169 #[inline] dangling(layout: TyAndLayout<'tcx>) -> Self170 pub fn dangling(layout: TyAndLayout<'tcx>) -> Self { 171 let align = layout.align.abi; 172 let ptr = Pointer::new(None, Size::from_bytes(align.bytes())); // no provenance, absolute address 173 // `Poison` this to make sure that the pointer value `ptr` is never observable by the program. 
174 MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout } 175 } 176 177 #[inline] offset( &self, offset: Size, meta: MemPlaceMeta<Tag>, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout, ) -> InterpResult<'tcx, Self>178 pub fn offset( 179 &self, 180 offset: Size, 181 meta: MemPlaceMeta<Tag>, 182 layout: TyAndLayout<'tcx>, 183 cx: &impl HasDataLayout, 184 ) -> InterpResult<'tcx, Self> { 185 Ok(MPlaceTy { mplace: self.mplace.offset(offset, meta, cx)?, layout }) 186 } 187 188 #[inline] from_aligned_ptr(ptr: Pointer<Option<Tag>>, layout: TyAndLayout<'tcx>) -> Self189 pub fn from_aligned_ptr(ptr: Pointer<Option<Tag>>, layout: TyAndLayout<'tcx>) -> Self { 190 MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout } 191 } 192 193 #[inline] len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64>194 pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> { 195 if self.layout.is_unsized() { 196 // We need to consult `meta` metadata 197 match self.layout.ty.kind() { 198 ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx), 199 _ => bug!("len not supported on unsized type {:?}", self.layout.ty), 200 } 201 } else { 202 // Go through the layout. There are lots of types that support a length, 203 // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!) 204 match self.layout.fields { 205 FieldsShape::Array { count, .. } => Ok(count), 206 _ => bug!("len not supported on sized type {:?}", self.layout.ty), 207 } 208 } 209 } 210 211 #[inline] vtable(&self) -> Scalar<Tag>212 pub(super) fn vtable(&self) -> Scalar<Tag> { 213 match self.layout.ty.kind() { 214 ty::Dynamic(..) => self.mplace.meta.unwrap_meta(), 215 _ => bug!("vtable not supported on type {:?}", self.layout.ty), 216 } 217 } 218 } 219 220 // These are defined here because they produce a place. 
221 impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> { 222 #[inline(always)] 223 /// Note: do not call `as_ref` on the resulting place. This function should only be used to 224 /// read from the resulting mplace, not to get its address back. try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>>225 pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> { 226 match **self { 227 Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }), 228 Operand::Immediate(_) if self.layout.is_zst() => Ok(MPlaceTy::dangling(self.layout)), 229 Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)), 230 } 231 } 232 233 #[inline(always)] 234 /// Note: do not call `as_ref` on the resulting place. This function should only be used to 235 /// read from the resulting mplace, not to get its address back. assert_mem_place(&self) -> MPlaceTy<'tcx, Tag>236 pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag> { 237 self.try_as_mplace().unwrap() 238 } 239 } 240 241 impl<Tag: Provenance> Place<Tag> { 242 #[inline] assert_mem_place(self) -> MemPlace<Tag>243 pub fn assert_mem_place(self) -> MemPlace<Tag> { 244 match self { 245 Place::Ptr(mplace) => mplace, 246 _ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self), 247 } 248 } 249 } 250 251 impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> { 252 #[inline] assert_mem_place(self) -> MPlaceTy<'tcx, Tag>253 pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> { 254 MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout } 255 } 256 } 257 258 // separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385 259 impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M> 260 where 261 // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 262 Tag: Provenance + Eq + Hash + 'static, 263 M: Machine<'mir, 'tcx, PointerTag = Tag>, 264 { 265 /// Take a value, which represents a (thin or wide) reference, and make it a place. 
266 /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`. 267 /// 268 /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not 269 /// want to ever use the place for memory access! 270 /// Generally prefer `deref_operand`. ref_to_mplace( &self, val: &ImmTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>>271 pub fn ref_to_mplace( 272 &self, 273 val: &ImmTy<'tcx, M::PointerTag>, 274 ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { 275 let pointee_type = 276 val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty; 277 let layout = self.layout_of(pointee_type)?; 278 let (ptr, meta) = match **val { 279 Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None), 280 Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)), 281 }; 282 283 let mplace = MemPlace { 284 ptr: self.scalar_to_ptr(ptr.check_init()?), 285 // We could use the run-time alignment here. For now, we do not, because 286 // the point of tracking the alignment here is to make sure that the *static* 287 // alignment information emitted with the loads is correct. The run-time 288 // alignment can only be more restrictive. 289 align: layout.align.abi, 290 meta, 291 }; 292 Ok(MPlaceTy { mplace, layout }) 293 } 294 295 /// Take an operand, representing a pointer, and dereference it to a place -- that 296 /// will always be a MemPlace. Lives in `place.rs` because it creates a place. 
deref_operand( &self, src: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>>297 pub fn deref_operand( 298 &self, 299 src: &OpTy<'tcx, M::PointerTag>, 300 ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { 301 let val = self.read_immediate(src)?; 302 trace!("deref to {} on {:?}", val.layout.ty, *val); 303 let mplace = self.ref_to_mplace(&val)?; 304 self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?; 305 Ok(mplace) 306 } 307 308 #[inline] get_alloc( &self, place: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::PointerTag, M::AllocExtra>>>309 pub(super) fn get_alloc( 310 &self, 311 place: &MPlaceTy<'tcx, M::PointerTag>, 312 ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::PointerTag, M::AllocExtra>>> { 313 assert!(!place.layout.is_unsized()); 314 assert!(!place.meta.has_meta()); 315 let size = place.layout.size; 316 self.memory.get(place.ptr, size, place.align) 317 } 318 319 #[inline] get_alloc_mut( &mut self, place: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::PointerTag, M::AllocExtra>>>320 pub(super) fn get_alloc_mut( 321 &mut self, 322 place: &MPlaceTy<'tcx, M::PointerTag>, 323 ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::PointerTag, M::AllocExtra>>> { 324 assert!(!place.layout.is_unsized()); 325 assert!(!place.meta.has_meta()); 326 let size = place.layout.size; 327 self.memory.get_mut(place.ptr, size, place.align) 328 } 329 330 /// Check if this mplace is dereferencable and sufficiently aligned. check_mplace_access( &self, mplace: MPlaceTy<'tcx, M::PointerTag>, msg: CheckInAllocMsg, ) -> InterpResult<'tcx>331 fn check_mplace_access( 332 &self, 333 mplace: MPlaceTy<'tcx, M::PointerTag>, 334 msg: CheckInAllocMsg, 335 ) -> InterpResult<'tcx> { 336 let (size, align) = self 337 .size_and_align_of_mplace(&mplace)? 
338 .unwrap_or((mplace.layout.size, mplace.layout.align.abi)); 339 assert!(mplace.mplace.align <= align, "dynamic alignment less strict than static one?"); 340 let align = M::enforce_alignment(&self.memory.extra).then_some(align); 341 self.memory.check_ptr_access_align(mplace.ptr, size, align.unwrap_or(Align::ONE), msg)?; 342 Ok(()) 343 } 344 345 /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is 346 /// always possible without allocating, so it can take `&self`. Also return the field's layout. 347 /// This supports both struct and array fields. 348 /// 349 /// This also works for arrays, but then the `usize` index type is restricting. 350 /// For indexing into arrays, use `mplace_index`. 351 #[inline(always)] mplace_field( &self, base: &MPlaceTy<'tcx, M::PointerTag>, field: usize, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>>352 pub fn mplace_field( 353 &self, 354 base: &MPlaceTy<'tcx, M::PointerTag>, 355 field: usize, 356 ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { 357 let offset = base.layout.fields.offset(field); 358 let field_layout = base.layout.field(self, field); 359 360 // Offset may need adjustment for unsized fields. 361 let (meta, offset) = if field_layout.is_unsized() { 362 // Re-use parent metadata to determine dynamic field layout. 363 // With custom DSTS, this *will* execute user-defined code, but the same 364 // happens at run-time so that's okay. 365 let align = match self.size_and_align_of(&base.meta, &field_layout)? { 366 Some((_, align)) => align, 367 None if offset == Size::ZERO => { 368 // An extern type at offset 0, we fall back to its static alignment. 369 // FIXME: Once we have made decisions for how to handle size and alignment 370 // of `extern type`, this should be adapted. It is just a temporary hack 371 // to get some code to work that probably ought to work. 
372 field_layout.align.abi 373 } 374 None => span_bug!( 375 self.cur_span(), 376 "cannot compute offset for extern type field at non-0 offset" 377 ), 378 }; 379 (base.meta, offset.align_to(align)) 380 } else { 381 // base.meta could be present; we might be accessing a sized field of an unsized 382 // struct. 383 (MemPlaceMeta::None, offset) 384 }; 385 386 // We do not look at `base.layout.align` nor `field_layout.align`, unlike 387 // codegen -- mostly to see if we can get away with that 388 base.offset(offset, meta, field_layout, self) 389 } 390 391 /// Index into an array. 392 #[inline(always)] mplace_index( &self, base: &MPlaceTy<'tcx, M::PointerTag>, index: u64, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>>393 pub fn mplace_index( 394 &self, 395 base: &MPlaceTy<'tcx, M::PointerTag>, 396 index: u64, 397 ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { 398 // Not using the layout method because we want to compute on u64 399 match base.layout.fields { 400 FieldsShape::Array { stride, .. } => { 401 let len = base.len(self)?; 402 if index >= len { 403 // This can only be reached in ConstProp and non-rustc-MIR. 404 throw_ub!(BoundsCheckFailed { len, index }); 405 } 406 let offset = stride * index; // `Size` multiplication 407 // All fields have the same layout. 408 let field_layout = base.layout.field(self, 0); 409 410 assert!(!field_layout.is_unsized()); 411 base.offset(offset, MemPlaceMeta::None, field_layout, self) 412 } 413 _ => span_bug!( 414 self.cur_span(), 415 "`mplace_index` called on non-array type {:?}", 416 base.layout.ty 417 ), 418 } 419 } 420 421 // Iterates over all fields of an array. Much more efficient than doing the 422 // same by repeatedly calling `mplace_array`. 
mplace_array_fields( &self, base: &'a MPlaceTy<'tcx, Tag>, ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>423 pub(super) fn mplace_array_fields( 424 &self, 425 base: &'a MPlaceTy<'tcx, Tag>, 426 ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a> 427 { 428 let len = base.len(self)?; // also asserts that we have a type where this makes sense 429 let stride = match base.layout.fields { 430 FieldsShape::Array { stride, .. } => stride, 431 _ => span_bug!(self.cur_span(), "mplace_array_fields: expected an array layout"), 432 }; 433 let layout = base.layout.field(self, 0); 434 let dl = &self.tcx.data_layout; 435 // `Size` multiplication 436 Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl))) 437 } 438 mplace_subslice( &self, base: &MPlaceTy<'tcx, M::PointerTag>, from: u64, to: u64, from_end: bool, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>>439 fn mplace_subslice( 440 &self, 441 base: &MPlaceTy<'tcx, M::PointerTag>, 442 from: u64, 443 to: u64, 444 from_end: bool, 445 ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { 446 let len = base.len(self)?; // also asserts that we have a type where this makes sense 447 let actual_to = if from_end { 448 if from.checked_add(to).map_or(true, |to| to > len) { 449 // This can only be reached in ConstProp and non-rustc-MIR. 450 throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) }); 451 } 452 len.checked_sub(to).unwrap() 453 } else { 454 to 455 }; 456 457 // Not using layout method because that works with usize, and does not work with slices 458 // (that have count 0 in their layout). 459 let from_offset = match base.layout.fields { 460 FieldsShape::Array { stride, .. 
} => stride * from, // `Size` multiplication is checked 461 _ => { 462 span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout) 463 } 464 }; 465 466 // Compute meta and new layout 467 let inner_len = actual_to.checked_sub(from).unwrap(); 468 let (meta, ty) = match base.layout.ty.kind() { 469 // It is not nice to match on the type, but that seems to be the only way to 470 // implement this. 471 ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(inner, inner_len)), 472 ty::Slice(..) => { 473 let len = Scalar::from_machine_usize(inner_len, self); 474 (MemPlaceMeta::Meta(len), base.layout.ty) 475 } 476 _ => { 477 span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty) 478 } 479 }; 480 let layout = self.layout_of(ty)?; 481 base.offset(from_offset, meta, layout, self) 482 } 483 mplace_downcast( &self, base: &MPlaceTy<'tcx, M::PointerTag>, variant: VariantIdx, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>>484 pub(crate) fn mplace_downcast( 485 &self, 486 base: &MPlaceTy<'tcx, M::PointerTag>, 487 variant: VariantIdx, 488 ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { 489 // Downcasts only change the layout 490 assert!(!base.meta.has_meta()); 491 Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..*base }) 492 } 493 494 /// Project into an mplace mplace_projection( &self, base: &MPlaceTy<'tcx, M::PointerTag>, proj_elem: mir::PlaceElem<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>>495 pub(super) fn mplace_projection( 496 &self, 497 base: &MPlaceTy<'tcx, M::PointerTag>, 498 proj_elem: mir::PlaceElem<'tcx>, 499 ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { 500 use rustc_middle::mir::ProjectionElem::*; 501 Ok(match proj_elem { 502 Field(field, _) => self.mplace_field(base, field.index())?, 503 Downcast(_, variant) => self.mplace_downcast(base, variant)?, 504 Deref => self.deref_operand(&base.into())?, 505 506 Index(local) => { 507 let layout = 
self.layout_of(self.tcx.types.usize)?; 508 let n = self.access_local(self.frame(), local, Some(layout))?; 509 let n = self.read_scalar(&n)?; 510 let n = n.to_machine_usize(self)?; 511 self.mplace_index(base, n)? 512 } 513 514 ConstantIndex { offset, min_length, from_end } => { 515 let n = base.len(self)?; 516 if n < min_length { 517 // This can only be reached in ConstProp and non-rustc-MIR. 518 throw_ub!(BoundsCheckFailed { len: min_length, index: n }); 519 } 520 521 let index = if from_end { 522 assert!(0 < offset && offset <= min_length); 523 n.checked_sub(offset).unwrap() 524 } else { 525 assert!(offset < min_length); 526 offset 527 }; 528 529 self.mplace_index(base, index)? 530 } 531 532 Subslice { from, to, from_end } => self.mplace_subslice(base, from, to, from_end)?, 533 }) 534 } 535 536 /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements. 537 /// Also returns the number of elements. mplace_to_simd( &self, base: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)>538 pub fn mplace_to_simd( 539 &self, 540 base: &MPlaceTy<'tcx, M::PointerTag>, 541 ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> { 542 // Basically we just transmute this place into an array following simd_size_and_type. 543 // (Transmuting is okay since this is an in-memory place. We also double-check the size 544 // stays the same.) 545 let (len, e_ty) = base.layout.ty.simd_size_and_type(*self.tcx); 546 let array = self.tcx.mk_array(e_ty, len); 547 let layout = self.layout_of(array)?; 548 assert_eq!(layout.size, base.layout.size); 549 Ok((MPlaceTy { layout, ..*base }, len)) 550 } 551 552 /// Gets the place of a field inside the place, and also the field's type. 553 /// Just a convenience function, but used quite a bit. 554 /// This is the only projection that might have a side-effect: We cannot project 555 /// into the field of a local `ScalarPair`, we have to first allocate it. 
place_field( &mut self, base: &PlaceTy<'tcx, M::PointerTag>, field: usize, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>>556 pub fn place_field( 557 &mut self, 558 base: &PlaceTy<'tcx, M::PointerTag>, 559 field: usize, 560 ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { 561 // FIXME: We could try to be smarter and avoid allocation for fields that span the 562 // entire place. 563 let mplace = self.force_allocation(base)?; 564 Ok(self.mplace_field(&mplace, field)?.into()) 565 } 566 place_index( &mut self, base: &PlaceTy<'tcx, M::PointerTag>, index: u64, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>>567 pub fn place_index( 568 &mut self, 569 base: &PlaceTy<'tcx, M::PointerTag>, 570 index: u64, 571 ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { 572 let mplace = self.force_allocation(base)?; 573 Ok(self.mplace_index(&mplace, index)?.into()) 574 } 575 place_downcast( &self, base: &PlaceTy<'tcx, M::PointerTag>, variant: VariantIdx, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>>576 pub fn place_downcast( 577 &self, 578 base: &PlaceTy<'tcx, M::PointerTag>, 579 variant: VariantIdx, 580 ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { 581 // Downcast just changes the layout 582 Ok(match base.place { 583 Place::Ptr(mplace) => { 584 self.mplace_downcast(&MPlaceTy { mplace, layout: base.layout }, variant)?.into() 585 } 586 Place::Local { .. } => { 587 let layout = base.layout.for_variant(self, variant); 588 PlaceTy { layout, ..*base } 589 } 590 }) 591 } 592 593 /// Projects into a place. 
place_projection( &mut self, base: &PlaceTy<'tcx, M::PointerTag>, &proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>>594 pub fn place_projection( 595 &mut self, 596 base: &PlaceTy<'tcx, M::PointerTag>, 597 &proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>, 598 ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { 599 use rustc_middle::mir::ProjectionElem::*; 600 Ok(match proj_elem { 601 Field(field, _) => self.place_field(base, field.index())?, 602 Downcast(_, variant) => self.place_downcast(base, variant)?, 603 Deref => self.deref_operand(&self.place_to_op(base)?)?.into(), 604 // For the other variants, we have to force an allocation. 605 // This matches `operand_projection`. 606 Subslice { .. } | ConstantIndex { .. } | Index(_) => { 607 let mplace = self.force_allocation(base)?; 608 self.mplace_projection(&mplace, proj_elem)?.into() 609 } 610 }) 611 } 612 613 /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements. 614 /// Also returns the number of elements. place_to_simd( &mut self, base: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)>615 pub fn place_to_simd( 616 &mut self, 617 base: &PlaceTy<'tcx, M::PointerTag>, 618 ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> { 619 let mplace = self.force_allocation(base)?; 620 self.mplace_to_simd(&mplace) 621 } 622 623 /// Computes a place. You should only use this if you intend to write into this 624 /// place; for reading, a more efficient alternative is `eval_place_for_read`. 
eval_place( &mut self, place: mir::Place<'tcx>, ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>>625 pub fn eval_place( 626 &mut self, 627 place: mir::Place<'tcx>, 628 ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { 629 let mut place_ty = PlaceTy { 630 // This works even for dead/uninitialized locals; we check further when writing 631 place: Place::Local { frame: self.frame_idx(), local: place.local }, 632 layout: self.layout_of_local(self.frame(), place.local, None)?, 633 }; 634 635 for elem in place.projection.iter() { 636 place_ty = self.place_projection(&place_ty, &elem)? 637 } 638 639 trace!("{:?}", self.dump_place(place_ty.place)); 640 // Sanity-check the type we ended up with. 641 debug_assert!(mir_assign_valid_types( 642 *self.tcx, 643 self.param_env, 644 self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions( 645 place.ty(&self.frame().body.local_decls, *self.tcx).ty 646 ))?, 647 place_ty.layout, 648 )); 649 Ok(place_ty) 650 } 651 652 /// Write an immediate to a place 653 #[inline(always)] write_immediate( &mut self, src: Immediate<M::PointerTag>, dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx>654 pub fn write_immediate( 655 &mut self, 656 src: Immediate<M::PointerTag>, 657 dest: &PlaceTy<'tcx, M::PointerTag>, 658 ) -> InterpResult<'tcx> { 659 self.write_immediate_no_validate(src, dest)?; 660 661 if M::enforce_validity(self) { 662 // Data got changed, better make sure it matches the type! 
663 self.validate_operand(&self.place_to_op(dest)?)?; 664 } 665 666 Ok(()) 667 } 668 669 /// Write a scalar to a place 670 #[inline(always)] write_scalar( &mut self, val: impl Into<ScalarMaybeUninit<M::PointerTag>>, dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx>671 pub fn write_scalar( 672 &mut self, 673 val: impl Into<ScalarMaybeUninit<M::PointerTag>>, 674 dest: &PlaceTy<'tcx, M::PointerTag>, 675 ) -> InterpResult<'tcx> { 676 self.write_immediate(Immediate::Scalar(val.into()), dest) 677 } 678 679 /// Write a pointer to a place 680 #[inline(always)] write_pointer( &mut self, ptr: impl Into<Pointer<Option<M::PointerTag>>>, dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx>681 pub fn write_pointer( 682 &mut self, 683 ptr: impl Into<Pointer<Option<M::PointerTag>>>, 684 dest: &PlaceTy<'tcx, M::PointerTag>, 685 ) -> InterpResult<'tcx> { 686 self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest) 687 } 688 689 /// Write an immediate to a place. 690 /// If you use this you are responsible for validating that things got copied at the 691 /// right type. 
write_immediate_no_validate( &mut self, src: Immediate<M::PointerTag>, dest: &PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx>692 fn write_immediate_no_validate( 693 &mut self, 694 src: Immediate<M::PointerTag>, 695 dest: &PlaceTy<'tcx, M::PointerTag>, 696 ) -> InterpResult<'tcx> { 697 if cfg!(debug_assertions) { 698 // This is a very common path, avoid some checks in release mode 699 assert!(!dest.layout.is_unsized(), "Cannot write unsized data"); 700 match src { 701 Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(..))) => assert_eq!( 702 self.pointer_size(), 703 dest.layout.size, 704 "Size mismatch when writing pointer" 705 ), 706 Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => { 707 assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits") 708 } 709 Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size 710 Immediate::ScalarPair(_, _) => { 711 // FIXME: Can we check anything here? 712 } 713 } 714 } 715 trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty); 716 717 // See if we can avoid an allocation. This is the counterpart to `try_read_immediate`, 718 // but not factored as a separate function. 719 let mplace = match dest.place { 720 Place::Local { frame, local } => { 721 match M::access_local_mut(self, frame, local)? { 722 Ok(local) => { 723 // Local can be updated in-place. 724 *local = LocalValue::Live(Operand::Immediate(src)); 725 return Ok(()); 726 } 727 Err(mplace) => { 728 // The local is in memory, go on below. 729 mplace 730 } 731 } 732 } 733 Place::Ptr(mplace) => mplace, // already referring to memory 734 }; 735 let dest = MPlaceTy { mplace, layout: dest.layout }; 736 737 // This is already in memory, write there. 738 self.write_immediate_to_mplace_no_validate(src, &dest) 739 } 740 741 /// Write an immediate to memory. 742 /// If you use this you are responsible for validating that things got copied at the 743 /// right type. 
    fn write_immediate_to_mplace_no_validate(
        &mut self,
        value: Immediate<M::PointerTag>,
        dest: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Note that it is really important that the type here is the right one, and matches the
        // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
        // to handle padding properly, which is only correct if we never look at this data with the
        // wrong type.

        // Invalid places are a thing: the return place of a diverging function
        let tcx = *self.tcx;
        let mut alloc = match self.get_alloc_mut(dest)? {
            Some(a) => a,
            None => return Ok(()), // zero-sized access
        };

        // FIXME: We should check that there are dest.layout.size many bytes available in
        // memory. The code below is not sufficient, with enough padding it might not
        // cover all the bytes!
        match value {
            Immediate::Scalar(scalar) => {
                // A single scalar can only be written to a place with `Scalar` ABI.
                match dest.layout.abi {
                    Abi::Scalar(_) => {} // fine
                    _ => span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
                        dest.layout
                    ),
                }
                alloc.write_scalar(alloc_range(Size::ZERO, dest.layout.size), scalar)
            }
            Immediate::ScalarPair(a_val, b_val) => {
                // We checked `ptr_align` above, so all fields will have the alignment they need.
                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                let (a, b) = match dest.layout.abi {
                    Abi::ScalarPair(a, b) => (a.value, b.value),
                    _ => span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
                        dest.layout
                    ),
                };
                let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
                // The second component starts at the first properly-aligned offset after the first.
                let b_offset = a_size.align_to(b.align(&tcx).abi);

                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                // but that does not work: We could be a newtype around a pair, then the
                // fields do not match the `ScalarPair` components.

                alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
                alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
            }
        }
    }

    /// Copies the data from an operand to a place. This does not support transmuting!
    /// Use `copy_op_transmute` if the layouts could disagree.
    #[inline(always)]
    pub fn copy_op(
        &mut self,
        src: &OpTy<'tcx, M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        self.copy_op_no_validate(src, dest)?;

        if M::enforce_validity(self) {
            // Data got changed, better make sure it matches the type!
            self.validate_operand(&self.place_to_op(dest)?)?;
        }

        Ok(())
    }

    /// Copies the data from an operand to a place. This does not support transmuting!
    /// Use `copy_op_transmute` if the layouts could disagree.
    /// Also, if you use this you are responsible for validating that things get copied at the
    /// right type.
    fn copy_op_no_validate(
        &mut self,
        src: &OpTy<'tcx, M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // We do NOT compare the types for equality, because well-typed code can
        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
        if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
            span_bug!(
                self.cur_span(),
                "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
                src.layout.ty,
                dest.layout.ty,
            );
        }

        // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
        let src = match self.try_read_immediate(src)? {
            Ok(src_val) => {
                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
                // Yay, we got a value that we can write directly.
                // FIXME: Add a check to make sure that if `src` is indirect,
                // it does not overlap with `dest`.
                return self.write_immediate_no_validate(*src_val, dest);
            }
            Err(mplace) => mplace,
        };
        // Slow path, this does not fit into an immediate. Just memcpy.
        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

        // This interprets `src.meta` with the `dest` local's layout, if an unsized local
        // is being initialized!
        let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?;
        // If no dynamic size was computed, the place must be statically sized.
        let size = size.unwrap_or_else(|| {
            assert!(
                !dest.layout.is_unsized(),
                "Cannot copy into already initialized unsized place"
            );
            dest.layout.size
        });
        assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");

        self.memory
            .copy(src.ptr, src.align, dest.ptr, dest.align, size, /*nonoverlapping*/ true)
    }

    /// Copies the data from an operand to a place. The layouts may disagree, but they must
    /// have the same size.
    pub fn copy_op_transmute(
        &mut self,
        src: &OpTy<'tcx, M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
            // Fast path: Just use normal `copy_op`
            return self.copy_op(src, dest);
        }
        // We still require the sizes to match.
        if src.layout.size != dest.layout.size {
            // FIXME: This should be an assert instead of an error, but if we transmute within an
            // array length computation, `typeck` may not have yet been run and errored out. In fact
            // most likely we *are* running `typeck` right now. Investigate whether we can bail out
            // on `typeck_results().has_errors` at all const eval entry points.
            debug!("Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
            self.tcx.sess.delay_span_bug(
                self.cur_span(),
                "size-changing transmute, should have been caught by transmute checking",
            );
            throw_inval!(TransmuteSizeDiff(src.layout.ty, dest.layout.ty));
        }
        // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
        // to avoid that here.
        assert!(
            !src.layout.is_unsized() && !dest.layout.is_unsized(),
            "Cannot transmute unsized data"
        );

        // The hard case is `ScalarPair`. `src` is already read from memory in this case,
        // using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
        // We have to write them to `dest` at the offsets they were *read at*, which is
        // not necessarily the same as the offsets in `dest.layout`!
        // Hence we do the copy with the source layout on both sides. We also make sure to write
        // into memory, because if `dest` is a local we would not even have a way to write
        // at the `src` offsets; the fact that we came from a different layout would
        // just be lost.
        let dest = self.force_allocation(dest)?;
        self.copy_op_no_validate(
            src,
            &PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }),
        )?;

        if M::enforce_validity(self) {
            // Data got changed, better make sure it matches the type!
            self.validate_operand(&dest.into())?;
        }

        Ok(())
    }

    /// Ensures that a place is in memory, and returns where it is.
    /// If the place currently refers to a local that doesn't yet have a matching allocation,
    /// create such an allocation.
    /// This is essentially `force_to_memplace`.
    ///
    /// This supports unsized types and returns the computed size to avoid some
    /// redundant computation when copying; use `force_allocation` for a simpler, sized-only
    /// version.
    pub fn force_allocation_maybe_sized(
        &mut self,
        place: &PlaceTy<'tcx, M::PointerTag>,
        meta: MemPlaceMeta<M::PointerTag>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
        let (mplace, size) = match place.place {
            Place::Local { frame, local } => {
                match M::access_local_mut(self, frame, local)? {
                    Ok(&mut local_val) => {
                        // We need to make an allocation.

                        // We need the layout of the local. We can NOT use the layout we got,
                        // that might e.g., be an inner field of a struct with `Scalar` layout,
                        // that has different alignment than the outer field.
                        let local_layout =
                            self.layout_of_local(&self.stack()[frame], local, None)?;
                        // We also need to support unsized types, and hence cannot use `allocate`.
                        let (size, align) = self
                            .size_and_align_of(&meta, &local_layout)?
                            .expect("Cannot allocate for non-dyn-sized type");
                        let ptr = self.memory.allocate(size, align, MemoryKind::Stack)?;
                        let mplace = MemPlace { ptr: ptr.into(), align, meta };
                        if let LocalValue::Live(Operand::Immediate(value)) = local_val {
                            // Preserve old value.
                            // We don't have to validate as we can assume the local
                            // was already valid for its type.
                            let mplace = MPlaceTy { mplace, layout: local_layout };
                            self.write_immediate_to_mplace_no_validate(value, &mplace)?;
                        }
                        // Now we can call `access_mut` again, asserting it goes well,
                        // and actually overwrite things.
                        *M::access_local_mut(self, frame, local).unwrap().unwrap() =
                            LocalValue::Live(Operand::Indirect(mplace));
                        (mplace, Some(size))
                    }
                    Err(mplace) => (mplace, None), // this already was an indirect local
                }
            }
            Place::Ptr(mplace) => (mplace, None),
        };
        // Return with the original layout, so that the caller can go on
        Ok((MPlaceTy { mplace, layout: place.layout }, size))
    }

    /// Sized-only convenience wrapper around `force_allocation_maybe_sized`:
    /// ensures the place is in memory (no metadata) and returns where it is.
    #[inline(always)]
    pub fn force_allocation(
        &mut self,
        place: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0)
    }

    /// Allocates fresh memory of the given layout's size and ABI alignment,
    /// returning it as an aligned place.
    pub fn allocate(
        &mut self,
        layout: TyAndLayout<'tcx>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'static, MPlaceTy<'tcx, M::PointerTag>> {
        let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?;
        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
    }
    /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
    pub fn allocate_str(
        &mut self,
        str: &str,
        kind: MemoryKind<M::MemoryKind>,
        mutbl: Mutability,
    ) -> MPlaceTy<'tcx, M::PointerTag> {
        let ptr = self.memory.allocate_bytes(str.as_bytes(), Align::ONE, kind, mutbl);
        // The string's byte length becomes the wide pointer's slice-length metadata.
        let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
        let mplace =
            MemPlace { ptr: ptr.into(), align: Align::ONE, meta: MemPlaceMeta::Meta(meta) };

        let ty = self.tcx.mk_ref(
            self.tcx.lifetimes.re_static,
            ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
        );
        let layout = self.layout_of(ty).unwrap();
        MPlaceTy { mplace, layout }
    }

    /// Writes the discriminant of the given variant.
    pub fn write_discriminant(
        &mut self,
        variant_index: VariantIdx,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // This must be an enum or generator.
        match dest.layout.ty.kind() {
            ty::Adt(adt, _) => assert!(adt.is_enum()),
            ty::Generator(..) => {}
            _ => span_bug!(
                self.cur_span(),
                "write_discriminant called on non-variant-type (neither enum nor generator)"
            ),
        }
        // Layout computation excludes uninhabited variants from consideration
        // therefore there's no way to represent those variants in the given layout.
        // Essentially, uninhabited variants do not have a tag that corresponds to their
        // discriminant, so we cannot do anything here.
        // When evaluating we will always error before even getting here, but ConstProp 'executes'
        // dead code, so we cannot ICE here.
        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
            throw_ub!(UninhabitedEnumVariantWritten)
        }

        match dest.layout.variants {
            Variants::Single { index } => {
                // Single-variant layouts store no tag; nothing to write.
                assert_eq!(index, variant_index);
            }
            Variants::Multiple {
                tag_encoding: TagEncoding::Direct,
                tag: tag_layout,
                tag_field,
                ..
            } => {
                // No need to validate the discriminant here because the
                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.

                let discr_val =
                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;

                // raw discriminants for enums are isize or bigger during
                // their computation, but the in-memory tag is the smallest possible
                // representation
                let size = tag_layout.value.size(self);
                let tag_val = size.truncate(discr_val);

                let tag_dest = self.place_field(dest, tag_field)?;
                self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
                tag: tag_layout,
                tag_field,
                ..
            } => {
                // No need to validate the discriminant here because the
                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.

                // For the dataful variant itself there is nothing to write: its payload
                // data implicitly encodes it. Only niche variants write a tag.
                if variant_index != dataful_variant {
                    let variants_start = niche_variants.start().as_u32();
                    let variant_index_relative = variant_index
                        .as_u32()
                        .checked_sub(variants_start)
                        .expect("overflow computing relative variant idx");
                    // We need to use machine arithmetic when taking into account `niche_start`:
                    // tag_val = variant_index_relative + niche_start_val
                    let tag_layout = self.layout_of(tag_layout.value.to_int_ty(*self.tcx))?;
                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                    let variant_index_relative_val =
                        ImmTy::from_uint(variant_index_relative, tag_layout);
                    let tag_val = self.binary_op(
                        mir::BinOp::Add,
                        &variant_index_relative_val,
                        &niche_start_val,
                    )?;
                    // Write result.
                    let niche_dest = self.place_field(dest, tag_field)?;
                    self.write_immediate(*tag_val, &niche_dest)?;
                }
            }
        }

        Ok(())
    }

    /// Converts a `ConstAlloc` (an allocation interned in `tcx`) into an aligned place.
    pub fn raw_const_to_mplace(
        &self,
        raw: ConstAlloc<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // This must be an allocation in `tcx`
        let _ = self.tcx.global_alloc(raw.alloc_id);
        let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
        let layout = self.layout_of(raw.ty)?;
        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
    }

    /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
    /// Also return some more information so drop doesn't have to run the same code twice.
unpack_dyn_trait( &self, mplace: &MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)>1111 pub(super) fn unpack_dyn_trait( 1112 &self, 1113 mplace: &MPlaceTy<'tcx, M::PointerTag>, 1114 ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> { 1115 let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type 1116 let (instance, ty) = self.read_drop_type_from_vtable(vtable)?; 1117 let layout = self.layout_of(ty)?; 1118 1119 // More sanity checks 1120 if cfg!(debug_assertions) { 1121 let (size, align) = self.read_size_and_align_from_vtable(vtable)?; 1122 assert_eq!(size, layout.size); 1123 // only ABI alignment is preserved 1124 assert_eq!(align, layout.align.abi); 1125 } 1126 1127 let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace }, layout }; 1128 Ok((instance, mplace)) 1129 } 1130 } 1131