//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.

use std::convert::TryFrom;
use std::fmt::Write;

use rustc_errors::ErrorReported;
use rustc_hir::def::Namespace;
use rustc_macros::HashStable;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
use rustc_middle::ty::{ConstInt, Ty};
use rustc_middle::{mir, ty};
use rustc_target::abi::{Abi, HasDataLayout, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};

use super::{
    alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId,
    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Provenance,
    Scalar, ScalarMaybeUninit,
};

/// An `Immediate` represents a single immediate self-contained Rust value.
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
pub enum Immediate<Tag: Provenance = AllocId> {
    Scalar(ScalarMaybeUninit<Tag>),
    ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Immediate, 56);
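
// Illustrative sketch (comments only, not compiled code): a wide pointer such
// as a `&[u8]` is a single `ScalarPair` immediate holding the data pointer and
// the length, so the interpreter needs no allocation to represent it. Given
// some `ptr: Scalar<Tag>` and `len: u64` (hypothetical bindings):
//
//     let imm = Immediate::new_slice(ptr, len, cx);
//     // == Immediate::ScalarPair(ptr.into(), Scalar::from_machine_usize(len, cx).into())
//
// Checked arithmetic similarly yields a `ScalarPair(result, overflow_flag)`
// instead of an in-memory `(T, bool)` pair.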

impl<Tag: Provenance> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
    #[inline(always)]
    fn from(val: ScalarMaybeUninit<Tag>) -> Self {
        Immediate::Scalar(val)
    }
}

impl<Tag: Provenance> From<Scalar<Tag>> for Immediate<Tag> {
    #[inline(always)]
    fn from(val: Scalar<Tag>) -> Self {
        Immediate::Scalar(val.into())
    }
}

impl<'tcx, Tag: Provenance> Immediate<Tag> {
    pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
    }

    pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
    }

    pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
    }

    pub fn new_dyn_trait(
        val: Scalar<Tag>,
        vtable: Pointer<Option<Tag>>,
        cx: &impl HasDataLayout,
    ) -> Self {
        Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
    }

    #[inline]
    pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
        match self {
            Immediate::Scalar(val) => val,
            Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
        }
    }

    #[inline]
    pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
        self.to_scalar_or_uninit().check_init()
    }

    #[inline]
    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
        match self {
            Immediate::ScalarPair(val1, val2) => Ok((val1.check_init()?, val2.check_init()?)),
            Immediate::Scalar(..) => {
                bug!("Got a scalar where a scalar pair was expected")
            }
        }
    }
}

// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
#[derive(Copy, Clone, Debug)]
pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
    imm: Immediate<Tag>,
    pub layout: TyAndLayout<'tcx>,
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);

impl<'tcx, Tag: Provenance> std::fmt::Display for ImmTy<'tcx, Tag> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        /// Helper function for printing a scalar to a FmtPrinter
        fn p<'a, 'tcx, F: std::fmt::Write, Tag: Provenance>(
            cx: FmtPrinter<'a, 'tcx, F>,
            s: ScalarMaybeUninit<Tag>,
            ty: Ty<'tcx>,
        ) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
            match s {
                ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
                    cx.pretty_print_const_scalar_int(int, ty, true)
                }
                ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
                    // print what it points to, which would fail since it has no access to the local
                    // memory.
                    cx.pretty_print_const_pointer(ptr, ty, true)
                }
                ScalarMaybeUninit::Uninit => cx.typed_value(
                    |mut this| {
                        this.write_str("uninit ")?;
                        Ok(this)
                    },
                    |this| this.print_type(ty),
                    " ",
                ),
            }
        }
        ty::tls::with(|tcx| {
            match self.imm {
                Immediate::Scalar(s) => {
                    if let Some(ty) = tcx.lift(self.layout.ty) {
                        let cx = FmtPrinter::new(tcx, f, Namespace::ValueNS);
                        p(cx, s, ty)?;
                        return Ok(());
                    }
                    write!(f, "{}: {}", s, self.layout.ty)
                }
                Immediate::ScalarPair(a, b) => {
                    // FIXME(oli-obk): at least print tuples and slices nicely
                    write!(f, "({}, {}): {}", a, b, self.layout.ty)
                }
            }
        })
    }
}

impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> {
    type Target = Immediate<Tag>;
    #[inline(always)]
    fn deref(&self) -> &Immediate<Tag> {
        &self.imm
    }
}

/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
pub enum Operand<Tag: Provenance = AllocId> {
    Immediate(Immediate<Tag>),
    Indirect(MemPlace<Tag>),
}
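
// Sketch of the two cases (assumed values, comments only): evaluating the MIR
// constant `42_u8` yields `Operand::Immediate(Immediate::Scalar(..))` and needs
// no memory at all, while evaluating a `Copy`/`Move` of a local that lives in
// memory yields `Operand::Indirect(MemPlace { ptr, .. })`; the read is delayed
// until e.g. `read_immediate` below actually needs the bytes.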

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
    op: Operand<Tag>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(OpTy<'_>, 80);

impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> {
    type Target = Operand<Tag>;
    #[inline(always)]
    fn deref(&self) -> &Operand<Tag> {
        &self.op
    }
}

impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    #[inline(always)]
    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
    }
}

impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    #[inline(always)]
    fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout }
    }
}

impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    #[inline(always)]
    fn from(val: ImmTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
    }
}

impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
    #[inline]
    pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm: val.into(), layout }
    }

    #[inline]
    pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm, layout }
    }

    #[inline]
    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
    }

    #[inline]
    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
    }

    #[inline]
    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
    }

    #[inline]
    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
    }

    #[inline]
    pub fn to_const_int(self) -> ConstInt {
        assert!(self.layout.ty.is_integral());
        let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
    }
}
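
// Sketch (comments only): constructing and consuming an `ImmTy`, assuming an
// `i32` layout `i32_layout` obtained from some `LayoutOf` context:
//
//     let imm = ImmTy::from_int(-3i32, i32_layout); // 4-byte signed scalar
//     let ci = imm.to_const_int(); // fine, `i32` is integral
//
// `to_const_int` would ICE (via `assert!`/`expect`) on a non-integral type or
// on a `ScalarPair` immediate.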

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
    /// Returns `None` if the layout does not permit loading this as a value.
    fn try_read_immediate_from_mplace(
        &self,
        mplace: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
        if mplace.layout.is_unsized() {
            // Don't touch unsized
            return Ok(None);
        }

        let alloc = match self.get_alloc(mplace)? {
            Some(ptr) => ptr,
            None => {
                return Ok(Some(ImmTy {
                    // zero-sized type
                    imm: Scalar::ZST.into(),
                    layout: mplace.layout,
                }));
            }
        };

        match mplace.layout.abi {
            Abi::Scalar(..) => {
                let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
                Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
            }
            Abi::ScalarPair(a, b) => {
                // We checked `ptr_align` above, so all fields will have the alignment they need.
                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                let (a, b) = (a.value, b.value);
                let (a_size, b_size) = (a.size(self), b.size(self));
                let b_offset = a_size.align_to(b.align(self).abi);
                assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
                let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
                let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
                Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
            }
            _ => Ok(None),
        }
    }
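
    // Worked example for the `ScalarPair` case above (illustrative): for a pair
    // layout like `(bool, u32)`, `a_size` is 1 byte and `b` is 4-byte aligned,
    // so `b_offset = Size::from_bytes(1).align_to(4-byte align) = 4`; the two
    // reads then cover bytes `0..1` and `4..8` of the allocation, matching the
    // in-memory field offsets.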

    /// Try returning an immediate for the operand.
    /// If the layout does not permit loading this as an immediate, return where in memory
    /// we can find the data.
    /// Note that for a given layout, this operation will either always fail or always
    /// succeed! Whether it succeeds depends on whether the layout can be represented
    /// in an `Immediate`, not on which data is stored there currently.
    pub fn try_read_immediate(
        &self,
        src: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
        Ok(match src.try_as_mplace() {
            Ok(ref mplace) => {
                if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
                    Ok(val)
                } else {
                    Err(*mplace)
                }
            }
            Err(val) => Ok(val),
        })
    }

    /// Read an immediate from a place, asserting that this is possible with the given layout.
    #[inline(always)]
    pub fn read_immediate(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
        if let Ok(imm) = self.try_read_immediate(op)? {
            Ok(imm)
        } else {
            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
        }
    }

    /// Read a scalar from a place
    pub fn read_scalar(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
        Ok(self.read_immediate(op)?.to_scalar_or_uninit())
    }

    /// Read a pointer from a place.
    pub fn read_pointer(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
    }

    // Turn the wide MPlace into a string (must already be dereferenced!)
    pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
        let len = mplace.len(self)?;
        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
        Ok(str)
    }

    /// Projection functions
    pub fn operand_field(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        field: usize,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let base = match op.try_as_mplace() {
            Ok(ref mplace) => {
                // We can reuse the mplace field computation logic for indirect operands.
                let field = self.mplace_field(mplace, field)?;
                return Ok(field.into());
            }
            Err(value) => value,
        };

        let field_layout = op.layout.field(self, field);
        if field_layout.is_zst() {
            let immediate = Scalar::ZST.into();
            return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
        }
        let offset = op.layout.fields.offset(field);
        let immediate = match *base {
            // the field covers the entire type
            _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
            // extract fields from types with `ScalarPair` ABI
            Immediate::ScalarPair(a, b) => {
                let val = if offset.bytes() == 0 { a } else { b };
                Immediate::from(val)
            }
            Immediate::Scalar(val) => span_bug!(
                self.cur_span(),
                "field access on non aggregate {:#?}, {:#?}",
                val,
                op.layout
            ),
        };
        Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
    }
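
    // Sketch (comments only): for a slice reference kept as
    // `Immediate::ScalarPair(ptr, len)`, `operand_field(&op, 0)` hits the
    // `offset == 0` arm and returns the `ptr` half, while field 1 (the `usize`
    // length) has a nonzero offset and returns `len`; neither case touches
    // memory.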

    pub fn operand_index(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        if let Ok(index) = usize::try_from(index) {
            // We can just treat this as a field.
            self.operand_field(op, index)
        } else {
            // Indexing into a big array. This must be an mplace.
            let mplace = op.assert_mem_place();
            Ok(self.mplace_index(&mplace, index)?.into())
        }
    }

    pub fn operand_downcast(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        variant: VariantIdx,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Downcasts only change the layout
        Ok(match op.try_as_mplace() {
            Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
            Err(..) => {
                let layout = op.layout.for_variant(self, variant);
                OpTy { layout, ..*op }
            }
        })
    }

    pub fn operand_projection(
        &self,
        base: &OpTy<'tcx, M::PointerTag>,
        proj_elem: mir::PlaceElem<'tcx>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
        Ok(match proj_elem {
            Field(field, _) => self.operand_field(base, field.index())?,
            Downcast(_, variant) => self.operand_downcast(base, variant)?,
            Deref => self.deref_operand(base)?.into(),
            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
                // The rest should only occur as mplace, we do not use Immediates for types
                // allowing such operations. This matches place_projection forcing an allocation.
                let mplace = base.assert_mem_place();
                self.mplace_projection(&mplace, proj_elem)?.into()
            }
        })
    }
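
    // Sketch (comments only): for a MIR place like `(*p).1` the projection list
    // is `[Deref, Field(1, _)]`; `eval_place_to_op` below folds it through
    // `operand_projection`, so `Deref` produces an indirect operand and the
    // `Field` is then resolved via `operand_field` (or `mplace_field` for the
    // indirect case).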

    /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
    /// Also returns the number of elements.
    pub fn operand_to_simd(
        &self,
        base: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
        // Basically we just transmute this place into an array following simd_size_and_type.
        // This only works in memory, but repr(simd) types should never be immediates anyway.
        assert!(base.layout.ty.is_simd());
        self.mplace_to_simd(&base.assert_mem_place())
    }

    /// Read from a local. Will not actually access the local if reading from a ZST.
    /// Will not access memory, instead an indirect `Operand` is returned.
    ///
    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
    /// OpTy from a local.
    pub fn access_local(
        &self,
        frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let layout = self.layout_of_local(frame, local, layout)?;
        let op = if layout.is_zst() {
            // Do not read from ZST, they might not be initialized
            Operand::Immediate(Scalar::ZST.into())
        } else {
            M::access_local(&self, frame, local)?
        };
        Ok(OpTy { op, layout })
    }

    /// Every place can be read from, so we can turn them into an operand.
    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
    /// will never actually read from memory.
    #[inline(always)]
    pub fn place_to_op(
        &self,
        place: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let op = match **place {
            Place::Ptr(mplace) => Operand::Indirect(mplace),
            Place::Local { frame, local } => {
                *self.access_local(&self.stack()[frame], local, None)?
            }
        };
        Ok(OpTy { op, layout: place.layout })
    }

    // Evaluate a place with the goal of reading from it. This lets us sometimes
    // avoid allocations.
    pub fn eval_place_to_op(
        &self,
        place: mir::Place<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Do not use the layout passed in as argument if the base we are looking at
        // here is not the entire place.
        let layout = if place.projection.is_empty() { layout } else { None };

        let base_op = self.access_local(self.frame(), place.local, layout)?;

        let op = place
            .projection
            .iter()
            .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;

        trace!("eval_place_to_op: got {:?}", *op);
        // Sanity-check the type we ended up with.
        debug_assert!(mir_assign_valid_types(
            *self.tcx,
            self.param_env,
            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                place.ty(&self.frame().body.local_decls, *self.tcx).ty
            ))?,
            op.layout,
        ));
        Ok(op)
    }

    /// Evaluate the operand, returning a place where you can then find the data.
    /// If you already know the layout, you can save two table lookups
    /// by passing it in here.
    #[inline]
    pub fn eval_operand(
        &self,
        mir_op: &mir::Operand<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::Operand::*;
        let op = match *mir_op {
            // FIXME: do some more logic on `move` to invalidate the old location
            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,

            Constant(ref constant) => {
                let val =
                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal);
                // This can still fail:
                // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
                //   checked yet.
                // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.

                self.mir_const_to_op(&val, layout)?
            }
        };
        trace!("{:?}: {:?}", mir_op, *op);
        Ok(op)
    }
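
    // Usage sketch (comments only): a typical caller such as the binary-op step
    // evaluates both sides of `Add(move _1, const 7_i32)` roughly as
    //
    //     let l = self.read_immediate(&self.eval_operand(left, None)?)?;
    //     let r = self.read_immediate(&self.eval_operand(right, None)?)?;
    //
    // where `left`/`right` are the `mir::Operand`s (hypothetical bindings).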

    /// Evaluate a bunch of operands at once
    pub(super) fn eval_operands(
        &self,
        ops: &[mir::Operand<'tcx>],
    ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
        ops.iter().map(|op| self.eval_operand(op, None)).collect()
    }

    // Used when the miri-engine runs into a constant and for extracting information from constants
    // in patterns via the `const_eval` module
    /// The `val` and `layout` are assumed to already be in our interpreter
    /// "universe" (param_env).
    pub fn const_to_op(
        &self,
        val: &ty::Const<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        match val.val {
            ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
            ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorReported)),
            ty::ConstKind::Unevaluated(uv) => {
                let instance = self.resolve(uv.def, uv.substs(*self.tcx))?;
                Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
            }
            ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
                span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val)
            }
            ty::ConstKind::Value(val_val) => self.const_val_to_op(val_val, val.ty, layout),
        }
    }

    pub fn mir_const_to_op(
        &self,
        val: &mir::ConstantKind<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        match val {
            mir::ConstantKind::Ty(ct) => self.const_to_op(ct, layout),
            mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, ty, layout),
        }
    }

    crate fn const_val_to_op(
        &self,
        val_val: ConstValue<'tcx>,
        ty: Ty<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Other cases need layout.
        let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
            Ok(match scalar {
                Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
                Scalar::Int(int) => Scalar::Int(int),
            })
        };
        let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
        let op = match val_val {
            ConstValue::ByRef { alloc, offset } => {
                let id = self.tcx.create_memory_alloc(alloc);
                // We rely on mutability being set correctly in that allocation to prevent writes
                // where none should happen.
                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
                Operand::Indirect(MemPlace::from_ptr(ptr.into(), layout.align.abi))
            }
            ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
            ConstValue::Slice { data, start, end } => {
                // We rely on mutability being set correctly in `data` to prevent writes
                // where none should happen.
                let ptr = Pointer::new(
                    self.tcx.create_memory_alloc(data),
                    Size::from_bytes(start), // offset: `start`
                );
                Operand::Immediate(Immediate::new_slice(
                    Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
                    self,
                ))
            }
        };
        Ok(OpTy { op, layout })
    }
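
    // Worked example for `ConstValue::Slice` above (illustrative): the literal
    // `"hi"` arrives as `ConstValue::Slice { data, start: 0, end: 2 }`; `data`
    // is interned as a memory allocation, its base pointer is tagged via
    // `global_base_pointer`, and the result is `Immediate::new_slice(ptr, 2, self)`,
    // i.e. a pointer/length `ScalarPair` with no bytes copied.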

    /// Read discriminant, return the runtime value as well as the variant index.
    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
    pub fn read_discriminant(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
        trace!("read_discriminant_value {:#?}", op.layout);
        // Get type and layout of the discriminant.
        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
        trace!("discriminant type: {:?}", discr_layout.ty);

        // We use "discriminant" to refer to the value associated with a particular enum variant.
        // This is not to be confused with its "variant index", which just determines its position
        // in the declared list of variants -- they can differ with explicitly assigned discriminants.
        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
            Variants::Single { index } => {
                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
                    Some(discr) => {
                        // This type actually has discriminants.
                        assert_eq!(discr.ty, discr_layout.ty);
                        Scalar::from_uint(discr.val, discr_layout.size)
                    }
                    None => {
                        // On a type without actual discriminants, variant is 0.
                        assert_eq!(index.as_u32(), 0);
                        Scalar::from_uint(index.as_u32(), discr_layout.size)
                    }
                };
                return Ok((discr, index));
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // There are *three* layouts that come into play here:
        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
        //   the `Scalar` we return.
        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
        //   and used to interpret the value we read from the tag field.
        //   For the return value, a cast to `discr_layout` is performed.
        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.

        // Get layout for tag.
        let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;

        // Read tag and sanity-check `tag_layout`.
        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
        assert_eq!(tag_layout.size, tag_val.layout.size);
        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
        let tag_val = tag_val.to_scalar()?;
        trace!("tag value: {:?}", tag_val);

        // Figure out which discriminant and variant this corresponds to.
        Ok(match *tag_encoding {
            TagEncoding::Direct => {
                let tag_bits = tag_val
                    .try_to_int()
                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
                    .assert_bits(tag_layout.size);
                // Cast bits from tag layout to discriminant layout.
                let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
                let discr_bits = discr_val.assert_bits(discr_layout.size);
                // Convert discriminant to variant index, and catch invalid discriminants.
                let index = match *op.layout.ty.kind() {
                    ty::Adt(adt, _) => {
                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
                    }
                    ty::Generator(def_id, substs, _) => {
                        let substs = substs.as_generator();
                        substs
                            .discriminants(def_id, *self.tcx)
                            .find(|(_, var)| var.val == discr_bits)
                    }
                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
                }
                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
                // Return the cast value, and the index.
                (discr_val, index.0)
            }
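            // Examples for both encodings (comments only): with
            // `TagEncoding::Direct` as handled above, `enum E { A = 10, B = 20 }`
            // stores the tag 20 for `E::B` and we return variant index 1 --
            // discriminant and index differ. The `Niche` encoding below is what
            // makes `Option<&T>` pointer-sized: `None` is encoded as the niche
            // value 0 (the null pointer), while `Some` is the `dataful_variant`
            // whose payload itself occupies the tag field.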
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
                // discriminant (encoded in niche/tag) and variant index are the same.
                let variants_start = niche_variants.start().as_u32();
                let variants_end = niche_variants.end().as_u32();
                let variant = match tag_val.try_to_int() {
                    Err(dbg_val) => {
                        // So this is a pointer then, and casting to an int failed.
                        // Can only happen during CTFE.
                        let ptr = self.scalar_to_ptr(tag_val);
                        // The niche must be just 0, and the ptr not null; then we know this is
                        // okay. Everything else, we conservatively reject.
                        let ptr_valid = niche_start == 0
                            && variants_start == variants_end
                            && !self.memory.ptr_may_be_null(ptr);
                        if !ptr_valid {
                            throw_ub!(InvalidTag(dbg_val))
                        }
                        dataful_variant
                    }
                    Ok(tag_bits) => {
                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
                        // We need to use machine arithmetic to get the relative variant idx:
                        // variant_index_relative = tag_val - niche_start_val
                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                        let variant_index_relative_val =
                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                        let variant_index_relative = variant_index_relative_val
                            .to_scalar()?
                            .assert_bits(tag_val.layout.size);
                        // Check if this is in the range that indicates an actual discriminant.
                        if variant_index_relative <= u128::from(variants_end - variants_start) {
                            let variant_index_relative = u32::try_from(variant_index_relative)
                                .expect("we checked that this fits into a u32");
                            // Then computing the absolute variant idx should not overflow any more.
                            let variant_index = variants_start
                                .checked_add(variant_index_relative)
                                .expect("overflow computing absolute variant idx");
                            let variants_len = op
                                .layout
                                .ty
                                .ty_adt_def()
                                .expect("tagged layout for non adt")
                                .variants
                                .len();
                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
                            VariantIdx::from_u32(variant_index)
                        } else {
                            dataful_variant
                        }
                    }
                };
                // Compute the size of the scalar we need to return.
                // No need to cast, because the variant index directly serves as discriminant and is
                // encoded in the tag.
                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
            }
        })
    }
}