1 //! Global machine state as well as implementation of the interpreter engine 2 //! `Machine` trait. 3 4 use std::borrow::Cow; 5 use std::cell::RefCell; 6 use std::fmt; 7 use std::num::NonZeroU64; 8 use std::time::Instant; 9 10 use log::trace; 11 use rand::rngs::StdRng; 12 use rand::SeedableRng; 13 14 use rustc_data_structures::fx::FxHashMap; 15 use rustc_middle::{ 16 mir, 17 ty::{ 18 self, 19 layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout}, 20 Instance, TyCtxt, 21 }, 22 }; 23 use rustc_span::def_id::DefId; 24 use rustc_span::symbol::{sym, Symbol}; 25 use rustc_target::abi::Size; 26 use rustc_target::spec::abi::Abi; 27 28 use crate::*; 29 30 // Some global facts about the emulated machine. 31 pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture 32 pub const STACK_ADDR: u64 = 32 * PAGE_SIZE; // not really about the "stack", but where we start assigning integer addresses to allocations 33 pub const STACK_SIZE: u64 = 16 * PAGE_SIZE; // whatever 34 pub const NUM_CPUS: u64 = 1; 35 36 /// Extra data stored with each stack frame 37 pub struct FrameData<'tcx> { 38 /// Extra data for Stacked Borrows. 39 pub call_id: stacked_borrows::CallId, 40 41 /// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn` 42 /// called by `try`). When this frame is popped during unwinding a panic, 43 /// we stop unwinding, use the `CatchUnwindData` to handle catching. 44 pub catch_unwind: Option<CatchUnwindData<'tcx>>, 45 46 /// If `measureme` profiling is enabled, holds timing information 47 /// for the start of this frame. When we finish executing this frame, 48 /// we use this to register a completed event with `measureme`. 49 pub timing: Option<measureme::DetachedTiming>, 50 } 51 52 impl<'tcx> std::fmt::Debug for FrameData<'tcx> { fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result53 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 54 // Omitting `timing`, it does not support `Debug`. 
55 let FrameData { call_id, catch_unwind, timing: _ } = self; 56 f.debug_struct("FrameData") 57 .field("call_id", call_id) 58 .field("catch_unwind", catch_unwind) 59 .finish() 60 } 61 } 62 63 /// Extra memory kinds 64 #[derive(Debug, Copy, Clone, PartialEq, Eq)] 65 pub enum MiriMemoryKind { 66 /// `__rust_alloc` memory. 67 Rust, 68 /// `malloc` memory. 69 C, 70 /// Windows `HeapAlloc` memory. 71 WinHeap, 72 /// Memory for args, errno, and other parts of the machine-managed environment. 73 /// This memory may leak. 74 Machine, 75 /// Memory for env vars. Separate from `Machine` because we clean it up and leak-check it. 76 Env, 77 /// Globals copied from `tcx`. 78 /// This memory may leak. 79 Global, 80 /// Memory for extern statics. 81 /// This memory may leak. 82 ExternStatic, 83 /// Memory for thread-local statics. 84 /// This memory may leak. 85 Tls, 86 } 87 88 impl Into<MemoryKind<MiriMemoryKind>> for MiriMemoryKind { 89 #[inline(always)] into(self) -> MemoryKind<MiriMemoryKind>90 fn into(self) -> MemoryKind<MiriMemoryKind> { 91 MemoryKind::Machine(self) 92 } 93 } 94 95 impl MayLeak for MiriMemoryKind { 96 #[inline(always)] may_leak(self) -> bool97 fn may_leak(self) -> bool { 98 use self::MiriMemoryKind::*; 99 match self { 100 Rust | C | WinHeap | Env => false, 101 Machine | Global | ExternStatic | Tls => true, 102 } 103 } 104 } 105 106 impl fmt::Display for MiriMemoryKind { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result107 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 108 use self::MiriMemoryKind::*; 109 match self { 110 Rust => write!(f, "Rust heap"), 111 C => write!(f, "C heap"), 112 WinHeap => write!(f, "Windows heap"), 113 Machine => write!(f, "machine-managed memory"), 114 Env => write!(f, "environment variable"), 115 Global => write!(f, "global (static or const)"), 116 ExternStatic => write!(f, "extern static"), 117 Tls => write!(f, "thread-local static"), 118 } 119 } 120 } 121 122 /// Pointer provenance (tag). 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Tag {
    /// The allocation this pointer points into.
    pub alloc_id: AllocId,
    /// Stacked Borrows tag.
    pub sb: SbTag,
}

impl Provenance for Tag {
    /// We use absolute addresses in the `offset` of a `Pointer<Tag>`.
    const OFFSET_IS_ADDR: bool = true;

    /// We cannot err on partial overwrites, it happens too often in practice (due to unions).
    const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = false;

    /// Pretty-print a pointer as `0x<addr>[<alloc_id>]<sb_tag>`.
    fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (tag, addr) = ptr.into_parts(); // address is absolute
        write!(f, "0x{:x}", addr.bytes())?;
        // Forward `alternate` flag to `alloc_id` printing.
        if f.alternate() {
            write!(f, "[{:#?}]", tag.alloc_id)?;
        } else {
            write!(f, "[{:?}]", tag.alloc_id)?;
        }
        // Print Stacked Borrows tag.
        write!(f, "{:?}", tag.sb)
    }

    fn get_alloc_id(self) -> AllocId {
        self.alloc_id
    }
}

/// Extra per-allocation data
#[derive(Debug, Clone)]
pub struct AllocExtra {
    /// Stacked Borrows state is only added if it is enabled.
    pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
    /// Data race detection via the use of a vector-clock,
    /// this is only added if it is enabled.
    pub data_race: Option<data_race::AllocExtra>,
}

/// Extra global memory data
#[derive(Debug)]
pub struct MemoryExtra {
    /// Global Stacked Borrows state; `None` when the feature is disabled.
    pub stacked_borrows: Option<stacked_borrows::MemoryExtra>,
    /// Global data-race-detector state; `None` when the feature is disabled.
    pub data_race: Option<data_race::MemoryExtra>,
    /// State for pointer <-> integer round-trips.
    pub intptrcast: intptrcast::MemoryExtra,

    /// Mapping extern static names to their base pointer.
    extern_statics: FxHashMap<Symbol, Pointer<Tag>>,

    /// The random number generator used for resolving non-determinism.
    /// Needs to be queried by ptr_to_int, hence needs interior mutability.
    pub(crate) rng: RefCell<StdRng>,

    /// An allocation ID to report when it is being allocated
    /// (helps for debugging memory leaks and use after free bugs).
    tracked_alloc_id: Option<AllocId>,

    /// Controls whether alignment of memory accesses is being checked.
    pub(crate) check_alignment: AlignmentCheck,

    /// Failure rate of compare_exchange_weak, between 0.0 and 1.0
    pub(crate) cmpxchg_weak_failure_rate: f64,
}

impl MemoryExtra {
    /// Builds the global memory state from the user-supplied configuration.
    pub fn new(config: &MiriConfig) -> Self {
        // Seed deterministically; a missing seed defaults to 0 so that
        // repeated runs without `-Zmiri-seed` behave identically.
        let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
        let stacked_borrows = if config.stacked_borrows {
            Some(RefCell::new(stacked_borrows::GlobalState::new(
                config.tracked_pointer_tag,
                config.tracked_call_id,
                config.track_raw,
            )))
        } else {
            None
        };
        let data_race =
            if config.data_race_detector { Some(data_race::GlobalState::new()) } else { None };
        MemoryExtra {
            stacked_borrows,
            data_race,
            intptrcast: Default::default(),
            extern_statics: FxHashMap::default(),
            rng: RefCell::new(rng),
            tracked_alloc_id: config.tracked_alloc_id,
            check_alignment: config.check_alignment,
            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
        }
    }

    /// Registers `ptr` as the base pointer of the extern static `name`.
    /// Panics if `ptr` has no provenance or if `name` was already registered
    /// (either would be a bug in the init code below).
    fn add_extern_static<'tcx, 'mir>(
        this: &mut MiriEvalContext<'mir, 'tcx>,
        name: &str,
        ptr: Pointer<Option<Tag>>,
    ) {
        // All callers pass pointers to freshly-created allocations, so the
        // provenance is always present.
        let ptr = ptr.into_pointer_or_addr().unwrap();
        // `try_insert` errors on duplicate names -- surface that as a panic.
        this.memory.extra.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
    }

    /// Sets up the "extern statics" for this machine.
    pub fn init_extern_statics<'tcx, 'mir>(
        this: &mut MiriEvalContext<'mir, 'tcx>,
    ) -> InterpResult<'tcx> {
        // Which extern statics exist (and their shape) is target-OS specific.
        match this.tcx.sess.target.os.as_str() {
            "linux" => {
                // "__cxa_thread_atexit_impl"
                // This should be all-zero, pointer-sized.
                let layout = this.machine.layouts.usize;
                let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
                this.write_scalar(Scalar::from_machine_usize(0, this), &place.into())?;
                Self::add_extern_static(this, "__cxa_thread_atexit_impl", place.ptr);
                // "environ"
                Self::add_extern_static(
                    this,
                    "environ",
                    this.machine.env_vars.environ.unwrap().ptr,
                );
            }
            "windows" => {
                // "_tls_used"
                // This is some obscure hack that is part of the Windows TLS story. It's a `u8`.
                let layout = this.machine.layouts.u8;
                let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
                this.write_scalar(Scalar::from_u8(0), &place.into())?;
                Self::add_extern_static(this, "_tls_used", place.ptr);
            }
            _ => {} // No "extern statics" supported on this target
        }
        Ok(())
    }
}

/// Precomputed layouts of primitive types
pub struct PrimitiveLayouts<'tcx> {
    pub unit: TyAndLayout<'tcx>,
    pub i8: TyAndLayout<'tcx>,
    pub i32: TyAndLayout<'tcx>,
    pub isize: TyAndLayout<'tcx>,
    pub u8: TyAndLayout<'tcx>,
    pub u32: TyAndLayout<'tcx>,
    pub usize: TyAndLayout<'tcx>,
}

impl<'mir, 'tcx: 'mir> PrimitiveLayouts<'tcx> {
    /// Queries all the layouts up front so later uses are infallible field reads.
    fn new(layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Result<Self, LayoutError<'tcx>> {
        Ok(Self {
            unit: layout_cx.layout_of(layout_cx.tcx.mk_unit())?,
            i8: layout_cx.layout_of(layout_cx.tcx.types.i8)?,
            i32: layout_cx.layout_of(layout_cx.tcx.types.i32)?,
            isize: layout_cx.layout_of(layout_cx.tcx.types.isize)?,
            u8: layout_cx.layout_of(layout_cx.tcx.types.u8)?,
            u32: layout_cx.layout_of(layout_cx.tcx.types.u32)?,
            usize: layout_cx.layout_of(layout_cx.tcx.types.usize)?,
        })
    }
}

/// The machine itself.
pub struct Evaluator<'mir, 'tcx> {
    /// Environment variables set by `setenv`.
    /// Miri does not expose env vars from the host to the emulated program.
    pub(crate) env_vars: EnvVars<'tcx>,

    /// Program arguments (`Option` because we can only initialize them after creating the ecx).
    /// These are *pointers* to argc/argv because macOS.
    /// We also need the full command line as one string because of Windows.
    pub(crate) argc: Option<MemPlace<Tag>>,
    pub(crate) argv: Option<MemPlace<Tag>>,
    pub(crate) cmd_line: Option<MemPlace<Tag>>,

    /// TLS state.
    pub(crate) tls: TlsData<'tcx>,

    /// What should Miri do when an op requires communicating with the host,
    /// such as accessing host env vars, random number generation, and
    /// file system access.
    pub(crate) isolated_op: IsolatedOp,

    /// Whether to enforce the validity invariant.
    pub(crate) validate: bool,

    /// Whether to enforce validity (e.g., initialization) of integers and floats.
    pub(crate) enforce_number_validity: bool,

    /// Whether to enforce [ABI](Abi) of function calls.
    pub(crate) enforce_abi: bool,

    /// Open file descriptors / open directory iterators for the POSIX shims.
    pub(crate) file_handler: shims::posix::FileHandler,
    pub(crate) dir_handler: shims::posix::DirHandler,

    /// The "time anchor" for this machine's monotone clock (for `Instant` simulation).
    pub(crate) time_anchor: Instant,

    /// The set of threads.
    pub(crate) threads: ThreadManager<'mir, 'tcx>,

    /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
    pub(crate) layouts: PrimitiveLayouts<'tcx>,

    /// Allocations that are considered roots of static memory (that may leak).
    pub(crate) static_roots: Vec<AllocId>,

    /// The `measureme` profiler used to record timing information about
    /// the emulated program.
    profiler: Option<measureme::Profiler>,
    /// Used with `profiler` to cache the `StringId`s for event names
    /// used with `measureme`.
    string_cache: FxHashMap<String, measureme::StringId>,

    /// Cache of `Instance` exported under the given `Symbol` name.
    /// `None` means no `Instance` exported under the given name is found.
    pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,

    /// Whether to raise a panic in the context of the evaluated process when unsupported
    /// functionality is encountered. If `false`, an error is propagated in the Miri application context
    /// instead (default behavior)
    pub(crate) panic_on_unsupported: bool,
}

impl<'mir, 'tcx> Evaluator<'mir, 'tcx> {
    /// Creates a fresh machine from the user configuration.
    /// Panics if primitive layouts cannot be computed or the `measureme`
    /// output file cannot be created -- both are unrecoverable setup errors.
    pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Self {
        let layouts =
            PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
        let profiler = config.measureme_out.as_ref().map(|out| {
            measureme::Profiler::new(out).expect("Couldn't create `measureme` profiler")
        });
        Evaluator {
            // `env_vars` could be initialized properly here if `Memory` were available before
            // calling this method.
            env_vars: EnvVars::default(),
            argc: None,
            argv: None,
            cmd_line: None,
            tls: TlsData::default(),
            isolated_op: config.isolated_op,
            validate: config.validate,
            enforce_number_validity: config.check_number_validity,
            enforce_abi: config.check_abi,
            file_handler: Default::default(),
            dir_handler: Default::default(),
            time_anchor: Instant::now(),
            layouts,
            threads: ThreadManager::default(),
            static_roots: Vec::new(),
            profiler,
            string_cache: Default::default(),
            exported_symbols_cache: FxHashMap::default(),
            panic_on_unsupported: config.panic_on_unsupported,
        }
    }

    /// Whether the program is allowed to observe the host (env vars, fs, ...).
    pub(crate) fn communicate(&self) -> bool {
        self.isolated_op == IsolatedOp::Allow
    }
}

/// A rustc InterpCx for Miri.
pub type MiriEvalContext<'mir, 'tcx> = InterpCx<'mir, 'tcx, Evaluator<'mir, 'tcx>>;

/// A little trait that's useful to be inherited by extension traits.
pub trait MiriEvalContextExt<'mir, 'tcx> {
    fn eval_context_ref<'a>(&'a self) -> &'a MiriEvalContext<'mir, 'tcx>;
    fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriEvalContext<'mir, 'tcx>;
}
impl<'mir, 'tcx> MiriEvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {
    #[inline(always)]
    fn eval_context_ref(&self) -> &MiriEvalContext<'mir, 'tcx> {
        self
    }
    #[inline(always)]
    fn eval_context_mut(&mut self) -> &mut MiriEvalContext<'mir, 'tcx> {
        self
    }
}

/// Machine hook implementations.
impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
    type MemoryKind = MiriMemoryKind;

    type FrameExtra = FrameData<'tcx>;
    type MemoryExtra = MemoryExtra;
    type AllocExtra = AllocExtra;
    type PointerTag = Tag;
    type ExtraFnVal = Dlsym;

    type MemoryMap =
        MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<Tag, Self::AllocExtra>)>;

    const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);

    const PANIC_ON_ALLOC_FAIL: bool = false;

    #[inline(always)]
    fn enforce_alignment(memory_extra: &MemoryExtra) -> bool {
        memory_extra.check_alignment != AlignmentCheck::None
    }

    #[inline(always)]
    fn force_int_for_alignment_check(memory_extra: &Self::MemoryExtra) -> bool {
        memory_extra.check_alignment == AlignmentCheck::Int
    }

    #[inline(always)]
    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
        ecx.machine.validate
    }

    #[inline(always)]
    fn enforce_number_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
        ecx.machine.enforce_number_validity
    }

    #[inline(always)]
    fn enforce_abi(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
        ecx.machine.enforce_abi
    }

    /// Delegates to Miri's own `find_mir_or_eval_fn` (defined elsewhere in this crate).
    #[inline(always)]
    fn find_mir_or_eval_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        instance: ty::Instance<'tcx>,
        abi: Abi,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        unwind: StackPopUnwind,
    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
        ecx.find_mir_or_eval_fn(instance, abi, args, ret, unwind)
    }

    /// Calls a `Dlsym` function value; note that `_unwind` is ignored here.
    #[inline(always)]
    fn call_extra_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        fn_val: Dlsym,
        abi: Abi,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        _unwind: StackPopUnwind,
    ) -> InterpResult<'tcx> {
        ecx.call_dlsym(fn_val, abi, args, ret)
    }

    #[inline(always)]
    fn call_intrinsic(
        ecx: &mut rustc_const_eval::interpret::InterpCx<'mir, 'tcx, Self>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        unwind: StackPopUnwind,
    ) -> InterpResult<'tcx> {
        ecx.call_intrinsic(instance, args, ret, unwind)
    }

    #[inline(always)]
    fn assert_panic(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        ecx.assert_panic(msg, unwind)
    }

    /// `abort` terminates the whole interpretation with a machine-stop error.
    #[inline(always)]
    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
        throw_machine_stop!(TerminationInfo::Abort(msg))
    }

    #[inline(always)]
    fn binary_ptr_op(
        ecx: &rustc_const_eval::interpret::InterpCx<'mir, 'tcx, Self>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, Tag>,
        right: &ImmTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, (Scalar<Tag>, bool, ty::Ty<'tcx>)> {
        ecx.binary_ptr_op(bin_op, left, right)
    }

    /// Heap allocation for `box` expressions: computes size/align of the
    /// pointee and calls the `exchange_malloc` lang item with them.
    fn box_alloc(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        dest: &PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        trace!("box_alloc for {:?}", dest.layout.ty);
        // `dest` is `Box<T>`; we need the layout of `T`.
        let layout = ecx.layout_of(dest.layout.ty.builtin_deref(false).unwrap().ty)?;
        // First argument: `size`.
        // (`0` is allowed here -- this is expected to be handled by the lang item).
        let size = Scalar::from_machine_usize(layout.size.bytes(), ecx);

        // Second argument: `align`.
        let align = Scalar::from_machine_usize(layout.align.abi.bytes(), ecx);

        // Call the `exchange_malloc` lang item.
        let malloc = ecx.tcx.lang_items().exchange_malloc_fn().unwrap();
        let malloc = ty::Instance::mono(ecx.tcx.tcx, malloc);
        ecx.call_function(
            malloc,
            Abi::Rust,
            &[size.into(), align.into()],
            Some(dest),
            // Don't do anything when we are done. The `statement()` function will increment
            // the old stack frame's stmt counter to the next statement, which means that when
            // `exchange_malloc` returns, we go on evaluating exactly where we want to be.
            StackPopCleanup::None { cleanup: true },
        )?;
        Ok(())
    }

    fn thread_local_static_base_pointer(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        def_id: DefId,
    ) -> InterpResult<'tcx, Pointer<Tag>> {
        ecx.get_or_create_thread_local_alloc(def_id)
    }

    /// Resolves an extern static to the pointer registered in
    /// `MemoryExtra::extern_statics`, keyed by `link_name` if present,
    /// falling back to the item name.
    fn extern_static_base_pointer(
        memory: &Memory<'mir, 'tcx, Self>,
        def_id: DefId,
    ) -> InterpResult<'tcx, Pointer<Tag>> {
        let attrs = memory.tcx.get_attrs(def_id);
        let link_name = match memory.tcx.sess.first_attr_value_str_by_name(&attrs, sym::link_name) {
            Some(name) => name,
            None => memory.tcx.item_name(def_id),
        };
        if let Some(&ptr) = memory.extra.extern_statics.get(&link_name) {
            Ok(ptr)
        } else {
            throw_unsup_format!("`extern` static {:?} is not supported by Miri", def_id)
        }
    }

    /// Attaches Miri's per-allocation extra state (Stacked Borrows stacks,
    /// data-race clocks) to a new allocation and converts its tags.
    fn init_allocation_extra<'b>(
        mem: &Memory<'mir, 'tcx, Self>,
        id: AllocId,
        alloc: Cow<'b, Allocation>,
        kind: Option<MemoryKind<Self::MemoryKind>>,
    ) -> Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>> {
        // Report the allocation the user asked to track, if this is it.
        if Some(id) == mem.extra.tracked_alloc_id {
            register_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id));
        }

        let kind = kind.expect("we set our STATIC_KIND so this cannot be None");
        let alloc = alloc.into_owned();
        let stacks = if let Some(stacked_borrows) = &mem.extra.stacked_borrows {
            Some(Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind))
        } else {
            None
        };
        let race_alloc = if let Some(data_race) = &mem.extra.data_race {
            Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size(), kind))
        } else {
            None
        };
        let alloc: Allocation<Tag, Self::AllocExtra> = alloc.convert_tag_add_extra(
            &mem.tcx,
            AllocExtra { stacked_borrows: stacks, data_race: race_alloc },
            |ptr| Evaluator::tag_alloc_base_pointer(mem, ptr),
        );
        Cow::Owned(alloc)
    }

    /// Turns an `AllocId`-based pointer into a full `Tag`-based pointer with
    /// an absolute address (via intptrcast) and a Stacked Borrows base tag
    /// (`Untagged` when Stacked Borrows is disabled).
    fn tag_alloc_base_pointer(
        mem: &Memory<'mir, 'tcx, Self>,
        ptr: Pointer<AllocId>,
    ) -> Pointer<Tag> {
        let absolute_addr = intptrcast::GlobalState::rel_ptr_to_addr(&mem, ptr);
        let sb_tag = if let Some(stacked_borrows) = &mem.extra.stacked_borrows {
            stacked_borrows.borrow_mut().base_tag(ptr.provenance)
        } else {
            SbTag::Untagged
        };
        Pointer::new(Tag { alloc_id: ptr.provenance, sb: sb_tag }, Size::from_bytes(absolute_addr))
    }

    #[inline(always)]
    fn ptr_from_addr(
        mem: &Memory<'mir, 'tcx, Self>,
        addr: u64,
    ) -> Pointer<Option<Self::PointerTag>> {
        intptrcast::GlobalState::ptr_from_addr(addr, mem)
    }

    /// Convert a pointer with provenance into an allocation-offset pair,
    /// or a `None` with an absolute address if that conversion is not possible.
    fn ptr_get_alloc(
        mem: &Memory<'mir, 'tcx, Self>,
        ptr: Pointer<Self::PointerTag>,
    ) -> (AllocId, Size) {
        let rel = intptrcast::GlobalState::abs_ptr_to_rel(mem, ptr);
        (ptr.provenance.alloc_id, rel)
    }

    /// Hook run on every memory read. The data-race check runs first and can
    /// abort the access; only then is Stacked Borrows consulted.
    #[inline(always)]
    fn memory_read(
        memory_extra: &Self::MemoryExtra,
        alloc_extra: &AllocExtra,
        tag: Tag,
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &alloc_extra.data_race {
            // Per-alloc state exists iff global state does, hence the `unwrap`.
            data_race.read(tag.alloc_id, range, memory_extra.data_race.as_ref().unwrap())?;
        }
        if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
            stacked_borrows.memory_read(
                tag.alloc_id,
                tag.sb,
                range,
                memory_extra.stacked_borrows.as_ref().unwrap(),
            )
        } else {
            Ok(())
        }
    }

    /// Hook run on every memory write; same ordering as `memory_read`.
    #[inline(always)]
    fn memory_written(
        memory_extra: &mut Self::MemoryExtra,
        alloc_extra: &mut AllocExtra,
        tag: Tag,
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if let Some(data_race) = &mut alloc_extra.data_race {
            data_race.write(tag.alloc_id, range, memory_extra.data_race.as_mut().unwrap())?;
        }
        if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
            stacked_borrows.memory_written(
                tag.alloc_id,
                tag.sb,
                range,
                memory_extra.stacked_borrows.as_mut().unwrap(),
            )
        } else {
            Ok(())
        }
    }

    /// Hook run on deallocation: emits the tracked-alloc diagnostic,
    /// then data-race check, then Stacked Borrows.
    #[inline(always)]
    fn memory_deallocated(
        memory_extra: &mut Self::MemoryExtra,
        alloc_extra: &mut AllocExtra,
        tag: Tag,
        range: AllocRange,
    ) -> InterpResult<'tcx> {
        if Some(tag.alloc_id) == memory_extra.tracked_alloc_id {
            register_diagnostic(NonHaltingDiagnostic::FreedAlloc(tag.alloc_id));
        }
        if let Some(data_race) = &mut alloc_extra.data_race {
            data_race.deallocate(tag.alloc_id, range, memory_extra.data_race.as_mut().unwrap())?;
        }
        if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
            stacked_borrows.memory_deallocated(
                tag.alloc_id,
                tag.sb,
                range,
                memory_extra.stacked_borrows.as_mut().unwrap(),
            )
        } else {
            Ok(())
        }
    }

    /// Retagging is a no-op unless Stacked Borrows is enabled.
    #[inline(always)]
    fn retag(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        kind: mir::RetagKind,
        place: &PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        if ecx.memory.extra.stacked_borrows.is_some() { ecx.retag(kind, place) } else { Ok(()) }
    }

    /// Builds the `FrameData` for a freshly pushed stack frame.
    #[inline(always)]
    fn init_frame_extra(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        frame: Frame<'mir, 'tcx, Tag>,
    ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Tag, FrameData<'tcx>>> {
        // Start recording our event before doing anything else
        let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
            let fn_name = frame.instance.to_string();
            // Cache the `measureme` string id per function name so each name
            // is only allocated in the profiler once.
            let entry = ecx.machine.string_cache.entry(fn_name.clone());
            let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));

            Some(profiler.start_recording_interval_event_detached(
                *name,
                measureme::EventId::from_label(*name),
                ecx.get_active_thread().to_u32(),
            ))
        } else {
            None
        };

        // With Stacked Borrows disabled there is no real call id; a constant
        // dummy id of 1 is used for every frame.
        let stacked_borrows = ecx.memory.extra.stacked_borrows.as_ref();
        let call_id = stacked_borrows.map_or(NonZeroU64::new(1).unwrap(), |stacked_borrows| {
            stacked_borrows.borrow_mut().new_call()
        });

        let extra = FrameData { call_id, catch_unwind: None, timing };
        Ok(frame.with_extra(extra))
    }

    /// The interpreter stack is per-thread; expose the active thread's stack.
    fn stack<'a>(
        ecx: &'a InterpCx<'mir, 'tcx, Self>,
    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
        ecx.active_thread_stack()
    }

    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
        ecx.active_thread_stack_mut()
    }

    #[inline(always)]
    fn after_stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
        if ecx.memory.extra.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
    }

    /// Finishes the frame's profiling event (if any) after handling the pop.
    /// NOTE(review): `timing.unwrap()` assumes a profiler implies every frame
    /// recorded a timing in `init_frame_extra` -- holds as long as the
    /// profiler is never enabled mid-run.
    #[inline(always)]
    fn after_stack_pop(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        mut frame: Frame<'mir, 'tcx, Tag, FrameData<'tcx>>,
        unwinding: bool,
    ) -> InterpResult<'tcx, StackPopJump> {
        // Take the timing out first, since `handle_stack_pop` consumes `frame.extra`.
        let timing = frame.extra.timing.take();
        let res = ecx.handle_stack_pop(frame.extra, unwinding);
        if let Some(profiler) = ecx.machine.profiler.as_ref() {
            profiler.finish_recording_interval_event(timing.unwrap());
        }
        res
    }
}