use std::cell::RefCell;
use std::cmp::max;
use std::collections::hash_map::Entry;

use log::trace;
use rand::Rng;

use rustc_data_structures::fx::FxHashMap;
use rustc_target::abi::{HasDataLayout, Size};

use crate::*;

pub type MemoryExtra = RefCell<GlobalState>;

#[derive(Clone, Debug)]
pub struct GlobalState {
    /// This is used as a map between the address of each allocation and its `AllocId`.
    /// It is always sorted.
    pub int_to_ptr_map: Vec<(u64, AllocId)>,
    /// The base address for each allocation. We cannot put that into
    /// `AllocExtra` because function pointers also have a base address, and
    /// they do not have an `AllocExtra`.
    /// This is the inverse of `int_to_ptr_map`.
    pub base_addr: FxHashMap<AllocId, u64>,
    /// This is used as a memory address when a new pointer is cast to an integer. It
    /// is always larger than any address that was previously made part of a block.
    pub next_base_addr: u64,
}

impl Default for GlobalState {
    fn default() -> Self {
        GlobalState {
            int_to_ptr_map: Vec::default(),
            base_addr: FxHashMap::default(),
            next_base_addr: STACK_ADDR,
        }
    }
}

impl<'mir, 'tcx> GlobalState {
    pub fn ptr_from_addr(
        addr: u64,
        memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
    ) -> Pointer<Option<Tag>> {
        trace!("Casting 0x{:x} to a pointer", addr);
        let global_state = memory.extra.intptrcast.borrow();
        let pos = global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr);

        let alloc_id = match pos {
            Ok(pos) => Some(global_state.int_to_ptr_map[pos].1),
            Err(0) => None,
            Err(pos) => {
                // This is the largest of the addresses smaller than `int`,
                // i.e. the greatest lower bound (glb).
                let (glb, alloc_id) = global_state.int_to_ptr_map[pos - 1];
                // This never overflows because `addr >= glb`.
                let offset = addr - glb;
                // If the offset exceeds the size of the allocation, don't use this `alloc_id`.
                if offset
                    <= memory.get_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap().0.bytes()
                {
                    Some(alloc_id)
                } else {
                    None
                }
            }
        };
        // Pointers created from integers are untagged.
        Pointer::new(
            alloc_id.map(|alloc_id| Tag { alloc_id, sb: SbTag::Untagged }),
            Size::from_bytes(addr),
        )
    }

    fn alloc_base_addr(
        memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
        alloc_id: AllocId,
    ) -> u64 {
        let mut global_state = memory.extra.intptrcast.borrow_mut();
        let global_state = &mut *global_state;

        match global_state.base_addr.entry(alloc_id) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                // There is nothing wrong with a raw pointer being cast to an integer only after
                // it became dangling. Hence `MaybeDead`.
                let (size, align) =
                    memory.get_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap();

                // This allocation does not have a base address yet, pick one.
                // Leave some space after the previous allocation, to give it some chance to be less aligned.
                let slack = {
                    let mut rng = memory.extra.rng.borrow_mut();
                    // This means that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
                    rng.gen_range(0..16)
                };
                // From `next_base_addr + slack`, round up to adjust for alignment.
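                // Worked example (hypothetical numbers, not from the source): with
                // `next_base_addr = 0x1000`, `slack = 5`, and an alignment of 8, the
                // candidate address 0x1005 is rounded up to 0x1008 by `align_addr` below.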
                let base_addr = global_state.next_base_addr.checked_add(slack).unwrap();
                let base_addr = Self::align_addr(base_addr, align.bytes());
                entry.insert(base_addr);
                trace!(
                    "Assigning base address {:#x} to allocation {:?} (size: {}, align: {}, slack: {})",
                    base_addr,
                    alloc_id,
                    size.bytes(),
                    align.bytes(),
                    slack,
                );

                // Remember next base address. If this allocation is zero-sized, leave a gap
                // of at least 1 to avoid two allocations having the same base address.
                global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
                // Given that `next_base_addr` increases with each allocation, pushing the
                // corresponding tuple keeps `int_to_ptr_map` sorted.
                global_state.int_to_ptr_map.push((base_addr, alloc_id));

                base_addr
            }
        }
    }

    /// Convert a relative (tcx) pointer to an absolute address.
    pub fn rel_ptr_to_addr(
        memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
        ptr: Pointer<AllocId>,
    ) -> u64 {
        let (alloc_id, offset) = ptr.into_parts(); // offset is relative
        let base_addr = GlobalState::alloc_base_addr(memory, alloc_id);

        // Add offset with the right kind of pointer-overflowing arithmetic.
        let dl = memory.data_layout();
        dl.overflowing_offset(base_addr, offset.bytes()).0
    }

    pub fn abs_ptr_to_rel(
        memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
        ptr: Pointer<Tag>,
    ) -> Size {
        let (tag, addr) = ptr.into_parts(); // addr is absolute
        let base_addr = GlobalState::alloc_base_addr(memory, tag.alloc_id);

        // Wrapping "addr - base_addr".
        let dl = memory.data_layout();
        let neg_base_addr = (base_addr as i64).wrapping_neg();
        Size::from_bytes(dl.overflowing_signed_offset(addr.bytes(), neg_base_addr).0)
    }

    /// Shifts `addr` to make it aligned with `align` by rounding `addr` to the smallest multiple
    /// of `align` that is larger than or equal to `addr`.
    fn align_addr(addr: u64, align: u64) -> u64 {
        match addr % align {
            0 => addr,
            rem => addr.checked_add(align).unwrap() - rem,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_align_addr() {
        assert_eq!(GlobalState::align_addr(37, 4), 40);
        assert_eq!(GlobalState::align_addr(44, 4), 44);
    }
}
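
// A minimal property-style sketch complementing the unit test above. The module
// name and the exhaustive small-range cases here are illustrative additions, not
// part of the original suite: they check that `align_addr` always returns the
// smallest multiple of `align` that is at least `addr`, as its doc comment states.
#[cfg(test)]
mod align_addr_property_sketch {
    use super::*;

    #[test]
    fn aligned_result_is_smallest_suitable_multiple() {
        for addr in 0u64..64 {
            for align in [1u64, 2, 4, 8, 16] {
                let aligned = GlobalState::align_addr(addr, align);
                // The result is a multiple of `align`...
                assert_eq!(aligned % align, 0);
                // ...that is at least `addr`...
                assert!(aligned >= addr);
                // ...and less than one whole alignment step past it.
                assert!(aligned - addr < align);
            }
        }
    }
}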