//===-- Memory.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RangeMap.h"
#include "lldb/Utility/State.h"

#include <cinttypes>
#include <memory>

using namespace lldb;
using namespace lldb_private;

// MemoryCache constructor
MemoryCache::MemoryCache(Process &process)
    : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
      m_process(process),
      m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}

// Destructor
MemoryCache::~MemoryCache() = default;

void MemoryCache::Clear(bool clear_invalid_ranges) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache.clear();
  m_L2_cache.clear();
  if (clear_invalid_ranges)
    m_invalid_ranges.Clear();
  m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src,
                                 size_t src_len) {
  AddL1CacheData(addr, DataBufferSP(new DataBufferHeap(src, src_len)));
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr,
                                 const DataBufferSP &data_buffer_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache[addr] = data_buffer_sp;
}

void MemoryCache::Flush(addr_t addr, size_t size) {
  if (size == 0)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  // Erase any blocks from the L1 cache that intersect with the flush range.
  if (!m_L1_cache.empty()) {
    AddrRange flush_range(addr, size);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    while (pos != m_L1_cache.end()) {
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range))
        break;
      pos = m_L1_cache.erase(pos);
    }
  }

  if (!m_L2_cache.empty()) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr =
        end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64 bit address space.
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
      num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
                         cache_line_byte_size) +
                        1;
    else
      num_cache_lines =
          (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx) {
      BlockMap::iterator pos = m_L2_cache.find(curr_addr);
      if (pos != m_L2_cache.end())
        m_L2_cache.erase(pos);
    }
  }
}

void MemoryCache::AddInvalidRange(lldb::addr_t base_addr,
                                  lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    InvalidRanges::Entry range(base_addr, byte_size);
    m_invalid_ranges.Append(range);
    m_invalid_ranges.Sort();
  }
}
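
// Usage sketch (illustrative, not from this file): a caller that knows a
// region of inferior memory cannot be read could mark it, and later unmark
// it, like so:
//   cache.AddInvalidRange(region_base, region_size);
//   ...
//   cache.RemoveInvalidRange(region_base, region_size);
// Read() below reports an error rather than caching anything inside such a
// range.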

bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
                                     lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
    if (idx != UINT32_MAX) {
      const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
      if (entry->GetRangeBase() == base_addr &&
          entry->GetByteSize() == byte_size)
        return m_invalid_ranges.RemoveEntryAtIndex(idx);
    }
  }
  return false;
}

size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len,
                         Status &error) {
  size_t bytes_left = dst_len;

  // Check the L1 cache for a range that contains the entire memory read. If
  // we find a range in the L1 cache that does, we use it. Otherwise we fall
  // back to reading memory in m_L2_cache_line_byte_size byte sized chunks.
  // The L1 cache contains chunks of memory that are not required to be
  // m_L2_cache_line_byte_size bytes in size, so we don't try anything tricky
  // when reading from them (no partial reads from the L1 cache).

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (!m_L1_cache.empty()) {
    AddrRange read_range(addr, dst_len);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    AddrRange chunk_range(pos->first, pos->second->GetByteSize());
    if (chunk_range.Contains(read_range)) {
      memcpy(dst, pos->second->GetBytes() + (addr - chunk_range.GetRangeBase()),
             dst_len);
      return dst_len;
    }
  }

  // If this memory read request is larger than the cache line size, then we
  // (1) try to read as much of it at once as possible, and (2) don't add the
  // data to the memory cache. We don't want to split a big read up into more
  // separate reads than necessary, and with a large memory read request, it
  // is unlikely that the caller will ask for the next 4 bytes after the
  // large memory read - so there's little benefit to saving it in the cache.
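  // Example (assuming the default 512-byte cache line size): a 4 KiB read
  // is sent to the inferior as a single read and whatever comes back is
  // stored whole in the L1 cache, while an 8-byte read below is satisfied
  // from 512-byte L2 cache lines.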
  if (dst && dst_len > m_L2_cache_line_byte_size) {
    size_t bytes_read =
        m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
    // Add this non block sized range to the L1 cache if we actually read
    // anything.
    if (bytes_read > 0)
      AddL1CacheData(addr, dst, bytes_read);
    return bytes_read;
  }

  if (dst && bytes_left > 0) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    uint8_t *dst_buf = (uint8_t *)dst;
    addr_t curr_addr = addr - (addr % cache_line_byte_size);
    addr_t cache_offset = addr - curr_addr;

    while (bytes_left > 0) {
      if (m_invalid_ranges.FindEntryThatContains(curr_addr)) {
        error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64,
                                       curr_addr);
        return dst_len - bytes_left;
      }

      BlockMap::const_iterator pos = m_L2_cache.find(curr_addr);
      BlockMap::const_iterator end = m_L2_cache.end();

      if (pos != end) {
        size_t curr_read_size = cache_line_byte_size - cache_offset;
        if (curr_read_size > bytes_left)
          curr_read_size = bytes_left;

        memcpy(dst_buf + dst_len - bytes_left,
               pos->second->GetBytes() + cache_offset, curr_read_size);

        bytes_left -= curr_read_size;
        curr_addr += curr_read_size + cache_offset;
        cache_offset = 0;

        if (bytes_left > 0) {
          // Get sequential cache page hits.
          for (++pos; (pos != end) && (bytes_left > 0); ++pos) {
            assert((curr_addr % cache_line_byte_size) == 0);

            if (pos->first != curr_addr)
              break;

            curr_read_size = pos->second->GetByteSize();
            if (curr_read_size > bytes_left)
              curr_read_size = bytes_left;

            memcpy(dst_buf + dst_len - bytes_left, pos->second->GetBytes(),
                   curr_read_size);

            bytes_left -= curr_read_size;
            curr_addr += curr_read_size;

            // We hit a cache page that was only partially filled (an earlier
            // read got fewer bytes than a full page). If this happens, we
            // must cap off how much data we are able to read...
            if (pos->second->GetByteSize() != cache_line_byte_size)
              return dst_len - bytes_left;
          }
        }
      }

      // We need to read from the process.

      if (bytes_left > 0) {
        assert((curr_addr % cache_line_byte_size) == 0);
        std::unique_ptr<DataBufferHeap> data_buffer_heap_up(
            new DataBufferHeap(cache_line_byte_size, 0));
        size_t process_bytes_read = m_process.ReadMemoryFromInferior(
            curr_addr, data_buffer_heap_up->GetBytes(),
            data_buffer_heap_up->GetByteSize(), error);
        if (process_bytes_read == 0)
          return dst_len - bytes_left;

        if (process_bytes_read != cache_line_byte_size) {
          if (process_bytes_read < data_buffer_heap_up->GetByteSize()) {
            dst_len -= data_buffer_heap_up->GetByteSize() - process_bytes_read;
            bytes_left = process_bytes_read;
          }
          data_buffer_heap_up->SetByteSize(process_bytes_read);
        }
        m_L2_cache[curr_addr] = DataBufferSP(data_buffer_heap_up.release());
        // We have read data and put it into the cache, continue through the
        // loop again to get the data out of the cache...
      }
    }
  }

  return dst_len - bytes_left;
}
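
// AllocatedBlock carves one block of inferior memory into fixed-size chunks
// and hands out chunk-aligned sub-ranges from a sorted free list. Worked
// example (illustrative): with chunk_size = 16, ReserveBlock(20) needs
// ceil(20 / 16) = 2 chunks, so it reserves 32 bytes from the first free
// range that is large enough and shrinks that free range in place.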
AllocatedBlock::AllocatedBlock(lldb::addr_t addr, uint32_t byte_size,
                               uint32_t permissions, uint32_t chunk_size)
    : m_range(addr, byte_size), m_permissions(permissions),
      m_chunk_size(chunk_size) {
  // The entire address range is free to start with.
  m_free_blocks.Append(m_range);
  assert(byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock() = default;

lldb::addr_t AllocatedBlock::ReserveBlock(uint32_t size) {
  // We must return something valid for zero bytes.
  if (size == 0)
    size = 1;
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  const size_t free_count = m_free_blocks.GetSize();
  for (size_t i = 0; i < free_count; ++i) {
    auto &free_block = m_free_blocks.GetEntryRef(i);
    const lldb::addr_t range_size = free_block.GetByteSize();
    if (range_size >= size) {
      // We found a free block that is big enough for our data. Figure out
      // how many chunks we will need and calculate the resulting block size
      // we will reserve.
      addr_t addr = free_block.GetRangeBase();
      size_t num_chunks = CalculateChunksNeededForSize(size);
      lldb::addr_t block_size = num_chunks * m_chunk_size;
      lldb::addr_t bytes_left = range_size - block_size;
      if (bytes_left == 0) {
        // The newly allocated block will take all of the bytes in this
        // available block, so we can just add it to the allocated ranges and
        // remove the range from the free ranges.
        m_reserved_blocks.Insert(free_block, false);
        m_free_blocks.RemoveEntryAtIndex(i);
      } else {
        // Make the new allocated range and add it to the allocated ranges.
        Range<lldb::addr_t, uint32_t> reserved_block(free_block);
        reserved_block.SetByteSize(block_size);
        // Insert the reserved range and don't combine it with other blocks
        // in the reserved blocks list.
        m_reserved_blocks.Insert(reserved_block, false);
        // Adjust the free range in place since we won't change the sorted
        // ordering of the m_free_blocks list.
        free_block.SetRangeBase(reserved_block.GetRangeEnd());
        free_block.SetByteSize(bytes_left);
      }
      LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
      return addr;
    }
  }

  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size,
            LLDB_INVALID_ADDRESS);
  return LLDB_INVALID_ADDRESS;
}

bool AllocatedBlock::FreeBlock(addr_t addr) {
  bool success = false;
  auto entry_idx = m_reserved_blocks.FindEntryIndexThatContains(addr);
  if (entry_idx != UINT32_MAX) {
    m_free_blocks.Insert(m_reserved_blocks.GetEntryRef(entry_idx), true);
    m_reserved_blocks.RemoveEntryAtIndex(entry_idx);
    success = true;
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}", this, addr, success);
  return success;
}

AllocatedMemoryCache::AllocatedMemoryCache(Process &process)
    : m_process(process), m_mutex(), m_memory_map() {}

AllocatedMemoryCache::~AllocatedMemoryCache() = default;

void AllocatedMemoryCache::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_process.IsAlive()) {
    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    for (pos = m_memory_map.begin(); pos != end; ++pos)
      m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
  }
  m_memory_map.clear();
}
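
// AllocatePage rounds every request up to whole 4 KiB pages before asking
// the process to allocate; e.g. a 5000-byte request becomes two pages
// (8192 bytes), and the surplus stays available for later ReserveBlock
// calls against the same block.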
AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage(uint32_t byte_size, uint32_t permissions,
                                   uint32_t chunk_size, Status &error) {
  AllocatedBlockSP block_sp;
  const size_t page_size = 4096;
  const size_t num_pages = (byte_size + page_size - 1) / page_size;
  const size_t page_byte_size = num_pages * page_size;

  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log) {
    LLDB_LOGF(log,
              "Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
              ", permissions = %s) => 0x%16.16" PRIx64,
              (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
              (uint64_t)addr);
  }

  if (addr != LLDB_INVALID_ADDRESS) {
    block_sp = std::make_shared<AllocatedBlock>(addr, page_byte_size,
                                                permissions, chunk_size);
    m_memory_map.insert(std::make_pair(permissions, block_sp));
  }
  return block_sp;
}

lldb::addr_t AllocatedMemoryCache::AllocateMemory(size_t byte_size,
                                                  uint32_t permissions,
                                                  Status &error) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  addr_t addr = LLDB_INVALID_ADDRESS;
  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
      range = m_memory_map.equal_range(permissions);

  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
       ++pos) {
    addr = (*pos).second->ReserveBlock(byte_size);
    if (addr != LLDB_INVALID_ADDRESS)
      break;
  }

  if (addr == LLDB_INVALID_ADDRESS) {
    AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));

    if (block_sp)
      addr = block_sp->ReserveBlock(byte_size);
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGF(log,
            "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
            ", permissions = %s) => 0x%16.16" PRIx64,
            (uint32_t)byte_size, GetPermissionsAsCString(permissions),
            (uint64_t)addr);
  return addr;
}

bool AllocatedMemoryCache::DeallocateMemory(lldb::addr_t addr) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
  bool success = false;
  for (pos = m_memory_map.begin(); pos != end; ++pos) {
    if (pos->second->Contains(addr)) {
      success = pos->second->FreeBlock(addr);
      break;
    }
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGF(log,
            "AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
            ") => %i",
            (uint64_t)addr, success);
  return success;
}
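
// Usage sketch (illustrative, not from this file): Process funnels
// expression-scratch allocations through this cache roughly like so:
//   Status error;
//   lldb::addr_t addr = m_allocated_memory_cache.AllocateMemory(
//       size, lldb::ePermissionsReadable | lldb::ePermissionsWritable, error);
//   ...
//   m_allocated_memory_cache.DeallocateMemory(addr);
// Freed chunks return to their block's free list for reuse rather than being
// deallocated in the inferior.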