//===-- Memory.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RangeMap.h"
#include "lldb/Utility/State.h"

#include <cinttypes>
#include <memory>

using namespace lldb;
using namespace lldb_private;

// MemoryCache constructor
MemoryCache::MemoryCache(Process &process)
    : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
      m_process(process),
      m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}

// Destructor
MemoryCache::~MemoryCache() = default;

void MemoryCache::Clear(bool clear_invalid_ranges) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache.clear();
  m_L2_cache.clear();
  if (clear_invalid_ranges)
    m_invalid_ranges.Clear();
  m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src,
                                 size_t src_len) {
  AddL1CacheData(addr, DataBufferSP(new DataBufferHeap(src, src_len)));
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr,
                                 const DataBufferSP &data_buffer_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache[addr] = data_buffer_sp;
}

void MemoryCache::Flush(addr_t addr, size_t size) {
  if (size == 0)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  // Erase any blocks from the L1 cache that intersect with the flush range
  if (!m_L1_cache.empty()) {
    AddrRange flush_range(addr, size);
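    // Start with the first chunk whose base address is greater than addr,
    // then back up one entry so that a chunk starting below addr but
    // overlapping the flush range is also considered.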
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    while (pos != m_L1_cache.end()) {
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range)) {
        // Chunks are sorted by base address, so once we reach a chunk that
        // starts at or beyond the end of the flush range we are done. The
        // chunk we backed up to may simply end before the flush range, so
        // skip it and keep scanning.
        if (chunk_range.GetRangeBase() >= flush_range.GetRangeEnd())
          break;
        ++pos;
        continue;
      }
      pos = m_L1_cache.erase(pos);
    }
  }

  if (!m_L2_cache.empty()) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
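    // Determine the first and last cache lines that the flush range touches
    // by rounding both ends of the range down to cache-line boundaries.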
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr =
        end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64-bit address space
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
      num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
                         cache_line_byte_size) +
                        1;
    else
      num_cache_lines =
          (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx) {
      BlockMap::iterator pos = m_L2_cache.find(curr_addr);
      if (pos != m_L2_cache.end())
        m_L2_cache.erase(pos);
    }
  }
}

void MemoryCache::AddInvalidRange(lldb::addr_t base_addr,
                                  lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    InvalidRanges::Entry range(base_addr, byte_size);
    m_invalid_ranges.Append(range);
    m_invalid_ranges.Sort();
  }
}

bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
                                     lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
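    // Only remove a range that exactly matches an existing invalid range;
    // partial overlaps are left in place.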
    const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
    if (idx != UINT32_MAX) {
      const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
      if (entry->GetRangeBase() == base_addr &&
          entry->GetByteSize() == byte_size)
        return m_invalid_ranges.RemoveEntryAtIndex(idx);
    }
  }
  return false;
}

size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len,
                         Status &error) {
  size_t bytes_left = dst_len;

  // Check the L1 cache for a range that contains the entire memory read. If
  // we find such a range in the L1 cache, we use it. Otherwise we fall back
  // to reading memory in m_L2_cache_line_byte_size byte sized chunks. The L1
  // cache contains chunks of memory that are not required to be
  // m_L2_cache_line_byte_size bytes in size, so we don't try anything tricky
  // when reading from them (no partial reads from the L1 cache).

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (!m_L1_cache.empty()) {
    AddrRange read_range(addr, dst_len);
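    // Find the last chunk whose base address is at or below addr. Since the
    // map is sorted by base address, only that chunk can contain the start
    // of the requested range.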
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    AddrRange chunk_range(pos->first, pos->second->GetByteSize());
    if (chunk_range.Contains(read_range)) {
      memcpy(dst, pos->second->GetBytes() + (addr - chunk_range.GetRangeBase()),
             dst_len);
      return dst_len;
    }
  }

  // If this memory read request is larger than the cache line size, then we
  // (1) try to read as much of it at once as possible, and (2) don't add the
  // data to the memory cache. We don't want to split a big read up into more
  // separate reads than necessary, and with a large memory read request, it
  // is unlikely that the caller will ask for the next 4 bytes right after
  // it - so there's little benefit to saving it in the cache.
  if (dst && dst_len > m_L2_cache_line_byte_size) {
    size_t bytes_read =
        m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
    // Add this non block sized range to the L1 cache if we actually read
    // anything
    if (bytes_read > 0)
      AddL1CacheData(addr, dst, bytes_read);
    return bytes_read;
  }

  if (dst && bytes_left > 0) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    uint8_t *dst_buf = (uint8_t *)dst;
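    // Align the first read down to a cache-line boundary; cache_offset is
    // where the requested data begins within that first line.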
    addr_t curr_addr = addr - (addr % cache_line_byte_size);
    addr_t cache_offset = addr - curr_addr;

    while (bytes_left > 0) {
      if (m_invalid_ranges.FindEntryThatContains(curr_addr)) {
        error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64,
                                       curr_addr);
        return dst_len - bytes_left;
      }

      BlockMap::const_iterator pos = m_L2_cache.find(curr_addr);
      BlockMap::const_iterator end = m_L2_cache.end();

      if (pos != end) {
        size_t curr_read_size = cache_line_byte_size - cache_offset;
        if (curr_read_size > bytes_left)
          curr_read_size = bytes_left;

        memcpy(dst_buf + dst_len - bytes_left,
               pos->second->GetBytes() + cache_offset, curr_read_size);

        bytes_left -= curr_read_size;
        curr_addr += curr_read_size + cache_offset;
        cache_offset = 0;

        if (bytes_left > 0) {
          // Get sequential cache page hits
          for (++pos; (pos != end) && (bytes_left > 0); ++pos) {
            assert((curr_addr % cache_line_byte_size) == 0);

            if (pos->first != curr_addr)
              break;

            curr_read_size = pos->second->GetByteSize();
            if (curr_read_size > bytes_left)
              curr_read_size = bytes_left;

            memcpy(dst_buf + dst_len - bytes_left, pos->second->GetBytes(),
                   curr_read_size);

            bytes_left -= curr_read_size;
            curr_addr += curr_read_size;
            // This cache page was only partially filled when it was read
            // from the process, so there is no readable data past its end.
            // Cap off how much data we are able to read...
            if (pos->second->GetByteSize() != cache_line_byte_size)
              return dst_len - bytes_left;
          }
        }
      }

      // We need to read from the process

      if (bytes_left > 0) {
        assert((curr_addr % cache_line_byte_size) == 0);
        std::unique_ptr<DataBufferHeap> data_buffer_heap_up(
            new DataBufferHeap(cache_line_byte_size, 0));
        size_t process_bytes_read = m_process.ReadMemoryFromInferior(
            curr_addr, data_buffer_heap_up->GetBytes(),
            data_buffer_heap_up->GetByteSize(), error);
        if (process_bytes_read == 0)
          return dst_len - bytes_left;

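        // A short read means the rest of this cache line was unreadable (for
        // example, the line runs past the end of a readable region). Trim the
        // request so it ends at the last readable byte, and cache only the
        // bytes we actually got.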
        if (process_bytes_read != cache_line_byte_size) {
          if (process_bytes_read < data_buffer_heap_up->GetByteSize()) {
            dst_len -= data_buffer_heap_up->GetByteSize() - process_bytes_read;
            bytes_left = process_bytes_read;
          }
          data_buffer_heap_up->SetByteSize(process_bytes_read);
        }
        m_L2_cache[curr_addr] = DataBufferSP(data_buffer_heap_up.release());
        // We have read data and put it into the cache, continue through the
        // loop again to get the data out of the cache...
      }
    }
  }

  return dst_len - bytes_left;
}

AllocatedBlock::AllocatedBlock(lldb::addr_t addr, uint32_t byte_size,
                               uint32_t permissions, uint32_t chunk_size)
    : m_range(addr, byte_size), m_permissions(permissions),
      m_chunk_size(chunk_size) {
  // The entire address range is free to start with.
  m_free_blocks.Append(m_range);
  assert(byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock() = default;

lldb::addr_t AllocatedBlock::ReserveBlock(uint32_t size) {
  // We must return something valid for zero bytes.
  if (size == 0)
    size = 1;
  Log *log = GetLog(LLDBLog::Process);

  const size_t free_count = m_free_blocks.GetSize();
  for (size_t i = 0; i < free_count; ++i) {
    auto &free_block = m_free_blocks.GetEntryRef(i);
    const lldb::addr_t range_size = free_block.GetByteSize();
    if (range_size >= size) {
      // We found a free block that is big enough for our data. Figure out
      // how many chunks we will need and calculate the resulting block size
      // we will reserve.
      addr_t addr = free_block.GetRangeBase();
      size_t num_chunks = CalculateChunksNeededForSize(size);
      lldb::addr_t block_size = num_chunks * m_chunk_size;
      lldb::addr_t bytes_left = range_size - block_size;
      if (bytes_left == 0) {
        // The newly allocated block will take all of the bytes in this
        // available block, so we can just add it to the allocated ranges and
        // remove the range from the free ranges.
        m_reserved_blocks.Insert(free_block, false);
        m_free_blocks.RemoveEntryAtIndex(i);
      } else {
        // Make the new allocated range and add it to the allocated ranges.
        Range<lldb::addr_t, uint32_t> reserved_block(free_block);
        reserved_block.SetByteSize(block_size);
        // Insert the reserved range and don't combine it with other blocks
        // in the reserved blocks list.
        m_reserved_blocks.Insert(reserved_block, false);
        // Adjust the free range in place since we won't change the sorted
        // ordering of the m_free_blocks list.
        free_block.SetRangeBase(reserved_block.GetRangeEnd());
        free_block.SetByteSize(bytes_left);
      }
      LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
      return addr;
    }
  }

  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size,
            LLDB_INVALID_ADDRESS);
  return LLDB_INVALID_ADDRESS;
}

bool AllocatedBlock::FreeBlock(addr_t addr) {
  bool success = false;
  auto entry_idx = m_reserved_blocks.FindEntryIndexThatContains(addr);
  if (entry_idx != UINT32_MAX) {
    m_free_blocks.Insert(m_reserved_blocks.GetEntryRef(entry_idx), true);
    m_reserved_blocks.RemoveEntryAtIndex(entry_idx);
    success = true;
  }
  Log *log = GetLog(LLDBLog::Process);
  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}", this, addr, success);
  return success;
}

AllocatedMemoryCache::AllocatedMemoryCache(Process &process)
    : m_process(process), m_mutex(), m_memory_map() {}

AllocatedMemoryCache::~AllocatedMemoryCache() = default;

void AllocatedMemoryCache::Clear(bool deallocate_memory) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_process.IsAlive() && deallocate_memory) {
    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    for (pos = m_memory_map.begin(); pos != end; ++pos)
      m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
  }
  m_memory_map.clear();
}

AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage(uint32_t byte_size, uint32_t permissions,
                                   uint32_t chunk_size, Status &error) {
  AllocatedBlockSP block_sp;
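  // Round the requested size up to a whole number of pages before asking the
  // process for memory. Note that the page size is hard-coded to 4 KiB here.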
  const size_t page_size = 4096;
  const size_t num_pages = (byte_size + page_size - 1) / page_size;
  const size_t page_byte_size = num_pages * page_size;

  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

  Log *log = GetLog(LLDBLog::Process);
  if (log) {
    LLDB_LOGF(log,
              "Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
              ", permissions = %s) => 0x%16.16" PRIx64,
              (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
              (uint64_t)addr);
  }

  if (addr != LLDB_INVALID_ADDRESS) {
    block_sp = std::make_shared<AllocatedBlock>(addr, page_byte_size,
                                                permissions, chunk_size);
    m_memory_map.insert(std::make_pair(permissions, block_sp));
  }
  return block_sp;
}

lldb::addr_t AllocatedMemoryCache::AllocateMemory(size_t byte_size,
                                                  uint32_t permissions,
                                                  Status &error) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  addr_t addr = LLDB_INVALID_ADDRESS;
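  // First try to reserve the block from a page that was already allocated
  // with matching permissions; only allocate a new page if none has room.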
  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
      range = m_memory_map.equal_range(permissions);

  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
       ++pos) {
    addr = (*pos).second->ReserveBlock(byte_size);
    if (addr != LLDB_INVALID_ADDRESS)
      break;
  }

  if (addr == LLDB_INVALID_ADDRESS) {
    AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));

    if (block_sp)
      addr = block_sp->ReserveBlock(byte_size);
  }
  Log *log = GetLog(LLDBLog::Process);
  LLDB_LOGF(log,
            "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
            ", permissions = %s) => 0x%16.16" PRIx64,
            (uint32_t)byte_size, GetPermissionsAsCString(permissions),
            (uint64_t)addr);
  return addr;
}

bool AllocatedMemoryCache::DeallocateMemory(lldb::addr_t addr) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

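  // Find the page that contains this address and return its block to that
  // page's free list; the page itself stays allocated for reuse.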
  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
  bool success = false;
  for (pos = m_memory_map.begin(); pos != end; ++pos) {
    if (pos->second->Contains(addr)) {
      success = pos->second->FreeBlock(addr);
      break;
    }
  }
  Log *log = GetLog(LLDBLog::Process);
  LLDB_LOGF(log,
            "AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
            ") => %i",
            (uint64_t)addr, success);
  return success;
}