//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "DNBLog.h"
#include "MachVMRegion.h"
#include <dlfcn.h>
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>

#if defined(WITH_FBS) || defined(WITH_BKS)
extern "C" {
#import <System/sys/kern_memorystatus.h>
}
#endif

static const vm_size_t kInvalidPageSize = ~0;

MachVMMemory::MachVMMemory() : m_page_size(kInvalidPageSize), m_err(0) {}

MachVMMemory::~MachVMMemory() {}

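// Return the page size for the given task, caching the result in m_page_size.
// When TASK_VM_INFO is available it is preferred because it reports the page
// size of the target task itself (which may differ from the debugger's own
// page size on some configurations); otherwise we fall back to
// host_page_size() for the local host.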
nub_size_t MachVMMemory::PageSize(task_t task) {
  if (m_page_size == kInvalidPageSize) {
#if defined(TASK_VM_INFO) && TASK_VM_INFO >= 22
    if (task != TASK_NULL) {
      kern_return_t kr;
      mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
      task_vm_info_data_t vm_info;
      kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
      if (kr == KERN_SUCCESS) {
        DNBLogThreadedIf(
            LOG_TASK,
            "MachVMMemory::PageSize task_info returned page size of 0x%x",
            (int)vm_info.page_size);
        m_page_size = vm_info.page_size;
        return m_page_size;
      } else {
        DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call "
                                   "failed to get page size, TASK_VM_INFO %d, "
                                   "TASK_VM_INFO_COUNT %d, kern return %d",
                         TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
      }
    }
#endif
    m_err = ::host_page_size(::mach_host_self(), &m_page_size);
    if (m_err.Fail())
      m_page_size = 0;
  }
  return m_page_size;
}

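// Clamp "count" so that a transfer starting at "addr" does not cross the end
// of the page containing "addr". For example (illustrative numbers only):
// with a 0x4000-byte page, addr = 0x10003ff0 and count = 0x100 is clamped to
// 0x10, the bytes remaining before the next page boundary at 0x10004000.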
nub_size_t MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr,
                                            nub_size_t count) {
  const nub_size_t page_size = PageSize(task);
  if (page_size > 0) {
    nub_size_t page_offset = (addr % page_size);
    nub_size_t bytes_left_in_page = page_size - page_offset;
    if (count > bytes_left_in_page)
      count = bytes_left_in_page;
  }
  return count;
}

#define MAX_STACK_ALLOC_DISPOSITIONS                                           \
  (16 * 1024 / sizeof(int)) // 16K of allocations

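// Collect the addresses of all dirty pages in [addr, addr + size) by asking
// the kernel for page dispositions in fixed-size chunks so the on-stack
// dispositions buffer stays bounded. MAX_STACK_ALLOC_DISPOSITIONS is
// 16 * 1024 / sizeof(int) = 4096 dispositions per call (with 4-byte ints),
// which covers 16 MiB of address space per chunk with 4 KiB pages, or 64 MiB
// with 16 KiB pages. On a query failure the pages gathered so far are
// returned.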
std::vector<nub_addr_t> get_dirty_pages(task_t task, mach_vm_address_t addr,
                                        mach_vm_size_t size) {
  std::vector<nub_addr_t> dirty_pages;

  int pages_to_query = size / vm_page_size;
  // Don't try to fetch too many pages' dispositions in a single call or we
  // could blow our stack out.
  mach_vm_size_t dispositions_size =
      std::min(pages_to_query, (int)MAX_STACK_ALLOC_DISPOSITIONS);
  int dispositions[dispositions_size];

  mach_vm_size_t chunk_count =
      ((pages_to_query + MAX_STACK_ALLOC_DISPOSITIONS - 1) /
       MAX_STACK_ALLOC_DISPOSITIONS);

  for (mach_vm_size_t cur_disposition_chunk = 0;
       cur_disposition_chunk < chunk_count; cur_disposition_chunk++) {
    mach_vm_size_t dispositions_already_queried =
        cur_disposition_chunk * MAX_STACK_ALLOC_DISPOSITIONS;

    mach_vm_size_t chunk_pages_to_query = std::min(
        pages_to_query - dispositions_already_queried, dispositions_size);
    mach_vm_address_t chunk_page_aligned_start_addr =
        addr + (dispositions_already_queried * vm_page_size);

    kern_return_t kr = mach_vm_page_range_query(
        task, chunk_page_aligned_start_addr,
        chunk_pages_to_query * vm_page_size, (mach_vm_address_t)dispositions,
        &chunk_pages_to_query);
    if (kr != KERN_SUCCESS)
      return dirty_pages;
    for (mach_vm_size_t i = 0; i < chunk_pages_to_query; i++) {
      uint64_t dirty_addr = chunk_page_aligned_start_addr + (i * vm_page_size);
      if (dispositions[i] & VM_PAGE_QUERY_PAGE_DIRTY)
        dirty_pages.push_back(dirty_addr);
    }
  }
  return dirty_pages;
}

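// Fill in "region_info" for the region containing "address". If the address
// is not in any mapped region, report an unmapped region with no permissions
// whose size extends either to the start of the next valid region or, failing
// that, to the end of the address space.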
nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address,
                                             DNBRegionInfo *region_info) {
  MachVMRegion vmRegion(task);

  if (vmRegion.GetRegionForAddress(address)) {
    region_info->addr = vmRegion.StartAddress();
    region_info->size = vmRegion.GetByteSize();
    region_info->permissions = vmRegion.GetDNBPermissions();
    region_info->dirty_pages =
        get_dirty_pages(task, vmRegion.StartAddress(), vmRegion.GetByteSize());
  } else {
    region_info->addr = address;
    region_info->size = 0;
    if (vmRegion.GetError().Success()) {
      // vmRegion.GetRegionForAddress() returned false, indicating that
      // "address" wasn't in a valid region, but the "vmRegion" info was
      // successfully read from the task. That info describes the next valid
      // region, from which we can infer the size of this invalid region.
      mach_vm_address_t start_addr = vmRegion.StartAddress();
      if (address < start_addr)
        region_info->size = start_addr - address;
    }
    // If we can't get any info about the size from the next region it means
    // we asked about an address that was past all mappings, so the size
    // of this region will take up all remaining address space.
    if (region_info->size == 0)
      region_info->size = INVALID_NUB_ADDRESS - region_info->addr;

    // Not readable, writeable or executable
    region_info->permissions = 0;
  }
  return true;
}

static uint64_t GetPhysicalMemory() {
  // This doesn't change often at all. No need to poll each time.
  static uint64_t physical_memory = 0;
  static bool calculated = false;
  if (calculated)
    return physical_memory;

  size_t len = sizeof(physical_memory);
  sysctlbyname("hw.memsize", &physical_memory, &len, NULL, 0);

  calculated = true;
  return physical_memory;
}

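// Gather memory statistics for a profile scan. Depending on the bits set in
// "scanType" this fills in host physical memory, host VM statistics, the
// task's anonymous memory (internal + compressed - purgeable) and physical
// footprint, and, when built with WITH_FBS or WITH_BKS, the process memory
// limit reported by memorystatus_control().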
nub_bool_t MachVMMemory::GetMemoryProfile(
    DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti,
    cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo,
    uint64_t &physical_memory, uint64_t &anonymous, uint64_t &phys_footprint,
    uint64_t &memory_cap) {
  if (scanType & eProfileHostMemory)
    physical_memory = GetPhysicalMemory();

  if (scanType & eProfileMemory) {
    static mach_port_t localHost = mach_host_self();
    mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
    host_statistics64(localHost, HOST_VM_INFO64, (host_info64_t)&vminfo,
                      &count);

    kern_return_t kr;
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

    info_count = TASK_VM_INFO_COUNT;
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info,
                   &info_count);
    if (kr == KERN_SUCCESS) {
      if (scanType & eProfileMemoryAnonymous) {
        anonymous = vm_info.internal + vm_info.compressed -
                    vm_info.purgeable_volatile_pmap;
      }

      phys_footprint = vm_info.phys_footprint;
    }
  }

#if defined(WITH_FBS) || defined(WITH_BKS)
  if (scanType & eProfileMemoryCap) {
    memorystatus_memlimit_properties_t memlimit_properties;
    memset(&memlimit_properties, 0, sizeof(memlimit_properties));
    if (memorystatus_control(MEMORYSTATUS_CMD_GET_MEMLIMIT_PROPERTIES, pid, 0,
                             &memlimit_properties,
                             sizeof(memlimit_properties)) == 0) {
      memory_cap = memlimit_properties.memlimit_active;
    }
  }
#endif

  return true;
}

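// Read up to "data_count" bytes from "address" in the target task into the
// caller-supplied buffer "data", one page-bounded chunk at a time via
// ::mach_vm_read(), and return the number of bytes actually copied (which may
// be short on error). A minimal usage sketch; "remote_task" and "remote_addr"
// are hypothetical names, not defined in this file:
//
//   MachVMMemory vm_memory;
//   uint8_t buf[256];
//   nub_size_t n = vm_memory.Read(remote_task, remote_addr, buf, sizeof(buf));
//   // n == sizeof(buf) on success; fewer bytes (possibly 0) on failure.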
nub_size_t MachVMMemory::Read(task_t task, nub_addr_t address, void *data,
                              nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_read = 0;
  nub_addr_t curr_addr = address;
  uint8_t *curr_data = (uint8_t *)data;
  while (total_bytes_read < data_count) {
    mach_vm_size_t curr_size =
        MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
    mach_msg_type_number_t curr_bytes_read = 0;
    vm_offset_t vm_memory = 0;
    m_err = ::mach_vm_read(task, curr_addr, curr_size, &vm_memory,
                           &curr_bytes_read);

    if (DNBLogCheckLogBit(LOG_MEMORY))
      m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "size = %llu, data => %8.8p, dataCnt => %i )",
                        task, (uint64_t)curr_addr, (uint64_t)curr_size,
                        vm_memory, curr_bytes_read);

    if (m_err.Success()) {
      if (curr_bytes_read != curr_size) {
        if (DNBLogCheckLogBit(LOG_MEMORY))
          m_err.LogThreaded(
              "::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, "
              "data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes",
              task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory,
              curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
      }
      ::memcpy(curr_data, (void *)vm_memory, curr_bytes_read);
      ::vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);
      total_bytes_read += curr_bytes_read;
      curr_addr += curr_bytes_read;
      curr_data += curr_bytes_read;
    } else {
      break;
    }
  }
  return total_bytes_read;
}

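// Write "data_count" bytes to "address" in the target task. The write is
// split along VM region boundaries; each region is given read/write
// protections via MachVMRegion::SetProtections() before its portion is
// written by WriteRegion(). Returns the number of bytes successfully written,
// which may be short if a region lookup, protection change, or write fails.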
nub_size_t MachVMMemory::Write(task_t task, nub_addr_t address,
                               const void *data, nub_size_t data_count) {
  MachVMRegion vmRegion(task);

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;

  while (total_bytes_written < data_count) {
    if (vmRegion.GetRegionForAddress(curr_addr)) {
      mach_vm_size_t curr_data_count = data_count - total_bytes_written;
      mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
      if (region_bytes_left == 0) {
        break;
      }
      if (curr_data_count > region_bytes_left)
        curr_data_count = region_bytes_left;

      if (vmRegion.SetProtections(curr_addr, curr_data_count,
                                  VM_PROT_READ | VM_PROT_WRITE)) {
        nub_size_t bytes_written =
            WriteRegion(task, curr_addr, curr_data, curr_data_count);
        if (bytes_written <= 0) {
          // Status should have already been posted by WriteRegion...
          break;
        } else {
          total_bytes_written += bytes_written;
          curr_addr += bytes_written;
          curr_data += bytes_written;
        }
      } else {
        DNBLogThreadedIf(
            LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on "
                                    "region for address: [0x%8.8llx-0x%8.8llx)",
            (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
        break;
      }
    } else {
      DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS,
                       "Failed to get region for address: 0x%8.8llx",
                       (uint64_t)address);
      break;
    }
  }

  return total_bytes_written;
}

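// Write within a single region, one page-bounded chunk at a time via
// ::mach_vm_write(). On non-x86 targets the caches are flushed for each chunk
// with vm_machine_attribute(MATTR_CACHE, MATTR_VAL_CACHE_FLUSH) so that newly
// written code (e.g. breakpoint opcodes) takes effect. Returns the number of
// bytes written before the first failure.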
nub_size_t MachVMMemory::WriteRegion(task_t task, const nub_addr_t address,
                                     const void *data,
                                     const nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;
  while (total_bytes_written < data_count) {
    mach_msg_type_number_t curr_data_count =
        static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage(
            task, curr_addr, data_count - total_bytes_written));
    m_err =
        ::mach_vm_write(task, curr_addr, (pointer_t)curr_data, curr_data_count);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "data = %8.8p, dataCnt = %u )",
                        task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined(__i386__) && !defined(__x86_64__)
    vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

    m_err = ::vm_machine_attribute(task, curr_addr, curr_data_count,
                                   MATTR_CACHE, &mattr_value);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = "
                        "0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value "
                        "=> MATTR_VAL_CACHE_FLUSH )",
                        task, (uint64_t)curr_addr, curr_data_count);
#endif

    if (m_err.Success()) {
      total_bytes_written += curr_data_count;
      curr_addr += curr_data_count;
      curr_data += curr_data_count;
    } else {
      break;
    }
  }
  return total_bytes_written;
}