1 // Copyright (c) 2006, Google Inc.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
13 // distribution.
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
17 //
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 #include <algorithm>
31 #include <cstdio>
32
33 #include <mach/host_info.h>
34 #include <mach/machine.h>
35 #include <mach/vm_statistics.h>
36 #include <mach-o/dyld.h>
37 #include <mach-o/loader.h>
38 #include <mach-o/getsect.h>
39 #include <sys/sysctl.h>
40 #include <sys/resource.h>
41
42 #include <CoreFoundation/CoreFoundation.h>
43
44 #include "mac/handler/minidump_generator.h"
45
46 #if defined(HAS_ARM_SUPPORT) || defined(HAS_ARM64_SUPPORT)
47 #include <mach/arm/thread_status.h>
48 #endif
49 #ifdef HAS_PPC_SUPPORT
50 #include <mach/ppc/thread_status.h>
51 #endif
52 #ifdef HAS_X86_SUPPORT
53 #include <mach/i386/thread_status.h>
54 #endif
55
56 #include "minidump_file_writer-inl.h"
57 #include "common/mac/file_id.h"
58 #include "common/mac/macho_id.h"
59 #include "common/mac/string_utilities.h"
60
61 using MacStringUtils::ConvertToString;
62 using MacStringUtils::IntegerValueAtIndex;
63
64 namespace google_breakpad {
65
66 #if defined(__LP64__) && __LP64__
67 #define LC_SEGMENT_ARCH LC_SEGMENT_64
68 #else
69 #define LC_SEGMENT_ARCH LC_SEGMENT
70 #endif
71
72 // constructor when generating from within the crashed process
MinidumpGenerator()73 MinidumpGenerator::MinidumpGenerator()
74 : writer_(),
75 exception_type_(0),
76 exception_code_(0),
77 exception_subcode_(0),
78 exception_thread_(0),
79 crashing_task_(mach_task_self()),
80 handler_thread_(mach_thread_self()),
81 cpu_type_(DynamicImages::GetNativeCPUType()),
82 task_context_(NULL),
83 dynamic_images_(NULL),
84 memory_blocks_(&allocator_) {
85 GatherSystemInformation();
86 }
87
88 // constructor when generating from a different process than the
89 // crashed process
MinidumpGenerator(mach_port_t crashing_task,mach_port_t handler_thread)90 MinidumpGenerator::MinidumpGenerator(mach_port_t crashing_task,
91 mach_port_t handler_thread)
92 : writer_(),
93 exception_type_(0),
94 exception_code_(0),
95 exception_subcode_(0),
96 exception_thread_(0),
97 crashing_task_(crashing_task),
98 handler_thread_(handler_thread),
99 cpu_type_(DynamicImages::GetNativeCPUType()),
100 task_context_(NULL),
101 dynamic_images_(NULL),
102 memory_blocks_(&allocator_) {
103 if (crashing_task != mach_task_self()) {
104 dynamic_images_ = new DynamicImages(crashing_task_);
105 cpu_type_ = dynamic_images_->GetCPUType();
106 } else {
107 dynamic_images_ = NULL;
108 cpu_type_ = DynamicImages::GetNativeCPUType();
109 }
110
111 GatherSystemInformation();
112 }
113
~MinidumpGenerator()114 MinidumpGenerator::~MinidumpGenerator() {
115 delete dynamic_images_;
116 }
117
118 char MinidumpGenerator::build_string_[16];
119 int MinidumpGenerator::os_major_version_ = 0;
120 int MinidumpGenerator::os_minor_version_ = 0;
121 int MinidumpGenerator::os_build_number_ = 0;
122
123 // static
GatherSystemInformation()124 void MinidumpGenerator::GatherSystemInformation() {
125 // If this is non-zero, then we've already gathered the information
126 if (os_major_version_)
127 return;
128
129 // This code extracts the version and build information from the OS
130 CFStringRef vers_path =
131 CFSTR("/System/Library/CoreServices/SystemVersion.plist");
132 CFURLRef sys_vers =
133 CFURLCreateWithFileSystemPath(NULL,
134 vers_path,
135 kCFURLPOSIXPathStyle,
136 false);
137 CFReadStreamRef read_stream = CFReadStreamCreateWithFile(NULL, sys_vers);
138 CFRelease(sys_vers);
139 if (!read_stream) {
140 return;
141 }
142 if (!CFReadStreamOpen(read_stream)) {
143 CFRelease(read_stream);
144 return;
145 }
146 CFMutableDataRef data = NULL;
147 while (true) {
148 // Actual data file tests: Mac at 480 bytes and iOS at 413 bytes.
149 const CFIndex kMaxBufferLength = 1024;
150 UInt8 data_bytes[kMaxBufferLength];
151 CFIndex num_bytes_read =
152 CFReadStreamRead(read_stream, data_bytes, kMaxBufferLength);
153 if (num_bytes_read < 0) {
154 if (data) {
155 CFRelease(data);
156 data = NULL;
157 }
158 break;
159 } else if (num_bytes_read == 0) {
160 break;
161 } else if (!data) {
162 data = CFDataCreateMutable(NULL, 0);
163 }
164 CFDataAppendBytes(data, data_bytes, num_bytes_read);
165 }
166 CFReadStreamClose(read_stream);
167 CFRelease(read_stream);
168 if (!data) {
169 return;
170 }
171 CFDictionaryRef list =
172 static_cast<CFDictionaryRef>(CFPropertyListCreateWithData(
173 NULL, data, kCFPropertyListImmutable, NULL, NULL));
174 CFRelease(data);
175 if (!list) {
176 return;
177 }
178 CFStringRef build_version = static_cast<CFStringRef>
179 (CFDictionaryGetValue(list, CFSTR("ProductBuildVersion")));
180 CFStringRef product_version = static_cast<CFStringRef>
181 (CFDictionaryGetValue(list, CFSTR("ProductVersion")));
182 string build_str = ConvertToString(build_version);
183 string product_str = ConvertToString(product_version);
184
185 CFRelease(list);
186
187 strlcpy(build_string_, build_str.c_str(), sizeof(build_string_));
188
189 // Parse the string that looks like "10.4.8"
190 os_major_version_ = IntegerValueAtIndex(product_str, 0);
191 os_minor_version_ = IntegerValueAtIndex(product_str, 1);
192 os_build_number_ = IntegerValueAtIndex(product_str, 2);
193 }
194
SetTaskContext(breakpad_ucontext_t * task_context)195 void MinidumpGenerator::SetTaskContext(breakpad_ucontext_t *task_context) {
196 task_context_ = task_context;
197 }
198
// Builds "<dir>/<uuid>.dmp" using a freshly generated CFUUID.  If
// unique_name is non-NULL, the bare UUID string is also returned there.
string MinidumpGenerator::UniqueNameInDirectory(const string &dir,
                                                string *unique_name) {
  CFUUIDRef uuid = CFUUIDCreate(NULL);
  CFStringRef uuid_cfstr = CFUUIDCreateString(NULL, uuid);
  CFRelease(uuid);
  string file_name(ConvertToString(uuid_cfstr));
  CFRelease(uuid_cfstr);

  // Ensure a non-empty directory ends with exactly one slash before the
  // file name is appended, so the result is a valid pathname.
  string path(dir);
  if (!dir.empty() && dir[dir.size() - 1] != '/')
    path.append(1, '/');

  path.append(file_name);
  path.append(".dmp");

  if (unique_name)
    *unique_name = file_name;

  return path;
}
223
Write(const char * path)224 bool MinidumpGenerator::Write(const char *path) {
225 WriteStreamFN writers[] = {
226 &MinidumpGenerator::WriteThreadListStream,
227 &MinidumpGenerator::WriteMemoryListStream,
228 &MinidumpGenerator::WriteSystemInfoStream,
229 &MinidumpGenerator::WriteModuleListStream,
230 &MinidumpGenerator::WriteMiscInfoStream,
231 &MinidumpGenerator::WriteBreakpadInfoStream,
232 &MinidumpGenerator::WriteCrashInfoStream,
233 &MinidumpGenerator::WriteThreadNamesStream,
234 // Exception stream needs to be the last entry in this array as it may
235 // be omitted in the case where the minidump is written without an
236 // exception.
237 &MinidumpGenerator::WriteExceptionStream,
238 };
239 bool result = false;
240
241 // If opening was successful, create the header, directory, and call each
242 // writer. The destructor for the TypedMDRVAs will cause the data to be
243 // flushed. The destructor for the MinidumpFileWriter will close the file.
244 if (writer_.Open(path)) {
245 TypedMDRVA<MDRawHeader> header(&writer_);
246 TypedMDRVA<MDRawDirectory> dir(&writer_);
247
248 if (!header.Allocate())
249 return false;
250
251 int writer_count = static_cast<int>(sizeof(writers) / sizeof(writers[0]));
252
253 // If we don't have exception information, don't write out the
254 // exception stream
255 if (!exception_thread_ && !exception_type_)
256 --writer_count;
257
258 // Add space for all writers
259 if (!dir.AllocateArray(writer_count))
260 return false;
261
262 MDRawHeader *header_ptr = header.get();
263 header_ptr->signature = MD_HEADER_SIGNATURE;
264 header_ptr->version = MD_HEADER_VERSION;
265 time(reinterpret_cast<time_t *>(&(header_ptr->time_date_stamp)));
266 header_ptr->stream_count = writer_count;
267 header_ptr->stream_directory_rva = dir.position();
268
269 MDRawDirectory local_dir;
270 result = true;
271 for (int i = 0; (result) && (i < writer_count); ++i) {
272 result = (this->*writers[i])(&local_dir);
273
274 if (result)
275 dir.CopyIndex(i, &local_dir);
276 }
277 }
278 return result;
279 }
280
CalculateStackSize(mach_vm_address_t start_addr)281 size_t MinidumpGenerator::CalculateStackSize(mach_vm_address_t start_addr) {
282 mach_vm_address_t stack_region_base = start_addr;
283 mach_vm_size_t stack_region_size;
284 natural_t nesting_level = 0;
285 vm_region_submap_info_64 submap_info;
286 mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
287
288 vm_region_recurse_info_t region_info;
289 region_info = reinterpret_cast<vm_region_recurse_info_t>(&submap_info);
290
291 if (start_addr == 0) {
292 return 0;
293 }
294
295 kern_return_t result =
296 mach_vm_region_recurse(crashing_task_, &stack_region_base,
297 &stack_region_size, &nesting_level,
298 region_info, &info_count);
299
300 if (result != KERN_SUCCESS || start_addr < stack_region_base) {
301 // Failure or stack corruption, since mach_vm_region had to go
302 // higher in the process address space to find a valid region.
303 return 0;
304 }
305
306 unsigned int tag = submap_info.user_tag;
307
308 // If the user tag is VM_MEMORY_STACK, look for more readable regions with
309 // the same tag placed immediately above the computed stack region. Under
310 // some circumstances, the stack for thread 0 winds up broken up into
311 // multiple distinct abutting regions. This can happen for several reasons,
312 // including user code that calls setrlimit(RLIMIT_STACK, ...) or changes
313 // the access on stack pages by calling mprotect.
314 if (tag == VM_MEMORY_STACK) {
315 while (true) {
316 mach_vm_address_t next_region_base = stack_region_base +
317 stack_region_size;
318 mach_vm_address_t proposed_next_region_base = next_region_base;
319 mach_vm_size_t next_region_size;
320 nesting_level = 0;
321 info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
322 result = mach_vm_region_recurse(crashing_task_, &next_region_base,
323 &next_region_size, &nesting_level,
324 region_info, &info_count);
325 if (result != KERN_SUCCESS ||
326 next_region_base != proposed_next_region_base ||
327 submap_info.user_tag != tag ||
328 (submap_info.protection & VM_PROT_READ) == 0) {
329 break;
330 }
331
332 stack_region_size += next_region_size;
333 }
334 }
335
336 return stack_region_base + stack_region_size - start_addr;
337 }
338
WriteStackFromStartAddress(mach_vm_address_t start_addr,MDMemoryDescriptor * stack_location)339 bool MinidumpGenerator::WriteStackFromStartAddress(
340 mach_vm_address_t start_addr,
341 MDMemoryDescriptor *stack_location) {
342 UntypedMDRVA memory(&writer_);
343
344 bool result = false;
345 size_t size = CalculateStackSize(start_addr);
346
347 if (size == 0) {
348 // In some situations the stack address for the thread can come back 0.
349 // In these cases we skip over the threads in question and stuff the
350 // stack with a clearly borked value.
351 start_addr = 0xDEADBEEF;
352 size = 16;
353 if (!memory.Allocate(size))
354 return false;
355
356 unsigned long long dummy_stack[2]; // Fill dummy stack with 16 bytes of
357 // junk.
358 dummy_stack[0] = 0xDEADBEEF;
359 dummy_stack[1] = 0xDEADBEEF;
360
361 result = memory.Copy(dummy_stack, size);
362 } else {
363
364 if (!memory.Allocate(size))
365 return false;
366
367 if (dynamic_images_) {
368 vector<uint8_t> stack_memory;
369 if (ReadTaskMemory(crashing_task_,
370 start_addr,
371 size,
372 stack_memory) != KERN_SUCCESS) {
373 return false;
374 }
375
376 result = memory.Copy(&stack_memory[0], size);
377 } else {
378 result = memory.Copy(reinterpret_cast<const void *>(start_addr), size);
379 }
380 }
381
382 stack_location->start_of_memory_range = start_addr;
383 stack_location->memory = memory.location();
384
385 return result;
386 }
387
WriteStack(breakpad_thread_state_data_t state,MDMemoryDescriptor * stack_location)388 bool MinidumpGenerator::WriteStack(breakpad_thread_state_data_t state,
389 MDMemoryDescriptor *stack_location) {
390 switch (cpu_type_) {
391 #ifdef HAS_ARM_SUPPORT
392 case CPU_TYPE_ARM:
393 return WriteStackARM(state, stack_location);
394 #endif
395 #ifdef HAS_ARM64_SUPPORT
396 case CPU_TYPE_ARM64:
397 return WriteStackARM64(state, stack_location);
398 #endif
399 #ifdef HAS_PPC_SUPPORT
400 case CPU_TYPE_POWERPC:
401 return WriteStackPPC(state, stack_location);
402 case CPU_TYPE_POWERPC64:
403 return WriteStackPPC64(state, stack_location);
404 #endif
405 #ifdef HAS_X86_SUPPORT
406 case CPU_TYPE_I386:
407 return WriteStackX86(state, stack_location);
408 case CPU_TYPE_X86_64:
409 return WriteStackX86_64(state, stack_location);
410 #endif
411 default:
412 return false;
413 }
414 }
415
WriteContext(breakpad_thread_state_data_t state,MDLocationDescriptor * register_location)416 bool MinidumpGenerator::WriteContext(breakpad_thread_state_data_t state,
417 MDLocationDescriptor *register_location) {
418 switch (cpu_type_) {
419 #ifdef HAS_ARM_SUPPORT
420 case CPU_TYPE_ARM:
421 return WriteContextARM(state, register_location);
422 #endif
423 #ifdef HAS_ARM64_SUPPORT
424 case CPU_TYPE_ARM64:
425 return WriteContextARM64(state, register_location);
426 #endif
427 #ifdef HAS_PPC_SUPPORT
428 case CPU_TYPE_POWERPC:
429 return WriteContextPPC(state, register_location);
430 case CPU_TYPE_POWERPC64:
431 return WriteContextPPC64(state, register_location);
432 #endif
433 #ifdef HAS_X86_SUPPORT
434 case CPU_TYPE_I386:
435 return WriteContextX86(state, register_location);
436 case CPU_TYPE_X86_64:
437 return WriteContextX86_64(state, register_location);
438 #endif
439 default:
440 return false;
441 }
442 }
443
CurrentPCForStack(breakpad_thread_state_data_t state)444 uint64_t MinidumpGenerator::CurrentPCForStack(
445 breakpad_thread_state_data_t state) {
446 switch (cpu_type_) {
447 #ifdef HAS_ARM_SUPPORT
448 case CPU_TYPE_ARM:
449 return CurrentPCForStackARM(state);
450 #endif
451 #ifdef HAS_ARM64_SUPPORT
452 case CPU_TYPE_ARM64:
453 return CurrentPCForStackARM64(state);
454 #endif
455 #ifdef HAS_PPC_SUPPORT
456 case CPU_TYPE_POWERPC:
457 return CurrentPCForStackPPC(state);
458 case CPU_TYPE_POWERPC64:
459 return CurrentPCForStackPPC64(state);
460 #endif
461 #ifdef HAS_X86_SUPPORT
462 case CPU_TYPE_I386:
463 return CurrentPCForStackX86(state);
464 case CPU_TYPE_X86_64:
465 return CurrentPCForStackX86_64(state);
466 #endif
467 default:
468 assert(0 && "Unknown CPU type!");
469 return 0;
470 }
471 }
472
473 #ifdef HAS_ARM_SUPPORT
WriteStackARM(breakpad_thread_state_data_t state,MDMemoryDescriptor * stack_location)474 bool MinidumpGenerator::WriteStackARM(breakpad_thread_state_data_t state,
475 MDMemoryDescriptor *stack_location) {
476 arm_thread_state_t *machine_state =
477 reinterpret_cast<arm_thread_state_t *>(state);
478 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, sp);
479 return WriteStackFromStartAddress(start_addr, stack_location);
480 }
481
482 uint64_t
CurrentPCForStackARM(breakpad_thread_state_data_t state)483 MinidumpGenerator::CurrentPCForStackARM(breakpad_thread_state_data_t state) {
484 arm_thread_state_t *machine_state =
485 reinterpret_cast<arm_thread_state_t *>(state);
486
487 return REGISTER_FROM_THREADSTATE(machine_state, pc);
488 }
489
WriteContextARM(breakpad_thread_state_data_t state,MDLocationDescriptor * register_location)490 bool MinidumpGenerator::WriteContextARM(breakpad_thread_state_data_t state,
491 MDLocationDescriptor *register_location)
492 {
493 TypedMDRVA<MDRawContextARM> context(&writer_);
494 arm_thread_state_t *machine_state =
495 reinterpret_cast<arm_thread_state_t *>(state);
496
497 if (!context.Allocate())
498 return false;
499
500 *register_location = context.location();
501 MDRawContextARM *context_ptr = context.get();
502 context_ptr->context_flags = MD_CONTEXT_ARM_FULL;
503
504 #define AddGPR(a) context_ptr->iregs[a] = REGISTER_FROM_THREADSTATE(machine_state, r[a])
505
506 context_ptr->iregs[13] = REGISTER_FROM_THREADSTATE(machine_state, sp);
507 context_ptr->iregs[14] = REGISTER_FROM_THREADSTATE(machine_state, lr);
508 context_ptr->iregs[15] = REGISTER_FROM_THREADSTATE(machine_state, pc);
509 context_ptr->cpsr = REGISTER_FROM_THREADSTATE(machine_state, cpsr);
510
511 AddGPR(0);
512 AddGPR(1);
513 AddGPR(2);
514 AddGPR(3);
515 AddGPR(4);
516 AddGPR(5);
517 AddGPR(6);
518 AddGPR(7);
519 AddGPR(8);
520 AddGPR(9);
521 AddGPR(10);
522 AddGPR(11);
523 AddGPR(12);
524 #undef AddGPR
525
526 return true;
527 }
528 #endif
529
530 #ifdef HAS_ARM64_SUPPORT
WriteStackARM64(breakpad_thread_state_data_t state,MDMemoryDescriptor * stack_location)531 bool MinidumpGenerator::WriteStackARM64(breakpad_thread_state_data_t state,
532 MDMemoryDescriptor *stack_location) {
533 arm_thread_state64_t *machine_state =
534 reinterpret_cast<arm_thread_state64_t *>(state);
535 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, sp);
536 return WriteStackFromStartAddress(start_addr, stack_location);
537 }
538
539 uint64_t
CurrentPCForStackARM64(breakpad_thread_state_data_t state)540 MinidumpGenerator::CurrentPCForStackARM64(breakpad_thread_state_data_t state) {
541 arm_thread_state64_t *machine_state =
542 reinterpret_cast<arm_thread_state64_t *>(state);
543
544 return REGISTER_FROM_THREADSTATE(machine_state, pc);
545 }
546
547 bool
WriteContextARM64(breakpad_thread_state_data_t state,MDLocationDescriptor * register_location)548 MinidumpGenerator::WriteContextARM64(breakpad_thread_state_data_t state,
549 MDLocationDescriptor *register_location)
550 {
551 TypedMDRVA<MDRawContextARM64_Old> context(&writer_);
552 arm_thread_state64_t *machine_state =
553 reinterpret_cast<arm_thread_state64_t *>(state);
554
555 if (!context.Allocate())
556 return false;
557
558 *register_location = context.location();
559 MDRawContextARM64_Old *context_ptr = context.get();
560 context_ptr->context_flags = MD_CONTEXT_ARM64_FULL_OLD;
561
562 #define AddGPR(a) \
563 context_ptr->iregs[a] = ARRAY_REGISTER_FROM_THREADSTATE(machine_state, x, a)
564
565 context_ptr->iregs[29] = REGISTER_FROM_THREADSTATE(machine_state, fp);
566 context_ptr->iregs[30] = REGISTER_FROM_THREADSTATE(machine_state, lr);
567 context_ptr->iregs[31] = REGISTER_FROM_THREADSTATE(machine_state, sp);
568 context_ptr->iregs[32] = REGISTER_FROM_THREADSTATE(machine_state, pc);
569 context_ptr->cpsr = REGISTER_FROM_THREADSTATE(machine_state, cpsr);
570
571 AddGPR(0);
572 AddGPR(1);
573 AddGPR(2);
574 AddGPR(3);
575 AddGPR(4);
576 AddGPR(5);
577 AddGPR(6);
578 AddGPR(7);
579 AddGPR(8);
580 AddGPR(9);
581 AddGPR(10);
582 AddGPR(11);
583 AddGPR(12);
584 AddGPR(13);
585 AddGPR(14);
586 AddGPR(15);
587 AddGPR(16);
588 AddGPR(17);
589 AddGPR(18);
590 AddGPR(19);
591 AddGPR(20);
592 AddGPR(21);
593 AddGPR(22);
594 AddGPR(23);
595 AddGPR(24);
596 AddGPR(25);
597 AddGPR(26);
598 AddGPR(27);
599 AddGPR(28);
600 #undef AddGPR
601
602 return true;
603 }
604 #endif
605
#ifdef HAS_PPC_SUPPORT
// NOTE: this guard was previously misspelled HAS_PCC_SUPPORT, which left
// the whole PPC section uncompiled (causing link errors when
// HAS_PPC_SUPPORT was defined, since WriteStack/WriteContext reference
// these functions), and hid the broken AddGPR macros fixed below.

// Writes the thread stack using the PPC stack pointer (r1).
bool MinidumpGenerator::WriteStackPPC(breakpad_thread_state_data_t state,
                                      MDMemoryDescriptor *stack_location) {
  ppc_thread_state_t *machine_state =
      reinterpret_cast<ppc_thread_state_t *>(state);
  mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1);
  return WriteStackFromStartAddress(start_addr, stack_location);
}

// Writes the thread stack using the 64-bit PPC stack pointer (r1).
bool MinidumpGenerator::WriteStackPPC64(breakpad_thread_state_data_t state,
                                        MDMemoryDescriptor *stack_location) {
  ppc_thread_state64_t *machine_state =
      reinterpret_cast<ppc_thread_state64_t *>(state);
  mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1);
  return WriteStackFromStartAddress(start_addr, stack_location);
}

// Returns the PPC program counter (srr0, the interrupted-instruction
// address) from |state|.
uint64_t
MinidumpGenerator::CurrentPCForStackPPC(breakpad_thread_state_data_t state) {
  ppc_thread_state_t *machine_state =
      reinterpret_cast<ppc_thread_state_t *>(state);
  return REGISTER_FROM_THREADSTATE(machine_state, srr0);
}

// Returns the 64-bit PPC program counter (srr0) from |state|.
uint64_t
MinidumpGenerator::CurrentPCForStackPPC64(breakpad_thread_state_data_t state) {
  ppc_thread_state64_t *machine_state =
      reinterpret_cast<ppc_thread_state64_t *>(state);
  return REGISTER_FROM_THREADSTATE(machine_state, srr0);
}

// Writes a PPC register context into the minidump and records its location.
bool MinidumpGenerator::WriteContextPPC(breakpad_thread_state_data_t state,
                                        MDLocationDescriptor *register_location)
{
  TypedMDRVA<MDRawContextPPC> context(&writer_);
  ppc_thread_state_t *machine_state =
      reinterpret_cast<ppc_thread_state_t *>(state);

  if (!context.Allocate())
    return false;

  *register_location = context.location();
  MDRawContextPPC *context_ptr = context.get();
  context_ptr->context_flags = MD_CONTEXT_PPC_BASE;

// AddReg copies a named register field; AddGPR copies general-purpose
// register |a| into gpr[a].  (The old AddGPR cast through the nonexistent
// member context_ptr->a and was missing a closing parenthesis, so it could
// never have compiled.)
#define AddReg(a) context_ptr->a = static_cast<__typeof__(context_ptr->a)>( \
    REGISTER_FROM_THREADSTATE(machine_state, a))
#define AddGPR(a) context_ptr->gpr[a] = \
    static_cast<__typeof__(context_ptr->gpr[a])>( \
        REGISTER_FROM_THREADSTATE(machine_state, r ## a))

  AddReg(srr0);
  AddReg(cr);
  AddReg(xer);
  AddReg(ctr);
  AddReg(lr);
  AddReg(vrsave);

  AddGPR(0);
  AddGPR(1);
  AddGPR(2);
  AddGPR(3);
  AddGPR(4);
  AddGPR(5);
  AddGPR(6);
  AddGPR(7);
  AddGPR(8);
  AddGPR(9);
  AddGPR(10);
  AddGPR(11);
  AddGPR(12);
  AddGPR(13);
  AddGPR(14);
  AddGPR(15);
  AddGPR(16);
  AddGPR(17);
  AddGPR(18);
  AddGPR(19);
  AddGPR(20);
  AddGPR(21);
  AddGPR(22);
  AddGPR(23);
  AddGPR(24);
  AddGPR(25);
  AddGPR(26);
  AddGPR(27);
  AddGPR(28);
  AddGPR(29);
  AddGPR(30);
  AddGPR(31);
  // mq exists only in the 32-bit thread state.
  AddReg(mq);
#undef AddReg
#undef AddGPR

  return true;
}

// Writes a 64-bit PPC register context into the minidump and records its
// location.
bool MinidumpGenerator::WriteContextPPC64(
    breakpad_thread_state_data_t state,
    MDLocationDescriptor *register_location) {
  TypedMDRVA<MDRawContextPPC64> context(&writer_);
  ppc_thread_state64_t *machine_state =
      reinterpret_cast<ppc_thread_state64_t *>(state);

  if (!context.Allocate())
    return false;

  *register_location = context.location();
  MDRawContextPPC64 *context_ptr = context.get();
  context_ptr->context_flags = MD_CONTEXT_PPC_BASE;

// Same helpers as WriteContextPPC, with the same AddGPR fix applied.
#define AddReg(a) context_ptr->a = static_cast<__typeof__(context_ptr->a)>( \
    REGISTER_FROM_THREADSTATE(machine_state, a))
#define AddGPR(a) context_ptr->gpr[a] = \
    static_cast<__typeof__(context_ptr->gpr[a])>( \
        REGISTER_FROM_THREADSTATE(machine_state, r ## a))

  AddReg(srr0);
  AddReg(cr);
  AddReg(xer);
  AddReg(ctr);
  AddReg(lr);
  AddReg(vrsave);

  AddGPR(0);
  AddGPR(1);
  AddGPR(2);
  AddGPR(3);
  AddGPR(4);
  AddGPR(5);
  AddGPR(6);
  AddGPR(7);
  AddGPR(8);
  AddGPR(9);
  AddGPR(10);
  AddGPR(11);
  AddGPR(12);
  AddGPR(13);
  AddGPR(14);
  AddGPR(15);
  AddGPR(16);
  AddGPR(17);
  AddGPR(18);
  AddGPR(19);
  AddGPR(20);
  AddGPR(21);
  AddGPR(22);
  AddGPR(23);
  AddGPR(24);
  AddGPR(25);
  AddGPR(26);
  AddGPR(27);
  AddGPR(28);
  AddGPR(29);
  AddGPR(30);
  AddGPR(31);
#undef AddReg
#undef AddGPR

  return true;
}

#endif
771
772 #ifdef HAS_X86_SUPPORT
WriteStackX86(breakpad_thread_state_data_t state,MDMemoryDescriptor * stack_location)773 bool MinidumpGenerator::WriteStackX86(breakpad_thread_state_data_t state,
774 MDMemoryDescriptor *stack_location) {
775 i386_thread_state_t *machine_state =
776 reinterpret_cast<i386_thread_state_t *>(state);
777
778 mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, esp);
779 return WriteStackFromStartAddress(start_addr, stack_location);
780 }
781
WriteStackX86_64(breakpad_thread_state_data_t state,MDMemoryDescriptor * stack_location)782 bool MinidumpGenerator::WriteStackX86_64(breakpad_thread_state_data_t state,
783 MDMemoryDescriptor *stack_location) {
784 x86_thread_state64_t *machine_state =
785 reinterpret_cast<x86_thread_state64_t *>(state);
786
787 mach_vm_address_t start_addr = static_cast<mach_vm_address_t>(
788 REGISTER_FROM_THREADSTATE(machine_state, rsp));
789 return WriteStackFromStartAddress(start_addr, stack_location);
790 }
791
792 uint64_t
CurrentPCForStackX86(breakpad_thread_state_data_t state)793 MinidumpGenerator::CurrentPCForStackX86(breakpad_thread_state_data_t state) {
794 i386_thread_state_t *machine_state =
795 reinterpret_cast<i386_thread_state_t *>(state);
796
797 return REGISTER_FROM_THREADSTATE(machine_state, eip);
798 }
799
800 uint64_t
CurrentPCForStackX86_64(breakpad_thread_state_data_t state)801 MinidumpGenerator::CurrentPCForStackX86_64(breakpad_thread_state_data_t state) {
802 x86_thread_state64_t *machine_state =
803 reinterpret_cast<x86_thread_state64_t *>(state);
804
805 return REGISTER_FROM_THREADSTATE(machine_state, rip);
806 }
807
WriteContextX86(breakpad_thread_state_data_t state,MDLocationDescriptor * register_location)808 bool MinidumpGenerator::WriteContextX86(breakpad_thread_state_data_t state,
809 MDLocationDescriptor *register_location)
810 {
811 TypedMDRVA<MDRawContextX86> context(&writer_);
812 i386_thread_state_t *machine_state =
813 reinterpret_cast<i386_thread_state_t *>(state);
814
815 if (!context.Allocate())
816 return false;
817
818 *register_location = context.location();
819 MDRawContextX86 *context_ptr = context.get();
820
821 #define AddReg(a) context_ptr->a = static_cast<__typeof__(context_ptr->a)>( \
822 REGISTER_FROM_THREADSTATE(machine_state, a))
823
824 context_ptr->context_flags = MD_CONTEXT_X86;
825 AddReg(eax);
826 AddReg(ebx);
827 AddReg(ecx);
828 AddReg(edx);
829 AddReg(esi);
830 AddReg(edi);
831 AddReg(ebp);
832 AddReg(esp);
833
834 AddReg(cs);
835 AddReg(ds);
836 AddReg(ss);
837 AddReg(es);
838 AddReg(fs);
839 AddReg(gs);
840 AddReg(eflags);
841
842 AddReg(eip);
843 #undef AddReg
844
845 return true;
846 }
847
WriteContextX86_64(breakpad_thread_state_data_t state,MDLocationDescriptor * register_location)848 bool MinidumpGenerator::WriteContextX86_64(
849 breakpad_thread_state_data_t state,
850 MDLocationDescriptor *register_location) {
851 TypedMDRVA<MDRawContextAMD64> context(&writer_);
852 x86_thread_state64_t *machine_state =
853 reinterpret_cast<x86_thread_state64_t *>(state);
854
855 if (!context.Allocate())
856 return false;
857
858 *register_location = context.location();
859 MDRawContextAMD64 *context_ptr = context.get();
860
861 #define AddReg(a) context_ptr->a = static_cast<__typeof__(context_ptr->a)>( \
862 REGISTER_FROM_THREADSTATE(machine_state, a))
863
864 context_ptr->context_flags = MD_CONTEXT_AMD64;
865 AddReg(rax);
866 AddReg(rbx);
867 AddReg(rcx);
868 AddReg(rdx);
869 AddReg(rdi);
870 AddReg(rsi);
871 AddReg(rbp);
872 AddReg(rsp);
873 AddReg(r8);
874 AddReg(r9);
875 AddReg(r10);
876 AddReg(r11);
877 AddReg(r12);
878 AddReg(r13);
879 AddReg(r14);
880 AddReg(r15);
881 AddReg(rip);
882 // according to AMD's software developer guide, bits above 18 are
883 // not used in the flags register. Since the minidump format
884 // specifies 32 bits for the flags register, we can truncate safely
885 // with no loss.
886 context_ptr->eflags = static_cast<uint32_t>(REGISTER_FROM_THREADSTATE(machine_state, rflags));
887 AddReg(cs);
888 AddReg(fs);
889 AddReg(gs);
890 #undef AddReg
891
892 return true;
893 }
894 #endif
895
GetThreadState(thread_act_t target_thread,thread_state_t state,mach_msg_type_number_t * count)896 bool MinidumpGenerator::GetThreadState(thread_act_t target_thread,
897 thread_state_t state,
898 mach_msg_type_number_t *count) {
899 if (task_context_ && target_thread == mach_thread_self()) {
900 switch (cpu_type_) {
901 #ifdef HAS_ARM_SUPPORT
902 case CPU_TYPE_ARM:
903 size_t final_size =
904 std::min(static_cast<size_t>(*count), sizeof(arm_thread_state_t));
905 memcpy(state, &task_context_->breakpad_uc_mcontext->__ss, final_size);
906 *count = static_cast<mach_msg_type_number_t>(final_size);
907 return true;
908 #endif
909 #ifdef HAS_ARM64_SUPPORT
910 case CPU_TYPE_ARM64: {
911 size_t final_size =
912 std::min(static_cast<size_t>(*count), sizeof(arm_thread_state64_t));
913 memcpy(state, &task_context_->breakpad_uc_mcontext->__ss, final_size);
914 *count = static_cast<mach_msg_type_number_t>(final_size);
915 return true;
916 }
917 #endif
918 #ifdef HAS_X86_SUPPORT
919 case CPU_TYPE_I386:
920 case CPU_TYPE_X86_64: {
921 size_t state_size = cpu_type_ == CPU_TYPE_I386 ?
922 sizeof(i386_thread_state_t) : sizeof(x86_thread_state64_t);
923 size_t final_size =
924 std::min(static_cast<size_t>(*count), state_size);
925 memcpy(state, &task_context_->breakpad_uc_mcontext->__ss, final_size);
926 *count = static_cast<mach_msg_type_number_t>(final_size);
927 return true;
928 }
929 #endif
930 }
931 }
932
933 thread_state_flavor_t flavor;
934 switch (cpu_type_) {
935 #ifdef HAS_ARM_SUPPORT
936 case CPU_TYPE_ARM:
937 flavor = ARM_THREAD_STATE;
938 break;
939 #endif
940 #ifdef HAS_ARM64_SUPPORT
941 case CPU_TYPE_ARM64:
942 flavor = ARM_THREAD_STATE64;
943 break;
944 #endif
945 #ifdef HAS_PPC_SUPPORT
946 case CPU_TYPE_POWERPC:
947 flavor = PPC_THREAD_STATE;
948 break;
949 case CPU_TYPE_POWERPC64:
950 flavor = PPC_THREAD_STATE64;
951 break;
952 #endif
953 #ifdef HAS_X86_SUPPORT
954 case CPU_TYPE_I386:
955 flavor = i386_THREAD_STATE;
956 break;
957 case CPU_TYPE_X86_64:
958 flavor = x86_THREAD_STATE64;
959 break;
960 #endif
961 default:
962 return false;
963 }
964 return thread_get_state(target_thread, flavor,
965 state, count) == KERN_SUCCESS;
966 }
967
WriteThreadStream(mach_port_t thread_id,MDRawThread * thread)968 bool MinidumpGenerator::WriteThreadStream(mach_port_t thread_id,
969 MDRawThread *thread) {
970 breakpad_thread_state_data_t state;
971 mach_msg_type_number_t state_count
972 = static_cast<mach_msg_type_number_t>(sizeof(state));
973
974 if (GetThreadState(thread_id, state, &state_count)) {
975 if (!WriteStack(state, &thread->stack))
976 return false;
977
978 memory_blocks_.push_back(thread->stack);
979
980 if (!WriteContext(state, &thread->thread_context))
981 return false;
982
983 thread->thread_id = thread_id;
984 } else {
985 return false;
986 }
987
988 return true;
989 }
990
// Writes the MD_THREAD_LIST_STREAM: one MDRawThread (stack + CPU context)
// for every thread in the crashing task, excluding the minidump generator's
// own handler thread.
bool MinidumpGenerator::WriteThreadListStream(
    MDRawDirectory *thread_list_stream) {
  TypedMDRVA<MDRawThreadList> list(&writer_);
  thread_act_port_array_t threads_for_task;
  mach_msg_type_number_t thread_count;
  int non_generator_thread_count;

  // task_threads() hands back send rights for every thread in the task.
  // NOTE(review): neither the thread ports nor the out-of-line array are
  // deallocated afterwards — confirm this leak is acceptable for a
  // crash-time code path.
  if (task_threads(crashing_task_, &threads_for_task, &thread_count))
    return false;

  // Don't include the generator thread
  // NOTE(review): assumes that when handler_thread_ is set it is one of
  // crashing_task_'s threads, so the filtered loop below fills exactly
  // non_generator_thread_count slots — verify for the out-of-process case.
  if (handler_thread_ != MACH_PORT_NULL)
    non_generator_thread_count = thread_count - 1;
  else
    non_generator_thread_count = thread_count;
  if (!list.AllocateObjectAndArray(non_generator_thread_count,
                                   sizeof(MDRawThread)))
    return false;

  thread_list_stream->stream_type = MD_THREAD_LIST_STREAM;
  thread_list_stream->location = list.location();

  list.get()->number_of_threads = non_generator_thread_count;

  MDRawThread thread;
  int thread_idx = 0;  // Next slot in the minidump's thread array.

  for (unsigned int i = 0; i < thread_count; ++i) {
    memset(&thread, 0, sizeof(MDRawThread));

    if (threads_for_task[i] != handler_thread_) {
      if (!WriteThreadStream(threads_for_task[i], &thread))
        return false;

      list.CopyIndexAfterObject(thread_idx++, &thread, sizeof(MDRawThread));
    }
  }

  return true;
}
1031
WriteMemoryListStream(MDRawDirectory * memory_list_stream)1032 bool MinidumpGenerator::WriteMemoryListStream(
1033 MDRawDirectory *memory_list_stream) {
1034 TypedMDRVA<MDRawMemoryList> list(&writer_);
1035
1036 // If the dump has an exception, include some memory around the
1037 // instruction pointer.
1038 const size_t kIPMemorySize = 256; // bytes
1039 bool have_ip_memory = false;
1040 MDMemoryDescriptor ip_memory_d;
1041 if (exception_thread_ && exception_type_) {
1042 breakpad_thread_state_data_t state;
1043 mach_msg_type_number_t stateCount
1044 = static_cast<mach_msg_type_number_t>(sizeof(state));
1045
1046 if (GetThreadState(exception_thread_, state, &stateCount)) {
1047 uint64_t ip = CurrentPCForStack(state);
1048 // Bound it to the upper and lower bounds of the region
1049 // it's contained within. If it's not in a known memory region,
1050 // don't bother trying to write it.
1051 mach_vm_address_t addr = static_cast<vm_address_t>(ip);
1052 mach_vm_size_t size;
1053 natural_t nesting_level = 0;
1054 vm_region_submap_info_64 info;
1055 mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
1056 vm_region_recurse_info_t recurse_info;
1057 recurse_info = reinterpret_cast<vm_region_recurse_info_t>(&info);
1058
1059 kern_return_t ret =
1060 mach_vm_region_recurse(crashing_task_,
1061 &addr,
1062 &size,
1063 &nesting_level,
1064 recurse_info,
1065 &info_count);
1066 if (ret == KERN_SUCCESS && ip >= addr && ip < (addr + size)) {
1067 // Try to get 128 bytes before and after the IP, but
1068 // settle for whatever's available.
1069 ip_memory_d.start_of_memory_range =
1070 std::max(uintptr_t(addr),
1071 uintptr_t(ip - (kIPMemorySize / 2)));
1072 uintptr_t end_of_range =
1073 std::min(uintptr_t(ip + (kIPMemorySize / 2)),
1074 uintptr_t(addr + size));
1075 uintptr_t range_diff = end_of_range -
1076 static_cast<uintptr_t>(ip_memory_d.start_of_memory_range);
1077 ip_memory_d.memory.data_size = static_cast<uint32_t>(range_diff);
1078 have_ip_memory = true;
1079 // This needs to get appended to the list even though
1080 // the memory bytes aren't filled in yet so the entire
1081 // list can be written first. The memory bytes will get filled
1082 // in after the memory list is written.
1083 memory_blocks_.push_back(ip_memory_d);
1084 }
1085 }
1086 }
1087
1088 // Now fill in the memory list and write it.
1089 size_t memory_count = memory_blocks_.size();
1090 if (!list.AllocateObjectAndArray(memory_count,
1091 sizeof(MDMemoryDescriptor)))
1092 return false;
1093
1094 memory_list_stream->stream_type = MD_MEMORY_LIST_STREAM;
1095 memory_list_stream->location = list.location();
1096
1097 list.get()->number_of_memory_ranges = static_cast<uint32_t>(memory_count);
1098
1099 unsigned int i;
1100 for (i = 0; i < memory_count; ++i) {
1101 list.CopyIndexAfterObject(i, &memory_blocks_[i],
1102 sizeof(MDMemoryDescriptor));
1103 }
1104
1105 if (have_ip_memory) {
1106 // Now read the memory around the instruction pointer.
1107 UntypedMDRVA ip_memory(&writer_);
1108 if (!ip_memory.Allocate(ip_memory_d.memory.data_size))
1109 return false;
1110
1111 if (dynamic_images_) {
1112 // Out-of-process.
1113 vector<uint8_t> memory;
1114 if (ReadTaskMemory(crashing_task_,
1115 ip_memory_d.start_of_memory_range,
1116 ip_memory_d.memory.data_size,
1117 memory) != KERN_SUCCESS) {
1118 return false;
1119 }
1120
1121 ip_memory.Copy(&memory[0], ip_memory_d.memory.data_size);
1122 } else {
1123 // In-process, just copy from local memory.
1124 ip_memory.Copy(
1125 reinterpret_cast<const void *>(ip_memory_d.start_of_memory_range),
1126 ip_memory_d.memory.data_size);
1127 }
1128
1129 ip_memory_d.memory = ip_memory.location();
1130 // Write this again now that the data location is filled in.
1131 list.CopyIndexAfterObject(i - 1, &ip_memory_d,
1132 sizeof(MDMemoryDescriptor));
1133 }
1134
1135 return true;
1136 }
1137
// Writes the MD_EXCEPTION_STREAM: the Mach exception type/code/subcode and
// the faulting thread's CPU context, translated into minidump field naming.
bool
MinidumpGenerator::WriteExceptionStream(MDRawDirectory *exception_stream) {
  TypedMDRVA<MDRawExceptionStream> exception(&writer_);

  if (!exception.Allocate())
    return false;

  exception_stream->stream_type = MD_EXCEPTION_STREAM;
  exception_stream->location = exception.location();
  MDRawExceptionStream *exception_ptr = exception.get();
  exception_ptr->thread_id = exception_thread_;

  // Keep a copy of the original (possibly EXC_CRASH-wrapped) code;
  // RecoverExceptionDataFromExcCrash() below rewrites the members.
  uint64_t u_exception_code = exception_code_;
  if (exception_type_ == EXC_CRASH) {
    // Reject EXC_CRASH wrappings that would lose data (see IsValidExcCrash),
    // then unwrap into exception_type_ / exception_code_.
    if (!IsValidExcCrash(exception_code_)) {
      return false;
    }

    // The wrapped signal number is recovered but not stored in the dump.
    [[maybe_unused]] int signal_number;
    RecoverExceptionDataFromExcCrash(u_exception_code, signal_number);
  }

  // This naming is confusing, but it is the proper translation from
  // mach naming to minidump naming.
  exception_ptr->exception_record.exception_code = exception_type_;

  uint32_t exception_flags = 0;
  if (exception_type_ == EXC_RESOURCE || exception_type_ == EXC_GUARD) {
    // For EXC_RESOURCE and EXC_GUARD crashes Crashpad records the uppermost
    // 32 bits of the exception code in the exception flags, let's do the same
    // here.
    exception_flags = u_exception_code >> 32;
  } else {
    // Note: for an unwrapped EXC_CRASH this is the unwrapped code, while
    // u_exception_code above still holds the original wrapped value.
    exception_flags = exception_code_;
  }

  exception_ptr->exception_record.exception_flags = exception_flags;

  breakpad_thread_state_data_t state;
  mach_msg_type_number_t state_count
    = static_cast<mach_msg_type_number_t>(sizeof(state));

  if (!GetThreadState(exception_thread_, state, &state_count))
    return false;

  if (!WriteContext(state, &exception_ptr->thread_context))
    return false;

  // For bad-access faults the subcode carries the faulting address;
  // otherwise fall back to the thread's program counter.
  if (exception_type_ == EXC_BAD_ACCESS)
    exception_ptr->exception_record.exception_address = exception_subcode_;
  else
    exception_ptr->exception_record.exception_address = CurrentPCForStack(state);

  // Crashpad stores the exception type and the optional exception codes in
  // the exception information field, so we do the same here.
  exception_ptr->exception_record.number_parameters =
      (exception_subcode_ != 0) ? 3 : 2;
  exception_ptr->exception_record.exception_information[0] = exception_type_;
  exception_ptr->exception_record.exception_information[1] = exception_code_;
  exception_ptr->exception_record.exception_information[2] = exception_subcode_;

  return true;
}
1201
// Writes the MD_SYSTEM_INFO_STREAM: CPU architecture and details, processor
// count, platform id, OS version numbers, and the OS build string.
bool MinidumpGenerator::WriteSystemInfoStream(
    MDRawDirectory *system_info_stream) {
  TypedMDRVA<MDRawSystemInfo> info(&writer_);

  if (!info.Allocate())
    return false;

  system_info_stream->stream_type = MD_SYSTEM_INFO_STREAM;
  system_info_stream->location = info.location();

  // CPU Information
  // NOTE(review): the sysctlbyname() return value is not checked here;
  // number_of_processors would be uninitialized if "hw.ncpu" ever failed.
  uint32_t number_of_processors;
  size_t len = sizeof(number_of_processors);
  sysctlbyname("hw.ncpu", &number_of_processors, &len, NULL, 0);
  MDRawSystemInfo *info_ptr = info.get();

  // Map the Mach cpu_type_ of the crashed process to the minidump
  // architecture enum; only the compiled-in architectures are handled.
  switch (cpu_type_) {
#ifdef HAS_ARM_SUPPORT
    case CPU_TYPE_ARM:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_ARM;
      break;
#endif
#ifdef HAS_ARM64_SUPPORT
    case CPU_TYPE_ARM64:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_ARM64_OLD;
      break;
#endif
#ifdef HAS_PPC_SUPPORT
    case CPU_TYPE_POWERPC:
    case CPU_TYPE_POWERPC64:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_PPC;
      break;
#endif
#ifdef HAS_X86_SUPPORT
    case CPU_TYPE_I386:
    case CPU_TYPE_X86_64:
      if (cpu_type_ == CPU_TYPE_I386)
        info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_X86;
      else
        info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_AMD64;
#ifdef __i386__
      // ebx is used for PIC code, so we need
      // to preserve it.
#define cpuid(op,eax,ebx,ecx,edx) \
  asm ("pushl %%ebx   \n\t" \
       "cpuid         \n\t" \
       "movl %%ebx,%1 \n\t" \
       "popl %%ebx" \
       : "=a" (eax), \
         "=g" (ebx), \
         "=c" (ecx), \
         "=d" (edx) \
       : "0" (op))
#elif defined(__x86_64__)

#define cpuid(op,eax,ebx,ecx,edx) \
  asm ("cpuid \n\t" \
       : "=a" (eax), \
         "=b" (ebx), \
         "=c" (ecx), \
         "=d" (edx) \
       : "0" (op))
#endif

#if defined(__i386__) || defined(__x86_64__)
      int unused, unused2;
      // get vendor id
      // Register-to-slot mapping (per the macro's argument order):
      // vendor_id[0] <- ebx, vendor_id[1] <- edx, vendor_id[2] <- ecx.
      cpuid(0, unused, info_ptr->cpu.x86_cpu_info.vendor_id[0],
            info_ptr->cpu.x86_cpu_info.vendor_id[2],
            info_ptr->cpu.x86_cpu_info.vendor_id[1]);
      // get version and feature info
      cpuid(1, info_ptr->cpu.x86_cpu_info.version_information, unused, unused2,
            info_ptr->cpu.x86_cpu_info.feature_information);

      // family
      info_ptr->processor_level =
        (info_ptr->cpu.x86_cpu_info.version_information & 0xF00) >> 8;
      // 0xMMSS (Model, Stepping)
      info_ptr->processor_revision = static_cast<uint16_t>(
          (info_ptr->cpu.x86_cpu_info.version_information & 0xF) |
          ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0) << 4));

      // decode extended model info
      if (info_ptr->processor_level == 0xF ||
          info_ptr->processor_level == 0x6) {
        info_ptr->processor_revision |=
          ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0000) >> 4);
      }

      // decode extended family info
      if (info_ptr->processor_level == 0xF) {
        info_ptr->processor_level +=
          ((info_ptr->cpu.x86_cpu_info.version_information & 0xFF00000) >> 20);
      }

#endif  // __i386__ || __x86_64_
      break;
#endif  // HAS_X86_SUPPORT
    default:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_UNKNOWN;
      break;
  }

  // Minidump stores the count in a single byte; >255 CPUs would truncate.
  info_ptr->number_of_processors = static_cast<uint8_t>(number_of_processors);
#if TARGET_OS_IPHONE
  info_ptr->platform_id = MD_OS_IOS;
#else
  info_ptr->platform_id = MD_OS_MAC_OS_X;
#endif  // TARGET_OS_IPHONE

  MDLocationDescriptor build_string_loc;

  // The OS build string doubles as the minidump's "CSD version" field.
  if (!writer_.WriteString(build_string_, 0,
                           &build_string_loc))
    return false;

  info_ptr->csd_version_rva = build_string_loc.rva;
  info_ptr->major_version = os_major_version_;
  info_ptr->minor_version = os_minor_version_;
  info_ptr->build_number = os_build_number_;

  return true;
}
1325
WriteModuleStream(unsigned int index,MDRawModule * module)1326 bool MinidumpGenerator::WriteModuleStream(unsigned int index,
1327 MDRawModule *module) {
1328 if (dynamic_images_) {
1329 // we're in a different process than the crashed process
1330 DynamicImage *image = dynamic_images_->GetImage(index);
1331
1332 if (!image)
1333 return false;
1334
1335 memset(module, 0, sizeof(MDRawModule));
1336
1337 MDLocationDescriptor string_location;
1338
1339 string name = image->GetFilePath();
1340 if (!writer_.WriteString(name.c_str(), 0, &string_location))
1341 return false;
1342
1343 module->base_of_image = image->GetVMAddr() + image->GetVMAddrSlide();
1344 module->size_of_image = static_cast<uint32_t>(image->GetVMSize());
1345 module->module_name_rva = string_location.rva;
1346
1347 // We'll skip the executable module, because they don't have
1348 // LC_ID_DYLIB load commands, and the crash processing server gets
1349 // version information from the Plist file, anyway.
1350 if (index != static_cast<uint32_t>(FindExecutableModule())) {
1351 module->version_info.signature = MD_VSFIXEDFILEINFO_SIGNATURE;
1352 module->version_info.struct_version |= MD_VSFIXEDFILEINFO_VERSION;
1353 // Convert MAC dylib version format, which is a 32 bit number, to the
1354 // format used by minidump. The mac format is <16 bits>.<8 bits>.<8 bits>
1355 // so it fits nicely into the windows version with some massaging
1356 // The mapping is:
1357 // 1) upper 16 bits of MAC version go to lower 16 bits of product HI
1358 // 2) Next most significant 8 bits go to upper 16 bits of product LO
1359 // 3) Least significant 8 bits go to lower 16 bits of product LO
1360 uint32_t modVersion = image->GetVersion();
1361 module->version_info.file_version_hi = 0;
1362 module->version_info.file_version_hi = modVersion >> 16;
1363 module->version_info.file_version_lo |= (modVersion & 0xff00) << 8;
1364 module->version_info.file_version_lo |= (modVersion & 0xff);
1365 }
1366
1367 if (!WriteCVRecord(module, image->GetCPUType(), image->GetCPUSubtype(),
1368 name.c_str(), /* in_memory */ false, /* out_of_process */ true,
1369 image->GetInDyldSharedCache())) {
1370 return false;
1371 }
1372 } else {
1373 // Getting module info in the crashed process
1374 const breakpad_mach_header *header;
1375 header = (breakpad_mach_header*)_dyld_get_image_header(index);
1376 if (!header)
1377 return false;
1378
1379 #ifdef __LP64__
1380 assert(header->magic == MH_MAGIC_64);
1381
1382 if(header->magic != MH_MAGIC_64)
1383 return false;
1384 #else
1385 assert(header->magic == MH_MAGIC);
1386
1387 if(header->magic != MH_MAGIC)
1388 return false;
1389 #endif
1390
1391 int cpu_type = header->cputype;
1392 int cpu_subtype = (header->cpusubtype & ~CPU_SUBTYPE_MASK);
1393 bool in_dyld_shared_cache = ((header->flags & MH_SHAREDCACHE) != 0);
1394 unsigned long slide = _dyld_get_image_vmaddr_slide(index);
1395 const char* name = _dyld_get_image_name(index);
1396 const struct load_command *cmd =
1397 reinterpret_cast<const struct load_command *>(header + 1);
1398
1399 memset(module, 0, sizeof(MDRawModule));
1400
1401 for (unsigned int i = 0; cmd && (i < header->ncmds); i++) {
1402 if (cmd->cmd == LC_SEGMENT_ARCH) {
1403
1404 const breakpad_mach_segment_command *seg =
1405 reinterpret_cast<const breakpad_mach_segment_command *>(cmd);
1406
1407 if (!strcmp(seg->segname, "__TEXT")) {
1408 MDLocationDescriptor string_location;
1409
1410 if (!writer_.WriteString(name, 0, &string_location))
1411 return false;
1412
1413 module->base_of_image = seg->vmaddr + slide;
1414 module->size_of_image = static_cast<uint32_t>(seg->vmsize);
1415 module->module_name_rva = string_location.rva;
1416
1417 bool in_memory = false;
1418 #if TARGET_OS_IPHONE
1419 in_memory = true;
1420 #endif
1421 if (!WriteCVRecord(module, cpu_type, cpu_subtype, name, in_memory,
1422 /* out_of_process */ false, in_dyld_shared_cache)) {
1423 return false;
1424 }
1425
1426 return true;
1427 }
1428 }
1429
1430 cmd = reinterpret_cast<struct load_command*>((char *)cmd + cmd->cmdsize);
1431 }
1432 }
1433
1434 return true;
1435 }
1436
FindExecutableModule()1437 int MinidumpGenerator::FindExecutableModule() {
1438 if (dynamic_images_) {
1439 int index = dynamic_images_->GetExecutableImageIndex();
1440
1441 if (index >= 0) {
1442 return index;
1443 }
1444 } else {
1445 int image_count = _dyld_image_count();
1446 const struct mach_header *header;
1447
1448 for (int index = 0; index < image_count; ++index) {
1449 header = _dyld_get_image_header(index);
1450
1451 if (header->filetype == MH_EXECUTE)
1452 return index;
1453 }
1454 }
1455
1456 // failed - just use the first image
1457 return 0;
1458 }
1459
IsValidExcCrash(uint64_t exception_code)1460 bool MinidumpGenerator::IsValidExcCrash(uint64_t exception_code) {
1461 switch ((exception_code >> 20) & 0xf) {
1462 case EXC_CRASH: // EXC_CRASH cannot wrap EXC_CRASH
1463 case EXC_RESOURCE: // EXC_RESOURCE would lose data if wrapped
1464 case EXC_GUARD: // EXC_GUARD would lose data if wrapped
1465 case EXC_CORPSE_NOTIFY: // EXC_CRASH cannot wrap EXC_CORPSE_NOTIFY
1466 return false;
1467 default:
1468 return true;
1469 }
1470 }
1471
RecoverExceptionDataFromExcCrash(uint64_t exception_code,int & signal_number)1472 void MinidumpGenerator::RecoverExceptionDataFromExcCrash(
1473 uint64_t exception_code, int& signal_number)
1474 {
1475 exception_type_ = (exception_code >> 20) & 0xf;
1476 exception_code_ = exception_code & 0xfffff;
1477 signal_number = (exception_code >> 24) & 0xff;
1478 }
1479
// Writes a CodeView (MDCVInfoPDB70) record for |module|: the module's
// basename plus a 16-byte debug identifier (Mach-O UUID, falling back to an
// MD5 of the file), and points module->cv_record at it. Returns false only
// on allocation/copy failure; if no identifier can be determined the record
// is still written with its signature left zeroed.
bool MinidumpGenerator::WriteCVRecord(MDRawModule *module, int cpu_type, int cpu_subtype,
                                      const char *module_path, bool in_memory,
                                      bool out_of_process, bool in_dyld_shared_cache) {
  TypedMDRVA<MDCVInfoPDB70> cv(&writer_);

  // Only return the last path component of the full module path
  const char *module_name = strrchr(module_path, '/');

  // Increment past the slash
  if (module_name)
    ++module_name;
  else
    module_name = "<Unknown>";

  size_t module_name_length = strlen(module_name);

  // +1 leaves room for a terminating NUL after the copied name bytes.
  // NOTE(review): presumably the extra byte is zero-initialized by the
  // allocator, since the copy below writes only module_name_length bytes.
  if (!cv.AllocateObjectAndArray(module_name_length + 1, sizeof(uint8_t)))
    return false;

  if (!cv.CopyIndexAfterObject(0, module_name, module_name_length))
    return false;

  module->cv_record = cv.location();
  MDCVInfoPDB70 *cv_ptr = cv.get();
  cv_ptr->cv_signature = MD_CVINFOPDB70_SIGNATURE;
  cv_ptr->age = 0;

  // Get the module identifier
  unsigned char identifier[16];
  bool result = false;
  bool in_memory_changed = false;
  // As of macOS 11, most system libraries no longer have separate copies in
  // the macOS file system. They only exist all lumped together in the "dyld
  // shared cache", which gets loaded into each process on startup. If one of
  // our system libraries isn't in the file system, we can only get a UUID
  // (aka a debug id) for it by looking at a copy of the module loaded into
  // the crashing process. Setting 'in_memory' to 'true' makes this happen.
  //
  // We should be reluctant to change the value of 'in_memory' from 'false' to
  // 'true'. But we'll sometimes need to do that to work around the problem
  // discussed above. In any case we only do it if all else has failed. This
  // resolves https://bugzilla.mozilla.org/show_bug.cgi?id=1662862.
  //
  // We're always called in the main process. But the crashing process might
  // be either the same process or a different one (a child process). If it's
  // a child process, the modules we'll be looking at are in that process's
  // memory space, to which we generally don't have access. But because the
  // dyld shared cache is loaded into all processes, we do have access to
  // child process modules that are in the dyld shared cache. So it's fine to
  // look at these modules, but we must prevent ourselves from trying to
  // child process modules that aren't in the dyld shared cache. This resolves
  // https://bugzilla.mozilla.org/show_bug.cgi?id=1676102.
  //
  // Retry loop: first attempt uses the caller's 'in_memory' preference
  // (in-memory image first, then the on-disk file). If both fail and
  // 'in_memory' was false, flip it to true for one final in-memory attempt.
  while (true) {
    if (in_memory) {
      // Don't touch another process's memory unless it's shared-cache.
      if (out_of_process && !in_dyld_shared_cache) {
        break;
      }
      MacFileUtilities::MachoID macho(module_path,
          reinterpret_cast<void *>(module->base_of_image),
          static_cast<size_t>(module->size_of_image));
      result = macho.UUIDCommand(cpu_type, cpu_subtype, identifier);
      if (!result)
        result = macho.MD5(cpu_type, cpu_subtype, identifier);
      // in_memory_changed means this was already the last-resort retry.
      if (result || in_memory_changed)
        break;
    }

    // Try the module's on-disk file.
    if (!result) {
      FileID file_id(module_path);
      result = file_id.MachoIdentifier(cpu_type, cpu_subtype,
                                       identifier);
    }
    if (result)
      break;

    // All else failed: retry once with the in-memory image.
    if (!in_memory) {
      in_memory = true;
      in_memory_changed = true;
    } else
      break;
  }

  if (result) {
    // Pack the 16 identifier bytes into the GUID-shaped signature fields.
    cv_ptr->signature.data1 =
        static_cast<uint32_t>(identifier[0]) << 24 |
        static_cast<uint32_t>(identifier[1]) << 16 |
        static_cast<uint32_t>(identifier[2]) << 8 |
        static_cast<uint32_t>(identifier[3]);
    cv_ptr->signature.data2 =
        static_cast<uint16_t>(identifier[4] << 8) | identifier[5];
    cv_ptr->signature.data3 =
        static_cast<uint16_t>(identifier[6] << 8) | identifier[7];
    cv_ptr->signature.data4[0] = identifier[8];
    cv_ptr->signature.data4[1] = identifier[9];
    cv_ptr->signature.data4[2] = identifier[10];
    cv_ptr->signature.data4[3] = identifier[11];
    cv_ptr->signature.data4[4] = identifier[12];
    cv_ptr->signature.data4[5] = identifier[13];
    cv_ptr->signature.data4[6] = identifier[14];
    cv_ptr->signature.data4[7] = identifier[15];
  }

  return true;
}
1584
WriteModuleListStream(MDRawDirectory * module_list_stream)1585 bool MinidumpGenerator::WriteModuleListStream(
1586 MDRawDirectory *module_list_stream) {
1587 TypedMDRVA<MDRawModuleList> list(&writer_);
1588
1589 uint32_t image_count = dynamic_images_ ?
1590 dynamic_images_->GetImageCount() :
1591 _dyld_image_count();
1592
1593 if (!list.AllocateObjectAndArray(image_count, MD_MODULE_SIZE))
1594 return false;
1595
1596 module_list_stream->stream_type = MD_MODULE_LIST_STREAM;
1597 module_list_stream->location = list.location();
1598 list.get()->number_of_modules = static_cast<uint32_t>(image_count);
1599
1600 // Write out the executable module as the first one
1601 MDRawModule module;
1602 uint32_t executableIndex = FindExecutableModule();
1603
1604 if (!WriteModuleStream(static_cast<unsigned>(executableIndex), &module)) {
1605 return false;
1606 }
1607
1608 list.CopyIndexAfterObject(0, &module, MD_MODULE_SIZE);
1609 int destinationIndex = 1; // Write all other modules after this one
1610
1611 for (uint32_t i = 0; i < image_count; ++i) {
1612 if (i != executableIndex) {
1613 if (!WriteModuleStream(static_cast<unsigned>(i), &module)) {
1614 return false;
1615 }
1616
1617 list.CopyIndexAfterObject(destinationIndex++, &module, MD_MODULE_SIZE);
1618 }
1619 }
1620
1621 return true;
1622 }
1623
// Writes the MD_MISC_INFO_STREAM: process id, CPU/start times, and
// processor frequencies.
bool MinidumpGenerator::WriteMiscInfoStream(MDRawDirectory *misc_info_stream) {
  TypedMDRVA<MDRawMiscInfo> info(&writer_);

  if (!info.Allocate())
    return false;

  misc_info_stream->stream_type = MD_MISC_INFO_STREAM;
  misc_info_stream->location = info.location();

  MDRawMiscInfo *info_ptr = info.get();
  info_ptr->size_of_info = static_cast<uint32_t>(sizeof(MDRawMiscInfo));
  info_ptr->flags1 = MD_MISCINFO_FLAGS1_PROCESS_ID |
                     MD_MISCINFO_FLAGS1_PROCESS_TIMES |
                     MD_MISCINFO_FLAGS1_PROCESSOR_POWER_INFO;

  // Process ID
  info_ptr->process_id = getpid();

  // Times
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != -1) {
    // Omit the fractional time since the MDRawMiscInfo only wants seconds
    info_ptr->process_user_time =
        static_cast<uint32_t>(usage.ru_utime.tv_sec);
    info_ptr->process_kernel_time =
        static_cast<uint32_t>(usage.ru_stime.tv_sec);
  }
  // Process start time, via the kernel's kinfo_proc for our pid.
  int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID,
                 static_cast<int>(info_ptr->process_id) };
  uint mibsize = static_cast<uint>(sizeof(mib) / sizeof(mib[0]));
  struct kinfo_proc proc;
  size_t size = sizeof(proc);
  if (sysctl(mib, mibsize, &proc, &size, NULL, 0) == 0) {
    info_ptr->process_create_time =
        static_cast<uint32_t>(proc.kp_proc.p_starttime.tv_sec);
  }

  // Speed
  // Fixed: initialize |speed| and check the sysctlbyname() return values.
  // These keys aren't present on all hardware (e.g. "hw.cpufrequency" is
  // unavailable on Apple Silicon), and the original code divided an
  // uninitialized stack value when the call failed. Report 0 MHz instead.
  uint64_t speed = 0;
  const uint64_t kOneMillion = 1000 * 1000;
  size = sizeof(speed);
  if (sysctlbyname("hw.cpufrequency_max", &speed, &size, NULL, 0) != 0)
    speed = 0;
  info_ptr->processor_max_mhz = static_cast<uint32_t>(speed / kOneMillion);
  info_ptr->processor_mhz_limit = static_cast<uint32_t>(speed / kOneMillion);
  speed = 0;
  size = sizeof(speed);
  if (sysctlbyname("hw.cpufrequency", &speed, &size, NULL, 0) != 0)
    speed = 0;
  info_ptr->processor_current_mhz = static_cast<uint32_t>(speed / kOneMillion);

  return true;
}
1674
WriteBreakpadInfoStream(MDRawDirectory * breakpad_info_stream)1675 bool MinidumpGenerator::WriteBreakpadInfoStream(
1676 MDRawDirectory *breakpad_info_stream) {
1677 TypedMDRVA<MDRawBreakpadInfo> info(&writer_);
1678
1679 if (!info.Allocate())
1680 return false;
1681
1682 breakpad_info_stream->stream_type = MD_BREAKPAD_INFO_STREAM;
1683 breakpad_info_stream->location = info.location();
1684 MDRawBreakpadInfo *info_ptr = info.get();
1685
1686 if (exception_thread_ && exception_type_) {
1687 info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID |
1688 MD_BREAKPAD_INFO_VALID_REQUESTING_THREAD_ID;
1689 info_ptr->dump_thread_id = handler_thread_;
1690 info_ptr->requesting_thread_id = exception_thread_;
1691 } else {
1692 info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID;
1693 info_ptr->dump_thread_id = handler_thread_;
1694 info_ptr->requesting_thread_id = 0;
1695 }
1696
1697 return true;
1698 }
1699
// Writes one MDRawMacCrashInfoRecord for |module_path| from the raw bytes
// of its __DATA,__crash_info section (|crash_info| / |crash_info_size|).
// The record's trailing data holds five NUL-terminated strings in order:
// module path, message, signature_string, backtrace, message2. Returns
// false — meaning "nothing recorded" — when the module has no section,
// the annotations are empty, the module is out-of-process and outside the
// dyld shared cache, or a write fails.
bool MinidumpGenerator::WriteCrashInfoRecord(MDLocationDescriptor *location,
                                             const char *module_path,
                                             const char *crash_info,
                                             unsigned long crash_info_size,
                                             bool out_of_process,
                                             bool in_dyld_shared_cache) {
  TypedMDRVA<MDRawMacCrashInfoRecord> info(&writer_);

  // Only write crash info records for modules that actually have
  // __DATA,__crash_info sections.
  if (!crash_info || !crash_info_size) {
    return false;
  }
  // We generally don't have access to modules in another process's memory
  // space if they're not in the dyld shared cache.
  if (out_of_process && !in_dyld_shared_cache) {
    return false;
  }

  // If 'crash_info_size' is larger than we expect, 'crash_info' probably
  // contains fields we don't recognize (added by Apple since we last updated
  // this code). In that case only copy the fields we do recognize. If it's
  // smaller than we expect, we're probably running on an older version of
  // macOS, whose __crash_info sections don't contain all the fields we
  // recognize. In that case make sure the "missing" fields are zeroed in
  // 'raw_crash_info'.
  crashreporter_annotations_t raw_crash_info;
  bzero(&raw_crash_info, sizeof(raw_crash_info));
  if (crash_info_size > sizeof(raw_crash_info)) {
    crash_info_size = sizeof(raw_crash_info);
  }
  memcpy(&raw_crash_info, crash_info, crash_info_size);

  // Don't write crash info records that are empty of useful data (see
  // definition of crashreporter_annotations_t in mach_vm_compat.h).
  bool is_empty = true;
  if (raw_crash_info.message ||
      raw_crash_info.signature_string ||
      raw_crash_info.backtrace ||
      raw_crash_info.message2 ||
      raw_crash_info.thread ||
      raw_crash_info.dialog_mode ||
      ((raw_crash_info.version > 4) && raw_crash_info.abort_cause)) {
    is_empty = false;
  }
  if (is_empty) {
    return false;
  }

  // The annotation fields hold *addresses* of strings in the crashing
  // process. Resolve them to local copies before serializing.
  string message;
  string signature_string;
  string backtrace;
  string message2;

  const char *message_ptr = NULL;
  const char *signature_string_ptr = NULL;
  const char *backtrace_ptr = NULL;
  const char *message2_ptr = NULL;

  if (out_of_process) {
    // Another process's address space: read each string via the task port.
    if (raw_crash_info.message) {
      message = ReadTaskString(crashing_task_, raw_crash_info.message);
      message_ptr = message.c_str();
    }
    if (raw_crash_info.signature_string) {
      signature_string =
        ReadTaskString(crashing_task_, raw_crash_info.signature_string);
      signature_string_ptr = signature_string.c_str();
    }
    if (raw_crash_info.backtrace) {
      backtrace = ReadTaskString(crashing_task_, raw_crash_info.backtrace);
      backtrace_ptr = backtrace.c_str();
    }
    if (raw_crash_info.message2) {
      message2 = ReadTaskString(crashing_task_, raw_crash_info.message2);
      message2_ptr = message2.c_str();
    }
  } else {
    // Our own address space: the stored addresses are directly readable.
    message_ptr = reinterpret_cast<const char *>(raw_crash_info.message);
    signature_string_ptr =
      reinterpret_cast<const char *>(raw_crash_info.signature_string);
    backtrace_ptr = reinterpret_cast<const char *>(raw_crash_info.backtrace);
    message2_ptr = reinterpret_cast<const char *>(raw_crash_info.message2);
  }

  const char* data_strings[] = { module_path, message_ptr,
                                 signature_string_ptr, backtrace_ptr,
                                 message2_ptr };

  // Compute the total size of the strings we'll be copying to
  // (MDRawMacCrashInfoRecord).data, including their terminal nulls.
  size_t data_size = 0;
  for (auto src : data_strings) {
    if (!src) {
      src = "";
    }
    // Always include the terminal null, even for an empty string.
    size_t copy_length = strlen(src) + 1;
    // A "string" that's too large is a sign of data corruption.
    if (copy_length > MACCRASHINFO_STRING_MAXSIZE) {
      return false;
    }
    data_size += copy_length;
  }

  if (!info.AllocateObjectAndArray(data_size, sizeof(uint8_t)))
    return false;

  // Now copy 'module_path' and the __crash_info strings in order to
  // (MDRawMacCrashInfoRecord).data, including their terminal nulls.
  size_t offset = 0;
  for (auto src : data_strings) {
    if (!src) {
      src = "";
    }
    // Always include the terminal null, even for an empty string.
    size_t copy_length = strlen(src) + 1;
    // We can't use CopyIndexAfterObject() here. Calling that method multiple
    // times only works for objects in an array (which are all the same size).
    if (!info.Copy(info.position() + sizeof(MDRawMacCrashInfoRecord) + offset,
                   src, copy_length)) {
      return false;
    }
    offset += copy_length;
  }

  *location = info.location();
  MDRawMacCrashInfoRecord *info_ptr = info.get();
  info_ptr->stream_type = MOZ_MACOS_CRASH_INFO_STREAM;
  info_ptr->version = raw_crash_info.version;
  info_ptr->thread = raw_crash_info.thread;
  info_ptr->dialog_mode = raw_crash_info.dialog_mode;
  info_ptr->abort_cause = raw_crash_info.abort_cause;

  return true;
}
1836
// Writes the MOZ_MACOS_CRASH_INFO_STREAM: scans every loaded module for a
// __DATA,__crash_info section and records up to MAC_CRASH_INFOS_MAX
// non-empty annotation records (via WriteCrashInfoRecord).
bool MinidumpGenerator::WriteCrashInfoStream(
    MDRawDirectory *crash_info_stream) {
  TypedMDRVA<MDRawMacCrashInfo> list(&writer_);

  if (!list.Allocate())
    return false;

  crash_info_stream->stream_type = MOZ_MACOS_CRASH_INFO_STREAM;
  crash_info_stream->location = list.location();

  MDRawMacCrashInfo *list_ptr = list.get();
  bzero(list_ptr, sizeof(MDRawMacCrashInfo));
  list_ptr->stream_type = MOZ_MACOS_CRASH_INFO_STREAM;
  list_ptr->record_start_size = sizeof(MDRawMacCrashInfoRecord);

  uint32_t image_count = dynamic_images_ ?
      dynamic_images_->GetImageCount() :
      _dyld_image_count();
  // Stop early once the fixed-size records array is full.
  uint32_t crash_info_count = 0;
  for (uint32_t i = 0; (i < image_count) &&
                       (crash_info_count < MAC_CRASH_INFOS_MAX); ++i) {
    if (dynamic_images_) {
      // We're in a different process than the crashed process
      DynamicImage *image = dynamic_images_->GetImage(i);
      if (!image) {
        continue;
      }

      MDLocationDescriptor location;
      string module_path = image->GetFilePath();
      // WriteCrashInfoRecord() fails if a module doesn't contain a
      // __DATA,__crash_info section, or if it's empty of useful data.
      if (WriteCrashInfoRecord(&location,
                               module_path.c_str(),
                               reinterpret_cast<const char *>
                                 (image->GetCrashInfo()),
                               image->GetCrashInfoSize(),
                               /* out_of_process */ true,
                               image->GetInDyldSharedCache())) {
        list_ptr->records[crash_info_count] = location;
        ++crash_info_count;
      }
    } else {
      // Getting crash info in the crashed process
      const breakpad_mach_header *header =
        (breakpad_mach_header*) _dyld_get_image_header(i);
      if (!header) {
        continue;
      }
      // Skip images whose magic doesn't match this build's word size.
#ifdef __LP64__
      if (header->magic != MH_MAGIC_64) {
        continue;
      }
#else
      if (header->magic != MH_MAGIC) {
        continue;
      }
#endif

      bool in_dyld_shared_cache = ((header->flags & MH_SHAREDCACHE) != 0);
      unsigned long slide = _dyld_get_image_vmaddr_slide(i);
      const char *module_path = _dyld_get_image_name(i);

      getsectdata_size_type crash_info_size = 0;
      const char *crash_info =
        getsectdatafromheader_func(header, "__DATA", "__crash_info",
                                   &crash_info_size);
      // getsectdata returns an unslid address; apply the image's slide to
      // get the section's actual location in memory.
      if (crash_info) {
        crash_info += slide;
      }
      MDLocationDescriptor location;
      // WriteCrashInfoRecord() fails if a module doesn't contain a
      // __DATA,__crash_info section, or if it's empty of useful data.
      if (WriteCrashInfoRecord(&location, module_path, crash_info,
                               crash_info_size, /* out_of_process */ false,
                               in_dyld_shared_cache)) {
        list_ptr->records[crash_info_count] = location;
        ++crash_info_count;
      }
    }
  }

  list_ptr->record_count = crash_info_count;

  return true;
}
1923
// Records the name of |thread_id| as an MDRawThreadName entry. The name is
// obtained from the kernel via THREAD_EXTENDED_INFO (pth_name) and stored as
// a minidump string; |thread_name| receives the thread id and the RVA of
// that string. Returns false if the kernel query or the string write fails,
// leaving |thread_name| untouched.
bool MinidumpGenerator::WriteThreadName(
    mach_port_t thread_id,
    MDRawThreadName *thread_name) {
  thread_extended_info_data_t extended_info;
  mach_msg_type_number_t info_count = THREAD_EXTENDED_INFO_COUNT;

  kern_return_t kr = thread_info(thread_id, THREAD_EXTENDED_INFO,
                                 (thread_info_t)&extended_info,
                                 &info_count);
  if (kr != KERN_SUCCESS)
    return false;

  // Write the pthread name into the dump; a length of 0 means "use strlen".
  MDLocationDescriptor name_location;
  if (!writer_.WriteString(extended_info.pth_name, 0, &name_location))
    return false;

  thread_name->thread_id = thread_id;
  thread_name->rva_of_thread_name = name_location.rva;
  return true;
}
1946
// Writes the MD_THREAD_NAMES_STREAM: one MDRawThreadName entry per thread of
// the crashing task, excluding the minidump-generator (handler) thread.
// Returns true on success.
//
// Fixes over the previous version:
// - number_of_thread_names is now set to the count of entries actually
//   written, *after* the loop. Previously it was set up front to the
//   estimated count, so any WriteThreadName() failure left the header
//   claiming more entries than existed (readers would consume zeroed or
//   stale records). Setting it late is safe because TypedMDRVA caches the
//   struct and flushes it when |list| is destroyed — the same pattern
//   WriteCrashInfoStream() relies on for record_count.
// - The loop is bounds-checked so CopyIndexAfterObject() can never write
//   past the allocated array, even if handler_thread_ is non-null but not
//   actually present in threads_for_task.
bool MinidumpGenerator::WriteThreadNamesStream(
    MDRawDirectory *thread_names_stream) {
  TypedMDRVA<MDRawThreadNamesList> list(&writer_);
  thread_act_port_array_t threads_for_task;
  mach_msg_type_number_t thread_count;

  if (task_threads(crashing_task_, &threads_for_task, &thread_count))
    return false;

  int non_generator_thread_count;

  // Don't include the generator thread
  if (handler_thread_ != MACH_PORT_NULL)
    non_generator_thread_count = thread_count - 1;
  else
    non_generator_thread_count = thread_count;

  if (!list.AllocateObjectAndArray(non_generator_thread_count,
                                   sizeof(MDRawThreadName))) {
    return false;
  }

  thread_names_stream->stream_type = MD_THREAD_NAMES_STREAM;
  thread_names_stream->location = list.location();

  MDRawThreadName thread_name;
  int thread_idx = 0;

  for (unsigned int i = 0; i < thread_count; ++i) {
    if (threads_for_task[i] == handler_thread_) {
      continue;
    }
    // Never write beyond the array allocated above.
    if (thread_idx >= non_generator_thread_count) {
      break;
    }
    memset(&thread_name, 0, sizeof(MDRawThreadName));
    // WriteThreadName() can fail (e.g. thread_info() error); such threads
    // are simply omitted from the stream.
    if (WriteThreadName(threads_for_task[i], &thread_name)) {
      list.CopyIndexAfterObject(thread_idx++, &thread_name,
                                sizeof(MDRawThreadName));
    }
  }

  // Record the number of entries actually written, not the estimate; the
  // cached header is flushed to the dump when |list| goes out of scope.
  list.get()->number_of_thread_names = thread_idx;

  return true;
}
1990
1991 } // namespace google_breakpad
1992