/*
 * File cpu_x86_64.c
 *
 * Copyright (C) 1999, 2005 Alexandre Julliard
 * Copyright (C) 2009, 2011 Eric Pouech.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <assert.h>

#define NONAMELESSUNION
#define NONAMELESSSTRUCT
#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "dbghelp_private.h"
#include "winternl.h"
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);

/* x86-64 unwind information, for PE modules, as described on MSDN */

typedef enum _UNWIND_OP_CODES
{
    UWOP_PUSH_NONVOL = 0,
    UWOP_ALLOC_LARGE,
    UWOP_ALLOC_SMALL,
    UWOP_SET_FPREG,
    UWOP_SAVE_NONVOL,
    UWOP_SAVE_NONVOL_FAR,
    UWOP_SAVE_XMM128,
    UWOP_SAVE_XMM128_FAR,
    UWOP_PUSH_MACHFRAME
} UNWIND_CODE_OPS;

typedef union _UNWIND_CODE
{
    struct
    {
        BYTE CodeOffset;
        BYTE UnwindOp : 4;
        BYTE OpInfo   : 4;
    } u;
    USHORT FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;

typedef struct _UNWIND_INFO
{
    BYTE Version       : 3;
    BYTE Flags         : 5;
    BYTE SizeOfProlog;
    BYTE CountOfCodes;
    BYTE FrameRegister : 4;
    BYTE FrameOffset   : 4;
    UNWIND_CODE UnwindCode[1]; /* actually CountOfCodes (aligned) */
/*
 *  union
 *  {
 *      OPTIONAL ULONG ExceptionHandler;
 *      OPTIONAL ULONG FunctionEntry;
 *  };
 *  OPTIONAL ULONG ExceptionData[];
 */
} UNWIND_INFO, *PUNWIND_INFO;

static BOOL x86_64_get_addr(HANDLE hThread, const CONTEXT* ctx,
                            enum cpu_addr ca, ADDRESS64* addr)
{
    addr->Mode = AddrModeFlat;
    switch (ca)
    {
#ifdef __x86_64__
    case cpu_addr_pc:    addr->Segment = ctx->SegCs; addr->Offset = ctx->Rip; return TRUE;
    case cpu_addr_stack: addr->Segment = ctx->SegSs; addr->Offset = ctx->Rsp; return TRUE;
    case cpu_addr_frame: addr->Segment = ctx->SegSs; addr->Offset = ctx->Rbp; return TRUE;
#endif
    default: addr->Mode = -1;
        return FALSE;
    }
}

#ifdef __x86_64__

enum st_mode {stm_start, stm_64bit, stm_done};

/* indexes in Reserved array */
#define __CurrentMode     0
#define __CurrentCount    1
/* #define __               2 (unused) */

#define curr_mode   (frame->Reserved[__CurrentMode])
#define curr_count  (frame->Reserved[__CurrentCount])
/* #define ???         (frame->Reserved[__]) (unused) */
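
/* How the unwind codes above encode a prolog (illustrative sketch, derived
 * from the MSDN x64 unwind documentation, not taken from a real binary):
 *
 *      push %rbp               -> UWOP_PUSH_NONVOL, OpInfo = 5 (rbp)
 *      sub  $0x40,%rsp         -> UWOP_ALLOC_SMALL, OpInfo = 0x40/8 - 1 = 7
 *      lea  0x20(%rsp),%rbp    -> UWOP_SET_FPREG (FrameRegister = rbp,
 *                                                 FrameOffset = 0x20/16 = 2)
 *
 * The UNWIND_CODE slots are stored in reverse prolog order (last instruction
 * first); CodeOffset is the offset, within the prolog, of the end of the
 * instruction the slot describes.  ALLOC_LARGE, SAVE_NONVOL(_FAR) and
 * SAVE_XMM128(_FAR) consume one or two extra slots holding their offsets,
 * which is what get_opcode_size() below accounts for.
 */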

union handler_data
{
    RUNTIME_FUNCTION chain;
    ULONG handler;
};

static void dump_unwind_info(struct cpu_stack_walk* csw, ULONG64 base, RUNTIME_FUNCTION *function)
{
    static const char * const reg_names[16] =
        { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
          "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15" };

    union handler_data handler_data;
    char buffer[sizeof(UNWIND_INFO) + 256 * sizeof(UNWIND_CODE)];
    UNWIND_INFO* info = (UNWIND_INFO*)buffer;
    unsigned int i, count;
    RUNTIME_FUNCTION snext;
    ULONG64 addr;

    TRACE("**** func %x-%x\n", function->BeginAddress, function->EndAddress);
    for (;;)
    {
        if (function->UnwindData & 1)
        {
            if (!sw_read_mem(csw, base + function->UnwindData, &snext, sizeof(snext)))
            {
                TRACE("Couldn't unwind RUNTIME_INFO at %lx\n", base + function->UnwindData);
                return;
            }
            TRACE("unwind info for function %p-%p chained to function %p-%p\n",
                  (char*)base + function->BeginAddress, (char*)base + function->EndAddress,
                  (char*)base + snext.BeginAddress, (char*)base + snext.EndAddress);
            function = &snext;
            continue;
        }
        addr = base + function->UnwindData;
        if (!sw_read_mem(csw, addr, info, FIELD_OFFSET(UNWIND_INFO, UnwindCode)) ||
            !sw_read_mem(csw, addr + FIELD_OFFSET(UNWIND_INFO, UnwindCode),
                         info->UnwindCode, info->CountOfCodes * sizeof(UNWIND_CODE)))
        {
            FIXME("couldn't read memory for UNWIND_INFO at %lx\n", addr);
            return;
        }
        TRACE("unwind info at %p flags %x prolog 0x%x bytes function %p-%p\n",
              (char*)addr, info->Flags, info->SizeOfProlog,
              (char*)base + function->BeginAddress, (char*)base + function->EndAddress);

        if (info->FrameRegister)
            TRACE("    frame register %s offset 0x%x(%%rsp)\n",
                  reg_names[info->FrameRegister], info->FrameOffset * 16);

        for (i = 0; i < info->CountOfCodes; i++)
        {
            TRACE("    0x%x: ", info->UnwindCode[i].u.CodeOffset);
            switch (info->UnwindCode[i].u.UnwindOp)
            {
            case UWOP_PUSH_NONVOL:
                TRACE("pushq %%%s\n", reg_names[info->UnwindCode[i].u.OpInfo]);
                break;
            case UWOP_ALLOC_LARGE:
                if (info->UnwindCode[i].u.OpInfo)
                {
                    count = *(DWORD*)&info->UnwindCode[i+1];
                    i += 2;
                }
                else
                {
                    count = *(USHORT*)&info->UnwindCode[i+1] * 8;
                    i++;
                }
                TRACE("subq $0x%x,%%rsp\n", count);
                break;
            case UWOP_ALLOC_SMALL:
                count = (info->UnwindCode[i].u.OpInfo + 1) * 8;
                TRACE("subq $0x%x,%%rsp\n", count);
                break;
            case UWOP_SET_FPREG:
                TRACE("leaq 0x%x(%%rsp),%s\n",
                      info->FrameOffset * 16, reg_names[info->FrameRegister]);
                break;
            case UWOP_SAVE_NONVOL:
                count = *(USHORT*)&info->UnwindCode[i+1] * 8;
                TRACE("movq %%%s,0x%x(%%rsp)\n", reg_names[info->UnwindCode[i].u.OpInfo], count);
                i++;
                break;
            case UWOP_SAVE_NONVOL_FAR:
                count = *(DWORD*)&info->UnwindCode[i+1];
                TRACE("movq %%%s,0x%x(%%rsp)\n", reg_names[info->UnwindCode[i].u.OpInfo], count);
                i += 2;
                break;
            case UWOP_SAVE_XMM128:
                count = *(USHORT*)&info->UnwindCode[i+1] * 16;
                TRACE("movaps %%xmm%u,0x%x(%%rsp)\n", info->UnwindCode[i].u.OpInfo, count);
                i++;
                break;
            case UWOP_SAVE_XMM128_FAR:
                count = *(DWORD*)&info->UnwindCode[i+1];
                TRACE("movaps %%xmm%u,0x%x(%%rsp)\n", info->UnwindCode[i].u.OpInfo, count);
                i += 2;
                break;
            case UWOP_PUSH_MACHFRAME:
                TRACE("PUSH_MACHFRAME %u\n", info->UnwindCode[i].u.OpInfo);
                break;
            default:
                FIXME("unknown code %u\n", info->UnwindCode[i].u.UnwindOp);
                break;
            }
        }

        addr += FIELD_OFFSET(UNWIND_INFO, UnwindCode) +
            ((info->CountOfCodes + 1) & ~1) * sizeof(UNWIND_CODE);
        if (info->Flags & UNW_FLAG_CHAININFO)
        {
            if (!sw_read_mem(csw, addr, &handler_data, sizeof(handler_data.chain)))
            {
                FIXME("couldn't read memory for handler_data.chain\n");
                return;
            }
            TRACE("    chained to function %p-%p\n",
                  (char*)base + handler_data.chain.BeginAddress,
                  (char*)base + handler_data.chain.EndAddress);
            function = &handler_data.chain;
            continue;
        }
        if (info->Flags & (UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER))
        {
            if (!sw_read_mem(csw, addr, &handler_data, sizeof(handler_data.handler)))
            {
                FIXME("couldn't read memory for handler_data.handler\n");
                return;
            }
            TRACE("    handler %p data at %p\n",
                  (char*)base + handler_data.handler, (char*)addr + sizeof(handler_data.handler));
        }
        break;
    }
}
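
/* The helpers below index into the CONTEXT register blocks by doing pointer
 * arithmetic from Rax (resp. Xmm0).  This relies on the x86-64 CONTEXT layout
 * in winnt.h keeping Rax..R15 (and Xmm0..Xmm15) contiguous, in the same order
 * as the hardware register encodings 0-15 used by the unwind codes.
 */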

/* highly derived from dlls/ntdll/signal_x86_64.c */
static ULONG64 get_int_reg(CONTEXT *context, int reg)
{
    return *(&context->Rax + reg);
}

static void set_int_reg(CONTEXT *context, int reg, ULONG64 val)
{
    *(&context->Rax + reg) = val;
}

static void set_float_reg(CONTEXT *context, int reg, M128A val)
{
    *(&context->u.s.Xmm0 + reg) = val;
}

static int get_opcode_size(UNWIND_CODE op)
{
    switch (op.u.UnwindOp)
    {
    case UWOP_ALLOC_LARGE:
        return 2 + (op.u.OpInfo != 0);
    case UWOP_SAVE_NONVOL:
    case UWOP_SAVE_XMM128:
        return 2;
    case UWOP_SAVE_NONVOL_FAR:
    case UWOP_SAVE_XMM128_FAR:
        return 3;
    default:
        return 1;
    }
}
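
/* The x86-64 unwind data only describes the prolog.  When the current RIP is
 * inside an epilog, the unwinder must instead recognise the epilog by
 * inspecting the code: per the Windows x64 convention a well-formed epilog
 * consists only of an optional add $n,%rsp or lea n(%fpreg),%rsp, a series of
 * pops of non-volatile registers, and a ret (or tail-call jmp).  For instance
 * (illustrative example only):
 *
 *      lea  0x20(%rbp),%rsp
 *      pop  %r12
 *      pop  %rbp
 *      ret
 *
 * is_inside_epilog() checks for exactly that shape, and interpret_epilog()
 * then "executes" the remaining instructions on the CONTEXT to recover the
 * caller's registers.
 */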

static BOOL is_inside_epilog(struct cpu_stack_walk* csw, DWORD64 pc,
                             DWORD64 base, const RUNTIME_FUNCTION *function )
{
    BYTE op0, op1, op2;
    LONG val32;

    if (!sw_read_mem(csw, pc, &op0, 1)) return FALSE;

    /* add or lea must be the first instruction, and it must have a rex.W prefix */
    if ((op0 & 0xf8) == 0x48)
    {
        if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
        if (!sw_read_mem(csw, pc + 2, &op2, 1)) return FALSE;
        switch (op1)
        {
        case 0x81: /* add $nnnn,%rsp */
            if (op0 == 0x48 && op2 == 0xc4)
            {
                pc += 7;
                break;
            }
            return FALSE;
        case 0x83: /* add $n,%rsp */
            if (op0 == 0x48 && op2 == 0xc4)
            {
                pc += 4;
                break;
            }
            return FALSE;
        case 0x8d: /* lea n(reg),%rsp */
            if (op0 & 0x06) return FALSE;            /* rex.RX must be cleared */
            if (((op2 >> 3) & 7) != 4) return FALSE; /* dest reg must be %rsp */
            if ((op2 & 7) == 4) return FALSE;        /* no SIB byte allowed */
            if ((op2 >> 6) == 1)                     /* 8-bit offset */
            {
                pc += 4;
                break;
            }
            if ((op2 >> 6) == 2)                     /* 32-bit offset */
            {
                pc += 7;
                break;
            }
            return FALSE;
        }
    }

    /* now check for various pop instructions */
    for (;;)
    {
        if (!sw_read_mem(csw, pc, &op0, 1)) return FALSE;
        if ((op0 & 0xf0) == 0x40) /* rex prefix */
        {
            if (!sw_read_mem(csw, ++pc, &op0, 1)) return FALSE;
        }

        switch (op0)
        {
        case 0x58: /* pop %rax/%r8 */
        case 0x59: /* pop %rcx/%r9 */
        case 0x5a: /* pop %rdx/%r10 */
        case 0x5b: /* pop %rbx/%r11 */
        case 0x5c: /* pop %rsp/%r12 */
        case 0x5d: /* pop %rbp/%r13 */
        case 0x5e: /* pop %rsi/%r14 */
        case 0x5f: /* pop %rdi/%r15 */
            pc++;
            continue;
        case 0xc2: /* ret $nn */
        case 0xc3: /* ret */
            return TRUE;
        case 0xe9: /* jmp nnnn */
            if (!sw_read_mem(csw, pc + 1, &val32, sizeof(LONG))) return FALSE;
            pc += 5 + val32;
            if (pc - base >= function->BeginAddress && pc - base < function->EndAddress)
                continue;
            break;
        case 0xeb: /* jmp n */
            if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
            pc += 2 + (signed char)op1;
            if (pc - base >= function->BeginAddress && pc - base < function->EndAddress)
                continue;
            break;
        case 0xf3: /* rep; ret (for amd64 prediction bug) */
            if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
            return op1 == 0xc3;
        }
        return FALSE;
    }
}

static BOOL interpret_epilog(struct cpu_stack_walk* csw, ULONG64 pc, CONTEXT *context )
{
    BYTE insn, val8;
    WORD val16;
    LONG val32;
    DWORD64 val64;

    for (;;)
    {
        BYTE rex = 0;

        if (!sw_read_mem(csw, pc, &insn, 1)) return FALSE;
        if ((insn & 0xf0) == 0x40)
        {
            rex = insn & 0x0f;  /* rex prefix */
            if (!sw_read_mem(csw, ++pc, &insn, 1)) return FALSE;
        }

        switch (insn)
        {
        case 0x58: /* pop %rax/r8 */
        case 0x59: /* pop %rcx/r9 */
        case 0x5a: /* pop %rdx/r10 */
        case 0x5b: /* pop %rbx/r11 */
        case 0x5c: /* pop %rsp/r12 */
        case 0x5d: /* pop %rbp/r13 */
        case 0x5e: /* pop %rsi/r14 */
        case 0x5f: /* pop %rdi/r15 */
            if (!sw_read_mem(csw, context->Rsp, &val64, sizeof(DWORD64))) return FALSE;
            set_int_reg(context, insn - 0x58 + (rex & 1) * 8, val64);
            context->Rsp += sizeof(ULONG64);
            pc++;
            continue;
        case 0x81: /* add $nnnn,%rsp */
            if (!sw_read_mem(csw, pc + 2, &val32, sizeof(LONG))) return FALSE;
            context->Rsp += val32;
            pc += 2 + sizeof(LONG);
            continue;
        case 0x83: /* add $n,%rsp */
            if (!sw_read_mem(csw, pc + 2, &val8, sizeof(BYTE))) return FALSE;
            context->Rsp += (signed char)val8;
            pc += 3;
            continue;
        case 0x8d:
            if (!sw_read_mem(csw, pc + 1, &insn, sizeof(BYTE))) return FALSE;
            if ((insn >> 6) == 1)  /* lea n(reg),%rsp */
            {
                if (!sw_read_mem(csw, pc + 2, &val8, sizeof(BYTE))) return FALSE;
                context->Rsp = get_int_reg( context, (insn & 7) + (rex & 1) * 8 ) + (signed char)val8;
                pc += 3;
            }
            else  /* lea nnnn(reg),%rsp */
            {
                if (!sw_read_mem(csw, pc + 2, &val32, sizeof(LONG))) return FALSE;
                context->Rsp = get_int_reg( context, (insn & 7) + (rex & 1) * 8 ) + val32;
                pc += 2 + sizeof(LONG);
            }
            continue;
        case 0xc2: /* ret $nn */
            if (!sw_read_mem(csw, context->Rsp, &val64, sizeof(DWORD64))) return FALSE;
            if (!sw_read_mem(csw, pc + 1, &val16, sizeof(WORD))) return FALSE;
            context->Rip = val64;
            context->Rsp += sizeof(ULONG64) + val16;
            return TRUE;
        case 0xc3: /* ret */
        case 0xf3: /* rep; ret */
            if (!sw_read_mem(csw, context->Rsp, &val64, sizeof(DWORD64))) return FALSE;
            context->Rip = val64;
            context->Rsp += sizeof(ULONG64);
            return TRUE;
        case 0xe9: /* jmp nnnn */
            if (!sw_read_mem(csw, pc + 1, &val32, sizeof(LONG))) return FALSE;
            pc += 5 + val32;
            continue;
        case 0xeb: /* jmp n */
            if (!sw_read_mem(csw, pc + 1, &val8, sizeof(BYTE))) return FALSE;
            pc += 2 + (signed char)val8;
            continue;
        }
        FIXME("unsupported insn %x\n", insn);
        return FALSE;
    }
}

static BOOL default_unwind(struct cpu_stack_walk* csw, CONTEXT* context)
{
    if (!sw_read_mem(csw, context->Rsp, &context->Rip, sizeof(DWORD64)))
    {
        WARN("Cannot read new frame offset %s\n", wine_dbgstr_longlong(context->Rsp));
        return FALSE;
    }
    context->Rsp += sizeof(DWORD64);
    return TRUE;
}

static BOOL interpret_function_table_entry(struct cpu_stack_walk* csw,
                                           CONTEXT* context, RUNTIME_FUNCTION* function, DWORD64 base)
{
    char buffer[sizeof(UNWIND_INFO) + 256 * sizeof(UNWIND_CODE)];
    UNWIND_INFO* info = (UNWIND_INFO*)buffer;
    unsigned i;
    DWORD64 newframe, prolog_offset, off, value;
    M128A floatvalue;
    union handler_data handler_data;

    /* FIXME: we have some assumptions here */
    assert(context);
    dump_unwind_info(csw, sw_module_base(csw, context->Rip), function);
    newframe = context->Rsp;
    for (;;)
    {
        if (!sw_read_mem(csw, base + function->UnwindData, info, sizeof(*info)) ||
            !sw_read_mem(csw, base + function->UnwindData + FIELD_OFFSET(UNWIND_INFO, UnwindCode),
                         info->UnwindCode, info->CountOfCodes * sizeof(UNWIND_CODE)))
        {
            WARN("Couldn't read unwind_code at %lx\n", base + function->UnwindData);
            return FALSE;
        }

        if (info->Version != 1)
        {
            WARN("unknown unwind info version %u at %lx\n", info->Version, base + function->UnwindData);
            return FALSE;
        }

        if (info->FrameRegister)
            newframe = get_int_reg(context, info->FrameRegister) - info->FrameOffset * 16;

        /* check if in prolog */
        if (context->Rip >= base + function->BeginAddress &&
            context->Rip < base + function->BeginAddress + info->SizeOfProlog)
        {
            prolog_offset = context->Rip - base - function->BeginAddress;
        }
        else
        {
            prolog_offset = ~0;
            if (is_inside_epilog(csw, context->Rip, base, function))
            {
                interpret_epilog(csw, context->Rip, context);
                return TRUE;
            }
        }

        for (i = 0; i < info->CountOfCodes; i += get_opcode_size(info->UnwindCode[i]))
        {
            if (prolog_offset < info->UnwindCode[i].u.CodeOffset) continue; /* skip it */

            switch (info->UnwindCode[i].u.UnwindOp)
            {
            case UWOP_PUSH_NONVOL:  /* pushq %reg */
                if (!sw_read_mem(csw, context->Rsp, &value, sizeof(DWORD64))) return FALSE;
                set_int_reg(context, info->UnwindCode[i].u.OpInfo, value);
                context->Rsp += sizeof(ULONG64);
                break;
            case UWOP_ALLOC_LARGE:  /* subq $nn,%rsp */
                if (info->UnwindCode[i].u.OpInfo) context->Rsp += *(DWORD*)&info->UnwindCode[i+1];
                else context->Rsp += *(USHORT*)&info->UnwindCode[i+1] * 8;
                break;
            case UWOP_ALLOC_SMALL:  /* subq $n,%rsp */
                context->Rsp += (info->UnwindCode[i].u.OpInfo + 1) * 8;
                break;
            case UWOP_SET_FPREG:  /* leaq nn(%rsp),%framereg */
                context->Rsp = newframe;
                break;
            case UWOP_SAVE_NONVOL:  /* movq %reg,n(%rsp) */
                off = newframe + *(USHORT*)&info->UnwindCode[i+1] * 8;
                if (!sw_read_mem(csw, off, &value, sizeof(DWORD64))) return FALSE;
                set_int_reg(context, info->UnwindCode[i].u.OpInfo, value);
                break;
            case UWOP_SAVE_NONVOL_FAR:  /* movq %reg,nn(%rsp) */
                off = newframe + *(DWORD*)&info->UnwindCode[i+1];
                if (!sw_read_mem(csw, off, &value, sizeof(DWORD64))) return FALSE;
                set_int_reg(context, info->UnwindCode[i].u.OpInfo, value);
                break;
            case UWOP_SAVE_XMM128:  /* movaps %xmmreg,n(%rsp) */
                off = newframe + *(USHORT*)&info->UnwindCode[i+1] * 16;
                if (!sw_read_mem(csw, off, &floatvalue, sizeof(M128A))) return FALSE;
                set_float_reg(context, info->UnwindCode[i].u.OpInfo, floatvalue);
                break;
            case UWOP_SAVE_XMM128_FAR:  /* movaps %xmmreg,nn(%rsp) */
                off = newframe + *(DWORD*)&info->UnwindCode[i+1];
                if (!sw_read_mem(csw, off, &floatvalue, sizeof(M128A))) return FALSE;
                set_float_reg(context, info->UnwindCode[i].u.OpInfo, floatvalue);
                break;
            case UWOP_PUSH_MACHFRAME:
                FIXME("PUSH_MACHFRAME %u\n", info->UnwindCode[i].u.OpInfo);
                break;
            default:
                FIXME("unknown code %u\n", info->UnwindCode[i].u.UnwindOp);
                break;
            }
        }
        if (!(info->Flags & UNW_FLAG_CHAININFO)) break;
        if (!sw_read_mem(csw, base + function->UnwindData + FIELD_OFFSET(UNWIND_INFO, UnwindCode) +
                         ((info->CountOfCodes + 1) & ~1) * sizeof(UNWIND_CODE),
                         &handler_data, sizeof(handler_data))) return FALSE;
        function = &handler_data.chain;  /* restart with the chained info */
    }
    return default_unwind(csw, context);
}

/* fetch_next_frame()
 *
 * modify (at least) context.{rip, rsp, rbp} using unwind information
 * either out of PE exception handlers, debug info (dwarf), or simple stack unwind
 */
static BOOL fetch_next_frame(struct cpu_stack_walk *csw, union ctx *pcontext,
                             DWORD_PTR curr_pc, void** prtf)
{
    DWORD64 cfa;
    RUNTIME_FUNCTION* rtf;
    DWORD64 base;
    CONTEXT *context = &pcontext->ctx;

    if (!curr_pc || !(base = sw_module_base(csw, curr_pc))) return FALSE;
    rtf = sw_table_access(csw, curr_pc);
    if (prtf) *prtf = rtf;
    if (rtf)
    {
        return interpret_function_table_entry(csw, context, rtf, base);
    }
    else if (dwarf2_virtual_unwind(csw, curr_pc, pcontext, &cfa))
    {
        context->Rsp = cfa;
        TRACE("next function rip=%016lx\n", context->Rip);
        TRACE(" rax=%016lx rbx=%016lx rcx=%016lx rdx=%016lx\n",
              context->Rax, context->Rbx, context->Rcx, context->Rdx);
        TRACE(" rsi=%016lx rdi=%016lx rbp=%016lx rsp=%016lx\n",
              context->Rsi, context->Rdi, context->Rbp, context->Rsp);
        TRACE(" r8=%016lx r9=%016lx r10=%016lx r11=%016lx\n",
              context->R8, context->R9, context->R10, context->R11);
        TRACE(" r12=%016lx r13=%016lx r14=%016lx r15=%016lx\n",
              context->R12, context->R13, context->R14, context->R15);
        return TRUE;
    }
    else
        return default_unwind(csw, context);
}
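
/* x86_64_stack_walk() below is invoked once per frame through dbghelp's
 * StackWalk64() entry point; the STACKFRAME64.Reserved[] slots (curr_mode /
 * curr_count above) carry the walker state from one call to the next.
 * A typical caller loop looks roughly like this (illustrative sketch only,
 * error handling omitted):
 *
 *     STACKFRAME64 frame = {0};
 *     CONTEXT ctx;            // captured from the target thread
 *     frame.AddrPC.Offset    = ctx.Rip;  frame.AddrPC.Mode    = AddrModeFlat;
 *     frame.AddrFrame.Offset = ctx.Rbp;  frame.AddrFrame.Mode = AddrModeFlat;
 *     frame.AddrStack.Offset = ctx.Rsp;  frame.AddrStack.Mode = AddrModeFlat;
 *     while (StackWalk64(IMAGE_FILE_MACHINE_AMD64, hProcess, hThread, &frame, &ctx,
 *                        NULL, SymFunctionTableAccess64, SymGetModuleBase64, NULL))
 *         ... use frame.AddrPC / frame.AddrReturn ...
 */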

static BOOL x86_64_stack_walk(struct cpu_stack_walk *csw, STACKFRAME64 *frame,
                              union ctx *context)
{
    unsigned deltapc = curr_count <= 1 ? 0 : 1;

    /* sanity check */
    if (curr_mode >= stm_done) return FALSE;
    assert(!csw->is32);

    TRACE("Enter: PC=%s Frame=%s Return=%s Stack=%s Mode=%s Count=%s\n",
          wine_dbgstr_addr(&frame->AddrPC),
          wine_dbgstr_addr(&frame->AddrFrame),
          wine_dbgstr_addr(&frame->AddrReturn),
          wine_dbgstr_addr(&frame->AddrStack),
          curr_mode == stm_start ? "start" : "64bit",
          wine_dbgstr_longlong(curr_count));

    if (curr_mode == stm_start)
    {
        if ((frame->AddrPC.Mode == AddrModeFlat) &&
            (frame->AddrFrame.Mode != AddrModeFlat))
        {
            WARN("Bad AddrPC.Mode / AddrFrame.Mode combination\n");
            goto done_err;
        }

        /* Init done */
        curr_mode = stm_64bit;
        frame->AddrReturn.Mode = frame->AddrStack.Mode = AddrModeFlat;
        /* don't set up AddrStack on first call. Either the caller has set it up, or
         * we will get it in the next frame
         */
        memset(&frame->AddrBStore, 0, sizeof(frame->AddrBStore));
    }
    else
    {
        if (context->ctx.Rsp != frame->AddrStack.Offset) FIXME("inconsistent Stack Pointer\n");
        if (context->ctx.Rip != frame->AddrPC.Offset) FIXME("inconsistent Instruction Pointer\n");

        if (frame->AddrReturn.Offset == 0) goto done_err;
        if (!fetch_next_frame(csw, context, frame->AddrPC.Offset - deltapc, &frame->FuncTableEntry))
            goto done_err;
        deltapc = 1;
    }

    memset(&frame->Params, 0, sizeof(frame->Params));

    /* set frame information */
    frame->AddrStack.Offset = context->ctx.Rsp;
    frame->AddrFrame.Offset = context->ctx.Rbp;
    frame->AddrPC.Offset = context->ctx.Rip;
    if (1)
    {
        union ctx newctx = *context;

        if (!fetch_next_frame(csw, &newctx, frame->AddrPC.Offset - deltapc, NULL))
            goto done_err;
        frame->AddrReturn.Mode = AddrModeFlat;
        frame->AddrReturn.Offset = newctx.ctx.Rip;
    }

    frame->Far = TRUE;
    frame->Virtual = TRUE;
    curr_count++;

    TRACE("Leave: PC=%s Frame=%s Return=%s Stack=%s Mode=%s Count=%s FuncTable=%p\n",
          wine_dbgstr_addr(&frame->AddrPC),
          wine_dbgstr_addr(&frame->AddrFrame),
          wine_dbgstr_addr(&frame->AddrReturn),
          wine_dbgstr_addr(&frame->AddrStack),
          curr_mode == stm_start ? "start" : "64bit",
          wine_dbgstr_longlong(curr_count),
          frame->FuncTableEntry);

    return TRUE;
done_err:
    curr_mode = stm_done;
    return FALSE;
}
#else
static BOOL x86_64_stack_walk(struct cpu_stack_walk *csw, STACKFRAME64 *frame,
                              union ctx *ctx)
{
    return FALSE;
}
#endif

static void* x86_64_find_runtime_function(struct module* module, DWORD64 addr)
{
#ifdef __x86_64__
    RUNTIME_FUNCTION* rtf;
    ULONG size;
    int min, max;

    rtf = (RUNTIME_FUNCTION*)pe_map_directory(module, IMAGE_DIRECTORY_ENTRY_EXCEPTION, &size);
    if (rtf) for (min = 0, max = size / sizeof(*rtf); min <= max; )
    {
        int pos = (min + max) / 2;
        if (addr < module->module.BaseOfImage + rtf[pos].BeginAddress) max = pos - 1;
        else if (addr >= module->module.BaseOfImage + rtf[pos].EndAddress) min = pos + 1;
        else
        {
            rtf += pos;
            while (rtf->UnwindData & 1)  /* follow chained entry */
            {
                FIXME("RunTime_Function outside IMAGE_DIRECTORY_ENTRY_EXCEPTION unimplemented yet!\n");
                return NULL;
                /* we need to read into the other process */
                /* rtf = (RUNTIME_FUNCTION*)(module->module.BaseOfImage + (rtf->UnwindData & ~1)); */
            }
            return rtf;
        }
    }
#endif
    return NULL;
}
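
/* Maps DWARF register numbers (as used in .debug_frame/.eh_frame, following
 * the System V x86-64 ABI numbering: 0=rax, 1=rdx, 2=rcx, 3=rbx, 4=rsi, 5=rdi,
 * 6=rbp, 7=rsp, 8-15=r8-r15, 16=return address/rip, 17-32=xmm0-15,
 * 33-40=st0-7) onto the CodeView CV_AMD64_* constants used throughout dbghelp.
 */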
static unsigned x86_64_map_dwarf_register(unsigned regno, const struct module* module, BOOL eh_frame)
{
    unsigned reg;

    if (regno >= 17 && regno <= 24)
        reg = CV_AMD64_XMM0 + regno - 17;
    else if (regno >= 25 && regno <= 32)
        reg = CV_AMD64_XMM8 + regno - 25;
    else if (regno >= 33 && regno <= 40)
        reg = CV_AMD64_ST0 + regno - 33;
    else switch (regno)
    {
    case  0: reg = CV_AMD64_RAX; break;
    case  1: reg = CV_AMD64_RDX; break;
    case  2: reg = CV_AMD64_RCX; break;
    case  3: reg = CV_AMD64_RBX; break;
    case  4: reg = CV_AMD64_RSI; break;
    case  5: reg = CV_AMD64_RDI; break;
    case  6: reg = CV_AMD64_RBP; break;
    case  7: reg = CV_AMD64_RSP; break;
    case  8: reg = CV_AMD64_R8;  break;
    case  9: reg = CV_AMD64_R9;  break;
    case 10: reg = CV_AMD64_R10; break;
    case 11: reg = CV_AMD64_R11; break;
    case 12: reg = CV_AMD64_R12; break;
    case 13: reg = CV_AMD64_R13; break;
    case 14: reg = CV_AMD64_R14; break;
    case 15: reg = CV_AMD64_R15; break;
    case 16: reg = CV_AMD64_RIP; break;
    case 49: reg = CV_AMD64_EFLAGS; break;
    case 50: reg = CV_AMD64_ES; break;
    case 51: reg = CV_AMD64_CS; break;
    case 52: reg = CV_AMD64_SS; break;
    case 53: reg = CV_AMD64_DS; break;
    case 54: reg = CV_AMD64_FS; break;
    case 55: reg = CV_AMD64_GS; break;
    case 62: reg = CV_AMD64_TR; break;
    case 63: reg = CV_AMD64_LDTR; break;
    case 64: reg = CV_AMD64_MXCSR; break;
    case 65: reg = CV_AMD64_CTRL; break;
    case 66: reg = CV_AMD64_STAT; break;
/*
 * 56-57 reserved
 * 58 %fs.base
 * 59 %gs.base
 * 60-61 reserved
 */
    default:
        FIXME("Don't know how to map register %d\n", regno);
        return 0;
    }
    return reg;
}

static void *x86_64_fetch_context_reg(union ctx *pctx, unsigned regno, unsigned *size)
{
#ifdef __x86_64__
    CONTEXT *ctx = &pctx->ctx;

    switch (regno)
    {
    case CV_AMD64_RAX: *size = sizeof(ctx->Rax); return &ctx->Rax;
    case CV_AMD64_RDX: *size = sizeof(ctx->Rdx); return &ctx->Rdx;
    case CV_AMD64_RCX: *size = sizeof(ctx->Rcx); return &ctx->Rcx;
    case CV_AMD64_RBX: *size = sizeof(ctx->Rbx); return &ctx->Rbx;
    case CV_AMD64_RSI: *size = sizeof(ctx->Rsi); return &ctx->Rsi;
    case CV_AMD64_RDI: *size = sizeof(ctx->Rdi); return &ctx->Rdi;
    case CV_AMD64_RBP: *size = sizeof(ctx->Rbp); return &ctx->Rbp;
    case CV_AMD64_RSP: *size = sizeof(ctx->Rsp); return &ctx->Rsp;
    case CV_AMD64_R8:  *size = sizeof(ctx->R8);  return &ctx->R8;
    case CV_AMD64_R9:  *size = sizeof(ctx->R9);  return &ctx->R9;
    case CV_AMD64_R10: *size = sizeof(ctx->R10); return &ctx->R10;
    case CV_AMD64_R11: *size = sizeof(ctx->R11); return &ctx->R11;
    case CV_AMD64_R12: *size = sizeof(ctx->R12); return &ctx->R12;
    case CV_AMD64_R13: *size = sizeof(ctx->R13); return &ctx->R13;
    case CV_AMD64_R14: *size = sizeof(ctx->R14); return &ctx->R14;
    case CV_AMD64_R15: *size = sizeof(ctx->R15); return &ctx->R15;
    case CV_AMD64_RIP: *size = sizeof(ctx->Rip); return &ctx->Rip;

    case CV_AMD64_XMM0 + 0: *size = sizeof(ctx->u.s.Xmm0 ); return &ctx->u.s.Xmm0;
    case CV_AMD64_XMM0 + 1: *size = sizeof(ctx->u.s.Xmm1 ); return &ctx->u.s.Xmm1;
    case CV_AMD64_XMM0 + 2: *size = sizeof(ctx->u.s.Xmm2 ); return &ctx->u.s.Xmm2;
    case CV_AMD64_XMM0 + 3: *size = sizeof(ctx->u.s.Xmm3 ); return &ctx->u.s.Xmm3;
    case CV_AMD64_XMM0 + 4: *size = sizeof(ctx->u.s.Xmm4 ); return &ctx->u.s.Xmm4;
    case CV_AMD64_XMM0 + 5: *size = sizeof(ctx->u.s.Xmm5 ); return &ctx->u.s.Xmm5;
    case CV_AMD64_XMM0 + 6: *size = sizeof(ctx->u.s.Xmm6 ); return &ctx->u.s.Xmm6;
    case CV_AMD64_XMM0 + 7: *size = sizeof(ctx->u.s.Xmm7 ); return &ctx->u.s.Xmm7;
    case CV_AMD64_XMM8 + 0: *size = sizeof(ctx->u.s.Xmm8 ); return &ctx->u.s.Xmm8;
    case CV_AMD64_XMM8 + 1: *size = sizeof(ctx->u.s.Xmm9 ); return &ctx->u.s.Xmm9;
    case CV_AMD64_XMM8 + 2: *size = sizeof(ctx->u.s.Xmm10); return &ctx->u.s.Xmm10;
    case CV_AMD64_XMM8 + 3: *size = sizeof(ctx->u.s.Xmm11); return &ctx->u.s.Xmm11;
    case CV_AMD64_XMM8 + 4: *size = sizeof(ctx->u.s.Xmm12); return &ctx->u.s.Xmm12;
    case CV_AMD64_XMM8 + 5: *size = sizeof(ctx->u.s.Xmm13); return &ctx->u.s.Xmm13;
    case CV_AMD64_XMM8 + 6: *size = sizeof(ctx->u.s.Xmm14); return &ctx->u.s.Xmm14;
    case CV_AMD64_XMM8 + 7: *size = sizeof(ctx->u.s.Xmm15); return &ctx->u.s.Xmm15;

    case CV_AMD64_ST0 + 0: *size = sizeof(ctx->u.s.Legacy[0]); return &ctx->u.s.Legacy[0];
    case CV_AMD64_ST0 + 1: *size = sizeof(ctx->u.s.Legacy[1]); return &ctx->u.s.Legacy[1];
    case CV_AMD64_ST0 + 2: *size = sizeof(ctx->u.s.Legacy[2]); return &ctx->u.s.Legacy[2];
    case CV_AMD64_ST0 + 3: *size = sizeof(ctx->u.s.Legacy[3]); return &ctx->u.s.Legacy[3];
    case CV_AMD64_ST0 + 4: *size = sizeof(ctx->u.s.Legacy[4]); return &ctx->u.s.Legacy[4];
    case CV_AMD64_ST0 + 5: *size = sizeof(ctx->u.s.Legacy[5]); return &ctx->u.s.Legacy[5];
    case CV_AMD64_ST0 + 6: *size = sizeof(ctx->u.s.Legacy[6]); return &ctx->u.s.Legacy[6];
    case CV_AMD64_ST0 + 7: *size = sizeof(ctx->u.s.Legacy[7]); return &ctx->u.s.Legacy[7];

    case CV_AMD64_EFLAGS: *size = sizeof(ctx->EFlags); return &ctx->EFlags;
    case CV_AMD64_ES: *size = sizeof(ctx->SegEs); return &ctx->SegEs;
    case CV_AMD64_CS: *size = sizeof(ctx->SegCs); return &ctx->SegCs;
    case CV_AMD64_SS: *size = sizeof(ctx->SegSs); return &ctx->SegSs;
    case CV_AMD64_DS: *size = sizeof(ctx->SegDs); return &ctx->SegDs;
    case CV_AMD64_FS: *size = sizeof(ctx->SegFs); return &ctx->SegFs;
    case CV_AMD64_GS: *size = sizeof(ctx->SegGs); return &ctx->SegGs;

    }
#endif
    FIXME("Unknown register %x\n", regno);
    return NULL;
}

static const char* x86_64_fetch_regname(unsigned regno)
{
    switch (regno)
    {
    case CV_AMD64_RAX: return "rax";
    case CV_AMD64_RDX: return "rdx";
    case CV_AMD64_RCX: return "rcx";
    case CV_AMD64_RBX: return "rbx";
    case CV_AMD64_RSI: return "rsi";
    case CV_AMD64_RDI: return "rdi";
    case CV_AMD64_RBP: return "rbp";
    case CV_AMD64_RSP: return "rsp";
    case CV_AMD64_R8:  return "r8";
    case CV_AMD64_R9:  return "r9";
    case CV_AMD64_R10: return "r10";
    case CV_AMD64_R11: return "r11";
    case CV_AMD64_R12: return "r12";
    case CV_AMD64_R13: return "r13";
    case CV_AMD64_R14: return "r14";
    case CV_AMD64_R15: return "r15";
    case CV_AMD64_RIP: return "rip";

    case CV_AMD64_XMM0 + 0: return "xmm0";
    case CV_AMD64_XMM0 + 1: return "xmm1";
    case CV_AMD64_XMM0 + 2: return "xmm2";
    case CV_AMD64_XMM0 + 3: return "xmm3";
    case CV_AMD64_XMM0 + 4: return "xmm4";
    case CV_AMD64_XMM0 + 5: return "xmm5";
    case CV_AMD64_XMM0 + 6: return "xmm6";
    case CV_AMD64_XMM0 + 7: return "xmm7";
    case CV_AMD64_XMM8 + 0: return "xmm8";
    case CV_AMD64_XMM8 + 1: return "xmm9";
    case CV_AMD64_XMM8 + 2: return "xmm10";
    case CV_AMD64_XMM8 + 3: return "xmm11";
    case CV_AMD64_XMM8 + 4: return "xmm12";
    case CV_AMD64_XMM8 + 5: return "xmm13";
    case CV_AMD64_XMM8 + 6: return "xmm14";
    case CV_AMD64_XMM8 + 7: return "xmm15";

    case CV_AMD64_ST0 + 0: return "st0";
    case CV_AMD64_ST0 + 1: return "st1";
    case CV_AMD64_ST0 + 2: return "st2";
    case CV_AMD64_ST0 + 3: return "st3";
    case CV_AMD64_ST0 + 4: return "st4";
    case CV_AMD64_ST0 + 5: return "st5";
    case CV_AMD64_ST0 + 6: return "st6";
    case CV_AMD64_ST0 + 7: return "st7";

    case CV_AMD64_EFLAGS: return "eflags";
    case CV_AMD64_ES: return "es";
    case CV_AMD64_CS: return "cs";
    case CV_AMD64_SS: return "ss";
    case CV_AMD64_DS: return "ds";
    case CV_AMD64_FS: return "fs";
    case CV_AMD64_GS: return "gs";
    }
    FIXME("Unknown register %x\n", regno);
    return NULL;
}

static BOOL x86_64_fetch_minidump_thread(struct dump_context* dc, unsigned index, unsigned flags, const CONTEXT* ctx)
{
    if (ctx->ContextFlags && (flags & ThreadWriteInstructionWindow))
    {
        /* FIXME: crop values across module boundaries, */
#ifdef __x86_64__
        ULONG64 base = ctx->Rip <= 0x80 ? 0 : ctx->Rip - 0x80;
        minidump_add_memory_block(dc, base, ctx->Rip + 0x80 - base, 0);
#endif
    }

    return TRUE;
}

static BOOL x86_64_fetch_minidump_module(struct dump_context* dc, unsigned index, unsigned flags)
{
    /* FIXME: not sure about the flags... */
    if (1)
    {
        /* FIXME: crop values across module boundaries, */
#ifdef __x86_64__
        struct process* pcs;
        struct module* module;
        const RUNTIME_FUNCTION* rtf;
        ULONG size;

        if (!(pcs = process_find_by_handle(dc->process->handle)) ||
            !(module = module_find_by_addr(pcs, dc->modules[index].base, DMT_UNKNOWN)))
            return FALSE;
        rtf = (const RUNTIME_FUNCTION*)pe_map_directory(module, IMAGE_DIRECTORY_ENTRY_EXCEPTION, &size);
        if (rtf)
        {
            const RUNTIME_FUNCTION* end = (const RUNTIME_FUNCTION*)((const char*)rtf + size);
            UNWIND_INFO ui;

            while (rtf + 1 < end)
            {
                while (rtf->UnwindData & 1)  /* follow chained entry */
                {
                    FIXME("RunTime_Function outside IMAGE_DIRECTORY_ENTRY_EXCEPTION unimplemented yet!\n");
                    return FALSE;
                    /* we need to read into the other process */
                    /* rtf = (RUNTIME_FUNCTION*)(module->module.BaseOfImage + (rtf->UnwindData & ~1)); */
                }
                if (read_process_memory(dc->process, dc->modules[index].base + rtf->UnwindData, &ui, sizeof(ui)))
                    minidump_add_memory_block(dc, dc->modules[index].base + rtf->UnwindData,
                                              FIELD_OFFSET(UNWIND_INFO, UnwindCode) + ui.CountOfCodes * sizeof(UNWIND_CODE), 0);
                rtf++;
            }
        }
#endif
    }

    return TRUE;
}

DECLSPEC_HIDDEN struct cpu cpu_x86_64 = {
    IMAGE_FILE_MACHINE_AMD64,
    8,
    CV_AMD64_RSP,
    x86_64_get_addr,
    x86_64_stack_walk,
    x86_64_find_runtime_function,
    x86_64_map_dwarf_register,
    x86_64_fetch_context_reg,
    x86_64_fetch_regname,
    x86_64_fetch_minidump_thread,
    x86_64_fetch_minidump_module,
};