xref: /reactos/dll/win32/dbghelp/cpu_x86_64.c (revision 19b18ce2)
1 /*
2  * File cpu_x86_64.c
3  *
4  * Copyright (C) 1999, 2005 Alexandre Julliard
5  * Copyright (C) 2009, 2011 Eric Pouech.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
20  */
21 
22 #include <assert.h>
23 
24 #define NONAMELESSUNION
25 #define NONAMELESSSTRUCT
26 #include "ntstatus.h"
27 #define WIN32_NO_STATUS
28 #include "dbghelp_private.h"
29 #include "winternl.h"
30 #include "wine/debug.h"
31 
32 WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);
33 
34 /* x86-64 unwind information, for PE modules, as described on MSDN */
35 
36 typedef enum _UNWIND_OP_CODES
37 {
38     UWOP_PUSH_NONVOL = 0,
39     UWOP_ALLOC_LARGE,
40     UWOP_ALLOC_SMALL,
41     UWOP_SET_FPREG,
42     UWOP_SAVE_NONVOL,
43     UWOP_SAVE_NONVOL_FAR,
44     UWOP_SAVE_XMM128,
45     UWOP_SAVE_XMM128_FAR,
46     UWOP_PUSH_MACHFRAME
47 } UNWIND_CODE_OPS;
48 
49 typedef union _UNWIND_CODE
50 {
51     struct
52     {
53         BYTE CodeOffset;
54         BYTE UnwindOp : 4;
55         BYTE OpInfo   : 4;
56     } u;
57     USHORT FrameOffset;
58 } UNWIND_CODE, *PUNWIND_CODE;
59 
60 typedef struct _UNWIND_INFO
61 {
62     BYTE Version       : 3;
63     BYTE Flags         : 5;
64     BYTE SizeOfProlog;
65     BYTE CountOfCodes;
66     BYTE FrameRegister : 4;
67     BYTE FrameOffset   : 4;
68     UNWIND_CODE UnwindCode[1]; /* actually CountOfCodes (aligned) */
69 /*
70  *  union
71  *  {
72  *      OPTIONAL ULONG ExceptionHandler;
73  *      OPTIONAL ULONG FunctionEntry;
74  *  };
75  *  OPTIONAL ULONG ExceptionData[];
76  */
77 } UNWIND_INFO, *PUNWIND_INFO;
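/*
 * Illustrative sketch (not part of the original file): UNWIND_INFO is
 * variable-sized.  UnwindCode[] holds CountOfCodes slots, padded to an even
 * count, and only then do the optional handler/chained fields follow.  Given
 * an UNWIND_INFO copied to local memory from address 'addr', the offset of
 * that trailing data is computed exactly as the unwinder below does:
 *
 *     ULONG64 trailing = addr + FIELD_OFFSET(UNWIND_INFO, UnwindCode) +
 *                        ((info->CountOfCodes + 1) & ~1) * sizeof(UNWIND_CODE);
 *
 * The same expression appears in dump_unwind_info() and
 * interpret_function_table_entry() when locating the chained RUNTIME_FUNCTION
 * or the exception handler RVA.
 */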
78 
79 static BOOL x86_64_get_addr(HANDLE hThread, const CONTEXT* ctx,
80                             enum cpu_addr ca, ADDRESS64* addr)
81 {
82     addr->Mode = AddrModeFlat;
83     switch (ca)
84     {
85 #ifdef __x86_64__
86     case cpu_addr_pc:    addr->Segment = ctx->SegCs; addr->Offset = ctx->Rip; return TRUE;
87     case cpu_addr_stack: addr->Segment = ctx->SegSs; addr->Offset = ctx->Rsp; return TRUE;
88     case cpu_addr_frame: addr->Segment = ctx->SegSs; addr->Offset = ctx->Rbp; return TRUE;
89 #endif
90     default: addr->Mode = -1;
91         return FALSE;
92     }
93 }
94 
95 #ifdef __x86_64__
96 
97 enum st_mode {stm_start, stm_64bit, stm_done};
98 
99 /* indexes in Reserved array */
100 #define __CurrentMode     0
101 #define __CurrentCount    1
102 /* #define __     2 (unused) */
103 
104 #define curr_mode   (frame->Reserved[__CurrentMode])
105 #define curr_count  (frame->Reserved[__CurrentCount])
106 /* #define ??? (frame->Reserved[__]) (unused) */
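/*
 * Illustrative note (a sketch of the convention implied by the macros above):
 * the walker keeps its per-walk state in the caller-owned
 * STACKFRAME64.Reserved[] array, so nothing needs to be allocated or freed
 * between two StackWalk64() calls.  With a zero-initialized frame, roughly:
 *
 *     STACKFRAME64 frame = { 0 };            Reserved[0] == stm_start
 *     x86_64_stack_walk(csw, &frame, ctx);   Reserved[0] == stm_64bit,
 *                                            Reserved[1] == 1 (frames walked)
 */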
107 
108 union handler_data
109 {
110     RUNTIME_FUNCTION chain;
111     ULONG handler;
112 };
113 
114 static void dump_unwind_info(struct cpu_stack_walk* csw, ULONG64 base, RUNTIME_FUNCTION *function)
115 {
116     static const char * const reg_names[16] =
117         { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
118           "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15" };
119 
120     union handler_data handler_data;
121     char buffer[sizeof(UNWIND_INFO) + 256 * sizeof(UNWIND_CODE)];
122     UNWIND_INFO* info = (UNWIND_INFO*)buffer;
123     unsigned int i, count;
124     RUNTIME_FUNCTION snext;
125     ULONG64 addr;
126 
127     TRACE("**** func %x-%x\n", function->BeginAddress, function->EndAddress);
128     for (;;)
129     {
130         if (function->UnwindData & 1)
131         {
132             if (!sw_read_mem(csw, base + function->UnwindData, &snext, sizeof(snext)))
133             {
134                 TRACE("Couldn't read chained RUNTIME_FUNCTION at %lx\n", base + function->UnwindData);
135                 return;
136             }
137             TRACE("unwind info for function %p-%p chained to function %p-%p\n",
138                   (char*)base + function->BeginAddress, (char*)base + function->EndAddress,
139                   (char*)base + snext.BeginAddress, (char*)base + snext.EndAddress);
140             function = &snext;
141             continue;
142         }
143         addr = base + function->UnwindData;
144         if (!sw_read_mem(csw, addr, info, FIELD_OFFSET(UNWIND_INFO, UnwindCode)) ||
145             !sw_read_mem(csw, addr + FIELD_OFFSET(UNWIND_INFO, UnwindCode),
146                          info->UnwindCode, info->CountOfCodes * sizeof(UNWIND_CODE)))
147         {
148             FIXME("couldn't read memory for UNWIND_INFO at %lx\n", addr);
149             return;
150         }
151         TRACE("unwind info at %p flags %x prolog 0x%x bytes function %p-%p\n",
152               (char*)addr, info->Flags, info->SizeOfProlog,
153               (char*)base + function->BeginAddress, (char*)base + function->EndAddress);
154 
155         if (info->FrameRegister)
156             TRACE("    frame register %s offset 0x%x(%%rsp)\n",
157                   reg_names[info->FrameRegister], info->FrameOffset * 16);
158 
159         for (i = 0; i < info->CountOfCodes; i++)
160         {
161             TRACE("    0x%x: ", info->UnwindCode[i].u.CodeOffset);
162             switch (info->UnwindCode[i].u.UnwindOp)
163             {
164             case UWOP_PUSH_NONVOL:
165                 TRACE("pushq %%%s\n", reg_names[info->UnwindCode[i].u.OpInfo]);
166                 break;
167             case UWOP_ALLOC_LARGE:
168                 if (info->UnwindCode[i].u.OpInfo)
169                 {
170                     count = *(DWORD*)&info->UnwindCode[i+1];
171                     i += 2;
172                 }
173                 else
174                 {
175                     count = *(USHORT*)&info->UnwindCode[i+1] * 8;
176                     i++;
177                 }
178                 TRACE("subq $0x%x,%%rsp\n", count);
179                 break;
180             case UWOP_ALLOC_SMALL:
181                 count = (info->UnwindCode[i].u.OpInfo + 1) * 8;
182                 TRACE("subq $0x%x,%%rsp\n", count);
183                 break;
184             case UWOP_SET_FPREG:
185                 TRACE("leaq 0x%x(%%rsp),%s\n",
186                       info->FrameOffset * 16, reg_names[info->FrameRegister]);
187                 break;
188             case UWOP_SAVE_NONVOL:
189                 count = *(USHORT*)&info->UnwindCode[i+1] * 8;
190                 TRACE("movq %%%s,0x%x(%%rsp)\n", reg_names[info->UnwindCode[i].u.OpInfo], count);
191                 i++;
192                 break;
193             case UWOP_SAVE_NONVOL_FAR:
194                 count = *(DWORD*)&info->UnwindCode[i+1];
195                 TRACE("movq %%%s,0x%x(%%rsp)\n", reg_names[info->UnwindCode[i].u.OpInfo], count);
196                 i += 2;
197                 break;
198             case UWOP_SAVE_XMM128:
199                 count = *(USHORT*)&info->UnwindCode[i+1] * 16;
200                 TRACE("movaps %%xmm%u,0x%x(%%rsp)\n", info->UnwindCode[i].u.OpInfo, count);
201                 i++;
202                 break;
203             case UWOP_SAVE_XMM128_FAR:
204                 count = *(DWORD*)&info->UnwindCode[i+1];
205                 TRACE("movaps %%xmm%u,0x%x(%%rsp)\n", info->UnwindCode[i].u.OpInfo, count);
206                 i += 2;
207                 break;
208             case UWOP_PUSH_MACHFRAME:
209                 TRACE("PUSH_MACHFRAME %u\n", info->UnwindCode[i].u.OpInfo);
210                 break;
211             default:
212                 FIXME("unknown code %u\n", info->UnwindCode[i].u.UnwindOp);
213                 break;
214             }
215         }
216 
217         addr += FIELD_OFFSET(UNWIND_INFO, UnwindCode) +
218             ((info->CountOfCodes + 1) & ~1) * sizeof(UNWIND_CODE);
219         if (info->Flags & UNW_FLAG_CHAININFO)
220         {
221             if (!sw_read_mem(csw, addr, &handler_data, sizeof(handler_data.chain)))
222             {
223                 FIXME("couldn't read memory for handler_data.chain\n");
224                 return;
225             }
226             TRACE("    chained to function %p-%p\n",
227                   (char*)base + handler_data.chain.BeginAddress,
228                   (char*)base + handler_data.chain.EndAddress);
229             function = &handler_data.chain;
230             continue;
231         }
232         if (info->Flags & (UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER))
233         {
234             if (!sw_read_mem(csw, addr, &handler_data, sizeof(handler_data.handler)))
235             {
236                 FIXME("couldn't read memory for handler_data.handler\n");
237                 return;
238             }
239             TRACE("    handler %p data at %p\n",
240                   (char*)base + handler_data.handler, (char*)addr + sizeof(handler_data.handler));
241         }
242         break;
243     }
244 }
245 
246 /* largely derived from dlls/ntdll/signal_x86_64.c */
247 static ULONG64 get_int_reg(CONTEXT *context, int reg)
248 {
249     return *(&context->Rax + reg);
250 }
251 
252 static void set_int_reg(CONTEXT *context, int reg, ULONG64 val)
253 {
254     *(&context->Rax + reg) = val;
255 }
256 
257 static void set_float_reg(CONTEXT *context, int reg, M128A val)
258 {
259     *(&context->u.s.Xmm0 + reg) = val;
260 }
261 
262 static int get_opcode_size(UNWIND_CODE op)
263 {
264     switch (op.u.UnwindOp)
265     {
266     case UWOP_ALLOC_LARGE:
267         return 2 + (op.u.OpInfo != 0);
268     case UWOP_SAVE_NONVOL:
269     case UWOP_SAVE_XMM128:
270         return 2;
271     case UWOP_SAVE_NONVOL_FAR:
272     case UWOP_SAVE_XMM128_FAR:
273         return 3;
274     default:
275         return 1;
276     }
277 }
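/*
 * Illustrative example (derived from the switch above): unwind codes occupy a
 * variable number of USHORT slots, so the loops below advance by
 * get_opcode_size() rather than by one.  For instance, a prolog doing
 *
 *     pushq  %rbp              -> UWOP_PUSH_NONVOL            (1 slot)
 *     subq   $0x20,%rsp        -> UWOP_ALLOC_SMALL            (1 slot)
 *     movaps %xmm6,0x10(%rsp)  -> UWOP_SAVE_XMM128            (2 slots, offset/16 in slot 2)
 *     subq   $0x100000,%rsp    -> UWOP_ALLOC_LARGE, OpInfo=1  (3 slots, 32-bit size)
 *
 * yields CountOfCodes == 7 even though only four operations are described.
 */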
278 
279 static BOOL is_inside_epilog(struct cpu_stack_walk* csw, DWORD64 pc,
280                              DWORD64 base, const RUNTIME_FUNCTION *function )
281 {
282     BYTE op0, op1, op2;
283     LONG val32;
284 
285     if (!sw_read_mem(csw, pc, &op0, 1)) return FALSE;
286 
287     /* add or lea must be the first instruction, and it must have a rex.W prefix */
288     if ((op0 & 0xf8) == 0x48)
289     {
290         if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
291         if (!sw_read_mem(csw, pc + 2, &op2, 1)) return FALSE;
292         switch (op1)
293         {
294         case 0x81: /* add $nnnn,%rsp */
295             if (op0 == 0x48 && op2 == 0xc4)
296             {
297                 pc += 7;
298                 break;
299             }
300             return FALSE;
301         case 0x83: /* add $n,%rsp */
302             if (op0 == 0x48 && op2 == 0xc4)
303             {
304                 pc += 4;
305                 break;
306             }
307             return FALSE;
308         case 0x8d: /* lea n(reg),%rsp */
309             if (op0 & 0x06) return FALSE;  /* rex.RX must be cleared */
310             if (((op2 >> 3) & 7) != 4) return FALSE;  /* dest reg must be %rsp */
311             if ((op2 & 7) == 4) return FALSE;  /* no SIB byte allowed */
312             if ((op2 >> 6) == 1)  /* 8-bit offset */
313             {
314                 pc += 4;
315                 break;
316             }
317             if ((op2 >> 6) == 2)  /* 32-bit offset */
318             {
319                 pc += 7;
320                 break;
321             }
322             return FALSE;
323         }
324     }
325 
326     /* now check for various pop instructions */
327     for (;;)
328     {
329         if (!sw_read_mem(csw, pc, &op0, 1)) return FALSE;
330         if ((op0 & 0xf0) == 0x40)  /* rex prefix */
331         {
332             if (!sw_read_mem(csw, ++pc, &op0, 1)) return FALSE;
333         }
334 
335         switch (op0)
336         {
337         case 0x58: /* pop %rax/%r8 */
338         case 0x59: /* pop %rcx/%r9 */
339         case 0x5a: /* pop %rdx/%r10 */
340         case 0x5b: /* pop %rbx/%r11 */
341         case 0x5c: /* pop %rsp/%r12 */
342         case 0x5d: /* pop %rbp/%r13 */
343         case 0x5e: /* pop %rsi/%r14 */
344         case 0x5f: /* pop %rdi/%r15 */
345             pc++;
346             continue;
347         case 0xc2: /* ret $nn */
348         case 0xc3: /* ret */
349             return TRUE;
350         case 0xe9: /* jmp nnnn */
351             if (!sw_read_mem(csw, pc + 1, &val32, sizeof(LONG))) return FALSE;
352             pc += 5 + val32;
353             if (pc - base >= function->BeginAddress && pc - base < function->EndAddress)
354                 continue;
355             break;
356         case 0xeb: /* jmp n */
357             if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
358             pc += 2 + (signed char)op1;
359             if (pc - base >= function->BeginAddress && pc - base < function->EndAddress)
360                 continue;
361             break;
362         case 0xf3: /* rep; ret (for amd64 prediction bug) */
363             if (!sw_read_mem(csw, pc + 1, &op1, 1)) return FALSE;
364             return op1 == 0xc3;
365         }
366         return FALSE;
367     }
368 }
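/*
 * Illustrative example (assumed encoding, matching the checks above): a
 * typical MSVC-style epilog such as
 *
 *     48 83 c4 28             add    $0x28,%rsp
 *     5b                      pop    %rbx
 *     5d                      pop    %rbp
 *     c3                      ret
 *
 * is accepted: the rex.W add is skipped (pc += 4), the pops fall in the
 * 0x58-0x5f range, and the final 0xc3 ends the scan with TRUE.  Any other
 * instruction, or a jump leaving [BeginAddress, EndAddress), returns FALSE.
 */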
369 
370 static BOOL interpret_epilog(struct cpu_stack_walk* csw, ULONG64 pc, CONTEXT *context )
371 {
372     BYTE        insn, val8;
373     WORD        val16;
374     LONG        val32;
375     DWORD64     val64;
376 
377     for (;;)
378     {
379         BYTE rex = 0;
380 
381         if (!sw_read_mem(csw, pc, &insn, 1)) return FALSE;
382         if ((insn & 0xf0) == 0x40)
383         {
384             rex = insn & 0x0f;  /* rex prefix */
385             if (!sw_read_mem(csw, ++pc, &insn, 1)) return FALSE;
386         }
387 
388         switch (insn)
389         {
390         case 0x58: /* pop %rax/r8 */
391         case 0x59: /* pop %rcx/r9 */
392         case 0x5a: /* pop %rdx/r10 */
393         case 0x5b: /* pop %rbx/r11 */
394         case 0x5c: /* pop %rsp/r12 */
395         case 0x5d: /* pop %rbp/r13 */
396         case 0x5e: /* pop %rsi/r14 */
397         case 0x5f: /* pop %rdi/r15 */
398             if (!sw_read_mem(csw, context->Rsp, &val64, sizeof(DWORD64))) return FALSE;
399             set_int_reg(context, insn - 0x58 + (rex & 1) * 8, val64);
400             context->Rsp += sizeof(ULONG64);
401             pc++;
402             continue;
403         case 0x81: /* add $nnnn,%rsp */
404             if (!sw_read_mem(csw, pc + 2, &val32, sizeof(LONG))) return FALSE;
405             context->Rsp += val32;
406             pc += 2 + sizeof(LONG);
407             continue;
408         case 0x83: /* add $n,%rsp */
409             if (!sw_read_mem(csw, pc + 2, &val8, sizeof(BYTE))) return FALSE;
410             context->Rsp += (signed char)val8;
411             pc += 3;
412             continue;
413         case 0x8d:
414             if (!sw_read_mem(csw, pc + 1, &insn, sizeof(BYTE))) return FALSE;
415             if ((insn >> 6) == 1)  /* lea n(reg),%rsp */
416             {
417                 if (!sw_read_mem(csw, pc + 2, &val8, sizeof(BYTE))) return FALSE;
418                 context->Rsp = get_int_reg( context, (insn & 7) + (rex & 1) * 8 ) + (signed char)val8;
419                 pc += 3;
420             }
421             else  /* lea nnnn(reg),%rsp */
422             {
423                 if (!sw_read_mem(csw, pc + 2, &val32, sizeof(LONG))) return FALSE;
424                 context->Rsp = get_int_reg( context, (insn & 7) + (rex & 1) * 8 ) + val32;
425                 pc += 2 + sizeof(LONG);
426             }
427             continue;
428         case 0xc2: /* ret $nn */
429             if (!sw_read_mem(csw, context->Rsp, &val64, sizeof(DWORD64))) return FALSE;
430             if (!sw_read_mem(csw, pc + 1, &val16, sizeof(WORD))) return FALSE;
431             context->Rip = val64;
432             context->Rsp += sizeof(ULONG64) + val16;
433             return TRUE;
434         case 0xc3: /* ret */
435         case 0xf3: /* rep; ret */
436             if (!sw_read_mem(csw, context->Rsp, &val64, sizeof(DWORD64))) return FALSE;
437             context->Rip = val64;
438             context->Rsp += sizeof(ULONG64);
439             return TRUE;
440         case 0xe9: /* jmp nnnn */
441             if (!sw_read_mem(csw, pc + 1, &val32, sizeof(LONG))) return FALSE;
442             pc += 5 + val32;
443             continue;
444         case 0xeb: /* jmp n */
445             if (!sw_read_mem(csw, pc + 1, &val8, sizeof(BYTE))) return FALSE;
446             pc += 2 + (signed char)val8;
447             continue;
448         }
449         FIXME("unsupported insn %x\n", insn);
450         return FALSE;
451     }
452 }
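/*
 * Illustrative sketch: running interpret_epilog() over the epilog shown after
 * is_inside_epilog(), starting with context->Rsp == S, updates the CONTEXT as
 *
 *     add $0x28,%rsp   ->  Rsp = S + 0x28
 *     pop %rbx         ->  Rbx = [Rsp], Rsp += 8
 *     pop %rbp         ->  Rbp = [Rsp], Rsp += 8
 *     ret              ->  Rip = [Rsp], Rsp += 8
 *
 * i.e. it simulates the remaining epilog instructions against the CONTEXT
 * instead of undoing the prolog's unwind codes.
 */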
453 
454 static BOOL default_unwind(struct cpu_stack_walk* csw, CONTEXT* context)
455 {
456     if (!sw_read_mem(csw, context->Rsp, &context->Rip, sizeof(DWORD64)))
457     {
458         WARN("Cannot read new frame offset %s\n", wine_dbgstr_longlong(context->Rsp));
459         return FALSE;
460     }
461     context->Rsp += sizeof(DWORD64);
462     return TRUE;
463 }
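/*
 * Illustrative note: default_unwind() models a frame with no unwind data as if
 * a bare "ret" were executed, i.e.
 *
 *     Rip = *(DWORD64*)Rsp;    read the return address from the top of stack
 *     Rsp += 8;                and pop it
 *
 * which is only correct when the frame neither saved registers nor allocated
 * stack space.
 */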
464 
465 static BOOL interpret_function_table_entry(struct cpu_stack_walk* csw,
466                                            CONTEXT* context, RUNTIME_FUNCTION* function, DWORD64 base)
467 {
468     char                buffer[sizeof(UNWIND_INFO) + 256 * sizeof(UNWIND_CODE)];
469     UNWIND_INFO*        info = (UNWIND_INFO*)buffer;
470     unsigned            i;
471     DWORD64             newframe, prolog_offset, off, value;
472     M128A               floatvalue;
473     union handler_data  handler_data;
474 
475     /* FIXME: we have some assumptions here */
476     assert(context);
477     dump_unwind_info(csw, sw_module_base(csw, context->Rip), function);
478     newframe = context->Rsp;
479     for (;;)
480     {
481         if (!sw_read_mem(csw, base + function->UnwindData, info, sizeof(*info)) ||
482             !sw_read_mem(csw, base + function->UnwindData + FIELD_OFFSET(UNWIND_INFO, UnwindCode),
483                          info->UnwindCode, info->CountOfCodes * sizeof(UNWIND_CODE)))
484         {
485             WARN("Couldn't read unwind_code at %lx\n", base + function->UnwindData);
486             return FALSE;
487         }
488 
489         if (info->Version != 1)
490         {
491             WARN("unknown unwind info version %u at %lx\n", info->Version, base + function->UnwindData);
492             return FALSE;
493         }
494 
495         if (info->FrameRegister)
496             newframe = get_int_reg(context, info->FrameRegister) - info->FrameOffset * 16;
497 
498         /* check if in prolog */
499         if (context->Rip >= base + function->BeginAddress &&
500             context->Rip < base + function->BeginAddress + info->SizeOfProlog)
501         {
502             prolog_offset = context->Rip - base - function->BeginAddress;
503         }
504         else
505         {
506             prolog_offset = ~0;
507             if (is_inside_epilog(csw, context->Rip, base, function))
508             {
509                 interpret_epilog(csw, context->Rip, context);
510                 return TRUE;
511             }
512         }
513 
514         for (i = 0; i < info->CountOfCodes; i += get_opcode_size(info->UnwindCode[i]))
515         {
516             if (prolog_offset < info->UnwindCode[i].u.CodeOffset) continue; /* skip it */
517 
518             switch (info->UnwindCode[i].u.UnwindOp)
519             {
520             case UWOP_PUSH_NONVOL:  /* pushq %reg */
521                 if (!sw_read_mem(csw, context->Rsp, &value, sizeof(DWORD64))) return FALSE;
522                 set_int_reg(context, info->UnwindCode[i].u.OpInfo, value);
523                 context->Rsp += sizeof(ULONG64);
524                 break;
525             case UWOP_ALLOC_LARGE:  /* subq $nn,%rsp */
526                 if (info->UnwindCode[i].u.OpInfo) context->Rsp += *(DWORD*)&info->UnwindCode[i+1];
527                 else context->Rsp += *(USHORT*)&info->UnwindCode[i+1] * 8;
528                 break;
529             case UWOP_ALLOC_SMALL:  /* subq $n,%rsp */
530                 context->Rsp += (info->UnwindCode[i].u.OpInfo + 1) * 8;
531                 break;
532             case UWOP_SET_FPREG:  /* leaq nn(%rsp),%framereg */
533                 context->Rsp = newframe;
534                 break;
535             case UWOP_SAVE_NONVOL:  /* movq %reg,n(%rsp) */
536                 off = newframe + *(USHORT*)&info->UnwindCode[i+1] * 8;
537                 if (!sw_read_mem(csw, off, &value, sizeof(DWORD64))) return FALSE;
538                 set_int_reg(context, info->UnwindCode[i].u.OpInfo, value);
539                 break;
540             case UWOP_SAVE_NONVOL_FAR:  /* movq %reg,nn(%rsp) */
541                 off = newframe + *(DWORD*)&info->UnwindCode[i+1];
542                 if (!sw_read_mem(csw, off, &value, sizeof(DWORD64))) return FALSE;
543                 set_int_reg(context, info->UnwindCode[i].u.OpInfo, value);
544                 break;
545             case UWOP_SAVE_XMM128:  /* movaps %xmmreg,n(%rsp) */
546                 off = newframe + *(USHORT*)&info->UnwindCode[i+1] * 16;
547                 if (!sw_read_mem(csw, off, &floatvalue, sizeof(M128A))) return FALSE;
548                 set_float_reg(context, info->UnwindCode[i].u.OpInfo, floatvalue);
549                 break;
550             case UWOP_SAVE_XMM128_FAR:  /* movaps %xmmreg,nn(%rsp) */
551                 off = newframe + *(DWORD*)&info->UnwindCode[i+1];
552                 if (!sw_read_mem(csw, off, &floatvalue, sizeof(M128A))) return FALSE;
553                 set_float_reg(context, info->UnwindCode[i].u.OpInfo, floatvalue);
554                 break;
555             case UWOP_PUSH_MACHFRAME:
556                 FIXME("PUSH_MACHFRAME %u\n", info->UnwindCode[i].u.OpInfo);
557                 break;
558             default:
559                 FIXME("unknown code %u\n", info->UnwindCode[i].u.UnwindOp);
560                 break;
561             }
562         }
563         if (!(info->Flags & UNW_FLAG_CHAININFO)) break;
564         if (!sw_read_mem(csw, base + function->UnwindData + FIELD_OFFSET(UNWIND_INFO, UnwindCode) +
565                                    ((info->CountOfCodes + 1) & ~1) * sizeof(UNWIND_CODE),
566                          &handler_data, sizeof(handler_data))) return FALSE;
567         function = &handler_data.chain;  /* restart with the chained info */
568     }
569     return default_unwind(csw, context);
570 }
571 
572 /* fetch_next_frame()
573  *
574  * modify (at least) context.{rip, rsp, rbp} using unwind information
575  * either out of PE exception handlers, debug info (dwarf), or simple stack unwind
576  */
577 static BOOL fetch_next_frame(struct cpu_stack_walk* csw, CONTEXT* context,
578                              DWORD_PTR curr_pc, void** prtf)
579 {
580     DWORD_PTR               cfa;
581     RUNTIME_FUNCTION*       rtf;
582     DWORD64                 base;
583 
584     if (!curr_pc || !(base = sw_module_base(csw, curr_pc))) return FALSE;
585     rtf = sw_table_access(csw, curr_pc);
586     if (prtf) *prtf = rtf;
587     if (rtf)
588     {
589         return interpret_function_table_entry(csw, context, rtf, base);
590     }
591     else if (dwarf2_virtual_unwind(csw, curr_pc, context, &cfa))
592     {
593         context->Rsp = cfa;
594         TRACE("next function rip=%016lx\n", context->Rip);
595         TRACE("  rax=%016lx rbx=%016lx rcx=%016lx rdx=%016lx\n",
596               context->Rax, context->Rbx, context->Rcx, context->Rdx);
597         TRACE("  rsi=%016lx rdi=%016lx rbp=%016lx rsp=%016lx\n",
598               context->Rsi, context->Rdi, context->Rbp, context->Rsp);
599         TRACE("   r8=%016lx  r9=%016lx r10=%016lx r11=%016lx\n",
600               context->R8, context->R9, context->R10, context->R11);
601         TRACE("  r12=%016lx r13=%016lx r14=%016lx r15=%016lx\n",
602               context->R12, context->R13, context->R14, context->R15);
603         return TRUE;
604     }
605     else
606         return default_unwind(csw, context);
607 }
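/*
 * Illustrative sketch (hypothetical driver, not part of dbghelp): a raw list
 * of frame addresses could be produced by applying fetch_next_frame() to a
 * copy of the thread's CONTEXT until it fails:
 *
 *     CONTEXT ctx = *start_ctx;                  assumed starting CONTEXT
 *     do TRACE("%s\n", wine_dbgstr_longlong(ctx.Rip));
 *     while (ctx.Rip && fetch_next_frame(csw, &ctx, ctx.Rip, NULL));
 *
 * x86_64_stack_walk() below does essentially this, plus the bookkeeping that
 * StackWalk64() expects: AddrPC/AddrReturn/AddrStack updates, the Reserved[]
 * state machine, and the deltapc adjustment so a return address is resolved
 * inside the calling function rather than at the call's continuation.
 */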
608 
609 static BOOL x86_64_stack_walk(struct cpu_stack_walk* csw, LPSTACKFRAME64 frame, CONTEXT* context)
610 {
611     unsigned    deltapc = curr_count <= 1 ? 0 : 1;
612 
613     /* sanity check */
614     if (curr_mode >= stm_done) return FALSE;
615     assert(!csw->is32);
616 
617     TRACE("Enter: PC=%s Frame=%s Return=%s Stack=%s Mode=%s Count=%s\n",
618           wine_dbgstr_addr(&frame->AddrPC),
619           wine_dbgstr_addr(&frame->AddrFrame),
620           wine_dbgstr_addr(&frame->AddrReturn),
621           wine_dbgstr_addr(&frame->AddrStack),
622           curr_mode == stm_start ? "start" : "64bit",
623           wine_dbgstr_longlong(curr_count));
624 
625     if (curr_mode == stm_start)
626     {
627         if ((frame->AddrPC.Mode == AddrModeFlat) &&
628             (frame->AddrFrame.Mode != AddrModeFlat))
629         {
630             WARN("Bad AddrPC.Mode / AddrFrame.Mode combination\n");
631             goto done_err;
632         }
633 
634         /* Init done */
635         curr_mode = stm_64bit;
636         frame->AddrReturn.Mode = frame->AddrStack.Mode = AddrModeFlat;
637         /* don't set up AddrStack on first call. Either the caller has set it up, or
638          * we will get it in the next frame
639          */
640         memset(&frame->AddrBStore, 0, sizeof(frame->AddrBStore));
641     }
642     else
643     {
644         if (context->Rsp != frame->AddrStack.Offset) FIXME("inconsistent Stack Pointer\n");
645         if (context->Rip != frame->AddrPC.Offset) FIXME("inconsistent Instruction Pointer\n");
646 
647         if (frame->AddrReturn.Offset == 0) goto done_err;
648         if (!fetch_next_frame(csw, context, frame->AddrPC.Offset - deltapc, &frame->FuncTableEntry))
649             goto done_err;
650         deltapc = 1;
651     }
652 
653     memset(&frame->Params, 0, sizeof(frame->Params));
654 
655     /* set frame information */
656     frame->AddrStack.Offset = context->Rsp;
657     frame->AddrFrame.Offset = context->Rbp;
658     frame->AddrPC.Offset = context->Rip;
659     if (1)
660     {
661         CONTEXT         newctx = *context;
662 
663         if (!fetch_next_frame(csw, &newctx, frame->AddrPC.Offset - deltapc, NULL))
664             goto done_err;
665         frame->AddrReturn.Mode = AddrModeFlat;
666         frame->AddrReturn.Offset = newctx.Rip;
667     }
668 
669     frame->Far = TRUE;
670     frame->Virtual = TRUE;
671     curr_count++;
672 
673     TRACE("Leave: PC=%s Frame=%s Return=%s Stack=%s Mode=%s Count=%s FuncTable=%p\n",
674           wine_dbgstr_addr(&frame->AddrPC),
675           wine_dbgstr_addr(&frame->AddrFrame),
676           wine_dbgstr_addr(&frame->AddrReturn),
677           wine_dbgstr_addr(&frame->AddrStack),
678           curr_mode == stm_start ? "start" : "64bit",
679           wine_dbgstr_longlong(curr_count),
680           frame->FuncTableEntry);
681 
682     return TRUE;
683 done_err:
684     curr_mode = stm_done;
685     return FALSE;
686 }
687 #else
688 static BOOL x86_64_stack_walk(struct cpu_stack_walk* csw, LPSTACKFRAME64 frame, CONTEXT* context)
689 {
690     return FALSE;
691 }
692 #endif
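/*
 * Illustrative sketch (a minimal client, assuming a process already attached
 * with SymInitialize()): applications reach x86_64_stack_walk() indirectly
 * through the public StackWalk64() entry point, along the lines of
 *
 *     STACKFRAME64 frame = { 0 };
 *     CONTEXT ctx;
 *     RtlCaptureContext(&ctx);
 *     frame.AddrPC.Offset    = ctx.Rip; frame.AddrPC.Mode    = AddrModeFlat;
 *     frame.AddrFrame.Offset = ctx.Rbp; frame.AddrFrame.Mode = AddrModeFlat;
 *     frame.AddrStack.Offset = ctx.Rsp; frame.AddrStack.Mode = AddrModeFlat;
 *     while (StackWalk64(IMAGE_FILE_MACHINE_AMD64, hProcess, hThread, &frame,
 *                        &ctx, NULL, SymFunctionTableAccess64,
 *                        SymGetModuleBase64, NULL))
 *         ... use frame.AddrPC and frame.AddrReturn ...
 *
 * StackWalk64() dispatches to the cpu_x86_64 vtable at the end of this file.
 */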
693 
694 static void*    x86_64_find_runtime_function(struct module* module, DWORD64 addr)
695 {
696 #ifdef __x86_64__
697     RUNTIME_FUNCTION*   rtf;
698     ULONG               size;
699     int                 min, max;
700 
701     rtf = (RUNTIME_FUNCTION*)pe_map_directory(module, IMAGE_DIRECTORY_ENTRY_EXCEPTION, &size);
702     if (rtf) for (min = 0, max = (int)(size / sizeof(*rtf)) - 1; min <= max; )
703     {
704         int pos = (min + max) / 2;
705         if (addr < module->module.BaseOfImage + rtf[pos].BeginAddress) max = pos - 1;
706         else if (addr >= module->module.BaseOfImage + rtf[pos].EndAddress) min = pos + 1;
707         else
708         {
709             rtf += pos;
710             while (rtf->UnwindData & 1)  /* follow chained entry */
711             {
712                 FIXME("RUNTIME_FUNCTION outside IMAGE_DIRECTORY_ENTRY_EXCEPTION not implemented yet!\n");
713                 return NULL;
714                 /* we need to read into the other process */
715                 /* rtf = (RUNTIME_FUNCTION*)(module->module.BaseOfImage + (rtf->UnwindData & ~1)); */
716             }
717             return rtf;
718         }
719     }
720 #endif
721     return NULL;
722 }
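/*
 * Illustrative note (an assumption based on the FIXME above): an entry whose
 * UnwindData has the low bit set does not point at UNWIND_INFO but at another
 * RUNTIME_FUNCTION located at BaseOfImage + (UnwindData & ~1).  Resolving it
 * would require reading the target process's memory, which is why the lookup
 * currently bails out with NULL instead of following the chain.
 */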
723 
724 static unsigned x86_64_map_dwarf_register(unsigned regno, BOOL eh_frame)
725 {
726     unsigned    reg;
727 
728     if (regno >= 17 && regno <= 24)
729         reg = CV_AMD64_XMM0 + regno - 17;
730     else if (regno >= 25 && regno <= 32)
731         reg = CV_AMD64_XMM8 + regno - 25;
732     else if (regno >= 33 && regno <= 40)
733         reg = CV_AMD64_ST0 + regno - 33;
734     else switch (regno)
735     {
736     case  0: reg = CV_AMD64_RAX;    break;
737     case  1: reg = CV_AMD64_RDX;    break;
738     case  2: reg = CV_AMD64_RCX;    break;
739     case  3: reg = CV_AMD64_RBX;    break;
740     case  4: reg = CV_AMD64_RSI;    break;
741     case  5: reg = CV_AMD64_RDI;    break;
742     case  6: reg = CV_AMD64_RBP;    break;
743     case  7: reg = CV_AMD64_RSP;    break;
744     case  8: reg = CV_AMD64_R8;     break;
745     case  9: reg = CV_AMD64_R9;     break;
746     case 10: reg = CV_AMD64_R10;    break;
747     case 11: reg = CV_AMD64_R11;    break;
748     case 12: reg = CV_AMD64_R12;    break;
749     case 13: reg = CV_AMD64_R13;    break;
750     case 14: reg = CV_AMD64_R14;    break;
751     case 15: reg = CV_AMD64_R15;    break;
752     case 16: reg = CV_AMD64_RIP;    break;
753     case 49: reg = CV_AMD64_EFLAGS; break;
754     case 50: reg = CV_AMD64_ES;     break;
755     case 51: reg = CV_AMD64_CS;     break;
756     case 52: reg = CV_AMD64_SS;     break;
757     case 53: reg = CV_AMD64_DS;     break;
758     case 54: reg = CV_AMD64_FS;     break;
759     case 55: reg = CV_AMD64_GS;     break;
760     case 62: reg = CV_AMD64_TR;     break;
761     case 63: reg = CV_AMD64_LDTR;   break;
762     case 64: reg = CV_AMD64_MXCSR;  break;
763     case 65: reg = CV_AMD64_CTRL;   break;
764     case 66: reg = CV_AMD64_STAT;   break;
765 /*
766  * 56-57 reserved
767  * 58 %fs.base
768  * 59 %gs.base
769  * 60-61 reserved
770  */
771     default:
772         FIXME("Don't know how to map register %d\n", regno);
773         return 0;
774     }
775     return reg;
776 }
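/*
 * Illustrative example: the System V AMD64 DWARF numbering differs from the
 * CodeView order, which is why the switch above cannot be a simple offset.
 * DWARF 1 is %rdx (-> CV_AMD64_RDX) and DWARF 2 is %rcx (-> CV_AMD64_RCX),
 * DWARF 4/5 are %rsi/%rdi, 16 is the return address column (%rip), and
 * 17-32 map onto %xmm0-%xmm15.
 */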
777 
778 static void* x86_64_fetch_context_reg(CONTEXT* ctx, unsigned regno, unsigned* size)
779 {
780 #ifdef __x86_64__
781     switch (regno)
782     {
783     case CV_AMD64_RAX: *size = sizeof(ctx->Rax); return &ctx->Rax;
784     case CV_AMD64_RDX: *size = sizeof(ctx->Rdx); return &ctx->Rdx;
785     case CV_AMD64_RCX: *size = sizeof(ctx->Rcx); return &ctx->Rcx;
786     case CV_AMD64_RBX: *size = sizeof(ctx->Rbx); return &ctx->Rbx;
787     case CV_AMD64_RSI: *size = sizeof(ctx->Rsi); return &ctx->Rsi;
788     case CV_AMD64_RDI: *size = sizeof(ctx->Rdi); return &ctx->Rdi;
789     case CV_AMD64_RBP: *size = sizeof(ctx->Rbp); return &ctx->Rbp;
790     case CV_AMD64_RSP: *size = sizeof(ctx->Rsp); return &ctx->Rsp;
791     case CV_AMD64_R8:  *size = sizeof(ctx->R8);  return &ctx->R8;
792     case CV_AMD64_R9:  *size = sizeof(ctx->R9);  return &ctx->R9;
793     case CV_AMD64_R10: *size = sizeof(ctx->R10); return &ctx->R10;
794     case CV_AMD64_R11: *size = sizeof(ctx->R11); return &ctx->R11;
795     case CV_AMD64_R12: *size = sizeof(ctx->R12); return &ctx->R12;
796     case CV_AMD64_R13: *size = sizeof(ctx->R13); return &ctx->R13;
797     case CV_AMD64_R14: *size = sizeof(ctx->R14); return &ctx->R14;
798     case CV_AMD64_R15: *size = sizeof(ctx->R15); return &ctx->R15;
799     case CV_AMD64_RIP: *size = sizeof(ctx->Rip); return &ctx->Rip;
800 
801     case CV_AMD64_XMM0 + 0: *size = sizeof(ctx->u.s.Xmm0 ); return &ctx->u.s.Xmm0;
802     case CV_AMD64_XMM0 + 1: *size = sizeof(ctx->u.s.Xmm1 ); return &ctx->u.s.Xmm1;
803     case CV_AMD64_XMM0 + 2: *size = sizeof(ctx->u.s.Xmm2 ); return &ctx->u.s.Xmm2;
804     case CV_AMD64_XMM0 + 3: *size = sizeof(ctx->u.s.Xmm3 ); return &ctx->u.s.Xmm3;
805     case CV_AMD64_XMM0 + 4: *size = sizeof(ctx->u.s.Xmm4 ); return &ctx->u.s.Xmm4;
806     case CV_AMD64_XMM0 + 5: *size = sizeof(ctx->u.s.Xmm5 ); return &ctx->u.s.Xmm5;
807     case CV_AMD64_XMM0 + 6: *size = sizeof(ctx->u.s.Xmm6 ); return &ctx->u.s.Xmm6;
808     case CV_AMD64_XMM0 + 7: *size = sizeof(ctx->u.s.Xmm7 ); return &ctx->u.s.Xmm7;
809     case CV_AMD64_XMM8 + 0: *size = sizeof(ctx->u.s.Xmm8 ); return &ctx->u.s.Xmm8;
810     case CV_AMD64_XMM8 + 1: *size = sizeof(ctx->u.s.Xmm9 ); return &ctx->u.s.Xmm9;
811     case CV_AMD64_XMM8 + 2: *size = sizeof(ctx->u.s.Xmm10); return &ctx->u.s.Xmm10;
812     case CV_AMD64_XMM8 + 3: *size = sizeof(ctx->u.s.Xmm11); return &ctx->u.s.Xmm11;
813     case CV_AMD64_XMM8 + 4: *size = sizeof(ctx->u.s.Xmm12); return &ctx->u.s.Xmm12;
814     case CV_AMD64_XMM8 + 5: *size = sizeof(ctx->u.s.Xmm13); return &ctx->u.s.Xmm13;
815     case CV_AMD64_XMM8 + 6: *size = sizeof(ctx->u.s.Xmm14); return &ctx->u.s.Xmm14;
816     case CV_AMD64_XMM8 + 7: *size = sizeof(ctx->u.s.Xmm15); return &ctx->u.s.Xmm15;
817 
818     case CV_AMD64_ST0 + 0: *size = sizeof(ctx->u.s.Legacy[0]); return &ctx->u.s.Legacy[0];
819     case CV_AMD64_ST0 + 1: *size = sizeof(ctx->u.s.Legacy[1]); return &ctx->u.s.Legacy[1];
820     case CV_AMD64_ST0 + 2: *size = sizeof(ctx->u.s.Legacy[2]); return &ctx->u.s.Legacy[2];
821     case CV_AMD64_ST0 + 3: *size = sizeof(ctx->u.s.Legacy[3]); return &ctx->u.s.Legacy[3];
822     case CV_AMD64_ST0 + 4: *size = sizeof(ctx->u.s.Legacy[4]); return &ctx->u.s.Legacy[4];
823     case CV_AMD64_ST0 + 5: *size = sizeof(ctx->u.s.Legacy[5]); return &ctx->u.s.Legacy[5];
824     case CV_AMD64_ST0 + 6: *size = sizeof(ctx->u.s.Legacy[6]); return &ctx->u.s.Legacy[6];
825     case CV_AMD64_ST0 + 7: *size = sizeof(ctx->u.s.Legacy[7]); return &ctx->u.s.Legacy[7];
826 
827     case CV_AMD64_EFLAGS: *size = sizeof(ctx->EFlags); return &ctx->EFlags;
828     case CV_AMD64_ES: *size = sizeof(ctx->SegEs); return &ctx->SegEs;
829     case CV_AMD64_CS: *size = sizeof(ctx->SegCs); return &ctx->SegCs;
830     case CV_AMD64_SS: *size = sizeof(ctx->SegSs); return &ctx->SegSs;
831     case CV_AMD64_DS: *size = sizeof(ctx->SegDs); return &ctx->SegDs;
832     case CV_AMD64_FS: *size = sizeof(ctx->SegFs); return &ctx->SegFs;
833     case CV_AMD64_GS: *size = sizeof(ctx->SegGs); return &ctx->SegGs;
834 
835     }
836 #endif
837     FIXME("Unknown register %x\n", regno);
838     return NULL;
839 }
840 
841 static const char* x86_64_fetch_regname(unsigned regno)
842 {
843     switch (regno)
844     {
845     case CV_AMD64_RAX:          return "rax";
846     case CV_AMD64_RDX:          return "rdx";
847     case CV_AMD64_RCX:          return "rcx";
848     case CV_AMD64_RBX:          return "rbx";
849     case CV_AMD64_RSI:          return "rsi";
850     case CV_AMD64_RDI:          return "rdi";
851     case CV_AMD64_RBP:          return "rbp";
852     case CV_AMD64_RSP:          return "rsp";
853     case CV_AMD64_R8:           return "r8";
854     case CV_AMD64_R9:           return "r9";
855     case CV_AMD64_R10:          return "r10";
856     case CV_AMD64_R11:          return "r11";
857     case CV_AMD64_R12:          return "r12";
858     case CV_AMD64_R13:          return "r13";
859     case CV_AMD64_R14:          return "r14";
860     case CV_AMD64_R15:          return "r15";
861     case CV_AMD64_RIP:          return "rip";
862 
863     case CV_AMD64_XMM0 + 0:     return "xmm0";
864     case CV_AMD64_XMM0 + 1:     return "xmm1";
865     case CV_AMD64_XMM0 + 2:     return "xmm2";
866     case CV_AMD64_XMM0 + 3:     return "xmm3";
867     case CV_AMD64_XMM0 + 4:     return "xmm4";
868     case CV_AMD64_XMM0 + 5:     return "xmm5";
869     case CV_AMD64_XMM0 + 6:     return "xmm6";
870     case CV_AMD64_XMM0 + 7:     return "xmm7";
871     case CV_AMD64_XMM8 + 0:     return "xmm8";
872     case CV_AMD64_XMM8 + 1:     return "xmm9";
873     case CV_AMD64_XMM8 + 2:     return "xmm10";
874     case CV_AMD64_XMM8 + 3:     return "xmm11";
875     case CV_AMD64_XMM8 + 4:     return "xmm12";
876     case CV_AMD64_XMM8 + 5:     return "xmm13";
877     case CV_AMD64_XMM8 + 6:     return "xmm14";
878     case CV_AMD64_XMM8 + 7:     return "xmm15";
879 
880     case CV_AMD64_ST0 + 0:      return "st0";
881     case CV_AMD64_ST0 + 1:      return "st1";
882     case CV_AMD64_ST0 + 2:      return "st2";
883     case CV_AMD64_ST0 + 3:      return "st3";
884     case CV_AMD64_ST0 + 4:      return "st4";
885     case CV_AMD64_ST0 + 5:      return "st5";
886     case CV_AMD64_ST0 + 6:      return "st6";
887     case CV_AMD64_ST0 + 7:      return "st7";
888 
889     case CV_AMD64_EFLAGS:       return "eflags";
890     case CV_AMD64_ES:           return "es";
891     case CV_AMD64_CS:           return "cs";
892     case CV_AMD64_SS:           return "ss";
893     case CV_AMD64_DS:           return "ds";
894     case CV_AMD64_FS:           return "fs";
895     case CV_AMD64_GS:           return "gs";
896     }
897     FIXME("Unknown register %x\n", regno);
898     return NULL;
899 }
900 
901 static BOOL x86_64_fetch_minidump_thread(struct dump_context* dc, unsigned index, unsigned flags, const CONTEXT* ctx)
902 {
903     if (ctx->ContextFlags && (flags & ThreadWriteInstructionWindow))
904     {
905         /* FIXME: crop values across module boundaries, */
906 #ifdef __x86_64__
907         ULONG64 base = ctx->Rip <= 0x80 ? 0 : ctx->Rip - 0x80;
908         minidump_add_memory_block(dc, base, ctx->Rip + 0x80 - base, 0);
909 #endif
910     }
911 
912     return TRUE;
913 }
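/*
 * Illustrative example: the block recorded above is the 256-byte instruction
 * window centered on the thread's %rip, clamped at address 0.  With
 * Rip == 0x7ff612340100 it covers [0x7ff612340080, 0x7ff612340180), while
 * with Rip == 0x40 it covers [0, 0xc0).
 */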
914 
915 static BOOL x86_64_fetch_minidump_module(struct dump_context* dc, unsigned index, unsigned flags)
916 {
917     /* FIXME: not sure about the flags... */
918     if (1)
919     {
920         /* FIXME: crop values across module boundaries, */
921 #ifdef __x86_64__
922         struct process*         pcs;
923         struct module*          module;
924         const RUNTIME_FUNCTION* rtf;
925         ULONG                   size;
926 
927         if (!(pcs = process_find_by_handle(dc->hProcess)) ||
928             !(module = module_find_by_addr(pcs, dc->modules[index].base, DMT_UNKNOWN)))
929             return FALSE;
930         rtf = (const RUNTIME_FUNCTION*)pe_map_directory(module, IMAGE_DIRECTORY_ENTRY_EXCEPTION, &size);
931         if (rtf)
932         {
933             const RUNTIME_FUNCTION* end = (const RUNTIME_FUNCTION*)((const char*)rtf + size);
934             UNWIND_INFO ui;
935 
936             while (rtf + 1 < end)
937             {
938                 while (rtf->UnwindData & 1)  /* follow chained entry */
939                 {
940                     FIXME("RUNTIME_FUNCTION outside IMAGE_DIRECTORY_ENTRY_EXCEPTION not implemented yet!\n");
941                     return FALSE;
942                     /* we need to read into the other process */
943                     /* rtf = (RUNTIME_FUNCTION*)(module->module.BaseOfImage + (rtf->UnwindData & ~1)); */
944                 }
945                 if (ReadProcessMemory(dc->hProcess,
946                                       (void*)(dc->modules[index].base + rtf->UnwindData),
947                                       &ui, sizeof(ui), NULL))
948                     minidump_add_memory_block(dc, dc->modules[index].base + rtf->UnwindData,
949                                               FIELD_OFFSET(UNWIND_INFO, UnwindCode) + ui.CountOfCodes * sizeof(UNWIND_CODE), 0);
950                 rtf++;
951             }
952         }
953 #endif
954     }
955 
956     return TRUE;
957 }
958 
959 DECLSPEC_HIDDEN struct cpu cpu_x86_64 = {
960     IMAGE_FILE_MACHINE_AMD64,
961     8,
962     CV_AMD64_RSP,
963     x86_64_get_addr,
964     x86_64_stack_walk,
965     x86_64_find_runtime_function,
966     x86_64_map_dwarf_register,
967     x86_64_fetch_context_reg,
968     x86_64_fetch_regname,
969     x86_64_fetch_minidump_thread,
970     x86_64_fetch_minidump_module,
971 };
972