xref: /qemu/linux-user/elfload.c (revision 83f7d43a)
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4 
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15 
16 #include "qemu.h"
17 #include "disas.h"
18 
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27 
28 #define ELF_OSABI   ELFOSABI_SYSV
29 
30 /* from personality.h */
31 
32 /*
33  * Flags for bug emulation.
34  *
35  * These occupy the top three bytes.
36  */
37 enum {
38     ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
39     FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
40                                            descriptors (signal handling) */
41     MMAP_PAGE_ZERO =    0x0100000,
42     ADDR_COMPAT_LAYOUT = 0x0200000,
43     READ_IMPLIES_EXEC = 0x0400000,
44     ADDR_LIMIT_32BIT =  0x0800000,
45     SHORT_INODE =       0x1000000,
46     WHOLE_SECONDS =     0x2000000,
47     STICKY_TIMEOUTS =   0x4000000,
48     ADDR_LIMIT_3GB =    0x8000000,
49 };
50 
51 /*
52  * Personality types.
53  *
54  * These go in the low byte.  Avoid using the top bit, it will
55  * conflict with error returns.
56  */
57 enum {
58     PER_LINUX =         0x0000,
59     PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
60     PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
61     PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
62     PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
63     PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
64     PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
65     PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
66     PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
67     PER_BSD =           0x0006,
68     PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
69     PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
70     PER_LINUX32 =       0x0008,
71     PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
72     PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
73     PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
74     PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
75     PER_RISCOS =        0x000c,
76     PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
77     PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
78     PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
79     PER_HPUX =          0x0010,
80     PER_MASK =          0x00ff,
81 };
82 
83 /*
84  * Return the base personality without flags.
85  */
86 #define personality(pers)       (pers & PER_MASK)
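/* e.g. personality(PER_LINUX32_3GB) == PER_LINUX32; the ADDR_LIMIT_3GB flag is masked off. */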
87 
88 /* this flag is ineffective under Linux too, should be deleted */
89 #ifndef MAP_DENYWRITE
90 #define MAP_DENYWRITE 0
91 #endif
92 
93 /* should probably go in elf.h */
94 #ifndef ELIBBAD
95 #define ELIBBAD 80
96 #endif
97 
98 #ifdef TARGET_WORDS_BIGENDIAN
99 #define ELF_DATA        ELFDATA2MSB
100 #else
101 #define ELF_DATA        ELFDATA2LSB
102 #endif
103 
104 typedef target_ulong    target_elf_greg_t;
105 #ifdef USE_UID16
106 typedef target_ushort   target_uid_t;
107 typedef target_ushort   target_gid_t;
108 #else
109 typedef target_uint     target_uid_t;
110 typedef target_uint     target_gid_t;
111 #endif
112 typedef target_int      target_pid_t;
113 
114 #ifdef TARGET_I386
115 
116 #define ELF_PLATFORM get_elf_platform()
117 
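/* Build the AT_PLATFORM string ("i386" .. "i686") from the guest CPUID family. */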
118 static const char *get_elf_platform(void)
119 {
120     static char elf_platform[] = "i386";
121     int family = (thread_env->cpuid_version >> 8) & 0xff;
122     if (family > 6)
123         family = 6;
124     if (family >= 3)
125         elf_platform[1] = '0' + family;
126     return elf_platform;
127 }
128 
129 #define ELF_HWCAP get_elf_hwcap()
130 
131 static uint32_t get_elf_hwcap(void)
132 {
133     return thread_env->cpuid_features;
134 }
135 
136 #ifdef TARGET_X86_64
137 #define ELF_START_MMAP 0x2aaaaab000ULL
138 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
139 
140 #define ELF_CLASS      ELFCLASS64
141 #define ELF_ARCH       EM_X86_64
142 
143 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
144 {
145     regs->rax = 0;
146     regs->rsp = infop->start_stack;
147     regs->rip = infop->entry;
148 }
149 
150 #define ELF_NREG    27
151 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
152 
153 /*
154  * Note that ELF_NREG should be 29, as there should also be space for
155  * the TRAPNO and ERR "registers", but Linux doesn't dump
156  * those.
157  *
158  * See linux kernel: arch/x86/include/asm/elf.h
159  */
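/* The layout matches the kernel's x86-64 elf_gregset_t (struct user_regs_struct):
   r15..rdi, orig_rax, rip, cs, eflags, rsp, ss, fs_base, gs_base, ds, es, fs, gs. */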
160 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
161 {
162     (*regs)[0] = env->regs[15];
163     (*regs)[1] = env->regs[14];
164     (*regs)[2] = env->regs[13];
165     (*regs)[3] = env->regs[12];
166     (*regs)[4] = env->regs[R_EBP];
167     (*regs)[5] = env->regs[R_EBX];
168     (*regs)[6] = env->regs[11];
169     (*regs)[7] = env->regs[10];
170     (*regs)[8] = env->regs[9];
171     (*regs)[9] = env->regs[8];
172     (*regs)[10] = env->regs[R_EAX];
173     (*regs)[11] = env->regs[R_ECX];
174     (*regs)[12] = env->regs[R_EDX];
175     (*regs)[13] = env->regs[R_ESI];
176     (*regs)[14] = env->regs[R_EDI];
177     (*regs)[15] = env->regs[R_EAX]; /* XXX: stands in for orig_rax */
178     (*regs)[16] = env->eip;
179     (*regs)[17] = env->segs[R_CS].selector & 0xffff;
180     (*regs)[18] = env->eflags;
181     (*regs)[19] = env->regs[R_ESP];
182     (*regs)[20] = env->segs[R_SS].selector & 0xffff;
183     (*regs)[21] = env->segs[R_FS].selector & 0xffff;
184     (*regs)[22] = env->segs[R_GS].selector & 0xffff;
185     (*regs)[23] = env->segs[R_DS].selector & 0xffff;
186     (*regs)[24] = env->segs[R_ES].selector & 0xffff;
187     (*regs)[25] = env->segs[R_FS].selector & 0xffff;
188     (*regs)[26] = env->segs[R_GS].selector & 0xffff;
189 }
190 
191 #else
192 
193 #define ELF_START_MMAP 0x80000000
194 
195 /*
196  * This is used to ensure we don't load something for the wrong architecture.
197  */
198 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
199 
200 /*
201  * These are used to set parameters in the core dumps.
202  */
203 #define ELF_CLASS       ELFCLASS32
204 #define ELF_ARCH        EM_386
205 
206 static inline void init_thread(struct target_pt_regs *regs,
207                                struct image_info *infop)
208 {
209     regs->esp = infop->start_stack;
210     regs->eip = infop->entry;
211 
212     /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
213        starts %edx contains a pointer to a function which might be
214        registered using `atexit'.  This provides a means for the
215        dynamic linker to call DT_FINI functions for shared libraries
216        that have been loaded before the code runs.
217 
218        A value of 0 means there is no such handler.  */
219     regs->edx = 0;
220 }
221 
222 #define ELF_NREG    17
223 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
224 
225 /*
226  * Note that ELF_NREG should be 19, as there should also be space for
227  * the TRAPNO and ERR "registers", but Linux doesn't dump
228  * those.
229  *
230  * See linux kernel: arch/x86/include/asm/elf.h
231  */
232 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
233 {
234     (*regs)[0] = env->regs[R_EBX];
235     (*regs)[1] = env->regs[R_ECX];
236     (*regs)[2] = env->regs[R_EDX];
237     (*regs)[3] = env->regs[R_ESI];
238     (*regs)[4] = env->regs[R_EDI];
239     (*regs)[5] = env->regs[R_EBP];
240     (*regs)[6] = env->regs[R_EAX];
241     (*regs)[7] = env->segs[R_DS].selector & 0xffff;
242     (*regs)[8] = env->segs[R_ES].selector & 0xffff;
243     (*regs)[9] = env->segs[R_FS].selector & 0xffff;
244     (*regs)[10] = env->segs[R_GS].selector & 0xffff;
245     (*regs)[11] = env->regs[R_EAX]; /* XXX: stands in for orig_eax */
246     (*regs)[12] = env->eip;
247     (*regs)[13] = env->segs[R_CS].selector & 0xffff;
248     (*regs)[14] = env->eflags;
249     (*regs)[15] = env->regs[R_ESP];
250     (*regs)[16] = env->segs[R_SS].selector & 0xffff;
251 }
252 #endif
253 
254 #define USE_ELF_CORE_DUMP
255 #define ELF_EXEC_PAGESIZE       4096
256 
257 #endif
258 
259 #ifdef TARGET_ARM
260 
261 #define ELF_START_MMAP 0x80000000
262 
263 #define elf_check_arch(x) ( (x) == EM_ARM )
264 
265 #define ELF_CLASS       ELFCLASS32
266 #define ELF_ARCH        EM_ARM
267 
268 static inline void init_thread(struct target_pt_regs *regs,
269                                struct image_info *infop)
270 {
271     abi_long stack = infop->start_stack;
272     memset(regs, 0, sizeof(*regs));
273     regs->ARM_cpsr = 0x10;
274     if (infop->entry & 1)
275         regs->ARM_cpsr |= CPSR_T;
276     regs->ARM_pc = infop->entry & 0xfffffffe;
277     regs->ARM_sp = infop->start_stack;
278     /* FIXME - what to do on failure of get_user()? */
279     get_user_ual(regs->ARM_r2, stack + 8); /* envp */
280     get_user_ual(regs->ARM_r1, stack + 4); /* envp */
281     /* XXX: it seems that r0 is zeroed again afterwards anyway */
282     regs->ARM_r0 = 0;
283     /* For uClinux PIC binaries.  */
284     /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
285     regs->ARM_r10 = infop->start_data;
286 }
287 
288 #define ELF_NREG    18
289 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
290 
291 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
292 {
293     (*regs)[0] = tswapl(env->regs[0]);
294     (*regs)[1] = tswapl(env->regs[1]);
295     (*regs)[2] = tswapl(env->regs[2]);
296     (*regs)[3] = tswapl(env->regs[3]);
297     (*regs)[4] = tswapl(env->regs[4]);
298     (*regs)[5] = tswapl(env->regs[5]);
299     (*regs)[6] = tswapl(env->regs[6]);
300     (*regs)[7] = tswapl(env->regs[7]);
301     (*regs)[8] = tswapl(env->regs[8]);
302     (*regs)[9] = tswapl(env->regs[9]);
303     (*regs)[10] = tswapl(env->regs[10]);
304     (*regs)[11] = tswapl(env->regs[11]);
305     (*regs)[12] = tswapl(env->regs[12]);
306     (*regs)[13] = tswapl(env->regs[13]);
307     (*regs)[14] = tswapl(env->regs[14]);
308     (*regs)[15] = tswapl(env->regs[15]);
309 
310     (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
311     (*regs)[17] = tswapl(env->regs[0]); /* XXX: stands in for ORIG_r0 */
312 }
313 
314 #define USE_ELF_CORE_DUMP
315 #define ELF_EXEC_PAGESIZE       4096
316 
317 enum
318 {
319     ARM_HWCAP_ARM_SWP       = 1 << 0,
320     ARM_HWCAP_ARM_HALF      = 1 << 1,
321     ARM_HWCAP_ARM_THUMB     = 1 << 2,
322     ARM_HWCAP_ARM_26BIT     = 1 << 3,
323     ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
324     ARM_HWCAP_ARM_FPA       = 1 << 5,
325     ARM_HWCAP_ARM_VFP       = 1 << 6,
326     ARM_HWCAP_ARM_EDSP      = 1 << 7,
327     ARM_HWCAP_ARM_JAVA      = 1 << 8,
328     ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
329     ARM_HWCAP_ARM_THUMBEE   = 1 << 10,
330     ARM_HWCAP_ARM_NEON      = 1 << 11,
331     ARM_HWCAP_ARM_VFPv3     = 1 << 12,
332     ARM_HWCAP_ARM_VFPv3D16  = 1 << 13,
333 };
334 
335 #define TARGET_HAS_GUEST_VALIDATE_BASE
336 /* We want the opportunity to check the suggested base */
337 bool guest_validate_base(unsigned long guest_base)
338 {
339     unsigned long real_start, test_page_addr;
340 
341     /* We need to check that we can force a fault on access to the
342      * commpage at 0xffff0fxx
343      */
344     test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask);
345     /* Note it needs to be writeable to let us initialise it */
346     real_start = (unsigned long)
347                  mmap((void *)test_page_addr, qemu_host_page_size,
348                      PROT_READ | PROT_WRITE,
349                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
350 
351     /* If we can't map it then try another address */
352     if (real_start == -1ul) {
353         return 0;
354     }
355 
356     if (real_start != test_page_addr) {
357         /* OS didn't put the page where we asked - unmap and reject */
358         munmap((void *)real_start, qemu_host_page_size);
359         return 0;
360     }
361 
362     /* Leave the page mapped
363      * Populate it (mmap should have left it all 0'd)
364      */
365 
366     /* Kernel helper version word (__kuser_helper_version, at 0xffff0ffc) */
367     __put_user(5, (uint32_t *)g2h(0xffff0ffcul));
368 
369     /* Now it's populated make it RO */
370     if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) {
371         perror("Protecting guest commpage");
372         exit(-1);
373     }
374 
375     return 1; /* All good */
376 }
377 
378 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF               \
379                    | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT      \
380                    | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP              \
381                    | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
382 
383 #endif
384 
385 #ifdef TARGET_UNICORE32
386 
387 #define ELF_START_MMAP          0x80000000
388 
389 #define elf_check_arch(x)       ((x) == EM_UNICORE32)
390 
391 #define ELF_CLASS               ELFCLASS32
392 #define ELF_DATA                ELFDATA2LSB
393 #define ELF_ARCH                EM_UNICORE32
394 
395 static inline void init_thread(struct target_pt_regs *regs,
396         struct image_info *infop)
397 {
398     abi_long stack = infop->start_stack;
399     memset(regs, 0, sizeof(*regs));
400     regs->UC32_REG_asr = 0x10;
401     regs->UC32_REG_pc = infop->entry & 0xfffffffe;
402     regs->UC32_REG_sp = infop->start_stack;
403     /* FIXME - what to do on failure of get_user()? */
404     get_user_ual(regs->UC32_REG_02, stack + 8); /* envp */
405     get_user_ual(regs->UC32_REG_01, stack + 4); /* envp */
406     /* XXX: it seems that r0 is zeroed again afterwards anyway */
407     regs->UC32_REG_00 = 0;
408 }
409 
410 #define ELF_NREG    34
411 typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
412 
413 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
414 {
415     (*regs)[0] = env->regs[0];
416     (*regs)[1] = env->regs[1];
417     (*regs)[2] = env->regs[2];
418     (*regs)[3] = env->regs[3];
419     (*regs)[4] = env->regs[4];
420     (*regs)[5] = env->regs[5];
421     (*regs)[6] = env->regs[6];
422     (*regs)[7] = env->regs[7];
423     (*regs)[8] = env->regs[8];
424     (*regs)[9] = env->regs[9];
425     (*regs)[10] = env->regs[10];
426     (*regs)[11] = env->regs[11];
427     (*regs)[12] = env->regs[12];
428     (*regs)[13] = env->regs[13];
429     (*regs)[14] = env->regs[14];
430     (*regs)[15] = env->regs[15];
431     (*regs)[16] = env->regs[16];
432     (*regs)[17] = env->regs[17];
433     (*regs)[18] = env->regs[18];
434     (*regs)[19] = env->regs[19];
435     (*regs)[20] = env->regs[20];
436     (*regs)[21] = env->regs[21];
437     (*regs)[22] = env->regs[22];
438     (*regs)[23] = env->regs[23];
439     (*regs)[24] = env->regs[24];
440     (*regs)[25] = env->regs[25];
441     (*regs)[26] = env->regs[26];
442     (*regs)[27] = env->regs[27];
443     (*regs)[28] = env->regs[28];
444     (*regs)[29] = env->regs[29];
445     (*regs)[30] = env->regs[30];
446     (*regs)[31] = env->regs[31];
447 
448     (*regs)[32] = cpu_asr_read((CPUState *)env);
449     (*regs)[33] = env->regs[0]; /* XXX */
450 }
451 
452 #define USE_ELF_CORE_DUMP
453 #define ELF_EXEC_PAGESIZE               4096
454 
455 #define ELF_HWCAP                       (UC32_HWCAP_CMOV | UC32_HWCAP_UCF64)
456 
457 #endif
458 
459 #ifdef TARGET_SPARC
460 #ifdef TARGET_SPARC64
461 
462 #define ELF_START_MMAP 0x80000000
463 #define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
464                     | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
465 #ifndef TARGET_ABI32
466 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
467 #else
468 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
469 #endif
470 
471 #define ELF_CLASS   ELFCLASS64
472 #define ELF_ARCH    EM_SPARCV9
473 
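/* SPARC V9 ABI: 64-bit stack and frame pointers carry a 2047-byte bias. */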
474 #define STACK_BIAS              2047
475 
476 static inline void init_thread(struct target_pt_regs *regs,
477                                struct image_info *infop)
478 {
479 #ifndef TARGET_ABI32
480     regs->tstate = 0;
481 #endif
482     regs->pc = infop->entry;
483     regs->npc = regs->pc + 4;
484     regs->y = 0;
485 #ifdef TARGET_ABI32
486     regs->u_regs[14] = infop->start_stack - 16 * 4;
487 #else
488     if (personality(infop->personality) == PER_LINUX32)
489         regs->u_regs[14] = infop->start_stack - 16 * 4;
490     else
491         regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
492 #endif
493 }
494 
495 #else
496 #define ELF_START_MMAP 0x80000000
497 #define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
498                     | HWCAP_SPARC_MULDIV)
499 #define elf_check_arch(x) ( (x) == EM_SPARC )
500 
501 #define ELF_CLASS   ELFCLASS32
502 #define ELF_ARCH    EM_SPARC
503 
504 static inline void init_thread(struct target_pt_regs *regs,
505                                struct image_info *infop)
506 {
507     regs->psr = 0;
508     regs->pc = infop->entry;
509     regs->npc = regs->pc + 4;
510     regs->y = 0;
511     regs->u_regs[14] = infop->start_stack - 16 * 4;
512 }
513 
514 #endif
515 #endif
516 
517 #ifdef TARGET_PPC
518 
519 #define ELF_START_MMAP 0x80000000
520 
521 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
522 
523 #define elf_check_arch(x) ( (x) == EM_PPC64 )
524 
525 #define ELF_CLASS       ELFCLASS64
526 
527 #else
528 
529 #define elf_check_arch(x) ( (x) == EM_PPC )
530 
531 #define ELF_CLASS       ELFCLASS32
532 
533 #endif
534 
535 #define ELF_ARCH        EM_PPC
536 
537 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
538    See arch/powerpc/include/asm/cputable.h.  */
539 enum {
540     QEMU_PPC_FEATURE_32 = 0x80000000,
541     QEMU_PPC_FEATURE_64 = 0x40000000,
542     QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
543     QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
544     QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
545     QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
546     QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
547     QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
548     QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
549     QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
550     QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
551     QEMU_PPC_FEATURE_NO_TB = 0x00100000,
552     QEMU_PPC_FEATURE_POWER4 = 0x00080000,
553     QEMU_PPC_FEATURE_POWER5 = 0x00040000,
554     QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
555     QEMU_PPC_FEATURE_CELL = 0x00010000,
556     QEMU_PPC_FEATURE_BOOKE = 0x00008000,
557     QEMU_PPC_FEATURE_SMT = 0x00004000,
558     QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
559     QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
560     QEMU_PPC_FEATURE_PA6T = 0x00000800,
561     QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
562     QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
563     QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
564     QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
565     QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
566 
567     QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
568     QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
569 };
570 
571 #define ELF_HWCAP get_elf_hwcap()
572 
573 static uint32_t get_elf_hwcap(void)
574 {
575     CPUState *e = thread_env;
576     uint32_t features = 0;
577 
578     /* We don't have to be terribly complete here; the high points are
579        Altivec/FP/SPE support.  Anything else is just a bonus.  */
580 #define GET_FEATURE(flag, feature)                                      \
581     do {if (e->insns_flags & flag) features |= feature; } while(0)
582     GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
583     GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
584     GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
585     GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
586     GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
587     GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
588     GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
589     GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
590 #undef GET_FEATURE
591 
592     return features;
593 }
594 
595 /*
596  * The requirements here are:
597  * - keep the final alignment of sp (sp & 0xf)
598  * - make sure the 32-bit value at the first 16 byte aligned position of
599  *   AUXV is greater than 16 for glibc compatibility.
600  *   AT_IGNOREPPC is used for that.
601  * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
602  *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
603  */
604 #define DLINFO_ARCH_ITEMS       5
605 #define ARCH_DLINFO                                     \
606     do {                                                \
607         NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
608         NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
609         NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
610         /*                                              \
611          * Now handle glibc compatibility.              \
612          */                                             \
613         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
614         NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
615     } while (0)
616 
617 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
618 {
619     _regs->gpr[1] = infop->start_stack;
620 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
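    /* On 64-bit PPC (ELFv1 ABI) e_entry points to a function descriptor:
       the first doubleword is the code address, the second the TOC base for r2. */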
621     _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_bias;
622     infop->entry = ldq_raw(infop->entry) + infop->load_bias;
623 #endif
624     _regs->nip = infop->entry;
625 }
626 
627 /* See linux kernel: arch/powerpc/include/asm/elf.h.  */
628 #define ELF_NREG 48
629 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
630 
631 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
632 {
633     int i;
634     target_ulong ccr = 0;
635 
636     for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
637         (*regs)[i] = tswapl(env->gpr[i]);
638     }
639 
640     (*regs)[32] = tswapl(env->nip);
641     (*regs)[33] = tswapl(env->msr);
642     (*regs)[35] = tswapl(env->ctr);
643     (*regs)[36] = tswapl(env->lr);
644     (*regs)[37] = tswapl(env->xer);
645 
646     for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
647         ccr |= env->crf[i] << (32 - ((i + 1) * 4));
648     }
649     (*regs)[38] = tswapl(ccr);
650 }
651 
652 #define USE_ELF_CORE_DUMP
653 #define ELF_EXEC_PAGESIZE       4096
654 
655 #endif
656 
657 #ifdef TARGET_MIPS
658 
659 #define ELF_START_MMAP 0x80000000
660 
661 #define elf_check_arch(x) ( (x) == EM_MIPS )
662 
663 #ifdef TARGET_MIPS64
664 #define ELF_CLASS   ELFCLASS64
665 #else
666 #define ELF_CLASS   ELFCLASS32
667 #endif
668 #define ELF_ARCH    EM_MIPS
669 
670 static inline void init_thread(struct target_pt_regs *regs,
671                                struct image_info *infop)
672 {
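    /* KSU = 2: start the new thread in user mode. */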
673     regs->cp0_status = 2 << CP0St_KSU;
674     regs->cp0_epc = infop->entry;
675     regs->regs[29] = infop->start_stack;
676 }
677 
678 /* See linux kernel: arch/mips/include/asm/elf.h.  */
679 #define ELF_NREG 45
680 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
681 
682 /* See linux kernel: arch/mips/include/asm/reg.h.  */
683 enum {
684 #ifdef TARGET_MIPS64
685     TARGET_EF_R0 = 0,
686 #else
687     TARGET_EF_R0 = 6,
688 #endif
689     TARGET_EF_R26 = TARGET_EF_R0 + 26,
690     TARGET_EF_R27 = TARGET_EF_R0 + 27,
691     TARGET_EF_LO = TARGET_EF_R0 + 32,
692     TARGET_EF_HI = TARGET_EF_R0 + 33,
693     TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
694     TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
695     TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
696     TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
697 };
698 
699 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
700 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
701 {
702     int i;
703 
704     for (i = 0; i < TARGET_EF_R0; i++) {
705         (*regs)[i] = 0;
706     }
707     (*regs)[TARGET_EF_R0] = 0;
708 
709     for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
710         (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
711     }
712 
713     (*regs)[TARGET_EF_R26] = 0;
714     (*regs)[TARGET_EF_R27] = 0;
715     (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
716     (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
717     (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
718     (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
719     (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
720     (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
721 }
722 
723 #define USE_ELF_CORE_DUMP
724 #define ELF_EXEC_PAGESIZE        4096
725 
726 #endif /* TARGET_MIPS */
727 
728 #ifdef TARGET_MICROBLAZE
729 
730 #define ELF_START_MMAP 0x80000000
731 
732 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
733 
734 #define ELF_CLASS   ELFCLASS32
735 #define ELF_ARCH    EM_MICROBLAZE
736 
737 static inline void init_thread(struct target_pt_regs *regs,
738                                struct image_info *infop)
739 {
740     regs->pc = infop->entry;
741     regs->r1 = infop->start_stack;
742 
743 }
744 
745 #define ELF_EXEC_PAGESIZE        4096
746 
747 #define USE_ELF_CORE_DUMP
748 #define ELF_NREG 38
749 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
750 
751 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
752 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
753 {
754     int i, pos = 0;
755 
756     for (i = 0; i < 32; i++) {
757         (*regs)[pos++] = tswapl(env->regs[i]);
758     }
759 
760     for (i = 0; i < 6; i++) {
761         (*regs)[pos++] = tswapl(env->sregs[i]);
762     }
763 }
764 
765 #endif /* TARGET_MICROBLAZE */
766 
767 #ifdef TARGET_SH4
768 
769 #define ELF_START_MMAP 0x80000000
770 
771 #define elf_check_arch(x) ( (x) == EM_SH )
772 
773 #define ELF_CLASS ELFCLASS32
774 #define ELF_ARCH  EM_SH
775 
776 static inline void init_thread(struct target_pt_regs *regs,
777                                struct image_info *infop)
778 {
779     /* Check other registers XXXXX */
780     regs->pc = infop->entry;
781     regs->regs[15] = infop->start_stack;
782 }
783 
784 /* See linux kernel: arch/sh/include/asm/elf.h.  */
785 #define ELF_NREG 23
786 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
787 
788 /* See linux kernel: arch/sh/include/asm/ptrace.h.  */
789 enum {
790     TARGET_REG_PC = 16,
791     TARGET_REG_PR = 17,
792     TARGET_REG_SR = 18,
793     TARGET_REG_GBR = 19,
794     TARGET_REG_MACH = 20,
795     TARGET_REG_MACL = 21,
796     TARGET_REG_SYSCALL = 22
797 };
798 
799 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
800                                       const CPUState *env)
801 {
802     int i;
803 
804     for (i = 0; i < 16; i++) {
805         (*regs)[i] = tswapl(env->gregs[i]);
806     }
807 
808     (*regs)[TARGET_REG_PC] = tswapl(env->pc);
809     (*regs)[TARGET_REG_PR] = tswapl(env->pr);
810     (*regs)[TARGET_REG_SR] = tswapl(env->sr);
811     (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
812     (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
813     (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
814     (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
815 }
816 
817 #define USE_ELF_CORE_DUMP
818 #define ELF_EXEC_PAGESIZE        4096
819 
820 #endif
821 
822 #ifdef TARGET_CRIS
823 
824 #define ELF_START_MMAP 0x80000000
825 
826 #define elf_check_arch(x) ( (x) == EM_CRIS )
827 
828 #define ELF_CLASS ELFCLASS32
829 #define ELF_ARCH  EM_CRIS
830 
831 static inline void init_thread(struct target_pt_regs *regs,
832                                struct image_info *infop)
833 {
834     regs->erp = infop->entry;
835 }
836 
837 #define ELF_EXEC_PAGESIZE        8192
838 
839 #endif
840 
841 #ifdef TARGET_M68K
842 
843 #define ELF_START_MMAP 0x80000000
844 
845 #define elf_check_arch(x) ( (x) == EM_68K )
846 
847 #define ELF_CLASS       ELFCLASS32
848 #define ELF_ARCH        EM_68K
849 
850 /* ??? Does this need to do anything?
851    #define ELF_PLAT_INIT(_r) */
852 
853 static inline void init_thread(struct target_pt_regs *regs,
854                                struct image_info *infop)
855 {
856     regs->usp = infop->start_stack;
857     regs->sr = 0;
858     regs->pc = infop->entry;
859 }
860 
861 /* See linux kernel: arch/m68k/include/asm/elf.h.  */
862 #define ELF_NREG 20
863 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
864 
865 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
866 {
867     (*regs)[0] = tswapl(env->dregs[1]);
868     (*regs)[1] = tswapl(env->dregs[2]);
869     (*regs)[2] = tswapl(env->dregs[3]);
870     (*regs)[3] = tswapl(env->dregs[4]);
871     (*regs)[4] = tswapl(env->dregs[5]);
872     (*regs)[5] = tswapl(env->dregs[6]);
873     (*regs)[6] = tswapl(env->dregs[7]);
874     (*regs)[7] = tswapl(env->aregs[0]);
875     (*regs)[8] = tswapl(env->aregs[1]);
876     (*regs)[9] = tswapl(env->aregs[2]);
877     (*regs)[10] = tswapl(env->aregs[3]);
878     (*regs)[11] = tswapl(env->aregs[4]);
879     (*regs)[12] = tswapl(env->aregs[5]);
880     (*regs)[13] = tswapl(env->aregs[6]);
881     (*regs)[14] = tswapl(env->dregs[0]);
882     (*regs)[15] = tswapl(env->aregs[7]);
883     (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
884     (*regs)[17] = tswapl(env->sr);
885     (*regs)[18] = tswapl(env->pc);
886     (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
887 }
888 
889 #define USE_ELF_CORE_DUMP
890 #define ELF_EXEC_PAGESIZE       8192
891 
892 #endif
893 
894 #ifdef TARGET_ALPHA
895 
896 #define ELF_START_MMAP (0x30000000000ULL)
897 
898 #define elf_check_arch(x) ( (x) == ELF_ARCH )
899 
900 #define ELF_CLASS      ELFCLASS64
901 #define ELF_ARCH       EM_ALPHA
902 
903 static inline void init_thread(struct target_pt_regs *regs,
904                                struct image_info *infop)
905 {
906     regs->pc = infop->entry;
907     regs->ps = 8;
908     regs->usp = infop->start_stack;
909 }
910 
911 #define ELF_EXEC_PAGESIZE        8192
912 
913 #endif /* TARGET_ALPHA */
914 
915 #ifdef TARGET_S390X
916 
917 #define ELF_START_MMAP (0x20000000000ULL)
918 
919 #define elf_check_arch(x) ( (x) == ELF_ARCH )
920 
921 #define ELF_CLASS	ELFCLASS64
922 #define ELF_DATA	ELFDATA2MSB
923 #define ELF_ARCH	EM_S390
924 
925 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
926 {
927     regs->psw.addr = infop->entry;
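    /* Set the EA and BA bits: execute in 64-bit addressing mode. */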
928     regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
929     regs->gprs[15] = infop->start_stack;
930 }
931 
932 #endif /* TARGET_S390X */
933 
934 #ifndef ELF_PLATFORM
935 #define ELF_PLATFORM (NULL)
936 #endif
937 
938 #ifndef ELF_HWCAP
939 #define ELF_HWCAP 0
940 #endif
941 
942 #ifdef TARGET_ABI32
943 #undef ELF_CLASS
944 #define ELF_CLASS ELFCLASS32
945 #undef bswaptls
946 #define bswaptls(ptr) bswap32s(ptr)
947 #endif
948 
949 #include "elf.h"
950 
951 struct exec
952 {
953     unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
954     unsigned int a_text;   /* length of text, in bytes */
955     unsigned int a_data;   /* length of data, in bytes */
956     unsigned int a_bss;    /* length of uninitialized data area, in bytes */
957     unsigned int a_syms;   /* length of symbol table data in file, in bytes */
958     unsigned int a_entry;  /* start address */
959     unsigned int a_trsize; /* length of relocation info for text, in bytes */
960     unsigned int a_drsize; /* length of relocation info for data, in bytes */
961 };
962 
963 
964 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
965 #define OMAGIC 0407
966 #define NMAGIC 0410
967 #define ZMAGIC 0413
968 #define QMAGIC 0314
969 
970 /* Necessary parameters */
971 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
972 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
973 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
974 
975 #define DLINFO_ITEMS 14
976 
977 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
978 {
979     memcpy(to, from, n);
980 }
981 
982 #ifdef BSWAP_NEEDED
983 static void bswap_ehdr(struct elfhdr *ehdr)
984 {
985     bswap16s(&ehdr->e_type);            /* Object file type */
986     bswap16s(&ehdr->e_machine);         /* Architecture */
987     bswap32s(&ehdr->e_version);         /* Object file version */
988     bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
989     bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
990     bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
991     bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
992     bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
993     bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
994     bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
995     bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
996     bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
997     bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
998 }
999 
1000 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
1001 {
1002     int i;
1003     for (i = 0; i < phnum; ++i, ++phdr) {
1004         bswap32s(&phdr->p_type);        /* Segment type */
1005         bswap32s(&phdr->p_flags);       /* Segment flags */
1006         bswaptls(&phdr->p_offset);      /* Segment file offset */
1007         bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
1008         bswaptls(&phdr->p_paddr);       /* Segment physical address */
1009         bswaptls(&phdr->p_filesz);      /* Segment size in file */
1010         bswaptls(&phdr->p_memsz);       /* Segment size in memory */
1011         bswaptls(&phdr->p_align);       /* Segment alignment */
1012     }
1013 }
1014 
1015 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
1016 {
1017     int i;
1018     for (i = 0; i < shnum; ++i, ++shdr) {
1019         bswap32s(&shdr->sh_name);
1020         bswap32s(&shdr->sh_type);
1021         bswaptls(&shdr->sh_flags);
1022         bswaptls(&shdr->sh_addr);
1023         bswaptls(&shdr->sh_offset);
1024         bswaptls(&shdr->sh_size);
1025         bswap32s(&shdr->sh_link);
1026         bswap32s(&shdr->sh_info);
1027         bswaptls(&shdr->sh_addralign);
1028         bswaptls(&shdr->sh_entsize);
1029     }
1030 }
1031 
1032 static void bswap_sym(struct elf_sym *sym)
1033 {
1034     bswap32s(&sym->st_name);
1035     bswaptls(&sym->st_value);
1036     bswaptls(&sym->st_size);
1037     bswap16s(&sym->st_shndx);
1038 }
1039 #else
1040 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
1041 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
1042 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
1043 static inline void bswap_sym(struct elf_sym *sym) { }
1044 #endif
1045 
1046 #ifdef USE_ELF_CORE_DUMP
1047 static int elf_core_dump(int, const CPUState *);
1048 #endif /* USE_ELF_CORE_DUMP */
1049 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
1050 
1051 /* Verify the portions of EHDR within E_IDENT for the target.
1052    This can be performed before bswapping the entire header.  */
1053 static bool elf_check_ident(struct elfhdr *ehdr)
1054 {
1055     return (ehdr->e_ident[EI_MAG0] == ELFMAG0
1056             && ehdr->e_ident[EI_MAG1] == ELFMAG1
1057             && ehdr->e_ident[EI_MAG2] == ELFMAG2
1058             && ehdr->e_ident[EI_MAG3] == ELFMAG3
1059             && ehdr->e_ident[EI_CLASS] == ELF_CLASS
1060             && ehdr->e_ident[EI_DATA] == ELF_DATA
1061             && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
1062 }
1063 
1064 /* Verify the portions of EHDR outside of E_IDENT for the target.
1065    This has to wait until after bswapping the header.  */
1066 static bool elf_check_ehdr(struct elfhdr *ehdr)
1067 {
1068     return (elf_check_arch(ehdr->e_machine)
1069             && ehdr->e_ehsize == sizeof(struct elfhdr)
1070             && ehdr->e_phentsize == sizeof(struct elf_phdr)
1071             && ehdr->e_shentsize == sizeof(struct elf_shdr)
1072             && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
1073 }
1074 
1075 /*
1076  * 'copy_elf_strings()' copies argument/environment strings from user
1077  * memory to free pages in kernel mem. These are in a format ready
1078  * to be put directly into the top of new user memory.
1079  *
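 * Strings are copied last-argument-first and byte-by-byte from the tail,
 * so they end up packed against the top of the argument pages.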
1080  */
1081 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
1082                                   abi_ulong p)
1083 {
1084     char *tmp, *tmp1, *pag = NULL;
1085     int len, offset = 0;
1086 
1087     if (!p) {
1088         return 0;       /* bullet-proofing */
1089     }
1090     while (argc-- > 0) {
1091         tmp = argv[argc];
1092         if (!tmp) {
1093             fprintf(stderr, "VFS: argc is wrong\n");
1094             exit(-1);
1095         }
1096         tmp1 = tmp;
1097         while (*tmp++);
1098         len = tmp - tmp1;
1099         if (p < len) {  /* this shouldn't happen - 128kB */
1100             return 0;
1101         }
1102         while (len) {
1103             --p; --tmp; --len;
1104             if (--offset < 0) {
1105                 offset = p % TARGET_PAGE_SIZE;
1106                 pag = (char *)page[p/TARGET_PAGE_SIZE];
1107                 if (!pag) {
1108                     pag = g_try_malloc0(TARGET_PAGE_SIZE);
1109                     page[p/TARGET_PAGE_SIZE] = pag;
1110                     if (!pag)
1111                         return 0;
1112                 }
1113             }
1114             if (len == 0 || offset == 0) {
1115                 *(pag + offset) = *tmp;
1116             }
1117             else {
1118                 int bytes_to_copy = (len > offset) ? offset : len;
1119                 tmp -= bytes_to_copy;
1120                 p -= bytes_to_copy;
1121                 offset -= bytes_to_copy;
1122                 len -= bytes_to_copy;
1123                 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
1124             }
1125         }
1126     }
1127     return p;
1128 }
1129 
1130 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
1131                                  struct image_info *info)
1132 {
1133     abi_ulong stack_base, size, error, guard;
1134     int i;
1135 
1136     /* Create enough stack to hold everything.  If we don't use
1137        it for args, we'll use it for something else.  */
1138     size = guest_stack_size;
1139     if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) {
1140         size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1141     }
1142     guard = TARGET_PAGE_SIZE;
1143     if (guard < qemu_real_host_page_size) {
1144         guard = qemu_real_host_page_size;
1145     }
1146 
1147     error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1148                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1149     if (error == -1) {
1150         perror("mmap stack");
1151         exit(-1);
1152     }
1153 
1154     /* We reserve one extra page at the top of the stack as guard.  */
1155     target_mprotect(error, guard, PROT_NONE);
1156 
1157     info->stack_limit = error + guard;
1158     stack_base = info->stack_limit + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1159     p += stack_base;
1160 
1161     for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1162         if (bprm->page[i]) {
1163             info->rss++;
1164             /* FIXME - check return value of memcpy_to_target() for failure */
1165             memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
1166             g_free(bprm->page[i]);
1167         }
1168         stack_base += TARGET_PAGE_SIZE;
1169     }
1170     return p;
1171 }
1172 
1173 /* Map and zero the bss.  We need to explicitly zero any fractional pages
1174    after the data section (i.e. bss).  */
1175 static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1176 {
1177     uintptr_t host_start, host_map_start, host_end;
1178 
1179     last_bss = TARGET_PAGE_ALIGN(last_bss);
1180 
1181     /* ??? There is confusion between qemu_real_host_page_size and
1182        qemu_host_page_size here and elsewhere in target_mmap, which
1183        may lead to the end of the data section mapping from the file
1184        not being mapped.  At least there was an explicit test and
1185        comment for that here, suggesting that "the file size must
1186        be known".  The comment probably pre-dates the introduction
1187        of the fstat system call in target_mmap which does in fact
1188        find out the size.  What isn't clear is if the workaround
1189        here is still actually needed.  For now, continue with it,
1190        but merge it with the "normal" mmap that would allocate the bss.  */
1191 
1192     host_start = (uintptr_t) g2h(elf_bss);
1193     host_end = (uintptr_t) g2h(last_bss);
1194     host_map_start = (host_start + qemu_real_host_page_size - 1);
1195     host_map_start &= -qemu_real_host_page_size;
1196 
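    /* Pages that lie wholly beyond the file mapping are mapped anonymously
       below; the partial page shared with the data segment is cleared by
       the final memset.  */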
1197     if (host_map_start < host_end) {
1198         void *p = mmap((void *)host_map_start, host_end - host_map_start,
1199                        prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1200         if (p == MAP_FAILED) {
1201             perror("cannot mmap brk");
1202             exit(-1);
1203         }
1204 
1205         /* Since we didn't use target_mmap, make sure to record
1206            the validity of the pages with qemu.  */
1207         page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
1208     }
1209 
1210     if (host_start < host_map_start) {
1211         memset((void *)host_start, 0, host_map_start - host_start);
1212     }
1213 }
1214 
1215 #ifdef CONFIG_USE_FDPIC
1216 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
1217 {
1218     uint16_t n;
1219     struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
1220 
1221     /* elf32_fdpic_loadseg */
1222     n = info->nsegs;
1223     while (n--) {
1224         sp -= 12;
1225         put_user_u32(loadsegs[n].addr, sp+0);
1226         put_user_u32(loadsegs[n].p_vaddr, sp+4);
1227         put_user_u32(loadsegs[n].p_memsz, sp+8);
1228     }
1229 
1230     /* elf32_fdpic_loadmap */
1231     sp -= 4;
1232     put_user_u16(0, sp+0); /* version */
1233     put_user_u16(info->nsegs, sp+2); /* nsegs */
1234 
1235     info->personality = PER_LINUX_FDPIC;
1236     info->loadmap_addr = sp;
1237 
1238     return sp;
1239 }
1240 #endif
1241 
1242 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1243                                    struct elfhdr *exec,
1244                                    struct image_info *info,
1245                                    struct image_info *interp_info)
1246 {
1247     abi_ulong sp;
1248     abi_ulong sp_auxv;
1249     int size;
1250     int i;
1251     abi_ulong u_rand_bytes;
1252     uint8_t k_rand_bytes[16];
1253     abi_ulong u_platform;
1254     const char *k_platform;
1255     const int n = sizeof(elf_addr_t);
1256 
1257     sp = p;
1258 
1259 #ifdef CONFIG_USE_FDPIC
1260     /* Needs to be before we load the env/argc/... */
1261     if (elf_is_fdpic(exec)) {
1262         /* Need 4 byte alignment for these structs */
1263         sp &= ~3;
1264         sp = loader_build_fdpic_loadmap(info, sp);
1265         info->other_info = interp_info;
1266         if (interp_info) {
1267             interp_info->other_info = info;
1268             sp = loader_build_fdpic_loadmap(interp_info, sp);
1269         }
1270     }
1271 #endif
1272 
1273     u_platform = 0;
1274     k_platform = ELF_PLATFORM;
1275     if (k_platform) {
1276         size_t len = strlen(k_platform) + 1;
1277         sp -= (len + n - 1) & ~(n - 1);
1278         u_platform = sp;
1279         /* FIXME - check return value of memcpy_to_target() for failure */
1280         memcpy_to_target(sp, k_platform, len);
1281     }
1282 
1283     /*
1284      * Generate 16 random bytes for userspace PRNG seeding (not
1285      * cryptographically secure, but that is not the aim of QEMU).
1286      */
1287     srand((unsigned int) time(NULL));
1288     for (i = 0; i < 16; i++) {
1289         k_rand_bytes[i] = rand();
1290     }
1291     sp -= 16;
1292     u_rand_bytes = sp;
1293     /* FIXME - check return value of memcpy_to_target() for failure */
1294     memcpy_to_target(sp, k_rand_bytes, 16);
1295 
1296     /*
1297      * Force 16 byte _final_ alignment here for generality.
1298      */
1299     sp = sp &~ (abi_ulong)15;
1300     size = (DLINFO_ITEMS + 1) * 2;
1301     if (k_platform)
1302         size += 2;
1303 #ifdef DLINFO_ARCH_ITEMS
1304     size += DLINFO_ARCH_ITEMS * 2;
1305 #endif
1306     size += envc + argc + 2;
1307     size += 1;  /* argc itself */
1308     size *= n;
1309     if (size & 15)
1310         sp -= 16 - (size & 15);
1311 
1312     /* This is correct because Linux defines
1313      * elf_addr_t as Elf32_Off / Elf64_Off
1314      */
1315 #define NEW_AUX_ENT(id, val) do {               \
1316         sp -= n; put_user_ual(val, sp);         \
1317         sp -= n; put_user_ual(id, sp);          \
1318     } while(0)
1319 
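    /* Entries are pushed downwards, so AT_NULL (pushed first) ends up as
       the terminator at the highest address of the vector.  */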
1320     sp_auxv = sp;
1321     NEW_AUX_ENT (AT_NULL, 0);
1322 
1323     /* There must be exactly DLINFO_ITEMS entries here.  */
1324     NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
1325     NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1326     NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1327     NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1328     NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
1329     NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1330     NEW_AUX_ENT(AT_ENTRY, info->entry);
1331     NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1332     NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1333     NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1334     NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1335     NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1336     NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1337     NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
1338 
1339     if (k_platform)
1340         NEW_AUX_ENT(AT_PLATFORM, u_platform);
1341 #ifdef ARCH_DLINFO
1342     /*
1343      * ARCH_DLINFO must come last so platform specific code can enforce
1344      * special alignment requirements on the AUXV if necessary (e.g. PPC).
1345      */
1346     ARCH_DLINFO;
1347 #endif
1348 #undef NEW_AUX_ENT
1349 
1350     info->saved_auxv = sp;
1351     info->auxv_len = sp_auxv - sp;
1352 
1353     sp = loader_build_argptr(envc, argc, sp, p, 0);
1354     return sp;
1355 }
1356 
1357 #ifndef TARGET_HAS_GUEST_VALIDATE_BASE
1358 /* If the guest doesn't have a validation function just agree */
1359 bool guest_validate_base(unsigned long guest_base)
1360 {
1361     return 1;
1362 }
1363 #endif
1364 
1365 static void probe_guest_base(const char *image_name,
1366                              abi_ulong loaddr, abi_ulong hiaddr)
1367 {
1368     /* Probe for a suitable guest base address, if the user has not set
1369      * it explicitly, and set guest_base appropriately.
1370      * In case of error we will print a suitable message and exit.
1371      */
1372 #if defined(CONFIG_USE_GUEST_BASE)
1373     const char *errmsg;
1374     if (!have_guest_base && !reserved_va) {
1375         unsigned long host_start, real_start, host_size;
1376 
1377         /* Round addresses to page boundaries.  */
1378         loaddr &= qemu_host_page_mask;
1379         hiaddr = HOST_PAGE_ALIGN(hiaddr);
1380 
1381         if (loaddr < mmap_min_addr) {
1382             host_start = HOST_PAGE_ALIGN(mmap_min_addr);
1383         } else {
1384             host_start = loaddr;
1385             if (host_start != loaddr) {
1386                 errmsg = "Address overflow loading ELF binary";
1387                 goto exit_errmsg;
1388             }
1389         }
1390         host_size = hiaddr - loaddr;
1391         while (1) {
1392             /* Do not use mmap_find_vma here because that is limited to the
1393                guest address space.  We are going to make the
1394                guest address space fit whatever we're given.  */
1395             real_start = (unsigned long)
1396                 mmap((void *)host_start, host_size, PROT_NONE,
1397                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
1398             if (real_start == (unsigned long)-1) {
1399                 goto exit_perror;
1400             }
1401             guest_base = real_start - loaddr;
1402             if ((real_start == host_start) &&
1403                 guest_validate_base(guest_base)) {
1404                 break;
1405             }
1406             /* That address didn't work.  Unmap and try a different one.
1407                The address the host picked is typically right at
1408                the top of the host address space and leaves the guest with
1409                no usable address space.  Resort to a linear search.  We
1410                already compensated for mmap_min_addr, so this should not
1411                happen often.  Probably means we got unlucky and host
1412                address space randomization put a shared library somewhere
1413                inconvenient.  */
1414             munmap((void *)real_start, host_size);
1415             host_start += qemu_host_page_size;
1416             if (host_start == loaddr) {
1417                 /* Theoretically possible if host doesn't have any suitably
1418                    aligned areas.  Normally the first mmap will fail.  */
1419                 errmsg = "Unable to find space for application";
1420                 goto exit_errmsg;
1421             }
1422         }
1423         qemu_log("Relocating guest address space from 0x"
1424                  TARGET_ABI_FMT_lx " to 0x%lx\n",
1425                  loaddr, real_start);
1426     }
1427     return;
1428 
1429 exit_perror:
1430     errmsg = strerror(errno);
1431 exit_errmsg:
1432     fprintf(stderr, "%s: %s\n", image_name, errmsg);
1433     exit(-1);
1434 #endif
1435 }
1436 
1437 
1438 /* Load an ELF image into the address space.
1439 
1440    IMAGE_NAME is the filename of the image, to use in error messages.
1441    IMAGE_FD is the open file descriptor for the image.
1442 
1443    BPRM_BUF is a copy of the beginning of the file; this of course
1444    contains the elf file header at offset 0.  It is assumed that this
1445    buffer is sufficiently aligned to present no problems to the host
1446    in accessing data at aligned offsets within the buffer.
1447 
1448    On return: INFO values will be filled in, as necessary or available.  */
1449 
1450 static void load_elf_image(const char *image_name, int image_fd,
1451                            struct image_info *info, char **pinterp_name,
1452                            char bprm_buf[BPRM_BUF_SIZE])
1453 {
1454     struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
1455     struct elf_phdr *phdr;
1456     abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
1457     int i, retval;
1458     const char *errmsg;
1459 
1460     /* First of all, some simple consistency checks */
1461     errmsg = "Invalid ELF image for this architecture";
1462     if (!elf_check_ident(ehdr)) {
1463         goto exit_errmsg;
1464     }
1465     bswap_ehdr(ehdr);
1466     if (!elf_check_ehdr(ehdr)) {
1467         goto exit_errmsg;
1468     }
1469 
1470     i = ehdr->e_phnum * sizeof(struct elf_phdr);
1471     if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
1472         phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
1473     } else {
1474         phdr = (struct elf_phdr *) alloca(i);
1475         retval = pread(image_fd, phdr, i, ehdr->e_phoff);
1476         if (retval != i) {
1477             goto exit_read;
1478         }
1479     }
1480     bswap_phdr(phdr, ehdr->e_phnum);
1481 
1482 #ifdef CONFIG_USE_FDPIC
1483     info->nsegs = 0;
1484     info->pt_dynamic_addr = 0;
1485 #endif
1486 
1487     /* Find the maximum size of the image and allocate an appropriate
1488        amount of memory to handle that.  */
1489     loaddr = -1, hiaddr = 0;
1490     for (i = 0; i < ehdr->e_phnum; ++i) {
1491         if (phdr[i].p_type == PT_LOAD) {
1492             abi_ulong a = phdr[i].p_vaddr;
1493             if (a < loaddr) {
1494                 loaddr = a;
1495             }
1496             a += phdr[i].p_memsz;
1497             if (a > hiaddr) {
1498                 hiaddr = a;
1499             }
1500 #ifdef CONFIG_USE_FDPIC
1501             ++info->nsegs;
1502 #endif
1503         }
1504     }
1505 
1506     load_addr = loaddr;
1507     if (ehdr->e_type == ET_DYN) {
1508         /* The image indicates that it can be loaded anywhere.  Find a
1509            location that can hold the memory space required.  If the
1510            image is pre-linked, LOADDR will be non-zero.  Since we do
1511            not supply MAP_FIXED here we'll use that address if and
1512            only if it remains available.  */
1513         load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
1514                                 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
1515                                 -1, 0);
1516         if (load_addr == -1) {
1517             goto exit_perror;
1518         }
1519     } else if (pinterp_name != NULL) {
1520         /* This is the main executable.  Make sure that the low
1521            address does not conflict with MMAP_MIN_ADDR or the
1522            QEMU application itself.  */
1523         probe_guest_base(image_name, loaddr, hiaddr);
1524     }
1525     load_bias = load_addr - loaddr;
1526 
1527 #ifdef CONFIG_USE_FDPIC
1528     {
1529         struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
1530             g_malloc(sizeof(*loadsegs) * info->nsegs);
1531 
1532         for (i = 0; i < ehdr->e_phnum; ++i) {
1533             switch (phdr[i].p_type) {
1534             case PT_DYNAMIC:
1535                 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
1536                 break;
1537             case PT_LOAD:
1538                 loadsegs->addr = phdr[i].p_vaddr + load_bias;
1539                 loadsegs->p_vaddr = phdr[i].p_vaddr;
1540                 loadsegs->p_memsz = phdr[i].p_memsz;
1541                 ++loadsegs;
1542                 break;
1543             }
1544         }
1545     }
1546 #endif
1547 
1548     info->load_bias = load_bias;
1549     info->load_addr = load_addr;
1550     info->entry = ehdr->e_entry + load_bias;
1551     info->start_code = -1;
1552     info->end_code = 0;
1553     info->start_data = -1;
1554     info->end_data = 0;
1555     info->brk = 0;
1556 
1557     for (i = 0; i < ehdr->e_phnum; i++) {
1558         struct elf_phdr *eppnt = phdr + i;
1559         if (eppnt->p_type == PT_LOAD) {
1560             abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
1561             int elf_prot = 0;
1562 
1563             if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
1564             if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1565             if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1566 
1567             vaddr = load_bias + eppnt->p_vaddr;
1568             vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
1569             vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
1570 
1571             error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
1572                                 elf_prot, MAP_PRIVATE | MAP_FIXED,
1573                                 image_fd, eppnt->p_offset - vaddr_po);
1574             if (error == -1) {
1575                 goto exit_perror;
1576             }
1577 
1578             vaddr_ef = vaddr + eppnt->p_filesz;
1579             vaddr_em = vaddr + eppnt->p_memsz;
1580 
1581             /* If the load segment requests extra zeros (e.g. bss), map it.  */
1582             if (vaddr_ef < vaddr_em) {
1583                 zero_bss(vaddr_ef, vaddr_em, elf_prot);
1584             }
1585 
1586             /* Find the full program boundaries.  */
1587             if (elf_prot & PROT_EXEC) {
1588                 if (vaddr < info->start_code) {
1589                     info->start_code = vaddr;
1590                 }
1591                 if (vaddr_ef > info->end_code) {
1592                     info->end_code = vaddr_ef;
1593                 }
1594             }
1595             if (elf_prot & PROT_WRITE) {
1596                 if (vaddr < info->start_data) {
1597                     info->start_data = vaddr;
1598                 }
1599                 if (vaddr_ef > info->end_data) {
1600                     info->end_data = vaddr_ef;
1601                 }
1602                 if (vaddr_em > info->brk) {
1603                     info->brk = vaddr_em;
1604                 }
1605             }
1606         } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
1607             char *interp_name;
1608 
1609             if (*pinterp_name) {
1610                 errmsg = "Multiple PT_INTERP entries";
1611                 goto exit_errmsg;
1612             }
1613             interp_name = malloc(eppnt->p_filesz);
1614             if (!interp_name) {
1615                 goto exit_perror;
1616             }
1617 
1618             if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
1619                 memcpy(interp_name, bprm_buf + eppnt->p_offset,
1620                        eppnt->p_filesz);
1621             } else {
1622                 retval = pread(image_fd, interp_name, eppnt->p_filesz,
1623                                eppnt->p_offset);
1624                 if (retval != eppnt->p_filesz) {
1625                     goto exit_perror;
1626                 }
1627             }
1628             if (interp_name[eppnt->p_filesz - 1] != 0) {
1629                 errmsg = "Invalid PT_INTERP entry";
1630                 goto exit_errmsg;
1631             }
1632             *pinterp_name = interp_name;
1633         }
1634     }
1635 
1636     if (info->end_data == 0) {
1637         info->start_data = info->end_code;
1638         info->end_data = info->end_code;
1639         info->brk = info->end_code;
1640     }
1641 
1642     if (qemu_log_enabled()) {
1643         load_symbols(ehdr, image_fd, load_bias);
1644     }
1645 
1646     close(image_fd);
1647     return;
1648 
1649  exit_read:
1650     if (retval >= 0) {
1651         errmsg = "Incomplete read of file header";
1652         goto exit_errmsg;
1653     }
1654  exit_perror:
1655     errmsg = strerror(errno);
1656  exit_errmsg:
1657     fprintf(stderr, "%s: %s\n", image_name, errmsg);
1658     exit(-1);
1659 }
1660 
1661 static void load_elf_interp(const char *filename, struct image_info *info,
1662                             char bprm_buf[BPRM_BUF_SIZE])
1663 {
1664     int fd, retval;
1665 
1666     fd = open(path(filename), O_RDONLY);
1667     if (fd < 0) {
1668         goto exit_perror;
1669     }
1670 
1671     retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
1672     if (retval < 0) {
1673         goto exit_perror;
1674     }
1675     if (retval < BPRM_BUF_SIZE) {
1676         memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
1677     }
1678 
1679     load_elf_image(filename, fd, info, NULL, bprm_buf);
1680     return;
1681 
1682  exit_perror:
1683     fprintf(stderr, "%s: %s\n", filename, strerror(errno));
1684     exit(-1);
1685 }
1686 
1687 static int symfind(const void *s0, const void *s1)
1688 {
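    /*
     * bsearch() comparator: treat each symbol as the half-open interval
     * [st_value, st_value + st_size) and report whether the address lies
     * below it, inside it (0) or above it.
     */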
1689     target_ulong addr = *(target_ulong *)s0;
1690     struct elf_sym *sym = (struct elf_sym *)s1;
1691     int result = 0;
1692     if (addr < sym->st_value) {
1693         result = -1;
1694     } else if (addr >= sym->st_value + sym->st_size) {
1695         result = 1;
1696     }
1697     return result;
1698 }
1699 
1700 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1701 {
1702 #if ELF_CLASS == ELFCLASS32
1703     struct elf_sym *syms = s->disas_symtab.elf32;
1704 #else
1705     struct elf_sym *syms = s->disas_symtab.elf64;
1706 #endif
1707 
1708     /* binary search */
1709     struct elf_sym *sym;
1710 
1711     sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
1712     if (sym != NULL) {
1713         return s->disas_strtab + sym->st_name;
1714     }
1715 
1716     return "";
1717 }
1718 
1719 /* FIXME: This should use elf_ops.h  */
1720 static int symcmp(const void *s0, const void *s1)
1721 {
1722     struct elf_sym *sym0 = (struct elf_sym *)s0;
1723     struct elf_sym *sym1 = (struct elf_sym *)s1;
1724     return (sym0->st_value < sym1->st_value)
1725         ? -1
1726         : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1727 }
1728 
1729 /* Best attempt to load symbols from this ELF object. */
1730 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
1731 {
1732     int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
1733     struct elf_shdr *shdr;
1734     char *strings = NULL;
1735     struct syminfo *s = NULL;
1736     struct elf_sym *new_syms, *syms = NULL;
1737 
1738     shnum = hdr->e_shnum;
1739     i = shnum * sizeof(struct elf_shdr);
1740     shdr = (struct elf_shdr *)alloca(i);
1741     if (pread(fd, shdr, i, hdr->e_shoff) != i) {
1742         return;
1743     }
1744 
1745     bswap_shdr(shdr, shnum);
1746     for (i = 0; i < shnum; ++i) {
1747         if (shdr[i].sh_type == SHT_SYMTAB) {
1748             sym_idx = i;
1749             str_idx = shdr[i].sh_link;
1750             goto found;
1751         }
1752     }
1753 
1754     /* There will be no symbol table if the file was stripped.  */
1755     return;
1756 
1757  found:
1758     /* Now we know where the strtab and symtab are.  Snarf them.  */
1759     s = malloc(sizeof(*s));
1760     if (!s) {
1761         goto give_up;
1762     }
1763 
1764     i = shdr[str_idx].sh_size;
1765     s->disas_strtab = strings = malloc(i);
1766     if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) {
1767         goto give_up;
1768     }
1769 
1770     i = shdr[sym_idx].sh_size;
1771     syms = malloc(i);
1772     if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) {
1773         goto give_up;
1774     }
1775 
1776     nsyms = i / sizeof(struct elf_sym);
1777     for (i = 0; i < nsyms; ) {
1778         bswap_sym(syms + i);
1779         /* Throw away entries which we do not need.  */
1780         if (syms[i].st_shndx == SHN_UNDEF
1781             || syms[i].st_shndx >= SHN_LORESERVE
1782             || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1783             if (i < --nsyms) {
1784                 syms[i] = syms[nsyms];
1785             }
1786         } else {
1787 #if defined(TARGET_ARM) || defined(TARGET_MIPS)
1788             /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
1789             syms[i].st_value &= ~(target_ulong)1;
1790 #endif
1791             syms[i].st_value += load_bias;
1792             i++;
1793         }
1794     }
1795 
1796     /* No "useful" symbol.  */
1797     if (nsyms == 0) {
1798         goto give_up;
1799     }
1800 
1801     /* Attempt to free the storage associated with the local symbols
1802        that we threw away.  Whether or not this has any effect on the
1803        memory allocation depends on the malloc implementation and how
1804        many symbols we managed to discard.  */
1805     new_syms = realloc(syms, nsyms * sizeof(*syms));
1806     if (new_syms == NULL) {
1807         goto give_up;
1808     }
1809     syms = new_syms;
1810 
1811     qsort(syms, nsyms, sizeof(*syms), symcmp);
1812 
1813     s->disas_num_syms = nsyms;
1814 #if ELF_CLASS == ELFCLASS32
1815     s->disas_symtab.elf32 = syms;
1816 #else
1817     s->disas_symtab.elf64 = syms;
1818 #endif
1819     s->lookup_symbol = lookup_symbolxx;
1820     s->next = syminfos;
1821     syminfos = s;
1822 
1823     return;
1824 
1825 give_up:
1826     free(s);
1827     free(strings);
1828     free(syms);
1829 }
1830 
1831 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1832                     struct image_info * info)
1833 {
1834     struct image_info interp_info;
1835     struct elfhdr elf_ex;
1836     char *elf_interpreter = NULL;
1837 
1838     info->start_mmap = (abi_ulong)ELF_START_MMAP;
1839     info->mmap = 0;
1840     info->rss = 0;
1841 
1842     load_elf_image(bprm->filename, bprm->fd, info,
1843                    &elf_interpreter, bprm->buf);
1844 
1845     /* ??? We need a copy of the elf header for passing to create_elf_tables.
1846        If we do nothing, we'll have overwritten this when we re-use bprm->buf
1847        when we load the interpreter.  */
1848     elf_ex = *(struct elfhdr *)bprm->buf;
1849 
1850     bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1851     bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
1852     bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
1853     if (!bprm->p) {
1854         fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
1855         exit(-1);
1856     }
1857 
1858     /* Do this so that we can load the interpreter, if need be.  We will
1859        change some of these later */
1860     bprm->p = setup_arg_pages(bprm->p, bprm, info);
1861 
1862     if (elf_interpreter) {
1863         load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
1864 
1865         /* If the program interpreter is one of these two, then assume
1866            an iBCS2 image.  Otherwise assume a native linux image.  */
1867 
1868         if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
1869             || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
1870             info->personality = PER_SVR4;
1871 
1872             /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
1873                and some applications "depend" upon this behavior.  Since
1874                we do not have the power to recompile these, we emulate
1875                the SVr4 behavior.  Sigh.  */
1876             target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1877                         MAP_FIXED | MAP_PRIVATE, -1, 0);
1878         }
1879     }
1880 
1881     bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
1882                                 info, (elf_interpreter ? &interp_info : NULL));
1883     info->start_stack = bprm->p;
1884 
1885     /* If we have an interpreter, set that as the program's entry point.
1886        Copy the load_bias as well, to help PPC64 interpret the entry
1887        point as a function descriptor.  Do this after creating elf tables
1888        so that we copy the original program entry point into the AUXV.  */
1889     if (elf_interpreter) {
1890         info->load_bias = interp_info.load_bias;
1891         info->entry = interp_info.entry;
1892         free(elf_interpreter);
1893     }
1894 
1895 #ifdef USE_ELF_CORE_DUMP
1896     bprm->core_dump = &elf_core_dump;
1897 #endif
1898 
1899     return 0;
1900 }
1901 
1902 #ifdef USE_ELF_CORE_DUMP
1903 /*
1904  * Definitions to generate Intel SVR4-like core files.
1905  * These mostly have the same names as the SVR4 types with "target_elf_"
1906  * tacked on the front to prevent clashes with linux definitions,
1907  * and the typedef forms have been avoided.  This is mostly like
1908  * the SVR4 structure, but more Linuxy, with things that Linux does
1909  * not support and which gdb doesn't really use excluded.
1910  *
1911  * Fields we don't dump (their contents are zero) in linux-user qemu
1912  * are marked with XXX.
1913  *
1914  * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1915  *
1916  * Porting ELF coredump to a target is a (quite) simple process.  First you
1917  * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
1918  * the target resides):
1919  *
1920  * #define USE_ELF_CORE_DUMP
1921  *
1922  * Next you define the type of the register set used for dumping.  The ELF
1923  * specification says that it needs to be an array of elf_greg_t of size ELF_NREG.
1924  *
1925  * typedef <target_regtype> target_elf_greg_t;
1926  * #define ELF_NREG <number of registers>
1927  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1928  *
1929  * The last step is to implement a target-specific function that copies
1930  * registers from the given cpu into that register set.  The prototype is:
1931  *
1932  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1933  *                                const CPUState *env);
1934  *
1935  * Parameters:
1936  *     regs - copy register values into here (allocated and zeroed by caller)
1937  *     env - copy registers from here
1938  *
1939  * An example for the ARM target is provided in this file.
1940  */
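
/*
 * A minimal sketch of the three steps above for a hypothetical target
 * with 16 general-purpose registers in env->regs[] and a program counter
 * in env->pc (these register names are illustrative and not taken from
 * any real target):
 */
#if 0
typedef target_ulong target_elf_greg_t;
#define ELF_NREG 17
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUState *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapl(env->regs[i]);  /* assumed register array */
    }
    (*regs)[16] = tswapl(env->pc);          /* assumed program counter */
}
#endif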
1941 
1942 /* An ELF note in memory */
1943 struct memelfnote {
1944     const char *name;
1945     size_t     namesz;
1946     size_t     namesz_rounded;
1947     int        type;
1948     size_t     datasz;
1949     size_t     datasz_rounded;
1950     void       *data;
1951     size_t     notesz;
1952 };
1953 
1954 struct target_elf_siginfo {
1955     target_int  si_signo; /* signal number */
1956     target_int  si_code;  /* extra code */
1957     target_int  si_errno; /* errno */
1958 };
1959 
1960 struct target_elf_prstatus {
1961     struct target_elf_siginfo pr_info;      /* Info associated with signal */
1962     target_short       pr_cursig;    /* Current signal */
1963     target_ulong       pr_sigpend;   /* XXX */
1964     target_ulong       pr_sighold;   /* XXX */
1965     target_pid_t       pr_pid;
1966     target_pid_t       pr_ppid;
1967     target_pid_t       pr_pgrp;
1968     target_pid_t       pr_sid;
1969     struct target_timeval pr_utime;  /* XXX User time */
1970     struct target_timeval pr_stime;  /* XXX System time */
1971     struct target_timeval pr_cutime; /* XXX Cumulative user time */
1972     struct target_timeval pr_cstime; /* XXX Cumulative system time */
1973     target_elf_gregset_t      pr_reg;       /* GP registers */
1974     target_int         pr_fpvalid;   /* XXX */
1975 };
1976 
1977 #define ELF_PRARGSZ     (80) /* Number of chars for args */
1978 
1979 struct target_elf_prpsinfo {
1980     char         pr_state;       /* numeric process state */
1981     char         pr_sname;       /* char for pr_state */
1982     char         pr_zomb;        /* zombie */
1983     char         pr_nice;        /* nice val */
1984     target_ulong pr_flag;        /* flags */
1985     target_uid_t pr_uid;
1986     target_gid_t pr_gid;
1987     target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
1988     /* Lots missing */
1989     char    pr_fname[16];           /* filename of executable */
1990     char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
1991 };
1992 
1993 /* Here is the structure in which status of each thread is captured. */
1994 struct elf_thread_status {
1995     QTAILQ_ENTRY(elf_thread_status)  ets_link;
1996     struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
1997 #if 0
1998     elf_fpregset_t fpu;             /* NT_PRFPREG */
1999     struct task_struct *thread;
2000     elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
2001 #endif
2002     struct memelfnote notes[1];
2003     int num_notes;
2004 };
2005 
2006 struct elf_note_info {
2007     struct memelfnote   *notes;
2008     struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
2009     struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
2010 
2011     QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
2012 #if 0
2013     /*
2014      * Current version of ELF coredump doesn't support
2015      * dumping fp regs etc.
2016      */
2017     elf_fpregset_t *fpu;
2018     elf_fpxregset_t *xfpu;
2019     int thread_status_size;
2020 #endif
2021     int notes_size;
2022     int numnote;
2023 };
2024 
2025 struct vm_area_struct {
2026     abi_ulong   vma_start;  /* start vaddr of memory region */
2027     abi_ulong   vma_end;    /* end vaddr of memory region */
2028     abi_ulong   vma_flags;  /* protection etc. flags for the region */
2029     QTAILQ_ENTRY(vm_area_struct) vma_link;
2030 };
2031 
2032 struct mm_struct {
2033     QTAILQ_HEAD(, vm_area_struct) mm_mmap;
2034     int mm_count;           /* number of mappings */
2035 };
2036 
2037 static struct mm_struct *vma_init(void);
2038 static void vma_delete(struct mm_struct *);
2039 static int vma_add_mapping(struct mm_struct *, abi_ulong,
2040                            abi_ulong, abi_ulong);
2041 static int vma_get_mapping_count(const struct mm_struct *);
2042 static struct vm_area_struct *vma_first(const struct mm_struct *);
2043 static struct vm_area_struct *vma_next(struct vm_area_struct *);
2044 static abi_ulong vma_dump_size(const struct vm_area_struct *);
2045 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2046                       unsigned long flags);
2047 
2048 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
2049 static void fill_note(struct memelfnote *, const char *, int,
2050                       unsigned int, void *);
2051 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
2052 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
2053 static void fill_auxv_note(struct memelfnote *, const TaskState *);
2054 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
2055 static size_t note_size(const struct memelfnote *);
2056 static void free_note_info(struct elf_note_info *);
2057 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
2058 static void fill_thread_info(struct elf_note_info *, const CPUState *);
2059 static int core_dump_filename(const TaskState *, char *, size_t);
2060 
2061 static int dump_write(int, const void *, size_t);
2062 static int write_note(struct memelfnote *, int);
2063 static int write_note_info(struct elf_note_info *, int);
2064 
2065 #ifdef BSWAP_NEEDED
2066 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
2067 {
2068     prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
2069     prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
2070     prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
2071     prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
2072     prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
2073     prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
2074     prstatus->pr_pid = tswap32(prstatus->pr_pid);
2075     prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
2076     prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
2077     prstatus->pr_sid = tswap32(prstatus->pr_sid);
2078     /* cpu times are not filled, so we skip them */
2079     /* regs should be in correct format already */
2080     prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
2081 }
2082 
2083 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
2084 {
2085     psinfo->pr_flag = tswapl(psinfo->pr_flag);
2086     psinfo->pr_uid = tswap16(psinfo->pr_uid);
2087     psinfo->pr_gid = tswap16(psinfo->pr_gid);
2088     psinfo->pr_pid = tswap32(psinfo->pr_pid);
2089     psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
2090     psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
2091     psinfo->pr_sid = tswap32(psinfo->pr_sid);
2092 }
2093 
2094 static void bswap_note(struct elf_note *en)
2095 {
2096     bswap32s(&en->n_namesz);
2097     bswap32s(&en->n_descsz);
2098     bswap32s(&en->n_type);
2099 }
2100 #else
2101 static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
2102 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
2103 static inline void bswap_note(struct elf_note *en) { }
2104 #endif /* BSWAP_NEEDED */
2105 
2106 /*
2107  * Minimal support for linux memory regions.  These are needed
2108  * when we are finding out what memory exactly belongs to the
2109  * emulated process.  No locks are needed here, as long as the
2110  * thread that received the signal is stopped.
2111  */
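
/*
 * Usage sketch (this mirrors what elf_core_dump() below does; the generic
 * walk_memory_regions() helper enumerates the guest mappings):
 */
#if 0
struct mm_struct *mm = vma_init();
struct vm_area_struct *vma;

walk_memory_regions(mm, vma_walker);    /* collect guest mappings */
for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
    /* inspect vma->vma_start, vma->vma_end and vma->vma_flags here */
}
vma_delete(mm);
#endif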
2112 
2113 static struct mm_struct *vma_init(void)
2114 {
2115     struct mm_struct *mm;
2116 
2117     if ((mm = g_malloc(sizeof (*mm))) == NULL)
2118         return (NULL);
2119 
2120     mm->mm_count = 0;
2121     QTAILQ_INIT(&mm->mm_mmap);
2122 
2123     return (mm);
2124 }
2125 
2126 static void vma_delete(struct mm_struct *mm)
2127 {
2128     struct vm_area_struct *vma;
2129 
2130     while ((vma = vma_first(mm)) != NULL) {
2131         QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2132         g_free(vma);
2133     }
2134     g_free(mm);
2135 }
2136 
2137 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2138                            abi_ulong end, abi_ulong flags)
2139 {
2140     struct vm_area_struct *vma;
2141 
2142     if ((vma = g_malloc0(sizeof (*vma))) == NULL)
2143         return (-1);
2144 
2145     vma->vma_start = start;
2146     vma->vma_end = end;
2147     vma->vma_flags = flags;
2148 
2149     QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2150     mm->mm_count++;
2151 
2152     return (0);
2153 }
2154 
2155 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2156 {
2157     return (QTAILQ_FIRST(&mm->mm_mmap));
2158 }
2159 
2160 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2161 {
2162     return (QTAILQ_NEXT(vma, vma_link));
2163 }
2164 
2165 static int vma_get_mapping_count(const struct mm_struct *mm)
2166 {
2167     return (mm->mm_count);
2168 }
2169 
2170 /*
2171  * Calculate file (dump) size of given memory region.
2172  */
2173 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2174 {
2175     /* if we cannot even read the first page, skip it */
2176     if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2177         return (0);
2178 
2179     /*
2180      * Usually we don't dump executable pages as they contain
2181      * non-writable code that the debugger can read directly from
2182      * the target library etc.  However, thread stacks are also
2183      * marked executable, so we read in the first page of the given
2184      * region and check whether it contains an ELF header.  If there
2185      * is no ELF header, we dump it.
2186      */
2187     if (vma->vma_flags & PROT_EXEC) {
2188         char page[TARGET_PAGE_SIZE];
2189 
2190         copy_from_user(page, vma->vma_start, sizeof (page));
2191         if ((page[EI_MAG0] == ELFMAG0) &&
2192             (page[EI_MAG1] == ELFMAG1) &&
2193             (page[EI_MAG2] == ELFMAG2) &&
2194             (page[EI_MAG3] == ELFMAG3)) {
2195             /*
2196              * The mapping is possibly from an ELF binary.  Don't
2197              * dump it.
2198              */
2199             return (0);
2200         }
2201     }
2202 
2203     return (vma->vma_end - vma->vma_start);
2204 }
2205 
2206 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2207                       unsigned long flags)
2208 {
2209     struct mm_struct *mm = (struct mm_struct *)priv;
2210 
2211     vma_add_mapping(mm, start, end, flags);
2212     return (0);
2213 }
2214 
2215 static void fill_note(struct memelfnote *note, const char *name, int type,
2216                       unsigned int sz, void *data)
2217 {
2218     unsigned int namesz;
2219 
2220     namesz = strlen(name) + 1;
2221     note->name = name;
2222     note->namesz = namesz;
2223     note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2224     note->type = type;
2225     note->datasz = sz;
2226     note->datasz_rounded = roundup(sz, sizeof (int32_t));
2227 
2228     note->data = data;
2229 
2230     /*
2231      * We calculate the rounded-up note size here, as specified by
2232      * the ELF document.
2233      */
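    /*
     * For example (illustrative numbers): for the name "CORE" namesz is 5,
     * rounded to 8; a 12-byte descriptor stays 12; and since struct elf_note
     * is three 32-bit words (12 bytes), notesz = 12 + 8 + 12 = 32.
     */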
2234     note->notesz = sizeof (struct elf_note) +
2235         note->namesz_rounded + note->datasz_rounded;
2236 }
2237 
2238 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2239                             uint32_t flags)
2240 {
2241     (void) memset(elf, 0, sizeof(*elf));
2242 
2243     (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2244     elf->e_ident[EI_CLASS] = ELF_CLASS;
2245     elf->e_ident[EI_DATA] = ELF_DATA;
2246     elf->e_ident[EI_VERSION] = EV_CURRENT;
2247     elf->e_ident[EI_OSABI] = ELF_OSABI;
2248 
2249     elf->e_type = ET_CORE;
2250     elf->e_machine = machine;
2251     elf->e_version = EV_CURRENT;
2252     elf->e_phoff = sizeof(struct elfhdr);
2253     elf->e_flags = flags;
2254     elf->e_ehsize = sizeof(struct elfhdr);
2255     elf->e_phentsize = sizeof(struct elf_phdr);
2256     elf->e_phnum = segs;
2257 
2258     bswap_ehdr(elf);
2259 }
2260 
2261 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2262 {
2263     phdr->p_type = PT_NOTE;
2264     phdr->p_offset = offset;
2265     phdr->p_vaddr = 0;
2266     phdr->p_paddr = 0;
2267     phdr->p_filesz = sz;
2268     phdr->p_memsz = 0;
2269     phdr->p_flags = 0;
2270     phdr->p_align = 0;
2271 
2272     bswap_phdr(phdr, 1);
2273 }
2274 
2275 static size_t note_size(const struct memelfnote *note)
2276 {
2277     return (note->notesz);
2278 }
2279 
2280 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2281                           const TaskState *ts, int signr)
2282 {
2283     (void) memset(prstatus, 0, sizeof (*prstatus));
2284     prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2285     prstatus->pr_pid = ts->ts_tid;
2286     prstatus->pr_ppid = getppid();
2287     prstatus->pr_pgrp = getpgrp();
2288     prstatus->pr_sid = getsid(0);
2289 
2290     bswap_prstatus(prstatus);
2291 }
2292 
2293 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2294 {
2295     char *filename, *base_filename;
2296     unsigned int i, len;
2297 
2298     (void) memset(psinfo, 0, sizeof (*psinfo));
2299 
2300     len = ts->info->arg_end - ts->info->arg_start;
2301     if (len >= ELF_PRARGSZ)
2302         len = ELF_PRARGSZ - 1;
2303     if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2304         return -EFAULT;
2305     for (i = 0; i < len; i++)
2306         if (psinfo->pr_psargs[i] == 0)
2307             psinfo->pr_psargs[i] = ' ';
2308     psinfo->pr_psargs[len] = 0;
2309 
2310     psinfo->pr_pid = getpid();
2311     psinfo->pr_ppid = getppid();
2312     psinfo->pr_pgrp = getpgrp();
2313     psinfo->pr_sid = getsid(0);
2314     psinfo->pr_uid = getuid();
2315     psinfo->pr_gid = getgid();
2316 
2317     filename = strdup(ts->bprm->filename);
2318     base_filename = strdup(basename(filename));
2319     (void) strncpy(psinfo->pr_fname, base_filename,
2320                    sizeof(psinfo->pr_fname));
2321     free(base_filename);
2322     free(filename);
2323 
2324     bswap_psinfo(psinfo);
2325     return (0);
2326 }
2327 
2328 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2329 {
2330     elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2331     elf_addr_t orig_auxv = auxv;
2332     void *ptr;
2333     int len = ts->info->auxv_len;
2334 
2335     /*
2336      * The auxiliary vector is stored on the target process stack.  It contains
2337      * {type, value} pairs that we need to dump into the note.  This is not
2338      * strictly necessary but we do it here for the sake of completeness.
2339      */
2340 
2341     /* read in whole auxv vector and copy it to memelfnote */
2342     ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2343     if (ptr != NULL) {
2344         fill_note(note, "CORE", NT_AUXV, len, ptr);
2345         unlock_user(ptr, auxv, len);
2346     }
2347 }
2348 
2349 /*
2350  * Constructs the name of the coredump file.  We use the following
2351  * convention for the name:
2352  *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2353  *
2354  * Returns 0 in case of success, -1 otherwise (errno is set).
2355  */
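/* For example (illustrative): qemu_ls_20120131-120000_1234.core */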
2356 static int core_dump_filename(const TaskState *ts, char *buf,
2357                               size_t bufsize)
2358 {
2359     char timestamp[64];
2360     char *filename = NULL;
2361     char *base_filename = NULL;
2362     struct timeval tv;
2363     struct tm tm;
2364 
2365     assert(bufsize >= PATH_MAX);
2366 
2367     if (gettimeofday(&tv, NULL) < 0) {
2368         (void) fprintf(stderr, "unable to get current timestamp: %s\n",
2369                        strerror(errno));
2370         return (-1);
2371     }
2372 
2373     filename = strdup(ts->bprm->filename);
2374     base_filename = strdup(basename(filename));
2375     (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2376                     localtime_r(&tv.tv_sec, &tm));
2377     (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2378                     base_filename, timestamp, (int)getpid());
2379     free(base_filename);
2380     free(filename);
2381 
2382     return (0);
2383 }
2384 
2385 static int dump_write(int fd, const void *ptr, size_t size)
2386 {
2387     const char *bufp = (const char *)ptr;
2388     ssize_t bytes_written, bytes_left;
2389     struct rlimit dumpsize;
2390     off_t pos;
2391 
2392     bytes_written = 0;
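    /*
     * Honour RLIMIT_CORE: work out how many bytes may still be written at
     * the current file offset.  If the descriptor is not seekable (a pipe
     * or socket), just attempt to write everything.
     */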
2393     getrlimit(RLIMIT_CORE, &dumpsize);
2394     if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2395         if (errno == ESPIPE) { /* not a seekable stream */
2396             bytes_left = size;
2397         } else {
2398             return pos;
2399         }
2400     } else {
2401         if (dumpsize.rlim_cur <= pos) {
2402             return -1;
2403         } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2404             bytes_left = size;
2405         } else {
2406             size_t limit_left = dumpsize.rlim_cur - pos;
2407             bytes_left = limit_left >= size ? size : limit_left;
2408         }
2409     }
2410 
2411     /*
2412      * Under normal conditions a single write(2) should do, but
2413      * in the case of a socket etc. this mechanism is more portable.
2414      */
2415     do {
2416         bytes_written = write(fd, bufp, bytes_left);
2417         if (bytes_written < 0) {
2418             if (errno == EINTR)
2419                 continue;
2420             return (-1);
2421         } else if (bytes_written == 0) { /* eof */
2422             return (-1);
2423         }
2424         bufp += bytes_written;
2425         bytes_left -= bytes_written;
2426     } while (bytes_left > 0);
2427 
2428     return (0);
2429 }
2430 
2431 static int write_note(struct memelfnote *men, int fd)
2432 {
2433     struct elf_note en;
2434 
2435     en.n_namesz = men->namesz;
2436     en.n_type = men->type;
2437     en.n_descsz = men->datasz;
2438 
2439     bswap_note(&en);
2440 
2441     if (dump_write(fd, &en, sizeof(en)) != 0)
2442         return (-1);
2443     if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2444         return (-1);
2445     if (dump_write(fd, men->data, men->datasz_rounded) != 0)
2446         return (-1);
2447 
2448     return (0);
2449 }
2450 
2451 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2452 {
2453     TaskState *ts = (TaskState *)env->opaque;
2454     struct elf_thread_status *ets;
2455 
2456     ets = g_malloc0(sizeof (*ets));
2457     ets->num_notes = 1; /* only prstatus is dumped */
2458     fill_prstatus(&ets->prstatus, ts, 0);
2459     elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2460     fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2461               &ets->prstatus);
2462 
2463     QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2464 
2465     info->notes_size += note_size(&ets->notes[0]);
2466 }
2467 
2468 static int fill_note_info(struct elf_note_info *info,
2469                           long signr, const CPUState *env)
2470 {
2471 #define NUMNOTES 3
2472     CPUState *cpu = NULL;
2473     TaskState *ts = (TaskState *)env->opaque;
2474     int i;
2475 
2476     (void) memset(info, 0, sizeof (*info));
2477 
2478     QTAILQ_INIT(&info->thread_list);
2479 
2480     info->notes = g_malloc0(NUMNOTES * sizeof (struct memelfnote));
2481     if (info->notes == NULL)
2482         return (-ENOMEM);
2483     info->prstatus = g_malloc0(sizeof (*info->prstatus));
2484     if (info->prstatus == NULL)
2485         return (-ENOMEM);
2486     info->psinfo = g_malloc0(sizeof (*info->psinfo));
2487     if (info->psinfo == NULL)
2488         return (-ENOMEM);
2489 
2490     /*
2491      * First fill in status (and registers) of current thread
2492      * including process info & aux vector.
2493      */
2494     fill_prstatus(info->prstatus, ts, signr);
2495     elf_core_copy_regs(&info->prstatus->pr_reg, env);
2496     fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2497               sizeof (*info->prstatus), info->prstatus);
2498     fill_psinfo(info->psinfo, ts);
2499     fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2500               sizeof (*info->psinfo), info->psinfo);
2501     fill_auxv_note(&info->notes[2], ts);
2502     info->numnote = NUMNOTES;
2503 
2504     info->notes_size = 0;
2505     for (i = 0; i < info->numnote; i++)
2506         info->notes_size += note_size(&info->notes[i]);
2507 
2508     /* read and fill status of all threads */
2509     cpu_list_lock();
2510     for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2511         if (cpu == thread_env)
2512             continue;
2513         fill_thread_info(info, cpu);
2514     }
2515     cpu_list_unlock();
2516 
2517     return (0);
2518 }
2519 
2520 static void free_note_info(struct elf_note_info *info)
2521 {
2522     struct elf_thread_status *ets;
2523 
2524     while (!QTAILQ_EMPTY(&info->thread_list)) {
2525         ets = QTAILQ_FIRST(&info->thread_list);
2526         QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2527         g_free(ets);
2528     }
2529 
2530     g_free(info->prstatus);
2531     g_free(info->psinfo);
2532     g_free(info->notes);
2533 }
2534 
2535 static int write_note_info(struct elf_note_info *info, int fd)
2536 {
2537     struct elf_thread_status *ets;
2538     int i, error = 0;
2539 
2540     /* write prstatus, psinfo and auxv for current thread */
2541     for (i = 0; i < info->numnote; i++)
2542         if ((error = write_note(&info->notes[i], fd)) != 0)
2543             return (error);
2544 
2545     /* write prstatus for each thread */
2546     for (ets = QTAILQ_FIRST(&info->thread_list); ets != NULL;
2547          ets = QTAILQ_NEXT(ets, ets_link)) {
2548         if ((error = write_note(&ets->notes[0], fd)) != 0)
2549             return (error);
2550     }
2551 
2552     return (0);
2553 }
2554 
2555 /*
2556  * Write out ELF coredump.
2557  *
2558  * See documentation of ELF object file format in:
2559  * http://www.caldera.com/developers/devspecs/gabi41.pdf
2560  *
2561  * The coredump format in Linux is as follows:
2562  *
2563  * 0   +----------------------+         \
2564  *     | ELF header           | ET_CORE  |
2565  *     +----------------------+          |
2566  *     | ELF program headers  |          |--- headers
2567  *     | - NOTE section       |          |
2568  *     | - PT_LOAD sections   |          |
2569  *     +----------------------+         /
2570  *     | NOTEs:               |
2571  *     | - NT_PRSTATUS        |
2572  *     | - NT_PRPSINFO        |
2573  *     | - NT_AUXV            |
2574  *     +----------------------+ <-- aligned to target page
2575  *     | Process memory dump  |
2576  *     :                      :
2577  *     .                      .
2578  *     :                      :
2579  *     |                      |
2580  *     +----------------------+
2581  *
2582  * NT_PRSTATUS -> struct elf_prstatus (per thread)
2583  * NT_PRPSINFO -> struct elf_prpsinfo
2584  * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
2585  *
2586  * The format follows the System V format as closely as possible.  Current
2587  * version limitations are as follows:
2588  *     - no floating point registers are dumped
2589  *
2590  * Function returns 0 in case of success, negative errno otherwise.
2591  *
2592  * TODO: make this also work at runtime: it should be
2593  * possible to force a coredump from a running process and then
2594  * continue processing.  For example qemu could set up a SIGUSR2
2595  * handler (provided that the target process hasn't registered a
2596  * handler for that signal) that does the dump when the signal is received.
2597  */
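
/*
 * A worked (illustrative) layout example: assuming a 64-bit target ELF
 * (64-byte ELF header, 56-byte program headers), three memory mappings and
 * roughly 0x200 bytes of notes, with ELF_EXEC_PAGESIZE of 4 KiB:
 *
 *     headers: 64 + (3 + 1) * 56 = 288 bytes
 *     notes:   written at offset 288
 *     memory:  dumped from the next page boundary, offset 0x1000
 */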
2598 static int elf_core_dump(int signr, const CPUState *env)
2599 {
2600     const TaskState *ts = (const TaskState *)env->opaque;
2601     struct vm_area_struct *vma = NULL;
2602     char corefile[PATH_MAX];
2603     struct elf_note_info info;
2604     struct elfhdr elf;
2605     struct elf_phdr phdr;
2606     struct rlimit dumpsize;
2607     struct mm_struct *mm = NULL;
2608     off_t offset = 0, data_offset = 0;
2609     int segs = 0;
2610     int fd = -1;
2611 
2612     errno = 0;
2613     getrlimit(RLIMIT_CORE, &dumpsize);
2614     if (dumpsize.rlim_cur == 0)
2615         return 0;
2616 
2617     if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2618         return (-errno);
2619 
2620     if ((fd = open(corefile, O_WRONLY | O_CREAT,
2621                    S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2622         return (-errno);
2623 
2624     /*
2625      * Walk through the target process memory mappings and
2626      * set up a structure containing this information.  After
2627      * this point the vma_xxx functions can be used.
2628      */
2629     if ((mm = vma_init()) == NULL)
2630         goto out;
2631 
2632     walk_memory_regions(mm, vma_walker);
2633     segs = vma_get_mapping_count(mm);
2634 
2635     /*
2636      * Construct valid coredump ELF header.  We also
2637      * add one more segment for notes.
2638      */
2639     fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2640     if (dump_write(fd, &elf, sizeof (elf)) != 0)
2641         goto out;
2642 
2643     /* fill in in-memory version of notes */
2644     if (fill_note_info(&info, signr, env) < 0)
2645         goto out;
2646 
2647     offset += sizeof (elf);                             /* elf header */
2648     offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
2649 
2650     /* write out notes program header */
2651     fill_elf_note_phdr(&phdr, info.notes_size, offset);
2652 
2653     offset += info.notes_size;
2654     if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2655         goto out;
2656 
2657     /*
2658      * ELF specification wants data to start at page boundary so
2659      * we align it here.
2660      */
2661     data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2662 
2663     /*
2664      * Write program headers for memory regions mapped in
2665      * the target process.
2666      */
2667     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2668         (void) memset(&phdr, 0, sizeof (phdr));
2669 
2670         phdr.p_type = PT_LOAD;
2671         phdr.p_offset = offset;
2672         phdr.p_vaddr = vma->vma_start;
2673         phdr.p_paddr = 0;
2674         phdr.p_filesz = vma_dump_size(vma);
2675         offset += phdr.p_filesz;
2676         phdr.p_memsz = vma->vma_end - vma->vma_start;
2677         phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2678         if (vma->vma_flags & PROT_WRITE)
2679             phdr.p_flags |= PF_W;
2680         if (vma->vma_flags & PROT_EXEC)
2681             phdr.p_flags |= PF_X;
2682         phdr.p_align = ELF_EXEC_PAGESIZE;
2683 
2684         bswap_phdr(&phdr, 1);
2685         dump_write(fd, &phdr, sizeof (phdr));
2686     }
2687 
2688     /*
2689      * Next we write notes just after program headers.  No
2690      * alignment needed here.
2691      */
2692     if (write_note_info(&info, fd) < 0)
2693         goto out;
2694 
2695     /* align data to page boundary */
2696     if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2697         goto out;
2698 
2699     /*
2700      * Finally we can dump process memory into corefile as well.
2701      */
2702     for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2703         abi_ulong addr;
2704         abi_ulong end;
2705 
2706         end = vma->vma_start + vma_dump_size(vma);
2707 
2708         for (addr = vma->vma_start; addr < end;
2709              addr += TARGET_PAGE_SIZE) {
2710             char page[TARGET_PAGE_SIZE];
2711             int error;
2712 
2713             /*
2714              *  Read in page from target process memory and
2715              *  write it to coredump file.
2716              */
2717             error = copy_from_user(page, addr, sizeof (page));
2718             if (error != 0) {
2719                 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2720                                addr);
2721                 errno = -error;
2722                 goto out;
2723             }
2724             if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2725                 goto out;
2726         }
2727     }
2728 
2729  out:
2730     free_note_info(&info);
2731     if (mm != NULL)
2732         vma_delete(mm);
2733     (void) close(fd);
2734 
2735     if (errno != 0)
2736         return (-errno);
2737     return (0);
2738 }
2739 #endif /* USE_ELF_CORE_DUMP */
2740 
2741 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2742 {
2743     init_thread(regs, infop);
2744 }
2745