1 /*
2 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include <jni.h>
26 #include <unistd.h>
27 #include <fcntl.h>
28 #include <string.h>
29 #include <stdlib.h>
30 #include <stddef.h>
31 #include <elf.h>
32 #include <link.h>
33 #include "libproc_impl.h"
34 #include "ps_core_common.h"
35 #include "proc_service.h"
36 #include "salibelf.h"
37
38 // This file has the libproc implementation to read core files.
39 // For live processes, refer to ps_proc.c. Portions of this is adapted
40 // /modelled after Solaris libproc.so (in particular Pcore.c)
41
42
43 //---------------------------------------------------------------------------
44 // functions to handle map_info
45
46 // Order mappings based on virtual address. We use this function as the
47 // callback for sorting the array of map_info pointers.
core_cmp_mapping(const void * lhsp,const void * rhsp)48 static int core_cmp_mapping(const void *lhsp, const void *rhsp)
49 {
50 const map_info *lhs = *((const map_info **)lhsp);
51 const map_info *rhs = *((const map_info **)rhsp);
52
53 if (lhs->vaddr == rhs->vaddr) {
54 return (0);
55 }
56
57 return (lhs->vaddr < rhs->vaddr ? -1 : 1);
58 }
59
60 // we sort map_info by starting virtual address so that we can do
61 // binary search to read from an address.
sort_map_array(struct ps_prochandle * ph)62 static bool sort_map_array(struct ps_prochandle* ph) {
63 size_t num_maps = ph->core->num_maps;
64 map_info* map = ph->core->maps;
65 int i = 0;
66
67 // allocate map_array
68 map_info** array;
69 if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
70 print_debug("can't allocate memory for map array\n");
71 return false;
72 }
73
74 // add maps to array
75 while (map) {
76 array[i] = map;
77 i++;
78 map = map->next;
79 }
80
81 // sort is called twice. If this is second time, clear map array
82 if (ph->core->map_array) {
83 free(ph->core->map_array);
84 }
85
86 ph->core->map_array = array;
87 // sort the map_info array by base virtual address.
88 qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
89 core_cmp_mapping);
90
91 // print map
92 if (is_debug()) {
93 int j = 0;
94 print_debug("---- sorted virtual address map ----\n");
95 for (j = 0; j < ph->core->num_maps; j++) {
96 print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr,
97 ph->core->map_array[j]->memsz);
98 }
99 }
100
101 return true;
102 }
103
#ifndef MIN
// Minimum of two values. Arguments may be evaluated twice — do not pass
// expressions with side effects.
#define MIN(x, y) (((x) < (y))? (x): (y))
#endif
107
core_read_data(struct ps_prochandle * ph,uintptr_t addr,char * buf,size_t size)108 static bool core_read_data(struct ps_prochandle* ph, uintptr_t addr, char *buf, size_t size) {
109 ssize_t resid = size;
110 int page_size=sysconf(_SC_PAGE_SIZE);
111 while (resid != 0) {
112 map_info *mp = core_lookup(ph, addr);
113 uintptr_t mapoff;
114 ssize_t len, rem;
115 off_t off;
116 int fd;
117
118 if (mp == NULL) {
119 break; /* No mapping for this address */
120 }
121
122 fd = mp->fd;
123 mapoff = addr - mp->vaddr;
124 len = MIN(resid, mp->memsz - mapoff);
125 off = mp->offset + mapoff;
126
127 if ((len = pread(fd, buf, len, off)) <= 0) {
128 break;
129 }
130
131 resid -= len;
132 addr += len;
133 buf = (char *)buf + len;
134
135 // mappings always start at page boundary. But, may end in fractional
136 // page. fill zeros for possible fractional page at the end of a mapping.
137 rem = mp->memsz % page_size;
138 if (rem > 0) {
139 rem = page_size - rem;
140 len = MIN(resid, rem);
141 resid -= len;
142 addr += len;
143 // we are not assuming 'buf' to be zero initialized.
144 memset(buf, 0, len);
145 buf += len;
146 }
147 }
148
149 if (resid) {
150 print_debug("core read failed for %d byte(s) @ 0x%lx (%d more bytes)\n",
151 size, addr, resid);
152 return false;
153 } else {
154 return true;
155 }
156 }
157
// null implementation for write
// A core file is a read-only post-mortem snapshot, so writing to the
// debuggee address space is never supported; all parameters are ignored.
static bool core_write_data(struct ps_prochandle* ph,
                             uintptr_t addr, const char *buf , size_t size) {
  return false;
}
163
core_get_lwp_regs(struct ps_prochandle * ph,lwpid_t lwp_id,struct user_regs_struct * regs)164 static bool core_get_lwp_regs(struct ps_prochandle* ph, lwpid_t lwp_id,
165 struct user_regs_struct* regs) {
166 // for core we have cached the lwp regs from NOTE section
167 thread_info* thr = ph->threads;
168 while (thr) {
169 if (thr->lwp_id == lwp_id) {
170 memcpy(regs, &thr->regs, sizeof(struct user_regs_struct));
171 return true;
172 }
173 thr = thr->next;
174 }
175 return false;
176 }
177
// Operations vtable installed into ps_prochandle for core-file targets;
// the live-process counterpart lives in ps_proc.c.
static ps_prochandle_ops core_ops = {
  .release= core_release,
  .p_pread= core_read_data,
  .p_pwrite= core_write_data,   // always fails: cores are read-only
  .get_lwp_regs= core_get_lwp_regs
};
184
// read regs and create thread from NT_PRSTATUS entries from core file
//
// 'buf' points at the descriptor data of one NT_PRSTATUS note; each such
// note carries the prstatus_t (including the integer register set) of one
// LWP dumped into the core. A thread_info is created for the LWP and its
// registers are cached there for later core_get_lwp_regs() calls.
// Returns false only if the thread_info cannot be allocated.
static bool core_handle_prstatus(struct ps_prochandle* ph, const char* buf, size_t nbytes) {
  // we have to read prstatus_t from buf
  // assert(nbytes == sizeof(prstatus_t), "size mismatch on prstatus_t");
  prstatus_t* prstat = (prstatus_t*) buf;
  thread_info* newthr;
  print_debug("got integer regset for lwp %d\n", prstat->pr_pid);
  if((newthr = add_thread_info(ph, prstat->pr_pid)) == NULL)
    return false;

  // copy regs
  // NOTE(review): assumes prstat->pr_reg has the same size/layout as
  // struct user_regs_struct on this platform — TODO confirm for each arch.
  memcpy(&newthr->regs, prstat->pr_reg, sizeof(struct user_regs_struct));

  // Dump the register set when debug tracing is enabled; the field names
  // are architecture specific, hence the per-arch blocks below.
  if (is_debug()) {
    print_debug("integer regset\n");
#ifdef i386
    // print the regset
    print_debug("\teax = 0x%x\n", newthr->regs.eax);
    print_debug("\tebx = 0x%x\n", newthr->regs.ebx);
    print_debug("\tecx = 0x%x\n", newthr->regs.ecx);
    print_debug("\tedx = 0x%x\n", newthr->regs.edx);
    print_debug("\tesp = 0x%x\n", newthr->regs.esp);
    print_debug("\tebp = 0x%x\n", newthr->regs.ebp);
    print_debug("\tesi = 0x%x\n", newthr->regs.esi);
    print_debug("\tedi = 0x%x\n", newthr->regs.edi);
    print_debug("\teip = 0x%x\n", newthr->regs.eip);
#endif

#if defined(amd64) || defined(x86_64)
    // print the regset
    print_debug("\tr15 = 0x%lx\n", newthr->regs.r15);
    print_debug("\tr14 = 0x%lx\n", newthr->regs.r14);
    print_debug("\tr13 = 0x%lx\n", newthr->regs.r13);
    print_debug("\tr12 = 0x%lx\n", newthr->regs.r12);
    print_debug("\trbp = 0x%lx\n", newthr->regs.rbp);
    print_debug("\trbx = 0x%lx\n", newthr->regs.rbx);
    print_debug("\tr11 = 0x%lx\n", newthr->regs.r11);
    print_debug("\tr10 = 0x%lx\n", newthr->regs.r10);
    print_debug("\tr9 = 0x%lx\n", newthr->regs.r9);
    print_debug("\tr8 = 0x%lx\n", newthr->regs.r8);
    print_debug("\trax = 0x%lx\n", newthr->regs.rax);
    print_debug("\trcx = 0x%lx\n", newthr->regs.rcx);
    print_debug("\trdx = 0x%lx\n", newthr->regs.rdx);
    print_debug("\trsi = 0x%lx\n", newthr->regs.rsi);
    print_debug("\trdi = 0x%lx\n", newthr->regs.rdi);
    print_debug("\torig_rax = 0x%lx\n", newthr->regs.orig_rax);
    print_debug("\trip = 0x%lx\n", newthr->regs.rip);
    print_debug("\tcs = 0x%lx\n", newthr->regs.cs);
    print_debug("\teflags = 0x%lx\n", newthr->regs.eflags);
    print_debug("\trsp = 0x%lx\n", newthr->regs.rsp);
    print_debug("\tss = 0x%lx\n", newthr->regs.ss);
    print_debug("\tfs_base = 0x%lx\n", newthr->regs.fs_base);
    print_debug("\tgs_base = 0x%lx\n", newthr->regs.gs_base);
    print_debug("\tds = 0x%lx\n", newthr->regs.ds);
    print_debug("\tes = 0x%lx\n", newthr->regs.es);
    print_debug("\tfs = 0x%lx\n", newthr->regs.fs);
    print_debug("\tgs = 0x%lx\n", newthr->regs.gs);
#endif
  }

  return true;
}
247
248 #define ROUNDUP(x, y) ((((x)+((y)-1))/(y))*(y))
249
250 // read NT_PRSTATUS entries from core NOTE segment
core_handle_note(struct ps_prochandle * ph,ELF_PHDR * note_phdr)251 static bool core_handle_note(struct ps_prochandle* ph, ELF_PHDR* note_phdr) {
252 char* buf = NULL;
253 char* p = NULL;
254 size_t size = note_phdr->p_filesz;
255
256 // we are interested in just prstatus entries. we will ignore the rest.
257 // Advance the seek pointer to the start of the PT_NOTE data
258 if (lseek(ph->core->core_fd, note_phdr->p_offset, SEEK_SET) == (off_t)-1) {
259 print_debug("failed to lseek to PT_NOTE data\n");
260 return false;
261 }
262
263 // Now process the PT_NOTE structures. Each one is preceded by
264 // an Elf{32/64}_Nhdr structure describing its type and size.
265 if ( (buf = (char*) malloc(size)) == NULL) {
266 print_debug("can't allocate memory for reading core notes\n");
267 goto err;
268 }
269
270 // read notes into buffer
271 if (read(ph->core->core_fd, buf, size) != size) {
272 print_debug("failed to read notes, core file must have been truncated\n");
273 goto err;
274 }
275
276 p = buf;
277 while (p < buf + size) {
278 ELF_NHDR* notep = (ELF_NHDR*) p;
279 char* descdata = p + sizeof(ELF_NHDR) + ROUNDUP(notep->n_namesz, 4);
280 print_debug("Note header with n_type = %d and n_descsz = %u\n",
281 notep->n_type, notep->n_descsz);
282
283 if (notep->n_type == NT_PRSTATUS) {
284 if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
285 return false;
286 }
287 } else if (notep->n_type == NT_AUXV) {
288 // Get first segment from entry point
289 ELF_AUXV *auxv = (ELF_AUXV *)descdata;
290 while (auxv->a_type != AT_NULL) {
291 if (auxv->a_type == AT_ENTRY) {
292 // Set entry point address to address of dynamic section.
293 // We will adjust it in read_exec_segments().
294 ph->core->dynamic_addr = auxv->a_un.a_val;
295 break;
296 }
297 auxv++;
298 }
299 }
300 p = descdata + ROUNDUP(notep->n_descsz, 4);
301 }
302
303 free(buf);
304 return true;
305
306 err:
307 if (buf) free(buf);
308 return false;
309 }
310
// read all segments from core file
//
// Iterates the core file's program header table: every PT_NOTE segment is
// handed to core_handle_note() (threads, registers, auxv), and every
// PT_LOAD segment with file-backed data is recorded as a map_info so the
// debuggee's address space can later be read out of the core file.
static bool read_core_segments(struct ps_prochandle* ph, ELF_EHDR* core_ehdr) {
  int i = 0;
  ELF_PHDR* phbuf = NULL;
  ELF_PHDR* core_php = NULL;

  // read_program_header_table() mallocs the table; freed on every exit path.
  if ((phbuf =  read_program_header_table(ph->core->core_fd, core_ehdr)) == NULL)
    return false;

  /*
   * Now iterate through the program headers in the core file.
   * We're interested in two types of Phdrs: PT_NOTE (which
   * contains a set of saved /proc structures), and PT_LOAD (which
   * represents a memory mapping from the process's address space).
   *
   * Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE:
   *
   *     In Solaris there are two PT_NOTE segments the first PT_NOTE (if present)
   *     contains /proc structs in the pre-2.6 unstructured /proc format. the last
   *     PT_NOTE has data in new /proc format.
   *
   *     In Solaris, there is only one pstatus (process status). pstatus contains
   *     integer register set among other stuff. For each LWP, we have one lwpstatus
   *     entry that has integer regset for that LWP.
   *
   *     Linux threads are actually 'clone'd processes. To support core analysis
   *     of "multithreaded" process, Linux creates more than one pstatus (called
   *     "prstatus") entry in PT_NOTE. Each prstatus entry has integer regset for one
   *     "thread". Please refer to Linux kernel src file 'fs/binfmt_elf.c', in particular
   *     function "elf_core_dump".
   */

  for (core_php = phbuf, i = 0; i < core_ehdr->e_phnum; i++) {
    switch (core_php->p_type) {
      case PT_NOTE:
        if (core_handle_note(ph, core_php) != true) {
          goto err;
        }
        break;

      case PT_LOAD: {
        // only mappings with file-backed contents are useful; zero-filesz
        // (pure anonymous/zeroed) segments carry no data in the core file
        if (core_php->p_filesz != 0) {
          if (add_map_info(ph, ph->core->core_fd, core_php->p_offset,
             core_php->p_vaddr, core_php->p_filesz) == NULL) goto err;
        }
        break;
      }
    }

    core_php++;
  }

  free(phbuf);
  return true;
err:
  free(phbuf);
  return false;
}
369
// read segments of a shared object
//
// Adds the non-writable (text) PT_LOAD segments of the library opened as
// 'lib_fd' to the mapping list. 'lib_base' is the load *difference* (see
// read_shared_lib_info), added to each segment's p_vaddr. If a core-file
// mapping already exists at the target address it is redirected to the
// library file, since the library on disk holds the full text that the
// core may have truncated or omitted.
static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
  int i = 0;
  ELF_PHDR* phbuf;
  ELF_PHDR* lib_php = NULL;

  int page_size = sysconf(_SC_PAGE_SIZE);

  if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
    return false;
  }

  // we want to process only PT_LOAD segments that are not writable.
  // i.e., text segments. The read/write/exec (data) segments would
  // have been already added from core file segments.
  for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
    if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {

      uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
      map_info *existing_map = core_lookup(ph, target_vaddr);

      if (existing_map == NULL){
        // no core mapping covers this address: add a fresh one backed by
        // the library file itself
        if (add_map_info(ph, lib_fd, lib_php->p_offset,
                          target_vaddr, lib_php->p_memsz) == NULL) {
          goto err;
        }
      } else {
        // Coredump stores value of p_memsz elf field
        // rounded up to page boundary.

        // Treat as a conflict only when all three hold: the existing map is
        // not a single page, it is backed by a different fd, and the
        // page-rounded sizes disagree.
        if ((existing_map->memsz != page_size) &&
            (existing_map->fd != lib_fd) &&
            (ROUNDUP(existing_map->memsz, page_size) != ROUNDUP(lib_php->p_memsz, page_size))) {

          print_debug("address conflict @ 0x%lx (existing map size = %ld, size = %ld, flags = %d)\n",
                        target_vaddr, existing_map->memsz, lib_php->p_memsz, lib_php->p_flags);
          goto err;
        }

        /* replace PT_LOAD segment with library segment */
        print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
                     existing_map->memsz, ROUNDUP(lib_php->p_memsz, page_size));

        existing_map->fd = lib_fd;
        existing_map->offset = lib_php->p_offset;
        existing_map->memsz = ROUNDUP(lib_php->p_memsz, page_size);
      }
    }

    lib_php++;
  }

  free(phbuf);
  return true;
err:
  free(phbuf);
  return false;
}
428
429 // process segments from interpreter (ld.so or ld-linux.so)
read_interp_segments(struct ps_prochandle * ph)430 static bool read_interp_segments(struct ps_prochandle* ph) {
431 ELF_EHDR interp_ehdr;
432
433 if (read_elf_header(ph->core->interp_fd, &interp_ehdr) != true) {
434 print_debug("interpreter is not a valid ELF file\n");
435 return false;
436 }
437
438 if (read_lib_segments(ph, ph->core->interp_fd, &interp_ehdr, ph->core->ld_base_addr) != true) {
439 print_debug("can't read segments of interpreter\n");
440 return false;
441 }
442
443 return true;
444 }
445
// process segments of the a.out (the executable)
//
// Walks the executable's program headers: PT_LOAD text segments are added
// to the mapping list, PT_INTERP names the runtime loader (opened into
// ph->core->interp_fd), and PT_DYNAMIC fixes up ph->core->dynamic_addr so
// read_shared_lib_info() can find _DYNAMIC.
static bool read_exec_segments(struct ps_prochandle* ph, ELF_EHDR* exec_ehdr) {
  int i = 0;
  ELF_PHDR* phbuf = NULL;
  ELF_PHDR* exec_php = NULL;

  if ((phbuf = read_program_header_table(ph->core->exec_fd, exec_ehdr)) == NULL) {
    return false;
  }

  for (exec_php = phbuf, i = 0; i < exec_ehdr->e_phnum; i++) {
    switch (exec_php->p_type) {

      // add mappings for PT_LOAD segments
      case PT_LOAD: {
        // add only non-writable segments of non-zero filesz
        if (!(exec_php->p_flags & PF_W) && exec_php->p_filesz != 0) {
          if (add_map_info(ph, ph->core->exec_fd, exec_php->p_offset, exec_php->p_vaddr, exec_php->p_filesz) == NULL) goto err;
        }
        break;
      }

      // read the interpreter and its segments
      case PT_INTERP: {
        char interp_name[BUF_SIZE + 1];

        // BUF_SIZE is PATH_MAX + NAME_MAX + 1.
        if (exec_php->p_filesz > BUF_SIZE) {
          goto err;
        }
        if (pread(ph->core->exec_fd, interp_name,
                  exec_php->p_filesz, exec_php->p_offset) != exec_php->p_filesz) {
          print_debug("Unable to read in the ELF interpreter\n");
          goto err;
        }
        // NUL-terminate: the stored path need not be terminated in the file
        interp_name[exec_php->p_filesz] = '\0';
        print_debug("ELF interpreter %s\n", interp_name);
        // read interpreter segments as well
        if ((ph->core->interp_fd = pathmap_open(interp_name)) < 0) {
          print_debug("can't open runtime loader\n");
          goto err;
        }
        break;
      }

      // from PT_DYNAMIC we want to read address of first link_map addr
      case PT_DYNAMIC: {
        if (exec_ehdr->e_type == ET_EXEC) {
          // non-PIE: p_vaddr is already an absolute address
          ph->core->dynamic_addr = exec_php->p_vaddr;
        } else { // ET_DYN
          // dynamic_addr currently holds AT_ENTRY (the runtime entry point,
          // set in core_handle_note). Convert it to the runtime address of
          // _DYNAMIC: load_base = AT_ENTRY - e_entry, so we add
          // (p_vaddr - e_entry), i.e. subtract the link-time entry point.
          ph->core->dynamic_addr += exec_php->p_vaddr - exec_ehdr->e_entry;
        }
        print_debug("address of _DYNAMIC is 0x%lx\n", ph->core->dynamic_addr);
        break;
      }

    } // switch
    exec_php++;
  } // for

  free(phbuf);
  return true;
err:
  free(phbuf);
  return false;
}
514
515
// Byte offsets into the runtime linker's data structures (<link.h>):
// struct r_debug describes dynamic-linker state, struct link_map is one
// node of the loaded-objects list. Used with ps_pdread() to walk those
// structures inside the debuggee's (core) address space.
#define FIRST_LINK_MAP_OFFSET offsetof(struct r_debug,  r_map)
#define LD_BASE_OFFSET        offsetof(struct r_debug,  r_ldbase)
#define LINK_MAP_ADDR_OFFSET  offsetof(struct link_map, l_addr)
#define LINK_MAP_NAME_OFFSET  offsetof(struct link_map, l_name)
#define LINK_MAP_LD_OFFSET    offsetof(struct link_map, l_ld)
#define LINK_MAP_NEXT_OFFSET  offsetof(struct link_map, l_next)

// Sentinel "load address difference" values; see
// calc_prelinked_load_address() and read_shared_lib_info().
#define INVALID_LOAD_ADDRESS -1L
#define ZERO_LOAD_ADDRESS 0x0L
525
// Calculate the load address of shared library
// on prelink-enabled environment.
//
// In case of GDB, it would be calculated by offset of link_map.l_ld
// and the address of .dynamic section.
// See GDB implementation: lm_addr_check @ solib-svr4.c
//
// Returns the library's load difference (runtime l_ld minus link-time
// .dynamic vaddr), or INVALID_LOAD_ADDRESS on failure.
static uintptr_t calc_prelinked_load_address(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* elf_ehdr, uintptr_t link_map_addr) {
  ELF_PHDR *phbuf;
  uintptr_t lib_ld;
  uintptr_t lib_dyn_addr = 0L;
  uintptr_t load_addr;
  int i;

  phbuf = read_program_header_table(lib_fd, elf_ehdr);
  if (phbuf == NULL) {
    print_debug("can't read program header of shared object\n");
    return INVALID_LOAD_ADDRESS;
  }

  // Get the address of .dynamic section from shared library.
  // NOTE(review): if no PT_DYNAMIC header is found, lib_dyn_addr stays 0
  // and the result below equals lib_ld — presumably every shared object
  // has PT_DYNAMIC, but this is not checked here. TODO confirm.
  for (i = 0; i < elf_ehdr->e_phnum; i++) {
    if (phbuf[i].p_type == PT_DYNAMIC) {
      lib_dyn_addr = phbuf[i].p_vaddr;
      break;
    }
  }

  free(phbuf);

  // Read link_map.l_ld (runtime address of this object's dynamic section)
  // out of the debuggee's address space.
  if (ps_pdread(ph, (psaddr_t)link_map_addr + LINK_MAP_LD_OFFSET,
               &lib_ld, sizeof(uintptr_t)) != PS_OK) {
    print_debug("can't read address of dynamic section in shared object\n");
    return INVALID_LOAD_ADDRESS;
  }

  // Return the load address which is calculated by the address of .dynamic
  // and link_map.l_ld .
  load_addr = lib_ld - lib_dyn_addr;
  print_debug("lib_ld = 0x%lx, lib_dyn_addr = 0x%lx -> lib_base_diff = 0x%lx\n", lib_ld, lib_dyn_addr, load_addr);
  return load_addr;
}
567
// read shared library info from runtime linker's data structures.
// This work is done by librtlb_db in Solaris
//
// Starting from ph->core->dynamic_addr (_DYNAMIC of the executable), finds
// the DT_DEBUG entry -> struct r_debug -> link_map list, then for each
// loaded object opens the library file, maps its text segments, and
// registers it for symbol lookup. The map array is re-sorted whenever new
// mappings are added because later ps_pdread() calls binary-search it.
static bool read_shared_lib_info(struct ps_prochandle* ph) {
  uintptr_t addr = ph->core->dynamic_addr;
  uintptr_t debug_base;
  uintptr_t first_link_map_addr;
  uintptr_t ld_base_addr;
  uintptr_t link_map_addr;
  uintptr_t lib_base_diff;
  uintptr_t lib_base;
  uintptr_t lib_name_addr;
  char lib_name[BUF_SIZE];
  ELF_DYN dyn;
  ELF_EHDR elf_ehdr;
  int lib_fd;

  // _DYNAMIC has information of the form
  //         [tag] [data] [tag] [data] .....
  // Both tag and data are pointer sized.
  // We look for dynamic info with DT_DEBUG. This has shared object info.
  // refer to struct r_debug in link.h

  dyn.d_tag = DT_NULL;
  // NOTE(review): this scan has no explicit bound; it relies on every
  // _DYNAMIC array containing DT_DEBUG before an unreadable address.
  while (dyn.d_tag != DT_DEBUG) {
    if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) {
      print_debug("can't read debug info from _DYNAMIC\n");
      return false;
    }
    addr += sizeof(ELF_DYN);
  }

  // we have got Dyn entry with DT_DEBUG
  debug_base = dyn.d_un.d_ptr;
  // at debug_base we have struct r_debug. This has first link map in r_map field
  if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
                 &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
    print_debug("can't read first link map address\n");
    return false;
  }

  // read ld_base address from struct r_debug
  if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
               sizeof(uintptr_t)) != PS_OK) {
    print_debug("can't read ld base address\n");
    return false;
  }
  ph->core->ld_base_addr = ld_base_addr;

  print_debug("interpreter base address is 0x%lx\n", ld_base_addr);

  // now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so)
  if (read_interp_segments(ph) != true) {
    return false;
  }

  // after adding interpreter (ld.so) mappings sort again
  if (sort_map_array(ph) != true) {
    return false;
  }

  print_debug("first link map is at 0x%lx\n", first_link_map_addr);

  link_map_addr = first_link_map_addr;
  while (link_map_addr != 0) {
    // read library base address of the .so. Note that even though <sys/link.h> calls
    // link_map->l_addr as "base address",  this is * not * really base virtual
    // address of the shared object. This is actually the difference b/w the virtual
    // address mentioned in shared object and the actual virtual base where runtime
    // linker loaded it. We use "base diff" in read_lib_segments call below.

    if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_ADDR_OFFSET,
                 &lib_base_diff, sizeof(uintptr_t)) != PS_OK) {
      print_debug("can't read shared object base address diff\n");
      return false;
    }

    // read address of the name
    if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NAME_OFFSET,
                  &lib_name_addr, sizeof(uintptr_t)) != PS_OK) {
      print_debug("can't read address of shared object name\n");
      return false;
    }

    // read name of the shared object
    lib_name[0] = '\0';
    if (lib_name_addr != 0 &&
        read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
      print_debug("can't read shared object name\n");
      // don't let failure to read the name stop opening the file.  If something is really wrong
      // it will fail later.
    }

    if (lib_name[0] != '\0') {
      // ignore empty lib names
      lib_fd = pathmap_open(lib_name);

      if (lib_fd < 0) {
        print_debug("can't open shared object %s\n", lib_name);
        // continue with other libraries...
      } else {
        if (read_elf_header(lib_fd, &elf_ehdr)) {
          // l_addr of 0 usually means a prelinked library; recover the
          // real load difference from l_ld and the .dynamic vaddr.
          if (lib_base_diff == ZERO_LOAD_ADDRESS ) {
            lib_base_diff = calc_prelinked_load_address(ph, lib_fd, &elf_ehdr, link_map_addr);
            if (lib_base_diff == INVALID_LOAD_ADDRESS) {
              close(lib_fd);
              return false;
            }
          }

          lib_base = lib_base_diff + find_base_address(lib_fd, &elf_ehdr);
          print_debug("reading library %s @ 0x%lx [ 0x%lx ]\n",
                      lib_name, lib_base, lib_base_diff);
          // while adding library mappings we need to use "base difference".
          if (! read_lib_segments(ph, lib_fd, &elf_ehdr, lib_base_diff)) {
            print_debug("can't read shared object's segments\n");
            close(lib_fd);
            return false;
          }
          // lib_fd ownership passes to the lib_info list here (closed at release)
          add_lib_info_fd(ph, lib_name, lib_fd, lib_base);
          // Map info is added for the library (lib_name) so
          // we need to re-sort it before calling the p_pdread.
          if (sort_map_array(ph) != true)
            return false;
        } else {
          print_debug("can't read ELF header for shared object %s\n", lib_name);
          close(lib_fd);
          // continue with other libraries...
        }
      }
    }

    // read next link_map address
    if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
                   &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
      print_debug("can't read next link in link_map\n");
      return false;
    }
  }

  return true;
}
709
710 // the one and only one exposed stuff from this file
711 JNIEXPORT struct ps_prochandle* JNICALL
Pgrab_core(const char * exec_file,const char * core_file)712 Pgrab_core(const char* exec_file, const char* core_file) {
713 ELF_EHDR core_ehdr;
714 ELF_EHDR exec_ehdr;
715 ELF_EHDR lib_ehdr;
716
717 struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
718 if (ph == NULL) {
719 print_debug("can't allocate ps_prochandle\n");
720 return NULL;
721 }
722
723 if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) {
724 free(ph);
725 print_debug("can't allocate ps_prochandle\n");
726 return NULL;
727 }
728
729 // initialize ph
730 ph->ops = &core_ops;
731 ph->core->core_fd = -1;
732 ph->core->exec_fd = -1;
733 ph->core->interp_fd = -1;
734
735 // open the core file
736 if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) {
737 print_debug("can't open core file\n");
738 goto err;
739 }
740
741 // read core file ELF header
742 if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) {
743 print_debug("core file is not a valid ELF ET_CORE file\n");
744 goto err;
745 }
746
747 if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) {
748 print_debug("can't open executable file\n");
749 goto err;
750 }
751
752 if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true ||
753 ((exec_ehdr.e_type != ET_EXEC) && (exec_ehdr.e_type != ET_DYN))) {
754 print_debug("executable file is not a valid ELF file\n");
755 goto err;
756 }
757
758 // process core file segments
759 if (read_core_segments(ph, &core_ehdr) != true) {
760 goto err;
761 }
762
763 // process exec file segments
764 if (read_exec_segments(ph, &exec_ehdr) != true) {
765 goto err;
766 }
767
768 // exec file is also treated like a shared object for symbol search
769 if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
770 (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) {
771 goto err;
772 }
773
774 // allocate and sort maps into map_array, we need to do this
775 // here because read_shared_lib_info needs to read from debuggee
776 // address space
777 if (sort_map_array(ph) != true) {
778 goto err;
779 }
780
781 if (read_shared_lib_info(ph) != true) {
782 goto err;
783 }
784
785 // sort again because we have added more mappings from shared objects
786 if (sort_map_array(ph) != true) {
787 goto err;
788 }
789
790 if (init_classsharing_workaround(ph) != true) {
791 goto err;
792 }
793
794 return ph;
795
796 err:
797 Prelease(ph);
798 return NULL;
799 }
800