1 // SPDX-License-Identifier: GPL-2.0
2 #include <fcntl.h>
3 #include <stdio.h>
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <string.h>
7 #include <unistd.h>
8 #include <inttypes.h>
9
10 #include "dso.h"
11 #include "map.h"
12 #include "maps.h"
13 #include "symbol.h"
14 #include "symsrc.h"
15 #include "demangle-cxx.h"
16 #include "demangle-ocaml.h"
17 #include "demangle-java.h"
18 #include "demangle-rust.h"
19 #include "machine.h"
20 #include "vdso.h"
21 #include "debug.h"
22 #include "util/copyfile.h"
23 #include <linux/ctype.h>
24 #include <linux/kernel.h>
25 #include <linux/zalloc.h>
26 #include <linux/string.h>
27 #include <symbol/kallsyms.h>
28 #include <internal/lib.h>
29
30 #ifdef HAVE_LIBBFD_SUPPORT
31 #define PACKAGE 'perf'
32 #include <bfd.h>
33 #endif
34
35 #if defined(HAVE_LIBBFD_SUPPORT) || defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
36 #ifndef DMGL_PARAMS
37 #define DMGL_PARAMS (1 << 0) /* Include function args */
38 #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
39 #endif
40 #endif
41
42 #ifndef EM_AARCH64
43 #define EM_AARCH64 183 /* ARM 64 bit */
44 #endif
45
46 #ifndef EM_LOONGARCH
47 #define EM_LOONGARCH 258
48 #endif
49
50 #ifndef ELF32_ST_VISIBILITY
51 #define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
52 #endif
53
54 /* For ELF64 the definitions are the same. */
55 #ifndef ELF64_ST_VISIBILITY
56 #define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
57 #endif
58
59 /* How to extract information held in the st_other field. */
60 #ifndef GELF_ST_VISIBILITY
61 #define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
62 #endif
63
64 typedef Elf64_Nhdr GElf_Nhdr;
65
66
67 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
68 static int elf_getphdrnum(Elf *elf, size_t *dst)
69 {
70 GElf_Ehdr gehdr;
71 GElf_Ehdr *ehdr;
72
73 ehdr = gelf_getehdr(elf, &gehdr);
74 if (!ehdr)
75 return -1;
76
77 *dst = ehdr->e_phnum;
78
79 return 0;
80 }
81 #endif
82
83 #ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
84 static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
85 {
86 pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
87 return -1;
88 }
89 #endif
90
91 #ifndef NT_GNU_BUILD_ID
92 #define NT_GNU_BUILD_ID 3
93 #endif
94
95 /**
96  * elf_symtab__for_each_symbol - iterate through all the symbols
97  * @syms: struct elf_symtab instance to iterate
98  * @nr_syms: number of symbols in @syms
99  * @idx: uint32_t iteration index
100  * @sym: GElf_Sym iterator
101  */
102 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
103 for (idx = 0, gelf_getsym(syms, idx, &sym);\
104 idx < nr_syms; \
105 idx++, gelf_getsym(syms, idx, &sym))
106
107 static inline uint8_t elf_sym__type(const GElf_Sym *sym)
108 {
109 return GELF_ST_TYPE(sym->st_info);
110 }
111
112 static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
113 {
114 return GELF_ST_VISIBILITY(sym->st_other);
115 }
116
117 #ifndef STT_GNU_IFUNC
118 #define STT_GNU_IFUNC 10
119 #endif
120
121 static inline int elf_sym__is_function(const GElf_Sym *sym)
122 {
123 return (elf_sym__type(sym) == STT_FUNC ||
124 elf_sym__type(sym) == STT_GNU_IFUNC) &&
125 sym->st_name != 0 &&
126 sym->st_shndx != SHN_UNDEF;
127 }
128
129 static inline bool elf_sym__is_object(const GElf_Sym *sym)
130 {
131 return elf_sym__type(sym) == STT_OBJECT &&
132 sym->st_name != 0 &&
133 sym->st_shndx != SHN_UNDEF;
134 }
135
136 static inline int elf_sym__is_label(const GElf_Sym *sym)
137 {
138 return elf_sym__type(sym) == STT_NOTYPE &&
139 sym->st_name != 0 &&
140 sym->st_shndx != SHN_UNDEF &&
141 sym->st_shndx != SHN_ABS &&
142 elf_sym__visibility(sym) != STV_HIDDEN &&
143 elf_sym__visibility(sym) != STV_INTERNAL;
144 }
145
146 static bool elf_sym__filter(GElf_Sym *sym)
147 {
148 return elf_sym__is_function(sym) || elf_sym__is_object(sym);
149 }
150
151 static inline const char *elf_sym__name(const GElf_Sym *sym,
152 const Elf_Data *symstrs)
153 {
154 return symstrs->d_buf + sym->st_name;
155 }
156
157 static inline const char *elf_sec__name(const GElf_Shdr *shdr,
158 const Elf_Data *secstrs)
159 {
160 return secstrs->d_buf + shdr->sh_name;
161 }
162
163 static inline int elf_sec__is_text(const GElf_Shdr *shdr,
164 const Elf_Data *secstrs)
165 {
166 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
167 }
168
169 static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
170 const Elf_Data *secstrs)
171 {
172 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
173 }
174
175 static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
176 {
177 return elf_sec__is_text(shdr, secstrs) ||
178 elf_sec__is_data(shdr, secstrs);
179 }
180
181 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
182 {
183 Elf_Scn *sec = NULL;
184 GElf_Shdr shdr;
185 size_t cnt = 1;
186
187 while ((sec = elf_nextscn(elf, sec)) != NULL) {
188 gelf_getshdr(sec, &shdr);
189
190 if ((addr >= shdr.sh_addr) &&
191 (addr < (shdr.sh_addr + shdr.sh_size)))
192 return cnt;
193
194 ++cnt;
195 }
196
197 return -1;
198 }
199
200 Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
201 GElf_Shdr *shp, const char *name, size_t *idx)
202 {
203 Elf_Scn *sec = NULL;
204 size_t cnt = 1;
205
206 /* ELF is corrupted/truncated, avoid calling elf_strptr. */
207 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
208 return NULL;
209
210 while ((sec = elf_nextscn(elf, sec)) != NULL) {
211 char *str;
212
213 gelf_getshdr(sec, shp);
214 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
215 if (str && !strcmp(name, str)) {
216 if (idx)
217 *idx = cnt;
218 return sec;
219 }
220 ++cnt;
221 }
222
223 return NULL;
224 }
225
226 bool filename__has_section(const char *filename, const char *sec)
227 {
228 int fd;
229 Elf *elf;
230 GElf_Ehdr ehdr;
231 GElf_Shdr shdr;
232 bool found = false;
233
234 fd = open(filename, O_RDONLY);
235 if (fd < 0)
236 return false;
237
238 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
239 if (elf == NULL)
240 goto out;
241
242 if (gelf_getehdr(elf, &ehdr) == NULL)
243 goto elf_out;
244
245 found = !!elf_section_by_name(elf, &ehdr, &shdr, sec, NULL);
246
247 elf_out:
248 elf_end(elf);
249 out:
250 close(fd);
251 return found;
252 }
253
254 static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
255 {
256 size_t i, phdrnum;
257 u64 sz;
258
259 if (elf_getphdrnum(elf, &phdrnum))
260 return -1;
261
262 for (i = 0; i < phdrnum; i++) {
263 if (gelf_getphdr(elf, i, phdr) == NULL)
264 return -1;
265
266 if (phdr->p_type != PT_LOAD)
267 continue;
268
269 sz = max(phdr->p_memsz, phdr->p_filesz);
270 if (!sz)
271 continue;
272
273 if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
274 return 0;
275 }
276
277 	/* No valid program header found */
278 return -1;
279 }
280
281 static bool want_demangle(bool is_kernel_sym)
282 {
283 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
284 }
285
286 /*
287 * Demangle C++ function signature, typically replaced by demangle-cxx.cpp
288 * version.
289 */
290 __weak char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
291 bool modifiers __maybe_unused)
292 {
293 #ifdef HAVE_LIBBFD_SUPPORT
294 int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
295
296 return bfd_demangle(NULL, str, flags);
297 #elif defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
298 int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
299
300 return cplus_demangle(str, flags);
301 #else
302 return NULL;
303 #endif
304 }
305
306 static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
307 {
308 char *demangled = NULL;
309
310 /*
311 * We need to figure out if the object was created from C++ sources
312 * DWARF DW_compile_unit has this, but we don't always have access
313 * to it...
314 */
315 if (!want_demangle(dso__kernel(dso) || kmodule))
316 return demangled;
317
318 demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0);
319 if (demangled == NULL) {
320 demangled = ocaml_demangle_sym(elf_name);
321 if (demangled == NULL) {
322 demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
323 }
324 }
325 else if (rust_is_mangled(demangled))
326 /*
327 * Input to Rust demangling is the BFD-demangled
328 * name which it Rust-demangles in place.
329 */
330 rust_demangle_sym(demangled);
331
332 return demangled;
333 }
334
335 struct rel_info {
336 u32 nr_entries;
337 u32 *sorted;
338 bool is_rela;
339 Elf_Data *reldata;
340 GElf_Rela rela;
341 GElf_Rel rel;
342 };
343
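/*
 * Return the symbol table index referenced by the idx'th relocation
 * entry, following ri->sorted if an offset-sorted order was built.
 */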
344 static u32 get_rel_symidx(struct rel_info *ri, u32 idx)
345 {
346 idx = ri->sorted ? ri->sorted[idx] : idx;
347 if (ri->is_rela) {
348 gelf_getrela(ri->reldata, idx, &ri->rela);
349 return GELF_R_SYM(ri->rela.r_info);
350 }
351 gelf_getrel(ri->reldata, idx, &ri->rel);
352 return GELF_R_SYM(ri->rel.r_info);
353 }
354
355 static u64 get_rel_offset(struct rel_info *ri, u32 x)
356 {
357 if (ri->is_rela) {
358 GElf_Rela rela;
359
360 gelf_getrela(ri->reldata, x, &rela);
361 return rela.r_offset;
362 } else {
363 GElf_Rel rel;
364
365 gelf_getrel(ri->reldata, x, &rel);
366 return rel.r_offset;
367 }
368 }
369
370 static int rel_cmp(const void *a, const void *b, void *r)
371 {
372 struct rel_info *ri = r;
373 u64 a_offset = get_rel_offset(ri, *(const u32 *)a);
374 u64 b_offset = get_rel_offset(ri, *(const u32 *)b);
375
376 return a_offset < b_offset ? -1 : (a_offset > b_offset ? 1 : 0);
377 }
378
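/* Build ri->sorted: relocation indexes ordered by ascending r_offset */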
379 static int sort_rel(struct rel_info *ri)
380 {
381 size_t sz = sizeof(ri->sorted[0]);
382 u32 i;
383
384 ri->sorted = calloc(ri->nr_entries, sz);
385 if (!ri->sorted)
386 return -1;
387 for (i = 0; i < ri->nr_entries; i++)
388 ri->sorted[i] = i;
389 qsort_r(ri->sorted, ri->nr_entries, sz, rel_cmp, ri);
390 return 0;
391 }
392
393 /*
394 * For x86_64, the GNU linker is putting IFUNC information in the relocation
395 * addend.
396 */
397 static bool addend_may_be_ifunc(GElf_Ehdr *ehdr, struct rel_info *ri)
398 {
399 return ehdr->e_machine == EM_X86_64 && ri->is_rela &&
400 GELF_R_TYPE(ri->rela.r_info) == R_X86_64_IRELATIVE;
401 }
402
403 static bool get_ifunc_name(Elf *elf, struct dso *dso, GElf_Ehdr *ehdr,
404 struct rel_info *ri, char *buf, size_t buf_sz)
405 {
406 u64 addr = ri->rela.r_addend;
407 struct symbol *sym;
408 GElf_Phdr phdr;
409
410 if (!addend_may_be_ifunc(ehdr, ri))
411 return false;
412
413 if (elf_read_program_header(elf, addr, &phdr))
414 return false;
415
416 addr -= phdr.p_vaddr - phdr.p_offset;
417
418 sym = dso__find_symbol_nocache(dso, addr);
419
420 /* Expecting the address to be an IFUNC or IFUNC alias */
421 if (!sym || sym->start != addr || (sym->type != STT_GNU_IFUNC && !sym->ifunc_alias))
422 return false;
423
424 snprintf(buf, buf_sz, "%s@plt", sym->name);
425
426 return true;
427 }
428
429 static void exit_rel(struct rel_info *ri)
430 {
431 zfree(&ri->sorted);
432 }
433
434 static bool get_plt_sizes(struct dso *dso, GElf_Ehdr *ehdr, GElf_Shdr *shdr_plt,
435 u64 *plt_header_size, u64 *plt_entry_size)
436 {
437 switch (ehdr->e_machine) {
438 case EM_ARM:
439 *plt_header_size = 20;
440 *plt_entry_size = 12;
441 return true;
442 case EM_AARCH64:
443 *plt_header_size = 32;
444 *plt_entry_size = 16;
445 return true;
446 case EM_LOONGARCH:
447 *plt_header_size = 32;
448 *plt_entry_size = 16;
449 return true;
450 case EM_SPARC:
451 *plt_header_size = 48;
452 *plt_entry_size = 12;
453 return true;
454 case EM_SPARCV9:
455 *plt_header_size = 128;
456 *plt_entry_size = 32;
457 return true;
458 case EM_386:
459 case EM_X86_64:
460 *plt_entry_size = shdr_plt->sh_entsize;
461 /* Size is 8 or 16, if not, assume alignment indicates size */
462 if (*plt_entry_size != 8 && *plt_entry_size != 16)
463 *plt_entry_size = shdr_plt->sh_addralign == 8 ? 8 : 16;
464 *plt_header_size = *plt_entry_size;
465 break;
466 	default: /* FIXME: s390/alpha/mips/parisc/powerpc/sh/xtensa need to be checked */
467 *plt_header_size = shdr_plt->sh_entsize;
468 *plt_entry_size = shdr_plt->sh_entsize;
469 break;
470 }
471 if (*plt_entry_size)
472 return true;
473 pr_debug("Missing PLT entry size for %s\n", dso__long_name(dso));
474 return false;
475 }
476
477 static bool machine_is_x86(GElf_Half e_machine)
478 {
479 return e_machine == EM_386 || e_machine == EM_X86_64;
480 }
481
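/* A .rela.dyn entry of interest: the relocated offset and its .dynsym index */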
482 struct rela_dyn {
483 GElf_Addr offset;
484 u32 sym_idx;
485 };
486
487 struct rela_dyn_info {
488 struct dso *dso;
489 Elf_Data *plt_got_data;
490 u32 nr_entries;
491 struct rela_dyn *sorted;
492 Elf_Data *dynsym_data;
493 Elf_Data *dynstr_data;
494 Elf_Data *rela_dyn_data;
495 };
496
497 static void exit_rela_dyn(struct rela_dyn_info *di)
498 {
499 zfree(&di->sorted);
500 }
501
502 static int cmp_offset(const void *a, const void *b)
503 {
504 const struct rela_dyn *va = a;
505 const struct rela_dyn *vb = b;
506
507 return va->offset < vb->offset ? -1 : (va->offset > vb->offset ? 1 : 0);
508 }
509
510 static int sort_rela_dyn(struct rela_dyn_info *di)
511 {
512 u32 i, n;
513
514 di->sorted = calloc(di->nr_entries, sizeof(di->sorted[0]));
515 if (!di->sorted)
516 return -1;
517
518 /* Get data for sorting: the offset and symbol index */
519 for (i = 0, n = 0; i < di->nr_entries; i++) {
520 GElf_Rela rela;
521 u32 sym_idx;
522
523 gelf_getrela(di->rela_dyn_data, i, &rela);
524 sym_idx = GELF_R_SYM(rela.r_info);
525 if (sym_idx) {
526 di->sorted[n].sym_idx = sym_idx;
527 di->sorted[n].offset = rela.r_offset;
528 n += 1;
529 }
530 }
531
532 /* Sort by offset */
533 di->nr_entries = n;
534 qsort(di->sorted, n, sizeof(di->sorted[0]), cmp_offset);
535
536 return 0;
537 }
538
539 static void get_rela_dyn_info(Elf *elf, GElf_Ehdr *ehdr, struct rela_dyn_info *di, Elf_Scn *scn)
540 {
541 GElf_Shdr rela_dyn_shdr;
542 GElf_Shdr shdr;
543
544 di->plt_got_data = elf_getdata(scn, NULL);
545
546 scn = elf_section_by_name(elf, ehdr, &rela_dyn_shdr, ".rela.dyn", NULL);
547 if (!scn || !rela_dyn_shdr.sh_link || !rela_dyn_shdr.sh_entsize)
548 return;
549
550 di->nr_entries = rela_dyn_shdr.sh_size / rela_dyn_shdr.sh_entsize;
551 di->rela_dyn_data = elf_getdata(scn, NULL);
552
553 scn = elf_getscn(elf, rela_dyn_shdr.sh_link);
554 if (!scn || !gelf_getshdr(scn, &shdr) || !shdr.sh_link)
555 return;
556
557 di->dynsym_data = elf_getdata(scn, NULL);
558 di->dynstr_data = elf_getdata(elf_getscn(elf, shdr.sh_link), NULL);
559
560 if (!di->plt_got_data || !di->dynstr_data || !di->dynsym_data || !di->rela_dyn_data)
561 return;
562
563 /* Sort into offset order */
564 sort_rela_dyn(di);
565 }
566
567 /* Get instruction displacement from a plt entry for x86_64 */
568 static u32 get_x86_64_plt_disp(const u8 *p)
569 {
570 u8 endbr64[] = {0xf3, 0x0f, 0x1e, 0xfa};
571 int n = 0;
572
573 /* Skip endbr64 */
574 if (!memcmp(p, endbr64, sizeof(endbr64)))
575 n += sizeof(endbr64);
576 /* Skip bnd prefix */
577 if (p[n] == 0xf2)
578 n += 1;
579 /* jmp with 4-byte displacement */
580 if (p[n] == 0xff && p[n + 1] == 0x25) {
581 u32 disp;
582
583 n += 2;
584 /* Also add offset from start of entry to end of instruction */
585 memcpy(&disp, p + n, sizeof(disp));
586 return n + 4 + le32toh(disp);
587 }
588 return 0;
589 }
590
591 static bool get_plt_got_name(GElf_Shdr *shdr, size_t i,
592 struct rela_dyn_info *di,
593 char *buf, size_t buf_sz)
594 {
595 struct rela_dyn vi, *vr;
596 const char *sym_name;
597 char *demangled;
598 GElf_Sym sym;
599 bool result;
600 u32 disp;
601
602 if (!di->sorted)
603 return false;
604
605 disp = get_x86_64_plt_disp(di->plt_got_data->d_buf + i);
606 if (!disp)
607 return false;
608
609 /* Compute target offset of the .plt.got entry */
610 vi.offset = shdr->sh_offset + di->plt_got_data->d_off + i + disp;
611
612 /* Find that offset in .rela.dyn (sorted by offset) */
613 vr = bsearch(&vi, di->sorted, di->nr_entries, sizeof(di->sorted[0]), cmp_offset);
614 if (!vr)
615 return false;
616
617 /* Get the associated symbol */
618 gelf_getsym(di->dynsym_data, vr->sym_idx, &sym);
619 sym_name = elf_sym__name(&sym, di->dynstr_data);
620 demangled = demangle_sym(di->dso, 0, sym_name);
621 if (demangled != NULL)
622 sym_name = demangled;
623
624 snprintf(buf, buf_sz, "%s@plt", sym_name);
625
626 result = *sym_name;
627
628 free(demangled);
629
630 return result;
631 }
632
633 static int dso__synthesize_plt_got_symbols(struct dso *dso, Elf *elf,
634 GElf_Ehdr *ehdr,
635 char *buf, size_t buf_sz)
636 {
637 struct rela_dyn_info di = { .dso = dso };
638 struct symbol *sym;
639 GElf_Shdr shdr;
640 Elf_Scn *scn;
641 int err = -1;
642 size_t i;
643
644 scn = elf_section_by_name(elf, ehdr, &shdr, ".plt.got", NULL);
645 if (!scn || !shdr.sh_entsize)
646 return 0;
647
648 if (ehdr->e_machine == EM_X86_64)
649 get_rela_dyn_info(elf, ehdr, &di, scn);
650
651 for (i = 0; i < shdr.sh_size; i += shdr.sh_entsize) {
652 if (!get_plt_got_name(&shdr, i, &di, buf, buf_sz))
653 snprintf(buf, buf_sz, "offset_%#" PRIx64 "@plt", (u64)shdr.sh_offset + i);
654 sym = symbol__new(shdr.sh_offset + i, shdr.sh_entsize, STB_GLOBAL, STT_FUNC, buf);
655 if (!sym)
656 goto out;
657 symbols__insert(dso__symbols(dso), sym);
658 }
659 err = 0;
660 out:
661 exit_rela_dyn(&di);
662 return err;
663 }
664
665 /*
666 * We need to check if we have a .dynsym, so that we can handle the
667  * .plt, synthesizing its symbols, which aren't in the symtabs (be it
668  * .dynsym or .symtab).
669  * And always look at the original dso, not at debuginfo packages, which
670  * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
671 */
672 int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
673 {
674 uint32_t idx;
675 GElf_Sym sym;
676 u64 plt_offset, plt_header_size, plt_entry_size;
677 GElf_Shdr shdr_plt, plt_sec_shdr;
678 struct symbol *f, *plt_sym;
679 GElf_Shdr shdr_rel_plt, shdr_dynsym;
680 Elf_Data *syms, *symstrs;
681 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
682 GElf_Ehdr ehdr;
683 char sympltname[1024];
684 Elf *elf;
685 int nr = 0, err = -1;
686 struct rel_info ri = { .is_rela = false };
687 bool lazy_plt;
688
689 elf = ss->elf;
690 ehdr = ss->ehdr;
691
692 if (!elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL))
693 return 0;
694
695 /*
696 * A symbol from a previous section (e.g. .init) can have been expanded
697 * by symbols__fixup_end() to overlap .plt. Truncate it before adding
698 * a symbol for .plt header.
699 */
700 f = dso__find_symbol_nocache(dso, shdr_plt.sh_offset);
701 if (f && f->start < shdr_plt.sh_offset && f->end > shdr_plt.sh_offset)
702 f->end = shdr_plt.sh_offset;
703
704 if (!get_plt_sizes(dso, &ehdr, &shdr_plt, &plt_header_size, &plt_entry_size))
705 return 0;
706
707 /* Add a symbol for .plt header */
708 plt_sym = symbol__new(shdr_plt.sh_offset, plt_header_size, STB_GLOBAL, STT_FUNC, ".plt");
709 if (!plt_sym)
710 goto out_elf_end;
711 symbols__insert(dso__symbols(dso), plt_sym);
712
713 /* Only x86 has .plt.got */
714 if (machine_is_x86(ehdr.e_machine) &&
715 dso__synthesize_plt_got_symbols(dso, elf, &ehdr, sympltname, sizeof(sympltname)))
716 goto out_elf_end;
717
718 /* Only x86 has .plt.sec */
719 if (machine_is_x86(ehdr.e_machine) &&
720 elf_section_by_name(elf, &ehdr, &plt_sec_shdr, ".plt.sec", NULL)) {
721 if (!get_plt_sizes(dso, &ehdr, &plt_sec_shdr, &plt_header_size, &plt_entry_size))
722 return 0;
723 /* Extend .plt symbol to entire .plt */
724 plt_sym->end = plt_sym->start + shdr_plt.sh_size;
725 /* Use .plt.sec offset */
726 plt_offset = plt_sec_shdr.sh_offset;
727 lazy_plt = false;
728 } else {
729 plt_offset = shdr_plt.sh_offset;
730 lazy_plt = true;
731 }
732
733 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
734 ".rela.plt", NULL);
735 if (scn_plt_rel == NULL) {
736 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
737 ".rel.plt", NULL);
738 if (scn_plt_rel == NULL)
739 return 0;
740 }
741
742 if (shdr_rel_plt.sh_type != SHT_RELA &&
743 shdr_rel_plt.sh_type != SHT_REL)
744 return 0;
745
746 if (!shdr_rel_plt.sh_link)
747 return 0;
748
749 if (shdr_rel_plt.sh_link == ss->dynsym_idx) {
750 scn_dynsym = ss->dynsym;
751 shdr_dynsym = ss->dynshdr;
752 } else if (shdr_rel_plt.sh_link == ss->symtab_idx) {
753 /*
754 * A static executable can have a .plt due to IFUNCs, in which
755 		 * case .symtab is used, not .dynsym.
756 */
757 scn_dynsym = ss->symtab;
758 shdr_dynsym = ss->symshdr;
759 } else {
760 goto out_elf_end;
761 }
762
763 if (!scn_dynsym)
764 return 0;
765
766 /*
767 * Fetch the relocation section to find the idxes to the GOT
768 * and the symbols in the .dynsym they refer to.
769 */
770 ri.reldata = elf_getdata(scn_plt_rel, NULL);
771 if (!ri.reldata)
772 goto out_elf_end;
773
774 syms = elf_getdata(scn_dynsym, NULL);
775 if (syms == NULL)
776 goto out_elf_end;
777
778 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
779 if (scn_symstrs == NULL)
780 goto out_elf_end;
781
782 symstrs = elf_getdata(scn_symstrs, NULL);
783 if (symstrs == NULL)
784 goto out_elf_end;
785
786 if (symstrs->d_size == 0)
787 goto out_elf_end;
788
789 ri.nr_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
790
791 ri.is_rela = shdr_rel_plt.sh_type == SHT_RELA;
792
793 if (lazy_plt) {
794 /*
795 * Assume a .plt with the same number of entries as the number
796 * of relocation entries is not lazy and does not have a header.
797 */
798 if (ri.nr_entries * plt_entry_size == shdr_plt.sh_size)
799 dso__delete_symbol(dso, plt_sym);
800 else
801 plt_offset += plt_header_size;
802 }
803
804 /*
805 * x86 doesn't insert IFUNC relocations in .plt order, so sort to get
806 * back in order.
807 */
808 if (machine_is_x86(ehdr.e_machine) && sort_rel(&ri))
809 goto out_elf_end;
810
811 for (idx = 0; idx < ri.nr_entries; idx++) {
812 const char *elf_name = NULL;
813 char *demangled = NULL;
814
815 gelf_getsym(syms, get_rel_symidx(&ri, idx), &sym);
816
817 elf_name = elf_sym__name(&sym, symstrs);
818 demangled = demangle_sym(dso, 0, elf_name);
819 if (demangled)
820 elf_name = demangled;
821 if (*elf_name)
822 snprintf(sympltname, sizeof(sympltname), "%s@plt", elf_name);
823 else if (!get_ifunc_name(elf, dso, &ehdr, &ri, sympltname, sizeof(sympltname)))
824 snprintf(sympltname, sizeof(sympltname),
825 "offset_%#" PRIx64 "@plt", plt_offset);
826 free(demangled);
827
828 f = symbol__new(plt_offset, plt_entry_size, STB_GLOBAL, STT_FUNC, sympltname);
829 if (!f)
830 goto out_elf_end;
831
832 plt_offset += plt_entry_size;
833 symbols__insert(dso__symbols(dso), f);
834 ++nr;
835 }
836
837 err = 0;
838 out_elf_end:
839 exit_rel(&ri);
840 if (err == 0)
841 return nr;
842 pr_debug("%s: problems reading %s PLT info.\n",
843 __func__, dso__long_name(dso));
844 return 0;
845 }
846
847 char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
848 {
849 return demangle_sym(dso, kmodule, elf_name);
850 }
851
852 /*
853 * Align offset to 4 bytes as needed for note name and descriptor data.
854 */
855 #define NOTE_ALIGN(n) (((n) + 3) & -4U)
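/* e.g. NOTE_ALIGN(5) == 8 and NOTE_ALIGN(8) == 8 */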
856
857 static int elf_read_build_id(Elf *elf, void *bf, size_t size)
858 {
859 int err = -1;
860 GElf_Ehdr ehdr;
861 GElf_Shdr shdr;
862 Elf_Data *data;
863 Elf_Scn *sec;
864 Elf_Kind ek;
865 void *ptr;
866
867 if (size < BUILD_ID_SIZE)
868 goto out;
869
870 ek = elf_kind(elf);
871 if (ek != ELF_K_ELF)
872 goto out;
873
874 if (gelf_getehdr(elf, &ehdr) == NULL) {
875 pr_err("%s: cannot get elf header.\n", __func__);
876 goto out;
877 }
878
879 /*
880 	 * Check the following sections for notes:
881 * '.note.gnu.build-id'
882 * '.notes'
883 * '.note' (VDSO specific)
884 */
885 do {
886 sec = elf_section_by_name(elf, &ehdr, &shdr,
887 ".note.gnu.build-id", NULL);
888 if (sec)
889 break;
890
891 sec = elf_section_by_name(elf, &ehdr, &shdr,
892 ".notes", NULL);
893 if (sec)
894 break;
895
896 sec = elf_section_by_name(elf, &ehdr, &shdr,
897 ".note", NULL);
898 if (sec)
899 break;
900
901 return err;
902
903 } while (0);
904
905 data = elf_getdata(sec, NULL);
906 if (data == NULL)
907 goto out;
908
909 ptr = data->d_buf;
910 while (ptr < (data->d_buf + data->d_size)) {
911 GElf_Nhdr *nhdr = ptr;
912 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
913 descsz = NOTE_ALIGN(nhdr->n_descsz);
914 const char *name;
915
916 ptr += sizeof(*nhdr);
917 name = ptr;
918 ptr += namesz;
919 if (nhdr->n_type == NT_GNU_BUILD_ID &&
920 nhdr->n_namesz == sizeof("GNU")) {
921 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
922 size_t sz = min(size, descsz);
923 memcpy(bf, ptr, sz);
924 memset(bf + sz, 0, size - sz);
925 err = sz;
926 break;
927 }
928 }
929 ptr += descsz;
930 }
931
932 out:
933 return err;
934 }
935
936 #ifdef HAVE_LIBBFD_BUILDID_SUPPORT
937
938 static int read_build_id(const char *filename, struct build_id *bid)
939 {
940 size_t size = sizeof(bid->data);
941 int err = -1;
942 bfd *abfd;
943
944 abfd = bfd_openr(filename, NULL);
945 if (!abfd)
946 return -1;
947
948 if (!bfd_check_format(abfd, bfd_object)) {
949 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
950 goto out_close;
951 }
952
953 if (!abfd->build_id || abfd->build_id->size > size)
954 goto out_close;
955
956 memcpy(bid->data, abfd->build_id->data, abfd->build_id->size);
957 memset(bid->data + abfd->build_id->size, 0, size - abfd->build_id->size);
958 err = bid->size = abfd->build_id->size;
959
960 out_close:
961 bfd_close(abfd);
962 return err;
963 }
964
965 #else // HAVE_LIBBFD_BUILDID_SUPPORT
966
967 static int read_build_id(const char *filename, struct build_id *bid)
968 {
969 size_t size = sizeof(bid->data);
970 int fd, err = -1;
971 Elf *elf;
972
973 if (size < BUILD_ID_SIZE)
974 goto out;
975
976 fd = open(filename, O_RDONLY);
977 if (fd < 0)
978 goto out;
979
980 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
981 if (elf == NULL) {
982 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
983 goto out_close;
984 }
985
986 err = elf_read_build_id(elf, bid->data, size);
987 if (err > 0)
988 bid->size = err;
989
990 elf_end(elf);
991 out_close:
992 close(fd);
993 out:
994 return err;
995 }
996
997 #endif // HAVE_LIBBFD_BUILDID_SUPPORT
998
999 int filename__read_build_id(const char *filename, struct build_id *bid)
1000 {
1001 struct kmod_path m = { .name = NULL, };
1002 char path[PATH_MAX];
1003 int err;
1004
1005 if (!filename)
1006 return -EFAULT;
1007
1008 err = kmod_path__parse(&m, filename);
1009 if (err)
1010 return -1;
1011
1012 if (m.comp) {
1013 int error = 0, fd;
1014
1015 fd = filename__decompress(filename, path, sizeof(path), m.comp, &error);
1016 if (fd < 0) {
1017 pr_debug("Failed to decompress (error %d) %s\n",
1018 error, filename);
1019 return -1;
1020 }
1021 close(fd);
1022 filename = path;
1023 }
1024
1025 err = read_build_id(filename, bid);
1026
1027 if (m.comp)
1028 unlink(filename);
1029 return err;
1030 }
1031
1032 int sysfs__read_build_id(const char *filename, struct build_id *bid)
1033 {
1034 size_t size = sizeof(bid->data);
1035 int fd, err = -1;
1036
1037 fd = open(filename, O_RDONLY);
1038 if (fd < 0)
1039 goto out;
1040
1041 while (1) {
1042 char bf[BUFSIZ];
1043 GElf_Nhdr nhdr;
1044 size_t namesz, descsz;
1045
1046 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
1047 break;
1048
1049 namesz = NOTE_ALIGN(nhdr.n_namesz);
1050 descsz = NOTE_ALIGN(nhdr.n_descsz);
1051 if (nhdr.n_type == NT_GNU_BUILD_ID &&
1052 nhdr.n_namesz == sizeof("GNU")) {
1053 if (read(fd, bf, namesz) != (ssize_t)namesz)
1054 break;
1055 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
1056 size_t sz = min(descsz, size);
1057 if (read(fd, bid->data, sz) == (ssize_t)sz) {
1058 memset(bid->data + sz, 0, size - sz);
1059 bid->size = sz;
1060 err = 0;
1061 break;
1062 }
1063 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
1064 break;
1065 } else {
1066 int n = namesz + descsz;
1067
1068 if (n > (int)sizeof(bf)) {
1069 n = sizeof(bf);
1070 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
1071 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
1072 }
1073 if (read(fd, bf, n) != n)
1074 break;
1075 }
1076 }
1077 close(fd);
1078 out:
1079 return err;
1080 }
1081
1082 #ifdef HAVE_LIBBFD_SUPPORT
1083
1084 int filename__read_debuglink(const char *filename, char *debuglink,
1085 size_t size)
1086 {
1087 int err = -1;
1088 asection *section;
1089 bfd *abfd;
1090
1091 abfd = bfd_openr(filename, NULL);
1092 if (!abfd)
1093 return -1;
1094
1095 if (!bfd_check_format(abfd, bfd_object)) {
1096 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
1097 goto out_close;
1098 }
1099
1100 section = bfd_get_section_by_name(abfd, ".gnu_debuglink");
1101 if (!section)
1102 goto out_close;
1103
1104 if (section->size > size)
1105 goto out_close;
1106
1107 if (!bfd_get_section_contents(abfd, section, debuglink, 0,
1108 section->size))
1109 goto out_close;
1110
1111 err = 0;
1112
1113 out_close:
1114 bfd_close(abfd);
1115 return err;
1116 }
1117
1118 #else
1119
1120 int filename__read_debuglink(const char *filename, char *debuglink,
1121 size_t size)
1122 {
1123 int fd, err = -1;
1124 Elf *elf;
1125 GElf_Ehdr ehdr;
1126 GElf_Shdr shdr;
1127 Elf_Data *data;
1128 Elf_Scn *sec;
1129 Elf_Kind ek;
1130
1131 fd = open(filename, O_RDONLY);
1132 if (fd < 0)
1133 goto out;
1134
1135 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1136 if (elf == NULL) {
1137 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
1138 goto out_close;
1139 }
1140
1141 ek = elf_kind(elf);
1142 if (ek != ELF_K_ELF)
1143 goto out_elf_end;
1144
1145 if (gelf_getehdr(elf, &ehdr) == NULL) {
1146 pr_err("%s: cannot get elf header.\n", __func__);
1147 goto out_elf_end;
1148 }
1149
1150 sec = elf_section_by_name(elf, &ehdr, &shdr,
1151 ".gnu_debuglink", NULL);
1152 if (sec == NULL)
1153 goto out_elf_end;
1154
1155 data = elf_getdata(sec, NULL);
1156 if (data == NULL)
1157 goto out_elf_end;
1158
1159 /* the start of this section is a zero-terminated string */
1160 strncpy(debuglink, data->d_buf, size);
1161
1162 err = 0;
1163
1164 out_elf_end:
1165 elf_end(elf);
1166 out_close:
1167 close(fd);
1168 out:
1169 return err;
1170 }
1171
1172 #endif
1173
1174 static int dso__swap_init(struct dso *dso, unsigned char eidata)
1175 {
1176 static unsigned int const endian = 1;
1177
1178 dso__set_needs_swap(dso, DSO_SWAP__NO);
1179
1180 switch (eidata) {
1181 case ELFDATA2LSB:
1182 /* We are big endian, DSO is little endian. */
1183 if (*(unsigned char const *)&endian != 1)
1184 dso__set_needs_swap(dso, DSO_SWAP__YES);
1185 break;
1186
1187 case ELFDATA2MSB:
1188 /* We are little endian, DSO is big endian. */
1189 if (*(unsigned char const *)&endian != 0)
1190 dso__set_needs_swap(dso, DSO_SWAP__YES);
1191 break;
1192
1193 default:
1194 pr_err("unrecognized DSO data encoding %d\n", eidata);
1195 return -EINVAL;
1196 }
1197
1198 return 0;
1199 }
1200
1201 bool symsrc__possibly_runtime(struct symsrc *ss)
1202 {
1203 return ss->dynsym || ss->opdsec;
1204 }
1205
1206 bool symsrc__has_symtab(struct symsrc *ss)
1207 {
1208 return ss->symtab != NULL;
1209 }
1210
1211 void symsrc__destroy(struct symsrc *ss)
1212 {
1213 zfree(&ss->name);
1214 elf_end(ss->elf);
1215 close(ss->fd);
1216 }
1217
1218 bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
1219 {
1220 /*
1221 	 * Usually vmlinux is an ELF file of type ET_EXEC for most
1222 	 * architectures; however, the arm64 kernel is linked with the
1223 	 * '-share' option, so we also need to accept type ET_DYN.
1224 */
1225 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL ||
1226 ehdr.e_type == ET_DYN;
1227 }
1228
1229 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
1230 enum dso_binary_type type)
1231 {
1232 GElf_Ehdr ehdr;
1233 Elf *elf;
1234 int fd;
1235
1236 if (dso__needs_decompress(dso)) {
1237 fd = dso__decompress_kmodule_fd(dso, name);
1238 if (fd < 0)
1239 return -1;
1240
1241 type = dso__symtab_type(dso);
1242 } else {
1243 fd = open(name, O_RDONLY);
1244 if (fd < 0) {
1245 *dso__load_errno(dso) = errno;
1246 return -1;
1247 }
1248 }
1249
1250 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1251 if (elf == NULL) {
1252 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
1253 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
1254 goto out_close;
1255 }
1256
1257 if (gelf_getehdr(elf, &ehdr) == NULL) {
1258 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
1259 pr_debug("%s: cannot get elf header.\n", __func__);
1260 goto out_elf_end;
1261 }
1262
1263 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
1264 *dso__load_errno(dso) = DSO_LOAD_ERRNO__INTERNAL_ERROR;
1265 goto out_elf_end;
1266 }
1267
1268 /* Always reject images with a mismatched build-id: */
1269 if (dso__has_build_id(dso) && !symbol_conf.ignore_vmlinux_buildid) {
1270 u8 build_id[BUILD_ID_SIZE];
1271 struct build_id bid;
1272 int size;
1273
1274 size = elf_read_build_id(elf, build_id, BUILD_ID_SIZE);
1275 if (size <= 0) {
1276 *dso__load_errno(dso) = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
1277 goto out_elf_end;
1278 }
1279
1280 build_id__init(&bid, build_id, size);
1281 if (!dso__build_id_equal(dso, &bid)) {
1282 pr_debug("%s: build id mismatch for %s.\n", __func__, name);
1283 *dso__load_errno(dso) = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
1284 goto out_elf_end;
1285 }
1286 }
1287
1288 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1289
1290 ss->symtab_idx = 0;
1291 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
1292 &ss->symtab_idx);
1293 if (ss->symshdr.sh_type != SHT_SYMTAB)
1294 ss->symtab = NULL;
1295
1296 ss->dynsym_idx = 0;
1297 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
1298 &ss->dynsym_idx);
1299 if (ss->dynshdr.sh_type != SHT_DYNSYM)
1300 ss->dynsym = NULL;
1301
1302 ss->opdidx = 0;
1303 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
1304 &ss->opdidx);
1305 if (ss->opdshdr.sh_type != SHT_PROGBITS)
1306 ss->opdsec = NULL;
1307
1308 if (dso__kernel(dso) == DSO_SPACE__USER)
1309 ss->adjust_symbols = true;
1310 else
1311 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
1312
1313 ss->name = strdup(name);
1314 if (!ss->name) {
1315 *dso__load_errno(dso) = errno;
1316 goto out_elf_end;
1317 }
1318
1319 ss->elf = elf;
1320 ss->fd = fd;
1321 ss->ehdr = ehdr;
1322 ss->type = type;
1323
1324 return 0;
1325
1326 out_elf_end:
1327 elf_end(elf);
1328 out_close:
1329 close(fd);
1330 return -1;
1331 }
1332
1333 static bool is_exe_text(int flags)
1334 {
1335 return (flags & (SHF_ALLOC | SHF_EXECINSTR)) == (SHF_ALLOC | SHF_EXECINSTR);
1336 }
1337
1338 /*
1339 * Some executable module sections like .noinstr.text might be laid out with
1340 * .text so they can use the same mapping (memory address to file offset).
1341 * Check if that is the case. Refer to kernel layout_sections(). Return the
1342 * maximum offset.
1343 */
1344 static u64 max_text_section(Elf *elf, GElf_Ehdr *ehdr)
1345 {
1346 Elf_Scn *sec = NULL;
1347 GElf_Shdr shdr;
1348 u64 offs = 0;
1349
1350 	/* Doesn't work for some architectures */
1351 if (ehdr->e_machine == EM_PARISC ||
1352 ehdr->e_machine == EM_ALPHA)
1353 return 0;
1354
1355 /* ELF is corrupted/truncated, avoid calling elf_strptr. */
1356 if (!elf_rawdata(elf_getscn(elf, ehdr->e_shstrndx), NULL))
1357 return 0;
1358
1359 while ((sec = elf_nextscn(elf, sec)) != NULL) {
1360 char *sec_name;
1361
1362 if (!gelf_getshdr(sec, &shdr))
1363 break;
1364
1365 if (!is_exe_text(shdr.sh_flags))
1366 continue;
1367
1368 /* .init and .exit sections are not placed with .text */
1369 sec_name = elf_strptr(elf, ehdr->e_shstrndx, shdr.sh_name);
1370 if (!sec_name ||
1371 strstarts(sec_name, ".init") ||
1372 strstarts(sec_name, ".exit"))
1373 break;
1374
1375 /* Must be next to previous, assumes .text is first */
1376 if (offs && PERF_ALIGN(offs, shdr.sh_addralign ?: 1) != shdr.sh_offset)
1377 break;
1378
1379 offs = shdr.sh_offset + shdr.sh_size;
1380 }
1381
1382 return offs;
1383 }
1384
1385 /**
1386 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
1387 * @kmap: kernel maps and relocation reference symbol
1388 *
1389 * This function returns %true if we are dealing with the kernel maps and the
1390 * relocation reference symbol has not yet been found. Otherwise %false is
1391 * returned.
1392 */
1393 static bool ref_reloc_sym_not_found(struct kmap *kmap)
1394 {
1395 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
1396 !kmap->ref_reloc_sym->unrelocated_addr;
1397 }
1398
1399 /**
1400 * ref_reloc - kernel relocation offset.
1401 * @kmap: kernel maps and relocation reference symbol
1402 *
1403 * This function returns the offset of kernel addresses as determined by using
1404 * the relocation reference symbol i.e. if the kernel has not been relocated
1405 * then the return value is zero.
1406 */
1407 static u64 ref_reloc(struct kmap *kmap)
1408 {
1409 if (kmap && kmap->ref_reloc_sym &&
1410 kmap->ref_reloc_sym->unrelocated_addr)
1411 return kmap->ref_reloc_sym->addr -
1412 kmap->ref_reloc_sym->unrelocated_addr;
1413 return 0;
1414 }
1415
1416 void __weak arch__sym_update(struct symbol *s __maybe_unused,
1417 GElf_Sym *sym __maybe_unused) { }
1418
1419 static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
1420 GElf_Sym *sym, GElf_Shdr *shdr,
1421 struct maps *kmaps, struct kmap *kmap,
1422 struct dso **curr_dsop,
1423 const char *section_name,
1424 bool adjust_kernel_syms, bool kmodule, bool *remap_kernel,
1425 u64 max_text_sh_offset)
1426 {
1427 struct dso *curr_dso = *curr_dsop;
1428 struct map *curr_map;
1429 char dso_name[PATH_MAX];
1430
1431 /* Adjust symbol to map to file offset */
1432 if (adjust_kernel_syms)
1433 sym->st_value -= shdr->sh_addr - shdr->sh_offset;
1434
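	/* Nothing to do if curr_dso is already the dso created for this section */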
1435 if (strcmp(section_name, (dso__short_name(curr_dso) + dso__short_name_len(dso))) == 0)
1436 return 0;
1437
1438 if (strcmp(section_name, ".text") == 0) {
1439 /*
1440 * The initial kernel mapping is based on
1441 * kallsyms and identity maps. Overwrite it to
1442 * map to the kernel dso.
1443 */
1444 if (*remap_kernel && dso__kernel(dso) && !kmodule) {
1445 *remap_kernel = false;
1446 map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
1447 map__set_end(map, map__start(map) + shdr->sh_size);
1448 map__set_pgoff(map, shdr->sh_offset);
1449 map__set_mapping_type(map, MAPPING_TYPE__DSO);
1450 /* Ensure maps are correctly ordered */
1451 if (kmaps) {
1452 int err;
1453 struct map *tmp = map__get(map);
1454
1455 maps__remove(kmaps, map);
1456 err = maps__insert(kmaps, map);
1457 map__put(tmp);
1458 if (err)
1459 return err;
1460 }
1461 }
1462
1463 /*
1464 * The initial module mapping is based on
1465 * /proc/modules mapped to offset zero.
1466 * Overwrite it to map to the module dso.
1467 */
1468 if (*remap_kernel && kmodule) {
1469 *remap_kernel = false;
1470 map__set_pgoff(map, shdr->sh_offset);
1471 }
1472
1473 dso__put(*curr_dsop);
1474 *curr_dsop = dso__get(dso);
1475 return 0;
1476 }
1477
1478 if (!kmap)
1479 return 0;
1480
1481 /*
1482 * perf does not record module section addresses except for .text, but
1483 * some sections can use the same mapping as .text.
1484 */
1485 if (kmodule && adjust_kernel_syms && is_exe_text(shdr->sh_flags) &&
1486 shdr->sh_offset <= max_text_sh_offset) {
1487 dso__put(*curr_dsop);
1488 *curr_dsop = dso__get(dso);
1489 return 0;
1490 }
1491
1492 snprintf(dso_name, sizeof(dso_name), "%s%s", dso__short_name(dso), section_name);
1493
1494 curr_map = maps__find_by_name(kmaps, dso_name);
1495 if (curr_map == NULL) {
1496 u64 start = sym->st_value;
1497
1498 if (kmodule)
1499 start += map__start(map) + shdr->sh_offset;
1500
1501 curr_dso = dso__new(dso_name);
1502 if (curr_dso == NULL)
1503 return -1;
1504 dso__set_kernel(curr_dso, dso__kernel(dso));
1505 RC_CHK_ACCESS(curr_dso)->long_name = dso__long_name(dso);
1506 RC_CHK_ACCESS(curr_dso)->long_name_len = dso__long_name_len(dso);
1507 dso__set_binary_type(curr_dso, dso__binary_type(dso));
1508 dso__set_adjust_symbols(curr_dso, dso__adjust_symbols(dso));
1509 curr_map = map__new2(start, curr_dso);
1510 if (curr_map == NULL) {
1511 dso__put(curr_dso);
1512 return -1;
1513 }
1514 if (dso__kernel(curr_dso))
1515 map__kmap(curr_map)->kmaps = kmaps;
1516
1517 if (adjust_kernel_syms) {
1518 map__set_start(curr_map, shdr->sh_addr + ref_reloc(kmap));
1519 map__set_end(curr_map, map__start(curr_map) + shdr->sh_size);
1520 map__set_pgoff(curr_map, shdr->sh_offset);
1521 } else {
1522 map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
1523 }
1524 dso__set_symtab_type(curr_dso, dso__symtab_type(dso));
1525 if (maps__insert(kmaps, curr_map))
1526 return -1;
1527 dsos__add(&maps__machine(kmaps)->dsos, curr_dso);
1528 dso__set_loaded(curr_dso);
1529 dso__put(*curr_dsop);
1530 *curr_dsop = curr_dso;
1531 } else {
1532 dso__put(*curr_dsop);
1533 *curr_dsop = dso__get(map__dso(curr_map));
1534 }
1535 map__put(curr_map);
1536
1537 return 0;
1538 }
1539
1540 static int
1541 dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
1542 struct symsrc *runtime_ss, int kmodule, int dynsym)
1543 {
1544 struct kmap *kmap = dso__kernel(dso) ? map__kmap(map) : NULL;
1545 struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
1546 struct dso *curr_dso = NULL;
1547 Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym;
1548 uint32_t nr_syms;
1549 uint32_t idx;
1550 GElf_Ehdr ehdr;
1551 GElf_Shdr shdr;
1552 GElf_Shdr tshdr;
1553 Elf_Data *syms, *opddata = NULL;
1554 GElf_Sym sym;
1555 Elf_Scn *sec, *sec_strndx;
1556 Elf *elf;
1557 int nr = 0;
1558 bool remap_kernel = false, adjust_kernel_syms = false;
1559 u64 max_text_sh_offset = 0;
1560
1561 if (kmap && !kmaps)
1562 return -1;
1563
1564 elf = syms_ss->elf;
1565 ehdr = syms_ss->ehdr;
1566 if (dynsym) {
1567 sec = syms_ss->dynsym;
1568 shdr = syms_ss->dynshdr;
1569 } else {
1570 sec = syms_ss->symtab;
1571 shdr = syms_ss->symshdr;
1572 }
1573
1574 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
1575 ".text", NULL)) {
1576 dso__set_text_offset(dso, tshdr.sh_addr - tshdr.sh_offset);
1577 dso__set_text_end(dso, tshdr.sh_offset + tshdr.sh_size);
1578 }
1579
1580 if (runtime_ss->opdsec)
1581 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
1582
1583 syms = elf_getdata(sec, NULL);
1584 if (syms == NULL)
1585 goto out_elf_end;
1586
1587 sec = elf_getscn(elf, shdr.sh_link);
1588 if (sec == NULL)
1589 goto out_elf_end;
1590
1591 symstrs = elf_getdata(sec, NULL);
1592 if (symstrs == NULL)
1593 goto out_elf_end;
1594
1595 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
1596 if (sec_strndx == NULL)
1597 goto out_elf_end;
1598
1599 secstrs_run = elf_getdata(sec_strndx, NULL);
1600 if (secstrs_run == NULL)
1601 goto out_elf_end;
1602
1603 sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
1604 if (sec_strndx == NULL)
1605 goto out_elf_end;
1606
1607 secstrs_sym = elf_getdata(sec_strndx, NULL);
1608 if (secstrs_sym == NULL)
1609 goto out_elf_end;
1610
1611 nr_syms = shdr.sh_size / shdr.sh_entsize;
1612
1613 memset(&sym, 0, sizeof(sym));
1614
1615 /*
1616 * The kernel relocation symbol is needed in advance in order to adjust
1617 * kernel maps correctly.
1618 */
1619 if (ref_reloc_sym_not_found(kmap)) {
1620 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1621 const char *elf_name = elf_sym__name(&sym, symstrs);
1622
1623 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
1624 continue;
1625 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
1626 map__set_reloc(map, kmap->ref_reloc_sym->addr - kmap->ref_reloc_sym->unrelocated_addr);
1627 break;
1628 }
1629 }
1630
1631 /*
1632 * Handle any relocation of vdso necessary because older kernels
1633 * attempted to prelink vdso to its virtual address.
1634 */
1635 if (dso__is_vdso(dso))
1636 map__set_reloc(map, map__start(map) - dso__text_offset(dso));
1637
1638 dso__set_adjust_symbols(dso, runtime_ss->adjust_symbols || ref_reloc(kmap));
1639 /*
1640 * Initial kernel and module mappings do not map to the dso.
1641 * Flag the fixups.
1642 */
1643 if (dso__kernel(dso)) {
1644 remap_kernel = true;
1645 adjust_kernel_syms = dso__adjust_symbols(dso);
1646 }
1647
1648 if (kmodule && adjust_kernel_syms)
1649 max_text_sh_offset = max_text_section(runtime_ss->elf, &runtime_ss->ehdr);
1650
1651 curr_dso = dso__get(dso);
1652 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1653 struct symbol *f;
1654 const char *elf_name = elf_sym__name(&sym, symstrs);
1655 char *demangled = NULL;
1656 int is_label = elf_sym__is_label(&sym);
1657 const char *section_name;
1658 bool used_opd = false;
1659
1660 if (!is_label && !elf_sym__filter(&sym))
1661 continue;
1662
1663 /* Reject ARM ELF "mapping symbols": these aren't unique and
1664 * don't identify functions, so will confuse the profile
1665 * output: */
1666 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
1667 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
1668 && (elf_name[2] == '\0' || elf_name[2] == '.'))
1669 continue;
1670 }
1671
1672 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
1673 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
1674 u64 *opd = opddata->d_buf + offset;
1675 sym.st_value = DSO__SWAP(dso, u64, *opd);
1676 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
1677 sym.st_value);
1678 used_opd = true;
1679 }
1680
1681 /*
1682 * When loading symbols in a data mapping, ABS symbols (which
1683 		 * have a value of SHN_ABS in their st_shndx) fail at
1684 		 * elf_getscn(). And that marks the loading as a failure, so
1685 		 * already loaded symbols cannot be fixed up.
1686 *
1687 * I'm not sure what should be done. Just ignore them for now.
1688 * - Namhyung Kim
1689 */
1690 if (sym.st_shndx == SHN_ABS)
1691 continue;
1692
1693 sec = elf_getscn(syms_ss->elf, sym.st_shndx);
1694 if (!sec)
1695 goto out_elf_end;
1696
1697 gelf_getshdr(sec, &shdr);
1698
1699 /*
1700 * If the attribute bit SHF_ALLOC is not set, the section
1701 * doesn't occupy memory during process execution.
1702 		 * E.g. the ".gnu.warning.*" sections are used by the linker to
1703 		 * generate warnings when calling deprecated functions; the
1704 		 * symbols in those sections aren't loaded into memory during
1705 		 * process execution, so skip them.
1706 */
1707 if (!(shdr.sh_flags & SHF_ALLOC))
1708 continue;
1709
1710 secstrs = secstrs_sym;
1711
1712 /*
1713 		 * We have to fall back to runtime when syms' section header has
1714 * NOBITS set. NOBITS results in file offset (sh_offset) not
1715 * being incremented. So sh_offset used below has different
1716 * values for syms (invalid) and runtime (valid).
1717 */
1718 if (shdr.sh_type == SHT_NOBITS) {
1719 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
1720 if (!sec)
1721 goto out_elf_end;
1722
1723 gelf_getshdr(sec, &shdr);
1724 secstrs = secstrs_run;
1725 }
1726
1727 if (is_label && !elf_sec__filter(&shdr, secstrs))
1728 continue;
1729
1730 section_name = elf_sec__name(&shdr, secstrs);
1731
1732 /* On ARM, symbols for thumb functions have 1 added to
1733 * the symbol address as a flag - remove it */
1734 if ((ehdr.e_machine == EM_ARM) &&
1735 (GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
1736 (sym.st_value & 1))
1737 --sym.st_value;
1738
1739 if (dso__kernel(dso)) {
1740 if (dso__process_kernel_symbol(dso, map, &sym, &shdr,
1741 kmaps, kmap, &curr_dso,
1742 section_name,
1743 adjust_kernel_syms,
1744 kmodule,
1745 &remap_kernel,
1746 max_text_sh_offset))
1747 goto out_elf_end;
1748 } else if ((used_opd && runtime_ss->adjust_symbols) ||
1749 (!used_opd && syms_ss->adjust_symbols)) {
1750 GElf_Phdr phdr;
1751
1752 if (elf_read_program_header(runtime_ss->elf,
1753 (u64)sym.st_value, &phdr)) {
1754 pr_debug4("%s: failed to find program header for "
1755 "symbol: %s st_value: %#" PRIx64 "\n",
1756 __func__, elf_name, (u64)sym.st_value);
1757 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1758 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n",
1759 __func__, (u64)sym.st_value, (u64)shdr.sh_addr,
1760 (u64)shdr.sh_offset);
1761 /*
1762 				 * Failed to find a program header, so fall back
1763 				 * to shdr.sh_addr and shdr.sh_offset to
1764 				 * calibrate the symbol's file address. This is
1765 				 * not necessary for a normal C ELF file, but we
1766 				 * still need to handle Java JIT symbols in this
1767 				 * case.
1768 */
1769 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1770 } else {
1771 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1772 "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
1773 __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
1774 (u64)phdr.p_offset);
1775 sym.st_value -= phdr.p_vaddr - phdr.p_offset;
1776 }
1777 }
1778
1779 demangled = demangle_sym(dso, kmodule, elf_name);
1780 if (demangled != NULL)
1781 elf_name = demangled;
1782
1783 f = symbol__new(sym.st_value, sym.st_size,
1784 GELF_ST_BIND(sym.st_info),
1785 GELF_ST_TYPE(sym.st_info), elf_name);
1786 free(demangled);
1787 if (!f)
1788 goto out_elf_end;
1789
1790 arch__sym_update(f, &sym);
1791
1792 __symbols__insert(dso__symbols(curr_dso), f, dso__kernel(dso));
1793 nr++;
1794 }
1795 dso__put(curr_dso);
1796
1797 /*
1798 * For misannotated, zeroed, ASM function sizes.
1799 */
1800 if (nr > 0) {
1801 symbols__fixup_end(dso__symbols(dso), false);
1802 symbols__fixup_duplicate(dso__symbols(dso));
1803 if (kmap) {
1804 /*
1805 * We need to fixup this here too because we create new
1806 * maps here, for things like vsyscall sections.
1807 */
1808 maps__fixup_end(kmaps);
1809 }
1810 }
1811 return nr;
1812 out_elf_end:
1813 dso__put(curr_dso);
1814 return -1;
1815 }
1816
1817 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
1818 struct symsrc *runtime_ss, int kmodule)
1819 {
1820 int nr = 0;
1821 int err = -1;
1822
1823 dso__set_symtab_type(dso, syms_ss->type);
1824 dso__set_is_64_bit(dso, syms_ss->is_64_bit);
1825 dso__set_rel(dso, syms_ss->ehdr.e_type == ET_REL);
1826
1827 /*
1828 * Modules may already have symbols from kallsyms, but those symbols
1829 * have the wrong values for the dso maps, so remove them.
1830 */
1831 if (kmodule && syms_ss->symtab)
1832 symbols__delete(dso__symbols(dso));
1833
1834 if (!syms_ss->symtab) {
1835 /*
1836 * If the vmlinux is stripped, fail so we will fall back
1837 * to using kallsyms. The vmlinux runtime symbols aren't
1838 * of much use.
1839 */
1840 if (dso__kernel(dso))
1841 return err;
1842 } else {
1843 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
1844 kmodule, 0);
1845 if (err < 0)
1846 return err;
1847 nr = err;
1848 }
1849
1850 if (syms_ss->dynsym) {
1851 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
1852 kmodule, 1);
1853 if (err < 0)
1854 return err;
1855 err += nr;
1856 }
1857
1858 return err;
1859 }
1860
1861 static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1862 {
1863 GElf_Phdr phdr;
1864 size_t i, phdrnum;
1865 int err;
1866 u64 sz;
1867
1868 if (elf_getphdrnum(elf, &phdrnum))
1869 return -1;
1870
1871 for (i = 0; i < phdrnum; i++) {
1872 if (gelf_getphdr(elf, i, &phdr) == NULL)
1873 return -1;
1874 if (phdr.p_type != PT_LOAD)
1875 continue;
1876 if (exe) {
1877 if (!(phdr.p_flags & PF_X))
1878 continue;
1879 } else {
1880 if (!(phdr.p_flags & PF_R))
1881 continue;
1882 }
1883 sz = min(phdr.p_memsz, phdr.p_filesz);
1884 if (!sz)
1885 continue;
1886 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
1887 if (err)
1888 return err;
1889 }
1890 return 0;
1891 }
1892
1893 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1894 bool *is_64_bit)
1895 {
1896 int err;
1897 Elf *elf;
1898
1899 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1900 if (elf == NULL)
1901 return -1;
1902
1903 if (is_64_bit)
1904 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1905
1906 err = elf_read_maps(elf, exe, mapfn, data);
1907
1908 elf_end(elf);
1909 return err;
1910 }
1911
1912 enum dso_type dso__type_fd(int fd)
1913 {
1914 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1915 GElf_Ehdr ehdr;
1916 Elf_Kind ek;
1917 Elf *elf;
1918
1919 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1920 if (elf == NULL)
1921 goto out;
1922
1923 ek = elf_kind(elf);
1924 if (ek != ELF_K_ELF)
1925 goto out_end;
1926
1927 if (gelf_getclass(elf) == ELFCLASS64) {
1928 dso_type = DSO__TYPE_64BIT;
1929 goto out_end;
1930 }
1931
1932 if (gelf_getehdr(elf, &ehdr) == NULL)
1933 goto out_end;
1934
1935 if (ehdr.e_machine == EM_X86_64)
1936 dso_type = DSO__TYPE_X32BIT;
1937 else
1938 dso_type = DSO__TYPE_32BIT;
1939 out_end:
1940 elf_end(elf);
1941 out:
1942 return dso_type;
1943 }
1944
1945 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1946 {
1947 ssize_t r;
1948 size_t n;
1949 int err = -1;
1950 char *buf = malloc(page_size);
1951
1952 if (buf == NULL)
1953 return -1;
1954
1955 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1956 goto out;
1957
1958 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1959 goto out;
1960
1961 while (len) {
1962 n = page_size;
1963 if (len < n)
1964 n = len;
1965 /* Use read because mmap won't work on proc files */
1966 r = read(from, buf, n);
1967 if (r < 0)
1968 goto out;
1969 if (!r)
1970 break;
1971 n = r;
1972 r = write(to, buf, n);
1973 if (r < 0)
1974 goto out;
1975 if ((size_t)r != n)
1976 goto out;
1977 len -= n;
1978 }
1979
1980 err = 0;
1981 out:
1982 free(buf);
1983 return err;
1984 }
1985
1986 struct kcore {
1987 int fd;
1988 int elfclass;
1989 Elf *elf;
1990 GElf_Ehdr ehdr;
1991 };
1992
1993 static int kcore__open(struct kcore *kcore, const char *filename)
1994 {
1995 GElf_Ehdr *ehdr;
1996
1997 kcore->fd = open(filename, O_RDONLY);
1998 if (kcore->fd == -1)
1999 return -1;
2000
2001 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
2002 if (!kcore->elf)
2003 goto out_close;
2004
2005 kcore->elfclass = gelf_getclass(kcore->elf);
2006 if (kcore->elfclass == ELFCLASSNONE)
2007 goto out_end;
2008
2009 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
2010 if (!ehdr)
2011 goto out_end;
2012
2013 return 0;
2014
2015 out_end:
2016 elf_end(kcore->elf);
2017 out_close:
2018 close(kcore->fd);
2019 return -1;
2020 }
2021
2022 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
2023 bool temp)
2024 {
2025 kcore->elfclass = elfclass;
2026
2027 if (temp)
2028 kcore->fd = mkstemp(filename);
2029 else
2030 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
2031 if (kcore->fd == -1)
2032 return -1;
2033
2034 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
2035 if (!kcore->elf)
2036 goto out_close;
2037
2038 if (!gelf_newehdr(kcore->elf, elfclass))
2039 goto out_end;
2040
2041 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
2042
2043 return 0;
2044
2045 out_end:
2046 elf_end(kcore->elf);
2047 out_close:
2048 close(kcore->fd);
2049 unlink(filename);
2050 return -1;
2051 }
2052
2053 static void kcore__close(struct kcore *kcore)
2054 {
2055 elf_end(kcore->elf);
2056 close(kcore->fd);
2057 }
2058
2059 static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
2060 {
2061 GElf_Ehdr *ehdr = &to->ehdr;
2062 GElf_Ehdr *kehdr = &from->ehdr;
2063
2064 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
2065 ehdr->e_type = kehdr->e_type;
2066 ehdr->e_machine = kehdr->e_machine;
2067 ehdr->e_version = kehdr->e_version;
2068 ehdr->e_entry = 0;
2069 ehdr->e_shoff = 0;
2070 ehdr->e_flags = kehdr->e_flags;
2071 ehdr->e_phnum = count;
2072 ehdr->e_shentsize = 0;
2073 ehdr->e_shnum = 0;
2074 ehdr->e_shstrndx = 0;
2075
2076 if (from->elfclass == ELFCLASS32) {
2077 ehdr->e_phoff = sizeof(Elf32_Ehdr);
2078 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
2079 ehdr->e_phentsize = sizeof(Elf32_Phdr);
2080 } else {
2081 ehdr->e_phoff = sizeof(Elf64_Ehdr);
2082 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
2083 ehdr->e_phentsize = sizeof(Elf64_Phdr);
2084 }
2085
2086 if (!gelf_update_ehdr(to->elf, ehdr))
2087 return -1;
2088
2089 if (!gelf_newphdr(to->elf, count))
2090 return -1;
2091
2092 return 0;
2093 }
2094
2095 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
2096 u64 addr, u64 len)
2097 {
2098 GElf_Phdr phdr = {
2099 .p_type = PT_LOAD,
2100 .p_flags = PF_R | PF_W | PF_X,
2101 .p_offset = offset,
2102 .p_vaddr = addr,
2103 .p_paddr = 0,
2104 .p_filesz = len,
2105 .p_memsz = len,
2106 .p_align = page_size,
2107 };
2108
2109 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
2110 return -1;
2111
2112 return 0;
2113 }
2114
2115 static off_t kcore__write(struct kcore *kcore)
2116 {
2117 return elf_update(kcore->elf, ELF_C_WRITE);
2118 }
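
/*
 * How the helpers above fit together: kcore__init() creates the output file,
 * kcore__copy_hdr() clones the ELF header from the source kcore (dropping
 * section headers), kcore__add_phdr() emits one PT_LOAD program header per
 * copied segment, kcore__write() flushes the headers via elf_update(), and
 * copy_bytes() then appends the segment data itself. See kcore_copy() and
 * kcore_extract__create() below.
 */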
2119
2120 struct phdr_data {
2121 off_t offset;
2122 off_t rel;
2123 u64 addr;
2124 u64 len;
2125 struct list_head node;
2126 struct phdr_data *remaps;
2127 };
2128
2129 struct sym_data {
2130 u64 addr;
2131 struct list_head node;
2132 };
2133
2134 struct kcore_copy_info {
2135 u64 stext;
2136 u64 etext;
2137 u64 first_symbol;
2138 u64 last_symbol;
2139 u64 first_module;
2140 u64 first_module_symbol;
2141 u64 last_module_symbol;
2142 size_t phnum;
2143 struct list_head phdrs;
2144 struct list_head syms;
2145 };
2146
2147 #define kcore_copy__for_each_phdr(k, p) \
2148 list_for_each_entry((p), &(k)->phdrs, node)
2149
2150 static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
2151 {
2152 struct phdr_data *p = zalloc(sizeof(*p));
2153
2154 if (p) {
2155 p->addr = addr;
2156 p->len = len;
2157 p->offset = offset;
2158 }
2159
2160 return p;
2161 }
2162
2163 static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
2164 u64 addr, u64 len,
2165 off_t offset)
2166 {
2167 struct phdr_data *p = phdr_data__new(addr, len, offset);
2168
2169 if (p)
2170 list_add_tail(&p->node, &kci->phdrs);
2171
2172 return p;
2173 }
2174
2175 static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
2176 {
2177 struct phdr_data *p, *tmp;
2178
2179 list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
2180 list_del_init(&p->node);
2181 free(p);
2182 }
2183 }
2184
2185 static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
2186 u64 addr)
2187 {
2188 struct sym_data *s = zalloc(sizeof(*s));
2189
2190 if (s) {
2191 s->addr = addr;
2192 list_add_tail(&s->node, &kci->syms);
2193 }
2194
2195 return s;
2196 }
2197
2198 static void kcore_copy__free_syms(struct kcore_copy_info *kci)
2199 {
2200 struct sym_data *s, *tmp;
2201
2202 list_for_each_entry_safe(s, tmp, &kci->syms, node) {
2203 list_del_init(&s->node);
2204 free(s);
2205 }
2206 }
2207
2208 static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
2209 u64 start)
2210 {
2211 struct kcore_copy_info *kci = arg;
2212
2213 if (!kallsyms__is_function(type))
2214 return 0;
2215
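	/*
	 * kallsyms reports module symbols as "name [module]", so a '[' in the
	 * name identifies a symbol that belongs to a module rather than to
	 * the kernel proper.
	 */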
2216 if (strchr(name, '[')) {
2217 if (!kci->first_module_symbol || start < kci->first_module_symbol)
2218 kci->first_module_symbol = start;
2219 if (start > kci->last_module_symbol)
2220 kci->last_module_symbol = start;
2221 return 0;
2222 }
2223
2224 if (!kci->first_symbol || start < kci->first_symbol)
2225 kci->first_symbol = start;
2226
2227 if (!kci->last_symbol || start > kci->last_symbol)
2228 kci->last_symbol = start;
2229
2230 if (!strcmp(name, "_stext")) {
2231 kci->stext = start;
2232 return 0;
2233 }
2234
2235 if (!strcmp(name, "_etext")) {
2236 kci->etext = start;
2237 return 0;
2238 }
2239
2240 if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
2241 return -1;
2242
2243 return 0;
2244 }
2245
2246 static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
2247 const char *dir)
2248 {
2249 char kallsyms_filename[PATH_MAX];
2250
2251 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
2252
2253 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
2254 return -1;
2255
2256 if (kallsyms__parse(kallsyms_filename, kci,
2257 kcore_copy__process_kallsyms) < 0)
2258 return -1;
2259
2260 return 0;
2261 }
2262
2263 static int kcore_copy__process_modules(void *arg,
2264 const char *name __maybe_unused,
2265 u64 start, u64 size __maybe_unused)
2266 {
2267 struct kcore_copy_info *kci = arg;
2268
2269 if (!kci->first_module || start < kci->first_module)
2270 kci->first_module = start;
2271
2272 return 0;
2273 }
2274
2275 static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
2276 const char *dir)
2277 {
2278 char modules_filename[PATH_MAX];
2279
2280 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
2281
2282 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
2283 return -1;
2284
2285 if (modules__parse(modules_filename, kci,
2286 kcore_copy__process_modules) < 0)
2287 return -1;
2288
2289 return 0;
2290 }
2291
2292 static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
2293 u64 pgoff, u64 s, u64 e)
2294 {
2295 u64 len, offset;
2296
2297 if (s < start || s >= end)
2298 return 0;
2299
2300 offset = (s - start) + pgoff;
2301 len = e < end ? e - s : end - s;
2302
2303 return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
2304 }
2305
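/*
 * Callback passed to elf_read_maps() via kcore_copy__read_maps(): for each
 * executable PT_LOAD segment of the source kcore, record the pieces that
 * cover the kernel text, the module area and any entry trampoline symbols
 * collected from kallsyms.
 */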
2306 static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
2307 {
2308 struct kcore_copy_info *kci = data;
2309 u64 end = start + len;
2310 struct sym_data *sdat;
2311
2312 if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
2313 return -1;
2314
2315 if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
2316 kci->last_module_symbol))
2317 return -1;
2318
2319 list_for_each_entry(sdat, &kci->syms, node) {
2320 u64 s = round_down(sdat->addr, page_size);
2321
2322 if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
2323 return -1;
2324 }
2325
2326 return 0;
2327 }
2328
2329 static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
2330 {
2331 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
2332 return -1;
2333
2334 return 0;
2335 }
2336
2337 static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
2338 {
2339 struct phdr_data *p, *k = NULL;
2340 u64 kend;
2341
2342 if (!kci->stext)
2343 return;
2344
2345 /* Find phdr that corresponds to the kernel map (contains stext) */
2346 kcore_copy__for_each_phdr(kci, p) {
2347 u64 pend = p->addr + p->len - 1;
2348
2349 if (p->addr <= kci->stext && pend >= kci->stext) {
2350 k = p;
2351 break;
2352 }
2353 }
2354
2355 if (!k)
2356 return;
2357
2358 kend = k->offset + k->len;
2359
2360 /* Find phdrs that remap the kernel */
2361 kcore_copy__for_each_phdr(kci, p) {
2362 u64 pend = p->offset + p->len;
2363
2364 if (p == k)
2365 continue;
2366
2367 if (p->offset >= k->offset && pend <= kend)
2368 p->remaps = k;
2369 }
2370 }
2371
2372 static void kcore_copy__layout(struct kcore_copy_info *kci)
2373 {
2374 struct phdr_data *p;
2375 off_t rel = 0;
2376
2377 kcore_copy__find_remaps(kci);
2378
2379 kcore_copy__for_each_phdr(kci, p) {
2380 if (!p->remaps) {
2381 p->rel = rel;
2382 rel += p->len;
2383 }
2384 kci->phnum += 1;
2385 }
2386
2387 kcore_copy__for_each_phdr(kci, p) {
2388 struct phdr_data *k = p->remaps;
2389
2390 if (k)
2391 p->rel = p->offset - k->offset + k->rel;
2392 }
2393 }
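
/*
 * Worked example with made-up numbers: suppose three phdrs were collected -
 * kernel text (file offset 0x10000, len 0x300000), modules (offset 0x900000,
 * len 0x100000) and an entry trampoline page already covered by the kernel
 * phdr. The first pass assigns rel 0 to the kernel phdr and rel 0x300000 to
 * the modules phdr, laying the copied data out back to back. The second pass
 * gives the trampoline phdr rel = (its offset - kernel offset) + kernel rel,
 * so its program header points into the already copied kernel bytes instead
 * of duplicating them.
 */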
2394
2395 static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
2396 Elf *elf)
2397 {
2398 if (kcore_copy__parse_kallsyms(kci, dir))
2399 return -1;
2400
2401 if (kcore_copy__parse_modules(kci, dir))
2402 return -1;
2403
2404 if (kci->stext)
2405 kci->stext = round_down(kci->stext, page_size);
2406 else
2407 kci->stext = round_down(kci->first_symbol, page_size);
2408
2409 if (kci->etext) {
2410 kci->etext = round_up(kci->etext, page_size);
2411 } else if (kci->last_symbol) {
2412 kci->etext = round_up(kci->last_symbol, page_size);
2413 kci->etext += page_size;
2414 }
2415
2416 if (kci->first_module_symbol &&
2417 (!kci->first_module || kci->first_module_symbol < kci->first_module))
2418 kci->first_module = kci->first_module_symbol;
2419
2420 kci->first_module = round_down(kci->first_module, page_size);
2421
2422 if (kci->last_module_symbol) {
2423 kci->last_module_symbol = round_up(kci->last_module_symbol,
2424 page_size);
2425 kci->last_module_symbol += page_size;
2426 }
2427
2428 if (!kci->stext || !kci->etext)
2429 return -1;
2430
2431 if (kci->first_module && !kci->last_module_symbol)
2432 return -1;
2433
2434 if (kcore_copy__read_maps(kci, elf))
2435 return -1;
2436
2437 kcore_copy__layout(kci);
2438
2439 return 0;
2440 }
2441
2442 static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
2443 const char *name)
2444 {
2445 char from_filename[PATH_MAX];
2446 char to_filename[PATH_MAX];
2447
2448 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
2449 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
2450
2451 return copyfile_mode(from_filename, to_filename, 0400);
2452 }
2453
2454 static int kcore_copy__unlink(const char *dir, const char *name)
2455 {
2456 char filename[PATH_MAX];
2457
2458 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
2459
2460 return unlink(filename);
2461 }
2462
2463 static int kcore_copy__compare_fds(int from, int to)
2464 {
2465 char *buf_from;
2466 char *buf_to;
2467 ssize_t ret;
2468 size_t len;
2469 int err = -1;
2470
2471 buf_from = malloc(page_size);
2472 buf_to = malloc(page_size);
2473 if (!buf_from || !buf_to)
2474 goto out;
2475
2476 while (1) {
2477 /* Use read because mmap won't work on proc files */
2478 ret = read(from, buf_from, page_size);
2479 if (ret < 0)
2480 goto out;
2481
2482 if (!ret)
2483 break;
2484
2485 len = ret;
2486
2487 if (readn(to, buf_to, len) != (int)len)
2488 goto out;
2489
2490 if (memcmp(buf_from, buf_to, len))
2491 goto out;
2492 }
2493
2494 err = 0;
2495 out:
2496 free(buf_to);
2497 free(buf_from);
2498 return err;
2499 }
2500
2501 static int kcore_copy__compare_files(const char *from_filename,
2502 const char *to_filename)
2503 {
2504 int from, to, err = -1;
2505
2506 from = open(from_filename, O_RDONLY);
2507 if (from < 0)
2508 return -1;
2509
2510 to = open(to_filename, O_RDONLY);
2511 if (to < 0)
2512 goto out_close_from;
2513
2514 err = kcore_copy__compare_fds(from, to);
2515
2516 close(to);
2517 out_close_from:
2518 close(from);
2519 return err;
2520 }
2521
2522 static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
2523 const char *name)
2524 {
2525 char from_filename[PATH_MAX];
2526 char to_filename[PATH_MAX];
2527
2528 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
2529 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
2530
2531 return kcore_copy__compare_files(from_filename, to_filename);
2532 }
2533
2534 /**
2535 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
2536 * @from_dir: from directory
2537 * @to_dir: to directory
2538 *
2539 * This function copies kallsyms, modules and kcore files from one directory to
2540 * another. kallsyms and modules are copied entirely. Only code segments are
2541 * copied from kcore. It is assumed that two segments suffice: one for the
2542 * kernel proper and one for all the modules. The code segments are determined
2543 * from kallsyms and modules files. The kernel map starts at _stext or the
2544 * lowest function symbol, and ends at _etext or the highest function symbol.
2545 * The module map starts at the lowest module address and ends at the highest
2546 * module symbol. Start addresses are rounded down to the nearest page. End
2547 * addresses are rounded up to the nearest page. An extra page is added to the
2548 * highest kernel symbol and highest module symbol to, hopefully, encompass that
2549 * symbol too. Because it contains only code sections, the resulting kcore is
2550 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
2551 * is not the same for the kernel map and the modules map. That happens because
2552 * the data is copied adjacently whereas the original kcore has gaps. Finally,
2553  * the kallsyms file is compared with its copy to check that no modules were
2554  * loaded or unloaded while the copies were taking place.
2555 *
2556 * Return: %0 on success, %-1 on failure.
2557 */
2558 int kcore_copy(const char *from_dir, const char *to_dir)
2559 {
2560 struct kcore kcore;
2561 struct kcore extract;
2562 int idx = 0, err = -1;
2563 off_t offset, sz;
2564 struct kcore_copy_info kci = { .stext = 0, };
2565 char kcore_filename[PATH_MAX];
2566 char extract_filename[PATH_MAX];
2567 struct phdr_data *p;
2568
2569 INIT_LIST_HEAD(&kci.phdrs);
2570 INIT_LIST_HEAD(&kci.syms);
2571
2572 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
2573 return -1;
2574
2575 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
2576 goto out_unlink_kallsyms;
2577
2578 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
2579 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
2580
2581 if (kcore__open(&kcore, kcore_filename))
2582 goto out_unlink_modules;
2583
2584 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
2585 goto out_kcore_close;
2586
2587 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
2588 goto out_kcore_close;
2589
2590 if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
2591 goto out_extract_close;
2592
2593 offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
2594 gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
2595 offset = round_up(offset, page_size);
2596
2597 kcore_copy__for_each_phdr(&kci, p) {
2598 off_t offs = p->rel + offset;
2599
2600 if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
2601 goto out_extract_close;
2602 }
2603
2604 sz = kcore__write(&extract);
2605 if (sz < 0 || sz > offset)
2606 goto out_extract_close;
2607
2608 kcore_copy__for_each_phdr(&kci, p) {
2609 off_t offs = p->rel + offset;
2610
2611 if (p->remaps)
2612 continue;
2613 if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
2614 goto out_extract_close;
2615 }
2616
2617 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
2618 goto out_extract_close;
2619
2620 err = 0;
2621
2622 out_extract_close:
2623 kcore__close(&extract);
2624 if (err)
2625 unlink(extract_filename);
2626 out_kcore_close:
2627 kcore__close(&kcore);
2628 out_unlink_modules:
2629 if (err)
2630 kcore_copy__unlink(to_dir, "modules");
2631 out_unlink_kallsyms:
2632 if (err)
2633 kcore_copy__unlink(to_dir, "kallsyms");
2634
2635 kcore_copy__free_phdrs(&kci);
2636 kcore_copy__free_syms(&kci);
2637
2638 return err;
2639 }
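
/*
 * Illustrative sketch: a caller (e.g. perf's build-id cache code) might
 * invoke kcore_copy() roughly like this; the directory names are
 * hypothetical.
 *
 *	if (kcore_copy("/proc", "/tmp/kcore_dir") == 0)
 *		pr_debug("kallsyms, modules and kcore copied\n");
 *
 * The source directory must provide kallsyms, modules and kcore files (as
 * /proc does), and the copied kcore must not already exist, since it is
 * created with O_CREAT | O_EXCL.
 */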
2640
2641 int kcore_extract__create(struct kcore_extract *kce)
2642 {
2643 struct kcore kcore;
2644 struct kcore extract;
2645 size_t count = 1;
2646 int idx = 0, err = -1;
2647 off_t offset = page_size, sz;
2648
2649 if (kcore__open(&kcore, kce->kcore_filename))
2650 return -1;
2651
2652 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
2653 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
2654 goto out_kcore_close;
2655
2656 if (kcore__copy_hdr(&kcore, &extract, count))
2657 goto out_extract_close;
2658
2659 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
2660 goto out_extract_close;
2661
2662 sz = kcore__write(&extract);
2663 if (sz < 0 || sz > offset)
2664 goto out_extract_close;
2665
2666 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
2667 goto out_extract_close;
2668
2669 err = 0;
2670
2671 out_extract_close:
2672 kcore__close(&extract);
2673 if (err)
2674 unlink(kce->extract_filename);
2675 out_kcore_close:
2676 kcore__close(&kcore);
2677
2678 return err;
2679 }
2680
2681 void kcore_extract__delete(struct kcore_extract *kce)
2682 {
2683 unlink(kce->extract_filename);
2684 }
2685
2686 #ifdef HAVE_GELF_GETNOTE_SUPPORT
2687
2688 static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off)
2689 {
2690 if (!base_off)
2691 return;
2692
2693 if (tmp->bit32)
2694 tmp->addr.a32[SDT_NOTE_IDX_LOC] =
2695 tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off -
2696 tmp->addr.a32[SDT_NOTE_IDX_BASE];
2697 else
2698 tmp->addr.a64[SDT_NOTE_IDX_LOC] =
2699 tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off -
2700 tmp->addr.a64[SDT_NOTE_IDX_BASE];
2701 }
2702
2703 static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr,
2704 GElf_Addr base_off)
2705 {
2706 if (!base_off)
2707 return;
2708
2709 if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR])
2710 tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2711 else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR])
2712 tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2713 }
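
/*
 * Worked example with made-up numbers: if a note records loc = 0x401234 and
 * base = 0x600000, but the file offset of the .stapsdt.base section is
 * base_off = 0x200000 (e.g. the object was prelinked), sdt_adjust_loc()
 * rewrites loc = 0x401234 + 0x200000 - 0x600000 = 0x1234, i.e. the probe
 * location rebased from the note's recorded base onto the section's actual
 * file offset. sdt_adjust_refctr() likewise subtracts (sh_addr - sh_offset)
 * of the probes section from a non-zero reference counter address.
 */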
2714
2715 /**
2716 * populate_sdt_note : Parse raw data and identify SDT note
2717 * @elf: elf of the opened file
2718 * @data: raw data of a section with description offset applied
2719 * @len: note description size
2720 * @type: type of the note
2721 * @sdt_notes: List to add the SDT note
2722 *
2723  * Responsible for parsing the @data in the .note.stapsdt section of @elf
2724  * and, if it is an SDT note, appending it to the @sdt_notes list.
2725 */
2726 static int populate_sdt_note(Elf **elf, const char *data, size_t len,
2727 struct list_head *sdt_notes)
2728 {
2729 const char *provider, *name, *args;
2730 struct sdt_note *tmp = NULL;
2731 GElf_Ehdr ehdr;
2732 GElf_Shdr shdr;
2733 int ret = -EINVAL;
2734
2735 union {
2736 Elf64_Addr a64[NR_ADDR];
2737 Elf32_Addr a32[NR_ADDR];
2738 } buf;
2739
2740 Elf_Data dst = {
2741 .d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
2742 .d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
2743 .d_off = 0, .d_align = 0
2744 };
2745 Elf_Data src = {
2746 .d_buf = (void *) data, .d_type = ELF_T_ADDR,
2747 .d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
2748 .d_align = 0
2749 };
2750
2751 tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
2752 if (!tmp) {
2753 ret = -ENOMEM;
2754 goto out_err;
2755 }
2756
2757 INIT_LIST_HEAD(&tmp->note_list);
2758
2759 if (len < dst.d_size + 3)
2760 goto out_free_note;
2761
2762 /* Translation from file representation to memory representation */
2763 if (gelf_xlatetom(*elf, &dst, &src,
2764 elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
2765 pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
2766 goto out_free_note;
2767 }
2768
2769 /* Populate the fields of sdt_note */
2770 provider = data + dst.d_size;
2771
2772 name = (const char *)memchr(provider, '\0', data + len - provider);
2773 if (name++ == NULL)
2774 goto out_free_note;
2775
2776 tmp->provider = strdup(provider);
2777 if (!tmp->provider) {
2778 ret = -ENOMEM;
2779 goto out_free_note;
2780 }
2781 tmp->name = strdup(name);
2782 if (!tmp->name) {
2783 ret = -ENOMEM;
2784 goto out_free_prov;
2785 }
2786
2787 args = memchr(name, '\0', data + len - name);
2788
2789 /*
2790 * There is no argument if:
2791 * - We reached the end of the note;
2792 * - There is not enough room to hold a potential string;
2793 * - The argument string is empty or just contains ':'.
2794 */
2795 if (args == NULL || data + len - args < 2 ||
2796 args[1] == ':' || args[1] == '\0')
2797 tmp->args = NULL;
2798 else {
2799 tmp->args = strdup(++args);
2800 if (!tmp->args) {
2801 ret = -ENOMEM;
2802 goto out_free_name;
2803 }
2804 }
2805
2806 if (gelf_getclass(*elf) == ELFCLASS32) {
2807 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
2808 tmp->bit32 = true;
2809 } else {
2810 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
2811 tmp->bit32 = false;
2812 }
2813
2814 if (!gelf_getehdr(*elf, &ehdr)) {
2815 pr_debug("%s : cannot get elf header.\n", __func__);
2816 ret = -EBADF;
2817 goto out_free_args;
2818 }
2819
2820 	/* Adjust for the prelink effect:
2821 	 * find the .stapsdt.base section, which helps handle
2822 	 * prelinking (if present). Compare the file offset of that
2823 	 * section with the base address recorded in the description
2824 	 * of the SDT note; if they differ, adjust the note location
2825 	 * accordingly.
2826 	 */
2827 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
2828 sdt_adjust_loc(tmp, shdr.sh_offset);
2829
2830 /* Adjust reference counter offset */
2831 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
2832 sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);
2833
2834 list_add_tail(&tmp->note_list, sdt_notes);
2835 return 0;
2836
2837 out_free_args:
2838 zfree(&tmp->args);
2839 out_free_name:
2840 zfree(&tmp->name);
2841 out_free_prov:
2842 zfree(&tmp->provider);
2843 out_free_note:
2844 free(tmp);
2845 out_err:
2846 return ret;
2847 }
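
/*
 * For reference, the note description parsed above follows the layout
 * defined by systemtap's sdt.h; for ELFCLASS64:
 *
 *	+0x00	probe location address
 *	+0x08	link-time address of .stapsdt.base
 *	+0x10	semaphore (reference counter) address, or 0
 *	+0x18	"provider\0" "probe-name\0" "arguments\0"
 *
 * which is why populate_sdt_note() first converts NR_ADDR addresses and then
 * walks up to three NUL-terminated strings.
 */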
2848
2849 /**
2850 * construct_sdt_notes_list : constructs a list of SDT notes
2851 * @elf : elf to look into
2852 * @sdt_notes : empty list_head
2853 *
2854 * Scans the sections in 'elf' for the section
2855  * .note.stapsdt. It then calls populate_sdt_note() to parse
2856  * the SDT events and populate the 'sdt_notes' list.
2857 */
2858 static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
2859 {
2860 GElf_Ehdr ehdr;
2861 Elf_Scn *scn = NULL;
2862 Elf_Data *data;
2863 GElf_Shdr shdr;
2864 size_t shstrndx, next;
2865 GElf_Nhdr nhdr;
2866 size_t name_off, desc_off, offset;
2867 int ret = 0;
2868
2869 if (gelf_getehdr(elf, &ehdr) == NULL) {
2870 ret = -EBADF;
2871 goto out_ret;
2872 }
2873 if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
2874 ret = -EBADF;
2875 goto out_ret;
2876 }
2877
2878 /* Look for the required section */
2879 scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
2880 if (!scn) {
2881 ret = -ENOENT;
2882 goto out_ret;
2883 }
2884
2885 if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
2886 ret = -ENOENT;
2887 goto out_ret;
2888 }
2889
2890 data = elf_getdata(scn, NULL);
2891
2892 /* Get the SDT notes */
2893 for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
2894 &desc_off)) > 0; offset = next) {
2895 if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
2896 !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
2897 sizeof(SDT_NOTE_NAME))) {
2898 /* Check the type of the note */
2899 if (nhdr.n_type != SDT_NOTE_TYPE)
2900 goto out_ret;
2901
2902 ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
2903 nhdr.n_descsz, sdt_notes);
2904 if (ret < 0)
2905 goto out_ret;
2906 }
2907 }
2908 if (list_empty(sdt_notes))
2909 ret = -ENOENT;
2910
2911 out_ret:
2912 return ret;
2913 }
2914
2915 /**
2916 * get_sdt_note_list : Wrapper to construct a list of sdt notes
2917 * @head : empty list_head
2918 * @target : file to find SDT notes from
2919 *
2920  * This opens the file, initializes the ELF descriptor and then
2921  * calls construct_sdt_notes_list().
2922 */
2923 int get_sdt_note_list(struct list_head *head, const char *target)
2924 {
2925 Elf *elf;
2926 int fd, ret;
2927
2928 fd = open(target, O_RDONLY);
2929 if (fd < 0)
2930 return -EBADF;
2931
2932 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
2933 if (!elf) {
2934 ret = -EBADF;
2935 goto out_close;
2936 }
2937 ret = construct_sdt_notes_list(elf, head);
2938 elf_end(elf);
2939 out_close:
2940 close(fd);
2941 return ret;
2942 }
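
/*
 * Illustrative sketch: listing the SDT markers of a binary could look
 * roughly like this; the path is hypothetical.
 *
 *	LIST_HEAD(sdt_notes);
 *	struct sdt_note *note;
 *
 *	if (get_sdt_note_list(&sdt_notes, "/usr/bin/foo") == 0) {
 *		pr_debug("%d SDT notes\n", sdt_notes__get_count(&sdt_notes));
 *		list_for_each_entry(note, &sdt_notes, note_list)
 *			pr_debug("%s:%s\n", note->provider, note->name);
 *		cleanup_sdt_note_list(&sdt_notes);
 *	}
 */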
2943
2944 /**
2945  * cleanup_sdt_note_list : free the SDT notes list
2946  * @sdt_notes: SDT notes list
2947  *
2948  * Free up the SDT notes in @sdt_notes.
2949  * Returns the number of SDT notes freed.
2950 */
2951 int cleanup_sdt_note_list(struct list_head *sdt_notes)
2952 {
2953 struct sdt_note *tmp, *pos;
2954 int nr_free = 0;
2955
2956 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
2957 list_del_init(&pos->note_list);
2958 zfree(&pos->args);
2959 zfree(&pos->name);
2960 zfree(&pos->provider);
2961 free(pos);
2962 nr_free++;
2963 }
2964 return nr_free;
2965 }
2966
2967 /**
2968  * sdt_notes__get_count: Counts the number of SDT notes
2969 * @start: list_head to sdt_notes list
2970 *
2971  * Returns the number of SDT notes in the list.
2972 */
2973 int sdt_notes__get_count(struct list_head *start)
2974 {
2975 struct sdt_note *sdt_ptr;
2976 int count = 0;
2977
2978 list_for_each_entry(sdt_ptr, start, note_list)
2979 count++;
2980 return count;
2981 }
2982 #endif
2983
2984 void symbol__elf_init(void)
2985 {
2986 elf_version(EV_CURRENT);
2987 }
2988