1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <inttypes.h>
5 #include <regex.h>
6 #include <stdlib.h>
7 #include "callchain.h"
8 #include "debug.h"
9 #include "dso.h"
10 #include "env.h"
11 #include "event.h"
12 #include "evsel.h"
13 #include "hist.h"
14 #include "machine.h"
15 #include "map.h"
16 #include "map_symbol.h"
17 #include "branch.h"
18 #include "mem-events.h"
19 #include "srcline.h"
20 #include "symbol.h"
21 #include "sort.h"
22 #include "strlist.h"
23 #include "target.h"
24 #include "thread.h"
25 #include "util.h"
26 #include "vdso.h"
27 #include <stdbool.h>
28 #include <sys/types.h>
29 #include <sys/stat.h>
30 #include <unistd.h>
31 #include "unwind.h"
32 #include "linux/hash.h"
33 #include "asm/bug.h"
34 #include "bpf-event.h"
35 #include <internal/lib.h> // page_size
36 #include "cgroup.h"
37
38 #include <linux/ctype.h>
39 #include <symbol/kallsyms.h>
40 #include <linux/mman.h>
41 #include <linux/string.h>
42 #include <linux/zalloc.h>
43
44 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
45
46 static struct dso *machine__kernel_dso(struct machine *machine)
47 {
48 return machine->vmlinux_map->dso;
49 }
50
51 static void dsos__init(struct dsos *dsos)
52 {
53 INIT_LIST_HEAD(&dsos->head);
54 dsos->root = RB_ROOT;
55 init_rwsem(&dsos->lock);
56 }
57
58 static void machine__threads_init(struct machine *machine)
59 {
60 int i;
61
62 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
63 struct threads *threads = &machine->threads[i];
64 threads->entries = RB_ROOT_CACHED;
65 init_rwsem(&threads->lock);
66 threads->nr = 0;
67 INIT_LIST_HEAD(&threads->dead);
68 threads->last_match = NULL;
69 }
70 }
71
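/*
 * Pick the name used for the synthesized kernel mmap event: the host uses
 * "[kernel.kallsyms]", the default guest "[guest.kernel.kallsyms]", and a
 * specific guest gets its pid appended.
 */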
72 static int machine__set_mmap_name(struct machine *machine)
73 {
74 if (machine__is_host(machine))
75 machine->mmap_name = strdup("[kernel.kallsyms]");
76 else if (machine__is_default_guest(machine))
77 machine->mmap_name = strdup("[guest.kernel.kallsyms]");
78 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
79 machine->pid) < 0)
80 machine->mmap_name = NULL;
81
82 return machine->mmap_name ? 0 : -ENOMEM;
83 }
84
85 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
86 {
87 int err = -ENOMEM;
88
89 memset(machine, 0, sizeof(*machine));
90 maps__init(&machine->kmaps, machine);
91 RB_CLEAR_NODE(&machine->rb_node);
92 dsos__init(&machine->dsos);
93
94 machine__threads_init(machine);
95
96 machine->vdso_info = NULL;
97 machine->env = NULL;
98
99 machine->pid = pid;
100
101 machine->id_hdr_size = 0;
102 machine->kptr_restrict_warned = false;
103 machine->comm_exec = false;
104 machine->kernel_start = 0;
105 machine->vmlinux_map = NULL;
106
107 machine->root_dir = strdup(root_dir);
108 if (machine->root_dir == NULL)
109 return -ENOMEM;
110
111 if (machine__set_mmap_name(machine))
112 goto out;
113
114 if (pid != HOST_KERNEL_ID) {
115 struct thread *thread = machine__findnew_thread(machine, -1,
116 pid);
117 char comm[64];
118
119 if (thread == NULL)
120 goto out;
121
122 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
123 thread__set_comm(thread, comm, 0);
124 thread__put(thread);
125 }
126
127 machine->current_tid = NULL;
128 err = 0;
129
130 out:
131 if (err) {
132 zfree(&machine->root_dir);
133 zfree(&machine->mmap_name);
134 }
135 return err;
136 }
137
138 struct machine *machine__new_host(void)
139 {
140 struct machine *machine = malloc(sizeof(*machine));
141
142 if (machine != NULL) {
143 machine__init(machine, "", HOST_KERNEL_ID);
144
145 if (machine__create_kernel_maps(machine) < 0)
146 goto out_delete;
147 }
148
149 return machine;
150 out_delete:
151 free(machine);
152 return NULL;
153 }
154
155 struct machine *machine__new_kallsyms(void)
156 {
157 struct machine *machine = machine__new_host();
158 /*
159 * FIXME:
160 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
161 * ask for not using the kcore parsing code, once this one is fixed
162 * to create a map per module.
163 */
164 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
165 machine__delete(machine);
166 machine = NULL;
167 }
168
169 return machine;
170 }
171
172 static void dsos__purge(struct dsos *dsos)
173 {
174 struct dso *pos, *n;
175
176 down_write(&dsos->lock);
177
178 list_for_each_entry_safe(pos, n, &dsos->head, node) {
179 RB_CLEAR_NODE(&pos->rb_node);
180 pos->root = NULL;
181 list_del_init(&pos->node);
182 dso__put(pos);
183 }
184
185 up_write(&dsos->lock);
186 }
187
188 static void dsos__exit(struct dsos *dsos)
189 {
190 dsos__purge(dsos);
191 exit_rwsem(&dsos->lock);
192 }
193
194 void machine__delete_threads(struct machine *machine)
195 {
196 struct rb_node *nd;
197 int i;
198
199 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
200 struct threads *threads = &machine->threads[i];
201 down_write(&threads->lock);
202 nd = rb_first_cached(&threads->entries);
203 while (nd) {
204 struct thread *t = rb_entry(nd, struct thread, rb_node);
205
206 nd = rb_next(nd);
207 __machine__remove_thread(machine, t, false);
208 }
209 up_write(&threads->lock);
210 }
211 }
212
213 void machine__exit(struct machine *machine)
214 {
215 int i;
216
217 if (machine == NULL)
218 return;
219
220 machine__destroy_kernel_maps(machine);
221 maps__exit(&machine->kmaps);
222 dsos__exit(&machine->dsos);
223 machine__exit_vdso(machine);
224 zfree(&machine->root_dir);
225 zfree(&machine->mmap_name);
226 zfree(&machine->current_tid);
227
228 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
229 struct threads *threads = &machine->threads[i];
230 struct thread *thread, *n;
231 /*
232 * Forget about the dead, at this point whatever threads were
233 * left in the dead lists better have a reference count taken
234 * by whoever is using them, and then, when they drop those references
235 * and it finally hits zero, thread__put() will check and see that
236 * it's not in the dead threads list and will not try to remove it
237 * from there, just calling thread__delete() straight away.
238 */
239 list_for_each_entry_safe(thread, n, &threads->dead, node)
240 list_del_init(&thread->node);
241
242 exit_rwsem(&threads->lock);
243 }
244 }
245
246 void machine__delete(struct machine *machine)
247 {
248 if (machine) {
249 machine__exit(machine);
250 free(machine);
251 }
252 }
253
254 void machines__init(struct machines *machines)
255 {
256 machine__init(&machines->host, "", HOST_KERNEL_ID);
257 machines->guests = RB_ROOT_CACHED;
258 }
259
260 void machines__exit(struct machines *machines)
261 {
262 machine__exit(&machines->host);
263 /* XXX exit guest */
264 }
265
266 struct machine *machines__add(struct machines *machines, pid_t pid,
267 const char *root_dir)
268 {
269 struct rb_node **p = &machines->guests.rb_root.rb_node;
270 struct rb_node *parent = NULL;
271 struct machine *pos, *machine = malloc(sizeof(*machine));
272 bool leftmost = true;
273
274 if (machine == NULL)
275 return NULL;
276
277 if (machine__init(machine, root_dir, pid) != 0) {
278 free(machine);
279 return NULL;
280 }
281
282 while (*p != NULL) {
283 parent = *p;
284 pos = rb_entry(parent, struct machine, rb_node);
285 if (pid < pos->pid)
286 p = &(*p)->rb_left;
287 else {
288 p = &(*p)->rb_right;
289 leftmost = false;
290 }
291 }
292
293 rb_link_node(&machine->rb_node, parent, p);
294 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
295
296 return machine;
297 }
298
299 void machines__set_comm_exec(struct machines *machines, bool comm_exec)
300 {
301 struct rb_node *nd;
302
303 machines->host.comm_exec = comm_exec;
304
305 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
306 struct machine *machine = rb_entry(nd, struct machine, rb_node);
307
308 machine->comm_exec = comm_exec;
309 }
310 }
311
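/*
 * Look up a machine by pid in the guests rbtree. The host machine is returned
 * for HOST_KERNEL_ID; if no exact match is found, the "default" guest machine
 * (pid 0), when encountered during the walk, is returned instead.
 */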
312 struct machine *machines__find(struct machines *machines, pid_t pid)
313 {
314 struct rb_node **p = &machines->guests.rb_root.rb_node;
315 struct rb_node *parent = NULL;
316 struct machine *machine;
317 struct machine *default_machine = NULL;
318
319 if (pid == HOST_KERNEL_ID)
320 return &machines->host;
321
322 while (*p != NULL) {
323 parent = *p;
324 machine = rb_entry(parent, struct machine, rb_node);
325 if (pid < machine->pid)
326 p = &(*p)->rb_left;
327 else if (pid > machine->pid)
328 p = &(*p)->rb_right;
329 else
330 return machine;
331 if (!machine->pid)
332 default_machine = machine;
333 }
334
335 return default_machine;
336 }
337
338 struct machine *machines__findnew(struct machines *machines, pid_t pid)
339 {
340 char path[PATH_MAX];
341 const char *root_dir = "";
342 struct machine *machine = machines__find(machines, pid);
343
344 if (machine && (machine->pid == pid))
345 goto out;
346
347 if ((pid != HOST_KERNEL_ID) &&
348 (pid != DEFAULT_GUEST_KERNEL_ID) &&
349 (symbol_conf.guestmount)) {
350 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
351 if (access(path, R_OK)) {
352 static struct strlist *seen;
353
354 if (!seen)
355 seen = strlist__new(NULL, NULL);
356
357 if (!strlist__has_entry(seen, path)) {
358 pr_err("Can't access file %s\n", path);
359 strlist__add(seen, path);
360 }
361 machine = NULL;
362 goto out;
363 }
364 root_dir = path;
365 }
366
367 machine = machines__add(machines, pid, root_dir);
368 out:
369 return machine;
370 }
371
372 struct machine *machines__find_guest(struct machines *machines, pid_t pid)
373 {
374 struct machine *machine = machines__find(machines, pid);
375
376 if (!machine)
377 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
378 return machine;
379 }
380
381 void machines__process_guests(struct machines *machines,
382 machine__process_t process, void *data)
383 {
384 struct rb_node *nd;
385
386 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
387 struct machine *pos = rb_entry(nd, struct machine, rb_node);
388 process(pos, data);
389 }
390 }
391
392 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
393 {
394 struct rb_node *node;
395 struct machine *machine;
396
397 machines->host.id_hdr_size = id_hdr_size;
398
399 for (node = rb_first_cached(&machines->guests); node;
400 node = rb_next(node)) {
401 machine = rb_entry(node, struct machine, rb_node);
402 machine->id_hdr_size = id_hdr_size;
403 }
404
405 return;
406 }
407
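/*
 * Fix up a thread that was created before its pid was known (e.g. from an
 * event that only carried the tid): record the pid and, unless the thread is
 * its own group leader, share the leader's maps.
 */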
408 static void machine__update_thread_pid(struct machine *machine,
409 struct thread *th, pid_t pid)
410 {
411 struct thread *leader;
412
413 if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
414 return;
415
416 th->pid_ = pid;
417
418 if (th->pid_ == th->tid)
419 return;
420
421 leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
422 if (!leader)
423 goto out_err;
424
425 if (!leader->maps)
426 leader->maps = maps__new(machine);
427
428 if (!leader->maps)
429 goto out_err;
430
431 if (th->maps == leader->maps)
432 return;
433
434 if (th->maps) {
435 /*
436 * Maps are created from MMAP events which provide the pid and
437 * tid. Consequently there never should be any maps on a thread
438 * with an unknown pid. Just print an error if there are.
439 */
440 if (!maps__empty(th->maps))
441 pr_err("Discarding thread maps for %d:%d\n",
442 th->pid_, th->tid);
443 maps__put(th->maps);
444 }
445
446 th->maps = maps__get(leader->maps);
447 out_put:
448 thread__put(leader);
449 return;
450 out_err:
451 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
452 goto out_put;
453 }
454
455 /*
456 * Front-end cache - TID lookups come in blocks,
457 * so most of the time we don't have to look up
458 * the full rbtree:
459 */
460 static struct thread*
461 __threads__get_last_match(struct threads *threads, struct machine *machine,
462 int pid, int tid)
463 {
464 struct thread *th;
465
466 th = threads->last_match;
467 if (th != NULL) {
468 if (th->tid == tid) {
469 machine__update_thread_pid(machine, th, pid);
470 return thread__get(th);
471 }
472
473 threads->last_match = NULL;
474 }
475
476 return NULL;
477 }
478
479 static struct thread*
480 threads__get_last_match(struct threads *threads, struct machine *machine,
481 int pid, int tid)
482 {
483 struct thread *th = NULL;
484
485 if (perf_singlethreaded)
486 th = __threads__get_last_match(threads, machine, pid, tid);
487
488 return th;
489 }
490
491 static void
492 __threads__set_last_match(struct threads *threads, struct thread *th)
493 {
494 threads->last_match = th;
495 }
496
497 static void
498 threads__set_last_match(struct threads *threads, struct thread *th)
499 {
500 if (perf_singlethreaded)
501 __threads__set_last_match(threads, th);
502 }
503
504 /*
505 * The caller must eventually drop the thread->refcnt returned by a
506 * successful lookup or by inserting a new thread.
507 */
508 static struct thread *____machine__findnew_thread(struct machine *machine,
509 struct threads *threads,
510 pid_t pid, pid_t tid,
511 bool create)
512 {
513 struct rb_node **p = &threads->entries.rb_root.rb_node;
514 struct rb_node *parent = NULL;
515 struct thread *th;
516 bool leftmost = true;
517
518 th = threads__get_last_match(threads, machine, pid, tid);
519 if (th)
520 return th;
521
522 while (*p != NULL) {
523 parent = *p;
524 th = rb_entry(parent, struct thread, rb_node);
525
526 if (th->tid == tid) {
527 threads__set_last_match(threads, th);
528 machine__update_thread_pid(machine, th, pid);
529 return thread__get(th);
530 }
531
532 if (tid < th->tid)
533 p = &(*p)->rb_left;
534 else {
535 p = &(*p)->rb_right;
536 leftmost = false;
537 }
538 }
539
540 if (!create)
541 return NULL;
542
543 th = thread__new(pid, tid);
544 if (th != NULL) {
545 rb_link_node(&th->rb_node, parent, p);
546 rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
547
548 /*
549 * We have to initialize maps separately after rb tree is updated.
550 *
551 * The reason is that we call machine__findnew_thread
552 * within thread__init_maps to find the thread
553 * leader and that would screw up the rb tree.
554 */
555 if (thread__init_maps(th, machine)) {
556 rb_erase_cached(&th->rb_node, &threads->entries);
557 RB_CLEAR_NODE(&th->rb_node);
558 thread__put(th);
559 return NULL;
560 }
561 /*
562 * It is now in the rbtree, get a ref
563 */
564 thread__get(th);
565 threads__set_last_match(threads, th);
566 ++threads->nr;
567 }
568
569 return th;
570 }
571
572 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
573 {
574 return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
575 }
576
577 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
578 pid_t tid)
579 {
580 struct threads *threads = machine__threads(machine, tid);
581 struct thread *th;
582
583 down_write(&threads->lock);
584 th = __machine__findnew_thread(machine, pid, tid);
585 up_write(&threads->lock);
586 return th;
587 }
588
589 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
590 pid_t tid)
591 {
592 struct threads *threads = machine__threads(machine, tid);
593 struct thread *th;
594
595 down_read(&threads->lock);
596 th = ____machine__findnew_thread(machine, threads, pid, tid, false);
597 up_read(&threads->lock);
598 return th;
599 }
600
601 /*
602 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
603 * So here a single thread is created for that, but actually there is a separate
604 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
605 * is only 1. That causes problems for some tools, requiring workarounds. For
606 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
607 */
608 struct thread *machine__idle_thread(struct machine *machine)
609 {
610 struct thread *thread = machine__findnew_thread(machine, 0, 0);
611
612 if (!thread || thread__set_comm(thread, "swapper", 0) ||
613 thread__set_namespaces(thread, 0, NULL))
614 pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
615
616 return thread;
617 }
618
619 struct comm *machine__thread_exec_comm(struct machine *machine,
620 struct thread *thread)
621 {
622 if (machine->comm_exec)
623 return thread__exec_comm(thread);
624 else
625 return thread__comm(thread);
626 }
627
628 int machine__process_comm_event(struct machine *machine, union perf_event *event,
629 struct perf_sample *sample)
630 {
631 struct thread *thread = machine__findnew_thread(machine,
632 event->comm.pid,
633 event->comm.tid);
634 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
635 int err = 0;
636
637 if (exec)
638 machine->comm_exec = true;
639
640 if (dump_trace)
641 perf_event__fprintf_comm(event, stdout);
642
643 if (thread == NULL ||
644 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
645 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
646 err = -1;
647 }
648
649 thread__put(thread);
650
651 return err;
652 }
653
654 int machine__process_namespaces_event(struct machine *machine __maybe_unused,
655 union perf_event *event,
656 struct perf_sample *sample __maybe_unused)
657 {
658 struct thread *thread = machine__findnew_thread(machine,
659 event->namespaces.pid,
660 event->namespaces.tid);
661 int err = 0;
662
663 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
664 "\nWARNING: kernel seems to support more namespaces than perf"
665 " tool.\nTry updating the perf tool..\n\n");
666
667 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
668 "\nWARNING: perf tool seems to support more namespaces than"
669 " the kernel.\nTry updating the kernel..\n\n");
670
671 if (dump_trace)
672 perf_event__fprintf_namespaces(event, stdout);
673
674 if (thread == NULL ||
675 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
676 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
677 err = -1;
678 }
679
680 thread__put(thread);
681
682 return err;
683 }
684
685 int machine__process_cgroup_event(struct machine *machine,
686 union perf_event *event,
687 struct perf_sample *sample __maybe_unused)
688 {
689 struct cgroup *cgrp;
690
691 if (dump_trace)
692 perf_event__fprintf_cgroup(event, stdout);
693
694 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
695 if (cgrp == NULL)
696 return -ENOMEM;
697
698 return 0;
699 }
700
701 int machine__process_lost_event(struct machine *machine __maybe_unused,
702 union perf_event *event, struct perf_sample *sample __maybe_unused)
703 {
704 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
705 event->lost.id, event->lost.lost);
706 return 0;
707 }
708
709 int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
710 union perf_event *event, struct perf_sample *sample)
711 {
712 dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
713 sample->id, event->lost_samples.lost);
714 return 0;
715 }
716
717 static struct dso *machine__findnew_module_dso(struct machine *machine,
718 struct kmod_path *m,
719 const char *filename)
720 {
721 struct dso *dso;
722
723 down_write(&machine->dsos.lock);
724
725 dso = __dsos__find(&machine->dsos, m->name, true);
726 if (!dso) {
727 dso = __dsos__addnew(&machine->dsos, m->name);
728 if (dso == NULL)
729 goto out_unlock;
730
731 dso__set_module_info(dso, m, machine);
732 dso__set_long_name(dso, strdup(filename), true);
733 dso->kernel = DSO_SPACE__KERNEL;
734 }
735
736 dso__get(dso);
737 out_unlock:
738 up_write(&machine->dsos.lock);
739 return dso;
740 }
741
742 int machine__process_aux_event(struct machine *machine __maybe_unused,
743 union perf_event *event)
744 {
745 if (dump_trace)
746 perf_event__fprintf_aux(event, stdout);
747 return 0;
748 }
749
750 int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
751 union perf_event *event)
752 {
753 if (dump_trace)
754 perf_event__fprintf_itrace_start(event, stdout);
755 return 0;
756 }
757
758 int machine__process_switch_event(struct machine *machine __maybe_unused,
759 union perf_event *event)
760 {
761 if (dump_trace)
762 perf_event__fprintf_switch(event, stdout);
763 return 0;
764 }
765
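/*
 * PERF_RECORD_KSYMBOL registration: create a kernel dso/map for code that
 * appears at runtime (e.g. BPF programs and other out-of-line text) if none
 * covers the address yet, and insert the symbol so it resolves like any other
 * kernel symbol.
 */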
766 static int machine__process_ksymbol_register(struct machine *machine,
767 union perf_event *event,
768 struct perf_sample *sample __maybe_unused)
769 {
770 struct symbol *sym;
771 struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);
772
773 if (!map) {
774 struct dso *dso = dso__new(event->ksymbol.name);
775
776 if (dso) {
777 dso->kernel = DSO_SPACE__KERNEL;
778 map = map__new2(0, dso);
779 }
780
781 if (!dso || !map) {
782 dso__put(dso);
783 return -ENOMEM;
784 }
785
786 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
787 map->dso->binary_type = DSO_BINARY_TYPE__OOL;
788 map->dso->data.file_size = event->ksymbol.len;
789 dso__set_loaded(map->dso);
790 }
791
792 map->start = event->ksymbol.addr;
793 map->end = map->start + event->ksymbol.len;
794 maps__insert(&machine->kmaps, map);
795 dso__set_loaded(dso);
796
797 if (is_bpf_image(event->ksymbol.name)) {
798 dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
799 dso__set_long_name(dso, "", false);
800 }
801 }
802
803 sym = symbol__new(map->map_ip(map, map->start),
804 event->ksymbol.len,
805 0, 0, event->ksymbol.name);
806 if (!sym)
807 return -ENOMEM;
808 dso__insert_symbol(map->dso, sym);
809 return 0;
810 }
811
812 static int machine__process_ksymbol_unregister(struct machine *machine,
813 union perf_event *event,
814 struct perf_sample *sample __maybe_unused)
815 {
816 struct symbol *sym;
817 struct map *map;
818
819 map = maps__find(&machine->kmaps, event->ksymbol.addr);
820 if (!map)
821 return 0;
822
823 if (map != machine->vmlinux_map)
824 maps__remove(&machine->kmaps, map);
825 else {
826 sym = dso__find_symbol(map->dso, map->map_ip(map, map->start));
827 if (sym)
828 dso__delete_symbol(map->dso, sym);
829 }
830
831 return 0;
832 }
833
834 int machine__process_ksymbol(struct machine *machine __maybe_unused,
835 union perf_event *event,
836 struct perf_sample *sample)
837 {
838 if (dump_trace)
839 perf_event__fprintf_ksymbol(event, stdout);
840
841 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
842 return machine__process_ksymbol_unregister(machine, event,
843 sample);
844 return machine__process_ksymbol_register(machine, event, sample);
845 }
846
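/*
 * PERF_RECORD_TEXT_POKE: write the new instruction bytes into the dso data
 * cache of the affected kernel map, so later reads of the dso data see the
 * patched text.
 */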
847 int machine__process_text_poke(struct machine *machine, union perf_event *event,
848 struct perf_sample *sample __maybe_unused)
849 {
850 struct map *map = maps__find(&machine->kmaps, event->text_poke.addr);
851 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
852
853 if (dump_trace)
854 perf_event__fprintf_text_poke(event, machine, stdout);
855
856 if (!event->text_poke.new_len)
857 return 0;
858
859 if (cpumode != PERF_RECORD_MISC_KERNEL) {
860 pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
861 return 0;
862 }
863
864 if (map && map->dso) {
865 u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
866 int ret;
867
868 /*
869 * Kernel maps might be changed when loading symbols so loading
870 * must be done prior to using kernel maps.
871 */
872 map__load(map);
873 ret = dso__data_write_cache_addr(map->dso, map, machine,
874 event->text_poke.addr,
875 new_bytes,
876 event->text_poke.new_len);
877 if (ret != event->text_poke.new_len)
878 pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
879 event->text_poke.addr);
880 } else {
881 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
882 event->text_poke.addr);
883 }
884
885 return 0;
886 }
887
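/*
 * Create a map for a kernel module: parse the module name out of the path,
 * find or create its dso, and insert a map starting at 'start' into kmaps.
 */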
888 static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
889 const char *filename)
890 {
891 struct map *map = NULL;
892 struct kmod_path m;
893 struct dso *dso;
894
895 if (kmod_path__parse_name(&m, filename))
896 return NULL;
897
898 dso = machine__findnew_module_dso(machine, &m, filename);
899 if (dso == NULL)
900 goto out;
901
902 map = map__new2(start, dso);
903 if (map == NULL)
904 goto out;
905
906 maps__insert(&machine->kmaps, map);
907
908 /* Put the map here because maps__insert already got it */
909 map__put(map);
910 out:
911 /* put the dso here, corresponding to machine__findnew_module_dso */
912 dso__put(dso);
913 zfree(&m.name);
914 return map;
915 }
916
917 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
918 {
919 struct rb_node *nd;
920 size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
921
922 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
923 struct machine *pos = rb_entry(nd, struct machine, rb_node);
924 ret += __dsos__fprintf(&pos->dsos.head, fp);
925 }
926
927 return ret;
928 }
929
930 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
931 bool (skip)(struct dso *dso, int parm), int parm)
932 {
933 return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
934 }
935
936 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
937 bool (skip)(struct dso *dso, int parm), int parm)
938 {
939 struct rb_node *nd;
940 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
941
942 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
943 struct machine *pos = rb_entry(nd, struct machine, rb_node);
944 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
945 }
946 return ret;
947 }
948
949 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
950 {
951 int i;
952 size_t printed = 0;
953 struct dso *kdso = machine__kernel_dso(machine);
954
955 if (kdso->has_build_id) {
956 char filename[PATH_MAX];
957 if (dso__build_id_filename(kdso, filename, sizeof(filename),
958 false))
959 printed += fprintf(fp, "[0] %s\n", filename);
960 }
961
962 for (i = 0; i < vmlinux_path__nr_entries; ++i)
963 printed += fprintf(fp, "[%d] %s\n",
964 i + kdso->has_build_id, vmlinux_path[i]);
965
966 return printed;
967 }
968
969 size_t machine__fprintf(struct machine *machine, FILE *fp)
970 {
971 struct rb_node *nd;
972 size_t ret;
973 int i;
974
975 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
976 struct threads *threads = &machine->threads[i];
977
978 down_read(&threads->lock);
979
980 ret = fprintf(fp, "Threads: %u\n", threads->nr);
981
982 for (nd = rb_first_cached(&threads->entries); nd;
983 nd = rb_next(nd)) {
984 struct thread *pos = rb_entry(nd, struct thread, rb_node);
985
986 ret += thread__fprintf(pos, fp);
987 }
988
989 up_read(&threads->lock);
990 }
991 return ret;
992 }
993
994 static struct dso *machine__get_kernel(struct machine *machine)
995 {
996 const char *vmlinux_name = machine->mmap_name;
997 struct dso *kernel;
998
999 if (machine__is_host(machine)) {
1000 if (symbol_conf.vmlinux_name)
1001 vmlinux_name = symbol_conf.vmlinux_name;
1002
1003 kernel = machine__findnew_kernel(machine, vmlinux_name,
1004 "[kernel]", DSO_SPACE__KERNEL);
1005 } else {
1006 if (symbol_conf.default_guest_vmlinux_name)
1007 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
1008
1009 kernel = machine__findnew_kernel(machine, vmlinux_name,
1010 "[guest.kernel]",
1011 DSO_SPACE__KERNEL_GUEST);
1012 }
1013
1014 if (kernel != NULL && (!kernel->has_build_id))
1015 dso__read_running_kernel_build_id(kernel, machine);
1016
1017 return kernel;
1018 }
1019
1020 struct process_args {
1021 u64 start;
1022 };
1023
1024 void machine__get_kallsyms_filename(struct machine *machine, char *buf,
1025 size_t bufsz)
1026 {
1027 if (machine__is_default_guest(machine))
1028 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
1029 else
1030 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
1031 }
1032
1033 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
1034
1035 /* Figure out the start address of kernel map from /proc/kallsyms.
1036 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
1037 * symbol_name if it's not that important.
1038 */
1039 static int machine__get_running_kernel_start(struct machine *machine,
1040 const char **symbol_name,
1041 u64 *start, u64 *end)
1042 {
1043 char filename[PATH_MAX];
1044 int i, err = -1;
1045 const char *name;
1046 u64 addr = 0;
1047
1048 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
1049
1050 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1051 return 0;
1052
1053 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
1054 err = kallsyms__get_function_start(filename, name, &addr);
1055 if (!err)
1056 break;
1057 }
1058
1059 if (err)
1060 return -1;
1061
1062 if (symbol_name)
1063 *symbol_name = name;
1064
1065 *start = addr;
1066
1067 err = kallsyms__get_function_start(filename, "_etext", &addr);
1068 if (!err)
1069 *end = addr;
1070
1071 return 0;
1072 }
1073
1074 int machine__create_extra_kernel_map(struct machine *machine,
1075 struct dso *kernel,
1076 struct extra_kernel_map *xm)
1077 {
1078 struct kmap *kmap;
1079 struct map *map;
1080
1081 map = map__new2(xm->start, kernel);
1082 if (!map)
1083 return -1;
1084
1085 map->end = xm->end;
1086 map->pgoff = xm->pgoff;
1087
1088 kmap = map__kmap(map);
1089
1090 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1091
1092 maps__insert(&machine->kmaps, map);
1093
1094 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1095 kmap->name, map->start, map->end);
1096
1097 map__put(map);
1098
1099 return 0;
1100 }
1101
1102 static u64 find_entry_trampoline(struct dso *dso)
1103 {
1104 /* Duplicates are removed so lookup all aliases */
1105 const char *syms[] = {
1106 "_entry_trampoline",
1107 "__entry_trampoline_start",
1108 "entry_SYSCALL_64_trampoline",
1109 };
1110 struct symbol *sym = dso__first_symbol(dso);
1111 unsigned int i;
1112
1113 for (; sym; sym = dso__next_symbol(sym)) {
1114 if (sym->binding != STB_GLOBAL)
1115 continue;
1116 for (i = 0; i < ARRAY_SIZE(syms); i++) {
1117 if (!strcmp(sym->name, syms[i]))
1118 return sym->start;
1119 }
1120 }
1121
1122 return 0;
1123 }
1124
1125 /*
1126 * These values can be used for kernels that do not have symbols for the entry
1127 * trampolines in kallsyms.
1128 */
1129 #define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
1130 #define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
1131 #define X86_64_ENTRY_TRAMPOLINE 0x6000
1132
1133 /* Map x86_64 PTI entry trampolines */
1134 int machine__map_x86_64_entry_trampolines(struct machine *machine,
1135 struct dso *kernel)
1136 {
1137 struct maps *kmaps = &machine->kmaps;
1138 int nr_cpus_avail, cpu;
1139 bool found = false;
1140 struct map *map;
1141 u64 pgoff;
1142
1143 /*
1144 * In the vmlinux case, pgoff is a virtual address which must now be
1145 * mapped to a vmlinux offset.
1146 */
1147 maps__for_each_entry(kmaps, map) {
1148 struct kmap *kmap = __map__kmap(map);
1149 struct map *dest_map;
1150
1151 if (!kmap || !is_entry_trampoline(kmap->name))
1152 continue;
1153
1154 dest_map = maps__find(kmaps, map->pgoff);
1155 if (dest_map != map)
1156 map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
1157 found = true;
1158 }
1159 if (found || machine->trampolines_mapped)
1160 return 0;
1161
1162 pgoff = find_entry_trampoline(kernel);
1163 if (!pgoff)
1164 return 0;
1165
1166 nr_cpus_avail = machine__nr_cpus_avail(machine);
1167
1168 /* Add a 1 page map for each CPU's entry trampoline */
1169 for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1170 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1171 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1172 X86_64_ENTRY_TRAMPOLINE;
1173 struct extra_kernel_map xm = {
1174 .start = va,
1175 .end = va + page_size,
1176 .pgoff = pgoff,
1177 };
1178
1179 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1180
1181 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1182 return -1;
1183 }
1184
1185 machine->trampolines_mapped = nr_cpus_avail;
1186
1187 return 0;
1188 }
1189
1190 int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1191 struct dso *kernel __maybe_unused)
1192 {
1193 return 0;
1194 }
1195
1196 static int
1197 __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1198 {
1199 /* In case the kernel map is being renewed, destroy the previous one */
1200 machine__destroy_kernel_maps(machine);
1201
1202 machine->vmlinux_map = map__new2(0, kernel);
1203 if (machine->vmlinux_map == NULL)
1204 return -1;
1205
1206 machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
1207 maps__insert(&machine->kmaps, machine->vmlinux_map);
1208 return 0;
1209 }
1210
1211 void machine__destroy_kernel_maps(struct machine *machine)
1212 {
1213 struct kmap *kmap;
1214 struct map *map = machine__kernel_map(machine);
1215
1216 if (map == NULL)
1217 return;
1218
1219 kmap = map__kmap(map);
1220 maps__remove(&machine->kmaps, map);
1221 if (kmap && kmap->ref_reloc_sym) {
1222 zfree((char **)&kmap->ref_reloc_sym->name);
1223 zfree(&kmap->ref_reloc_sym);
1224 }
1225
1226 map__zput(machine->vmlinux_map);
1227 }
1228
1229 int machines__create_guest_kernel_maps(struct machines *machines)
1230 {
1231 int ret = 0;
1232 struct dirent **namelist = NULL;
1233 int i, items = 0;
1234 char path[PATH_MAX];
1235 pid_t pid;
1236 char *endp;
1237
1238 if (symbol_conf.default_guest_vmlinux_name ||
1239 symbol_conf.default_guest_modules ||
1240 symbol_conf.default_guest_kallsyms) {
1241 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1242 }
1243
1244 if (symbol_conf.guestmount) {
1245 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1246 if (items <= 0)
1247 return -ENOENT;
1248 for (i = 0; i < items; i++) {
1249 if (!isdigit(namelist[i]->d_name[0])) {
1250 /* Filter out . and .. */
1251 continue;
1252 }
1253 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1254 if ((*endp != '\0') ||
1255 (endp == namelist[i]->d_name) ||
1256 (errno == ERANGE)) {
1257 pr_debug("invalid directory (%s). Skipping.\n",
1258 namelist[i]->d_name);
1259 continue;
1260 }
1261 sprintf(path, "%s/%s/proc/kallsyms",
1262 symbol_conf.guestmount,
1263 namelist[i]->d_name);
1264 ret = access(path, R_OK);
1265 if (ret) {
1266 pr_debug("Can't access file %s\n", path);
1267 goto failure;
1268 }
1269 machines__create_kernel_maps(machines, pid);
1270 }
1271 failure:
1272 free(namelist);
1273 }
1274
1275 return ret;
1276 }
1277
1278 void machines__destroy_kernel_maps(struct machines *machines)
1279 {
1280 struct rb_node *next = rb_first_cached(&machines->guests);
1281
1282 machine__destroy_kernel_maps(&machines->host);
1283
1284 while (next) {
1285 struct machine *pos = rb_entry(next, struct machine, rb_node);
1286
1287 next = rb_next(&pos->rb_node);
1288 rb_erase_cached(&pos->rb_node, &machines->guests);
1289 machine__delete(pos);
1290 }
1291 }
1292
1293 int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1294 {
1295 struct machine *machine = machines__findnew(machines, pid);
1296
1297 if (machine == NULL)
1298 return -1;
1299
1300 return machine__create_kernel_maps(machine);
1301 }
1302
1303 int machine__load_kallsyms(struct machine *machine, const char *filename)
1304 {
1305 struct map *map = machine__kernel_map(machine);
1306 int ret = __dso__load_kallsyms(map->dso, filename, map, true);
1307
1308 if (ret > 0) {
1309 dso__set_loaded(map->dso);
1310 /*
1311 * Since /proc/kallsyms will have multiple sections for the
1312 * kernel, with modules between them, fixup the end of all
1313 * sections.
1314 */
1315 maps__fixup_end(&machine->kmaps);
1316 }
1317
1318 return ret;
1319 }
1320
1321 int machine__load_vmlinux_path(struct machine *machine)
1322 {
1323 struct map *map = machine__kernel_map(machine);
1324 int ret = dso__load_vmlinux_path(map->dso, map);
1325
1326 if (ret > 0)
1327 dso__set_loaded(map->dso);
1328
1329 return ret;
1330 }
1331
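/*
 * Read <root_dir>/proc/version and return a strdup()ed copy of the release
 * string that follows the "Linux version " prefix.
 */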
1332 static char *get_kernel_version(const char *root_dir)
1333 {
1334 char version[PATH_MAX];
1335 FILE *file;
1336 char *name, *tmp;
1337 const char *prefix = "Linux version ";
1338
1339 sprintf(version, "%s/proc/version", root_dir);
1340 file = fopen(version, "r");
1341 if (!file)
1342 return NULL;
1343
1344 tmp = fgets(version, sizeof(version), file);
1345 fclose(file);
1346 if (!tmp)
1347 return NULL;
1348
1349 name = strstr(version, prefix);
1350 if (!name)
1351 return NULL;
1352 name += strlen(prefix);
1353 tmp = strchr(name, ' ');
1354 if (tmp)
1355 *tmp = '\0';
1356
1357 return strdup(name);
1358 }
1359
1360 static bool is_kmod_dso(struct dso *dso)
1361 {
1362 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1363 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1364 }
1365
1366 static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1367 {
1368 char *long_name;
1369 struct map *map = maps__find_by_name(maps, m->name);
1370
1371 if (map == NULL)
1372 return 0;
1373
1374 long_name = strdup(path);
1375 if (long_name == NULL)
1376 return -ENOMEM;
1377
1378 dso__set_long_name(map->dso, long_name, true);
1379 dso__kernel_module_get_build_id(map->dso, "");
1380
1381 /*
1382 * The full name could reveal kmod compression, so
1383 * we need to update the symtab_type if needed.
1384 */
1385 if (m->comp && is_kmod_dso(map->dso)) {
1386 map->dso->symtab_type++;
1387 map->dso->comp = m->comp;
1388 }
1389
1390 return 0;
1391 }
1392
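/*
 * Recursively walk a /lib/modules/<version> directory and, for each module
 * file found, point the corresponding dso at its on-disk path via
 * maps__set_module_path() above.
 */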
1393 static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1394 {
1395 struct dirent *dent;
1396 DIR *dir = opendir(dir_name);
1397 int ret = 0;
1398
1399 if (!dir) {
1400 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1401 return -1;
1402 }
1403
1404 while ((dent = readdir(dir)) != NULL) {
1405 char path[PATH_MAX];
1406 struct stat st;
1407
1408 /* sshfs might return bad dent->d_type, so we have to stat */
1409 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
1410 if (stat(path, &st))
1411 continue;
1412
1413 if (S_ISDIR(st.st_mode)) {
1414 if (!strcmp(dent->d_name, ".") ||
1415 !strcmp(dent->d_name, ".."))
1416 continue;
1417
1418 /* Do not follow top-level source and build symlinks */
1419 if (depth == 0) {
1420 if (!strcmp(dent->d_name, "source") ||
1421 !strcmp(dent->d_name, "build"))
1422 continue;
1423 }
1424
1425 ret = maps__set_modules_path_dir(maps, path, depth + 1);
1426 if (ret < 0)
1427 goto out;
1428 } else {
1429 struct kmod_path m;
1430
1431 ret = kmod_path__parse_name(&m, dent->d_name);
1432 if (ret)
1433 goto out;
1434
1435 if (m.kmod)
1436 ret = maps__set_module_path(maps, path, &m);
1437
1438 zfree(&m.name);
1439
1440 if (ret)
1441 goto out;
1442 }
1443 }
1444
1445 out:
1446 closedir(dir);
1447 return ret;
1448 }
1449
1450 static int machine__set_modules_path(struct machine *machine)
1451 {
1452 char *version;
1453 char modules_path[PATH_MAX];
1454
1455 version = get_kernel_version(machine->root_dir);
1456 if (!version)
1457 return -1;
1458
1459 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1460 machine->root_dir, version);
1461 free(version);
1462
1463 return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
1464 }
1465 int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1466 u64 *size __maybe_unused,
1467 const char *name __maybe_unused)
1468 {
1469 return 0;
1470 }
1471
1472 static int machine__create_module(void *arg, const char *name, u64 start,
1473 u64 size)
1474 {
1475 struct machine *machine = arg;
1476 struct map *map;
1477
1478 if (arch__fix_module_text_start(&start, &size, name) < 0)
1479 return -1;
1480
1481 map = machine__addnew_module_map(machine, start, name);
1482 if (map == NULL)
1483 return -1;
1484 map->end = start + size;
1485
1486 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1487
1488 return 0;
1489 }
1490
1491 static int machine__create_modules(struct machine *machine)
1492 {
1493 const char *modules;
1494 char path[PATH_MAX];
1495
1496 if (machine__is_default_guest(machine)) {
1497 modules = symbol_conf.default_guest_modules;
1498 } else {
1499 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1500 modules = path;
1501 }
1502
1503 if (symbol__restricted_filename(modules, "/proc/modules"))
1504 return -1;
1505
1506 if (modules__parse(modules, machine, machine__create_module))
1507 return -1;
1508
1509 if (!machine__set_modules_path(machine))
1510 return 0;
1511
1512 pr_debug("Problems setting modules path maps, continuing anyway...\n");
1513
1514 return 0;
1515 }
1516
1517 static void machine__set_kernel_mmap(struct machine *machine,
1518 u64 start, u64 end)
1519 {
1520 machine->vmlinux_map->start = start;
1521 machine->vmlinux_map->end = end;
1522 /*
1523 * Be a bit paranoid here, some perf.data files came with
1524 * a zero-sized synthesized MMAP event for the kernel.
1525 */
1526 if (start == 0 && end == 0)
1527 machine->vmlinux_map->end = ~0ULL;
1528 }
1529
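/*
 * Changing the kernel map's start address changes its position in the maps
 * rbtree, so take it out, adjust the addresses and re-insert it.
 */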
1530 static void machine__update_kernel_mmap(struct machine *machine,
1531 u64 start, u64 end)
1532 {
1533 struct map *map = machine__kernel_map(machine);
1534
1535 map__get(map);
1536 maps__remove(&machine->kmaps, map);
1537
1538 machine__set_kernel_mmap(machine, start, end);
1539
1540 maps__insert(&machine->kmaps, map);
1541 map__put(map);
1542 }
1543
1544 int machine__create_kernel_maps(struct machine *machine)
1545 {
1546 struct dso *kernel = machine__get_kernel(machine);
1547 const char *name = NULL;
1548 struct map *map;
1549 u64 start = 0, end = ~0ULL;
1550 int ret;
1551
1552 if (kernel == NULL)
1553 return -1;
1554
1555 ret = __machine__create_kernel_maps(machine, kernel);
1556 if (ret < 0)
1557 goto out_put;
1558
1559 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1560 if (machine__is_host(machine))
1561 pr_debug("Problems creating module maps, "
1562 "continuing anyway...\n");
1563 else
1564 pr_debug("Problems creating module maps for guest %d, "
1565 "continuing anyway...\n", machine->pid);
1566 }
1567
1568 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1569 if (name &&
1570 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1571 machine__destroy_kernel_maps(machine);
1572 ret = -1;
1573 goto out_put;
1574 }
1575
1576 /*
1577 * we have a real start address now, so re-order the kmaps;
1578 * assume it's the last in the kmaps
1579 */
1580 machine__update_kernel_mmap(machine, start, end);
1581 }
1582
1583 if (machine__create_extra_kernel_maps(machine, kernel))
1584 pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1585
1586 if (end == ~0ULL) {
1587 /* update end address of the kernel map using adjacent module address */
1588 map = map__next(machine__kernel_map(machine));
1589 if (map)
1590 machine__set_kernel_mmap(machine, start, map->start);
1591 }
1592
1593 out_put:
1594 dso__put(kernel);
1595 return ret;
1596 }
1597
1598 static bool machine__uses_kcore(struct machine *machine)
1599 {
1600 struct dso *dso;
1601
1602 list_for_each_entry(dso, &machine->dsos.head, node) {
1603 if (dso__is_kcore(dso))
1604 return true;
1605 }
1606
1607 return false;
1608 }
1609
1610 static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1611 struct extra_kernel_map *xm)
1612 {
1613 return machine__is(machine, "x86_64") &&
1614 is_entry_trampoline(xm->name);
1615 }
1616
1617 static int machine__process_extra_kernel_map(struct machine *machine,
1618 struct extra_kernel_map *xm)
1619 {
1620 struct dso *kernel = machine__kernel_dso(machine);
1621
1622 if (kernel == NULL)
1623 return -1;
1624
1625 return machine__create_extra_kernel_map(machine, kernel, xm);
1626 }
1627
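/*
 * Handle a kernel-space mmap: module and other '[...]' names get a module
 * map, the main kernel mmap (re)creates the vmlinux map, updates its address
 * range and sets the ref_reloc_sym, and x86_64 entry trampolines are handled
 * as extra kernel maps.
 */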
1628 static int machine__process_kernel_mmap_event(struct machine *machine,
1629 struct extra_kernel_map *xm,
1630 struct build_id *bid)
1631 {
1632 struct map *map;
1633 enum dso_space_type dso_space;
1634 bool is_kernel_mmap;
1635
1636 /* If we have maps from kcore then we do not need or want any others */
1637 if (machine__uses_kcore(machine))
1638 return 0;
1639
1640 if (machine__is_host(machine))
1641 dso_space = DSO_SPACE__KERNEL;
1642 else
1643 dso_space = DSO_SPACE__KERNEL_GUEST;
1644
1645 is_kernel_mmap = memcmp(xm->name, machine->mmap_name,
1646 strlen(machine->mmap_name) - 1) == 0;
1647 if (xm->name[0] == '/' ||
1648 (!is_kernel_mmap && xm->name[0] == '[')) {
1649 map = machine__addnew_module_map(machine, xm->start,
1650 xm->name);
1651 if (map == NULL)
1652 goto out_problem;
1653
1654 map->end = map->start + xm->end - xm->start;
1655
1656 if (build_id__is_defined(bid))
1657 dso__set_build_id(map->dso, bid);
1658
1659 } else if (is_kernel_mmap) {
1660 const char *symbol_name = (xm->name + strlen(machine->mmap_name));
1661 /*
1662 * Should be there already, from the build-id table in
1663 * the header.
1664 */
1665 struct dso *kernel = NULL;
1666 struct dso *dso;
1667
1668 down_read(&machine->dsos.lock);
1669
1670 list_for_each_entry(dso, &machine->dsos.head, node) {
1671
1672 /*
1673 * The cpumode passed to is_kernel_module is not the
1674 * cpumode of *this* event. If we insist on passing
1675 * correct cpumode to is_kernel_module, we should
1676 * record the cpumode when we adding this dso to the
1677 * linked list.
1678 *
1679 * However we don't really need passing correct
1680 * cpumode. We know the correct cpumode must be kernel
1681 * mode (if not, we should not link it onto kernel_dsos
1682 * list).
1683 *
1684 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1685 * is_kernel_module() treats it as a kernel cpumode.
1686 */
1687
1688 if (!dso->kernel ||
1689 is_kernel_module(dso->long_name,
1690 PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1691 continue;
1692
1693
1694 kernel = dso;
1695 break;
1696 }
1697
1698 up_read(&machine->dsos.lock);
1699
1700 if (kernel == NULL)
1701 kernel = machine__findnew_dso(machine, machine->mmap_name);
1702 if (kernel == NULL)
1703 goto out_problem;
1704
1705 kernel->kernel = dso_space;
1706 if (__machine__create_kernel_maps(machine, kernel) < 0) {
1707 dso__put(kernel);
1708 goto out_problem;
1709 }
1710
1711 if (strstr(kernel->long_name, "vmlinux"))
1712 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1713
1714 machine__update_kernel_mmap(machine, xm->start, xm->end);
1715
1716 if (build_id__is_defined(bid))
1717 dso__set_build_id(kernel, bid);
1718
1719 /*
1720 * Avoid using a zero address (kptr_restrict) for the ref reloc
1721 * symbol. Effectively having zero here means that at record
1722 * time /proc/sys/kernel/kptr_restrict was non zero.
1723 */
1724 if (xm->pgoff != 0) {
1725 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1726 symbol_name,
1727 xm->pgoff);
1728 }
1729
1730 if (machine__is_default_guest(machine)) {
1731 /*
1732 * preload dso of guest kernel and modules
1733 */
1734 dso__load(kernel, machine__kernel_map(machine));
1735 }
1736 } else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
1737 return machine__process_extra_kernel_map(machine, xm);
1738 }
1739 return 0;
1740 out_problem:
1741 return -1;
1742 }
1743
1744 int machine__process_mmap2_event(struct machine *machine,
1745 union perf_event *event,
1746 struct perf_sample *sample)
1747 {
1748 struct thread *thread;
1749 struct map *map;
1750 struct dso_id dso_id = {
1751 .maj = event->mmap2.maj,
1752 .min = event->mmap2.min,
1753 .ino = event->mmap2.ino,
1754 .ino_generation = event->mmap2.ino_generation,
1755 };
1756 struct build_id __bid, *bid = NULL;
1757 int ret = 0;
1758
1759 if (dump_trace)
1760 perf_event__fprintf_mmap2(event, stdout);
1761
1762 if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1763 bid = &__bid;
1764 build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1765 }
1766
1767 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1768 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1769 struct extra_kernel_map xm = {
1770 .start = event->mmap2.start,
1771 .end = event->mmap2.start + event->mmap2.len,
1772 .pgoff = event->mmap2.pgoff,
1773 };
1774
1775 strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1776 ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1777 if (ret < 0)
1778 goto out_problem;
1779 return 0;
1780 }
1781
1782 thread = machine__findnew_thread(machine, event->mmap2.pid,
1783 event->mmap2.tid);
1784 if (thread == NULL)
1785 goto out_problem;
1786
1787 map = map__new(machine, event->mmap2.start,
1788 event->mmap2.len, event->mmap2.pgoff,
1789 &dso_id, event->mmap2.prot,
1790 event->mmap2.flags, bid,
1791 event->mmap2.filename, thread);
1792
1793 if (map == NULL)
1794 goto out_problem_map;
1795
1796 ret = thread__insert_map(thread, map);
1797 if (ret)
1798 goto out_problem_insert;
1799
1800 thread__put(thread);
1801 map__put(map);
1802 return 0;
1803
1804 out_problem_insert:
1805 map__put(map);
1806 out_problem_map:
1807 thread__put(thread);
1808 out_problem:
1809 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1810 return 0;
1811 }
1812
1813 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1814 struct perf_sample *sample)
1815 {
1816 struct thread *thread;
1817 struct map *map;
1818 u32 prot = 0;
1819 int ret = 0;
1820
1821 if (dump_trace)
1822 perf_event__fprintf_mmap(event, stdout);
1823
1824 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1825 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1826 struct extra_kernel_map xm = {
1827 .start = event->mmap.start,
1828 .end = event->mmap.start + event->mmap.len,
1829 .pgoff = event->mmap.pgoff,
1830 };
1831
1832 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1833 ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
1834 if (ret < 0)
1835 goto out_problem;
1836 return 0;
1837 }
1838
1839 thread = machine__findnew_thread(machine, event->mmap.pid,
1840 event->mmap.tid);
1841 if (thread == NULL)
1842 goto out_problem;
1843
1844 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1845 prot = PROT_EXEC;
1846
1847 map = map__new(machine, event->mmap.start,
1848 event->mmap.len, event->mmap.pgoff,
1849 NULL, prot, 0, NULL, event->mmap.filename, thread);
1850
1851 if (map == NULL)
1852 goto out_problem_map;
1853
1854 ret = thread__insert_map(thread, map);
1855 if (ret)
1856 goto out_problem_insert;
1857
1858 thread__put(thread);
1859 map__put(map);
1860 return 0;
1861
1862 out_problem_insert:
1863 map__put(map);
1864 out_problem_map:
1865 thread__put(thread);
1866 out_problem:
1867 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1868 return 0;
1869 }
1870
1871 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
1872 {
1873 struct threads *threads = machine__threads(machine, th->tid);
1874
1875 if (threads->last_match == th)
1876 threads__set_last_match(threads, NULL);
1877
1878 if (lock)
1879 down_write(&threads->lock);
1880
1881 BUG_ON(refcount_read(&th->refcnt) == 0);
1882
1883 rb_erase_cached(&th->rb_node, &threads->entries);
1884 RB_CLEAR_NODE(&th->rb_node);
1885 --threads->nr;
1886 /*
1887 * Move it first to the dead_threads list, then drop the reference;
1888 * if this is the last reference, the thread__delete() destructor
1889 * will be called and we will remove it from the dead_threads list.
1890 */
1891 list_add_tail(&th->node, &threads->dead);
1892
1893 /*
1894 * We need to do the put here because if this is the last refcount,
1895 * then we will be touching the threads->dead head when removing the
1896 * thread.
1897 */
1898 thread__put(th);
1899
1900 if (lock)
1901 up_write(&threads->lock);
1902 }
1903
1904 void machine__remove_thread(struct machine *machine, struct thread *th)
1905 {
1906 return __machine__remove_thread(machine, th, true);
1907 }
1908
1909 int machine__process_fork_event(struct machine *machine, union perf_event *event,
1910 struct perf_sample *sample)
1911 {
1912 struct thread *thread = machine__find_thread(machine,
1913 event->fork.pid,
1914 event->fork.tid);
1915 struct thread *parent = machine__findnew_thread(machine,
1916 event->fork.ppid,
1917 event->fork.ptid);
1918 bool do_maps_clone = true;
1919 int err = 0;
1920
1921 if (dump_trace)
1922 perf_event__fprintf_task(event, stdout);
1923
1924 /*
1925 * There may be an existing thread that is not actually the parent,
1926 * either because we are processing events out of order, or because the
1927 * (fork) event that would have removed the thread was lost. Assume the
1928 * latter case and continue on as best we can.
1929 */
1930 if (parent->pid_ != (pid_t)event->fork.ppid) {
1931 dump_printf("removing erroneous parent thread %d/%d\n",
1932 parent->pid_, parent->tid);
1933 machine__remove_thread(machine, parent);
1934 thread__put(parent);
1935 parent = machine__findnew_thread(machine, event->fork.ppid,
1936 event->fork.ptid);
1937 }
1938
1939 /* if a thread currently exists for the thread id remove it */
1940 if (thread != NULL) {
1941 machine__remove_thread(machine, thread);
1942 thread__put(thread);
1943 }
1944
1945 thread = machine__findnew_thread(machine, event->fork.pid,
1946 event->fork.tid);
1947 /*
1948 * When synthesizing FORK events, we are trying to create thread
1949 * objects for the already running tasks on the machine.
1950 *
1951 * Normally, for a kernel FORK event, we want to clone the parent's
1952 * maps because that is what the kernel just did.
1953 *
1954 * But when synthesizing, this should not be done. If we do, we end up
1955 * with overlapping maps as we process the synthesized MMAP2 events that
1956 * get delivered shortly thereafter.
1957 *
1958 * Use the FORK event misc flags in an internal way to signal this
1959 * situation, so we can elide the map clone when appropriate.
1960 */
1961 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1962 do_maps_clone = false;
1963
1964 if (thread == NULL || parent == NULL ||
1965 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
1966 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1967 err = -1;
1968 }
1969 thread__put(thread);
1970 thread__put(parent);
1971
1972 return err;
1973 }
1974
1975 int machine__process_exit_event(struct machine *machine, union perf_event *event,
1976 struct perf_sample *sample __maybe_unused)
1977 {
1978 struct thread *thread = machine__find_thread(machine,
1979 event->fork.pid,
1980 event->fork.tid);
1981
1982 if (dump_trace)
1983 perf_event__fprintf_task(event, stdout);
1984
1985 if (thread != NULL) {
1986 thread__exited(thread);
1987 thread__put(thread);
1988 }
1989
1990 return 0;
1991 }
1992
1993 int machine__process_event(struct machine *machine, union perf_event *event,
1994 struct perf_sample *sample)
1995 {
1996 int ret;
1997
1998 switch (event->header.type) {
1999 case PERF_RECORD_COMM:
2000 ret = machine__process_comm_event(machine, event, sample); break;
2001 case PERF_RECORD_MMAP:
2002 ret = machine__process_mmap_event(machine, event, sample); break;
2003 case PERF_RECORD_NAMESPACES:
2004 ret = machine__process_namespaces_event(machine, event, sample); break;
2005 case PERF_RECORD_CGROUP:
2006 ret = machine__process_cgroup_event(machine, event, sample); break;
2007 case PERF_RECORD_MMAP2:
2008 ret = machine__process_mmap2_event(machine, event, sample); break;
2009 case PERF_RECORD_FORK:
2010 ret = machine__process_fork_event(machine, event, sample); break;
2011 case PERF_RECORD_EXIT:
2012 ret = machine__process_exit_event(machine, event, sample); break;
2013 case PERF_RECORD_LOST:
2014 ret = machine__process_lost_event(machine, event, sample); break;
2015 case PERF_RECORD_AUX:
2016 ret = machine__process_aux_event(machine, event); break;
2017 case PERF_RECORD_ITRACE_START:
2018 ret = machine__process_itrace_start_event(machine, event); break;
2019 case PERF_RECORD_LOST_SAMPLES:
2020 ret = machine__process_lost_samples_event(machine, event, sample); break;
2021 case PERF_RECORD_SWITCH:
2022 case PERF_RECORD_SWITCH_CPU_WIDE:
2023 ret = machine__process_switch_event(machine, event); break;
2024 case PERF_RECORD_KSYMBOL:
2025 ret = machine__process_ksymbol(machine, event, sample); break;
2026 case PERF_RECORD_BPF_EVENT:
2027 ret = machine__process_bpf(machine, event, sample); break;
2028 case PERF_RECORD_TEXT_POKE:
2029 ret = machine__process_text_poke(machine, event, sample); break;
2030 default:
2031 ret = -1;
2032 break;
2033 }
2034
2035 return ret;
2036 }
2037
2038 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
2039 {
2040 if (!regexec(regex, sym->name, 0, NULL, 0))
2041 return true;
2042 return false;
2043 }
2044
2045 static void ip__resolve_ams(struct thread *thread,
2046 struct addr_map_symbol *ams,
2047 u64 ip)
2048 {
2049 struct addr_location al;
2050
2051 memset(&al, 0, sizeof(al));
2052 /*
2053 	 * We cannot use the header.misc hint to determine whether a
2054 	 * branch stack address is user, kernel, guest or hypervisor space.
2055 	 * Branches may straddle the kernel/user/hypervisor boundaries.
2056 	 * Thus, we have to try each cpumode in turn until we find a match;
2057 	 * otherwise the symbol remains unknown.
2058 */
2059 thread__find_cpumode_addr_location(thread, ip, &al);
2060
2061 ams->addr = ip;
2062 ams->al_addr = al.addr;
2063 ams->ms.maps = al.maps;
2064 ams->ms.sym = al.sym;
2065 ams->ms.map = al.map;
2066 ams->phys_addr = 0;
2067 ams->data_page_size = 0;
2068 }
2069
2070 static void ip__resolve_data(struct thread *thread,
2071 u8 m, struct addr_map_symbol *ams,
2072 u64 addr, u64 phys_addr, u64 daddr_page_size)
2073 {
2074 struct addr_location al;
2075
2076 memset(&al, 0, sizeof(al));
2077
2078 thread__find_symbol(thread, m, addr, &al);
2079
2080 ams->addr = addr;
2081 ams->al_addr = al.addr;
2082 ams->ms.maps = al.maps;
2083 ams->ms.sym = al.sym;
2084 ams->ms.map = al.map;
2085 ams->phys_addr = phys_addr;
2086 ams->data_page_size = daddr_page_size;
2087 }
2088
2089 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2090 struct addr_location *al)
2091 {
2092 struct mem_info *mi = mem_info__new();
2093
2094 if (!mi)
2095 return NULL;
2096
2097 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2098 ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2099 sample->addr, sample->phys_addr,
2100 sample->data_page_size);
2101 mi->data_src.val = sample->data_src;
2102
2103 return mi;
2104 }
2105
2106 static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2107 {
2108 struct map *map = ms->map;
2109 char *srcline = NULL;
2110
2111 if (!map || callchain_param.key == CCKEY_FUNCTION)
2112 return srcline;
2113
2114 srcline = srcline__tree_find(&map->dso->srclines, ip);
2115 if (!srcline) {
2116 bool show_sym = false;
2117 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2118
2119 srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
2120 ms->sym, show_sym, show_addr, ip);
2121 srcline__tree_insert(&map->dso->srclines, ip, srcline);
2122 }
2123
2124 return srcline;
2125 }
2126
2127 struct iterations {
2128 int nr_loop_iter;
2129 u64 cycles;
2130 };
2131
2132 static int add_callchain_ip(struct thread *thread,
2133 struct callchain_cursor *cursor,
2134 struct symbol **parent,
2135 struct addr_location *root_al,
2136 u8 *cpumode,
2137 u64 ip,
2138 bool branch,
2139 struct branch_flags *flags,
2140 struct iterations *iter,
2141 u64 branch_from)
2142 {
2143 struct map_symbol ms;
2144 struct addr_location al;
2145 int nr_loop_iter = 0;
2146 u64 iter_cycles = 0;
2147 const char *srcline = NULL;
2148
2149 al.filtered = 0;
2150 al.sym = NULL;
2151 if (!cpumode) {
2152 thread__find_cpumode_addr_location(thread, ip, &al);
2153 } else {
2154 if (ip >= PERF_CONTEXT_MAX) {
2155 switch (ip) {
2156 case PERF_CONTEXT_HV:
2157 *cpumode = PERF_RECORD_MISC_HYPERVISOR;
2158 break;
2159 case PERF_CONTEXT_KERNEL:
2160 *cpumode = PERF_RECORD_MISC_KERNEL;
2161 break;
2162 case PERF_CONTEXT_USER:
2163 *cpumode = PERF_RECORD_MISC_USER;
2164 break;
2165 default:
2166 pr_debug("invalid callchain context: "
2167 "%"PRId64"\n", (s64) ip);
2168 /*
2169 * It seems the callchain is corrupted.
2170 * Discard all.
2171 */
2172 callchain_cursor_reset(cursor);
2173 return 1;
2174 }
2175 return 0;
2176 }
2177 thread__find_symbol(thread, *cpumode, ip, &al);
2178 }
2179
2180 if (al.sym != NULL) {
2181 if (perf_hpp_list.parent && !*parent &&
2182 symbol__match_regex(al.sym, &parent_regex))
2183 *parent = al.sym;
2184 else if (have_ignore_callees && root_al &&
2185 symbol__match_regex(al.sym, &ignore_callees_regex)) {
2186 /* Treat this symbol as the root,
2187 forgetting its callees. */
2188 *root_al = al;
2189 callchain_cursor_reset(cursor);
2190 }
2191 }
2192
2193 if (symbol_conf.hide_unresolved && al.sym == NULL)
2194 return 0;
2195
2196 if (iter) {
2197 nr_loop_iter = iter->nr_loop_iter;
2198 iter_cycles = iter->cycles;
2199 }
2200
2201 ms.maps = al.maps;
2202 ms.map = al.map;
2203 ms.sym = al.sym;
2204 srcline = callchain_srcline(&ms, al.addr);
2205 return callchain_cursor_append(cursor, ip, &ms,
2206 branch, flags, nr_loop_iter,
2207 iter_cycles, branch_from, srcline);
2208 }
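
/*
 * Illustrative note (not part of the original file): a raw callchain as
 * delivered by the kernel may look like
 *
 *   PERF_CONTEXT_KERNEL, kaddr1, kaddr2, PERF_CONTEXT_USER, uaddr1, uaddr2
 *
 * The PERF_CONTEXT_* markers compare >= PERF_CONTEXT_MAX; add_callchain_ip()
 * above only switches the cpumode used for the addresses that follow such a
 * marker and returns 0 without appending the marker itself to the cursor.
 */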
2209
2210 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2211 struct addr_location *al)
2212 {
2213 unsigned int i;
2214 const struct branch_stack *bs = sample->branch_stack;
2215 struct branch_entry *entries = perf_sample__branch_entries(sample);
2216 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2217
2218 if (!bi)
2219 return NULL;
2220
2221 for (i = 0; i < bs->nr; i++) {
2222 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2223 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2224 bi[i].flags = entries[i].flags;
2225 }
2226 return bi;
2227 }
2228
2229 static void save_iterations(struct iterations *iter,
2230 struct branch_entry *be, int nr)
2231 {
2232 int i;
2233
2234 iter->nr_loop_iter++;
2235 iter->cycles = 0;
2236
2237 for (i = 0; i < nr; i++)
2238 iter->cycles += be[i].flags.cycles;
2239 }
2240
2241 #define CHASHSZ 127
2242 #define CHASHBITS 7
2243 #define NO_ENTRY 0xff
2244
2245 #define PERF_MAX_BRANCH_DEPTH 127
2246
2247 /* Remove loops. */
2248 static int remove_loops(struct branch_entry *l, int nr,
2249 struct iterations *iter)
2250 {
2251 int i, j, off;
2252 unsigned char chash[CHASHSZ];
2253
2254 memset(chash, NO_ENTRY, sizeof(chash));
2255
2256 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2257
2258 for (i = 0; i < nr; i++) {
2259 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2260
2261 /* no collision handling for now */
2262 if (chash[h] == NO_ENTRY) {
2263 chash[h] = i;
2264 } else if (l[chash[h]].from == l[i].from) {
2265 bool is_loop = true;
2266 /* check if it is a real loop */
2267 off = 0;
2268 for (j = chash[h]; j < i && i + off < nr; j++, off++)
2269 if (l[j].from != l[i + off].from) {
2270 is_loop = false;
2271 break;
2272 }
2273 if (is_loop) {
2274 j = nr - (i + off);
2275 if (j > 0) {
2276 save_iterations(iter + i + off,
2277 l + i, off);
2278
2279 memmove(iter + i, iter + i + off,
2280 j * sizeof(*iter));
2281
2282 memmove(l + i, l + i + off,
2283 j * sizeof(*l));
2284 }
2285
2286 nr -= off;
2287 }
2288 }
2289 }
2290 return nr;
2291 }
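
/*
 * Worked example (illustrative, assuming no hash collisions): with branch
 * "from" addresses A B A B A B C (nr = 7), the pass above notices at i = 2
 * that the two-entry body A B repeats, and removes one copy of it: the
 * surviving entries become A B A B C (nr = 5), while iter[2] records
 * nr_loop_iter = 1 plus the cycle counts of the two removed entries.
 */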
2292
2293 static int lbr_callchain_add_kernel_ip(struct thread *thread,
2294 struct callchain_cursor *cursor,
2295 struct perf_sample *sample,
2296 struct symbol **parent,
2297 struct addr_location *root_al,
2298 u64 branch_from,
2299 bool callee, int end)
2300 {
2301 struct ip_callchain *chain = sample->callchain;
2302 u8 cpumode = PERF_RECORD_MISC_USER;
2303 int err, i;
2304
2305 if (callee) {
2306 for (i = 0; i < end + 1; i++) {
2307 err = add_callchain_ip(thread, cursor, parent,
2308 root_al, &cpumode, chain->ips[i],
2309 false, NULL, NULL, branch_from);
2310 if (err)
2311 return err;
2312 }
2313 return 0;
2314 }
2315
2316 for (i = end; i >= 0; i--) {
2317 err = add_callchain_ip(thread, cursor, parent,
2318 root_al, &cpumode, chain->ips[i],
2319 false, NULL, NULL, branch_from);
2320 if (err)
2321 return err;
2322 }
2323
2324 return 0;
2325 }
2326
2327 static void save_lbr_cursor_node(struct thread *thread,
2328 struct callchain_cursor *cursor,
2329 int idx)
2330 {
2331 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2332
2333 if (!lbr_stitch)
2334 return;
2335
2336 if (cursor->pos == cursor->nr) {
2337 lbr_stitch->prev_lbr_cursor[idx].valid = false;
2338 return;
2339 }
2340
2341 if (!cursor->curr)
2342 cursor->curr = cursor->first;
2343 else
2344 cursor->curr = cursor->curr->next;
2345 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2346 sizeof(struct callchain_cursor_node));
2347
2348 lbr_stitch->prev_lbr_cursor[idx].valid = true;
2349 cursor->pos++;
2350 }
2351
2352 static int lbr_callchain_add_lbr_ip(struct thread *thread,
2353 struct callchain_cursor *cursor,
2354 struct perf_sample *sample,
2355 struct symbol **parent,
2356 struct addr_location *root_al,
2357 u64 *branch_from,
2358 bool callee)
2359 {
2360 struct branch_stack *lbr_stack = sample->branch_stack;
2361 struct branch_entry *entries = perf_sample__branch_entries(sample);
2362 u8 cpumode = PERF_RECORD_MISC_USER;
2363 int lbr_nr = lbr_stack->nr;
2364 struct branch_flags *flags;
2365 int err, i;
2366 u64 ip;
2367
2368 /*
2369 	 * curr and pos are not used while the cursor is being written; they
2370 	 * are cleared in callchain_cursor_commit() when the writing session
2371 	 * is closed. Use them here to track the current cursor node.
2372 */
2373 if (thread->lbr_stitch) {
2374 cursor->curr = NULL;
2375 cursor->pos = cursor->nr;
2376 if (cursor->nr) {
2377 cursor->curr = cursor->first;
2378 for (i = 0; i < (int)(cursor->nr - 1); i++)
2379 cursor->curr = cursor->curr->next;
2380 }
2381 }
2382
2383 if (callee) {
2384 /* Add LBR ip from first entries.to */
2385 ip = entries[0].to;
2386 flags = &entries[0].flags;
2387 *branch_from = entries[0].from;
2388 err = add_callchain_ip(thread, cursor, parent,
2389 root_al, &cpumode, ip,
2390 true, flags, NULL,
2391 *branch_from);
2392 if (err)
2393 return err;
2394
2395 /*
2396 		 * The number of cursor nodes has increased, so advance the
2397 		 * current cursor node. There is no need to save the cursor
2398 		 * node for entry 0, since it is impossible to stitch the whole
2399 		 * LBR stack of the previous sample anyway.
2400 */
2401 if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
2402 if (!cursor->curr)
2403 cursor->curr = cursor->first;
2404 else
2405 cursor->curr = cursor->curr->next;
2406 cursor->pos++;
2407 }
2408
2409 /* Add LBR ip from entries.from one by one. */
2410 for (i = 0; i < lbr_nr; i++) {
2411 ip = entries[i].from;
2412 flags = &entries[i].flags;
2413 err = add_callchain_ip(thread, cursor, parent,
2414 root_al, &cpumode, ip,
2415 true, flags, NULL,
2416 *branch_from);
2417 if (err)
2418 return err;
2419 save_lbr_cursor_node(thread, cursor, i);
2420 }
2421 return 0;
2422 }
2423
2424 /* Add LBR ip from entries.from one by one. */
2425 for (i = lbr_nr - 1; i >= 0; i--) {
2426 ip = entries[i].from;
2427 flags = &entries[i].flags;
2428 err = add_callchain_ip(thread, cursor, parent,
2429 root_al, &cpumode, ip,
2430 true, flags, NULL,
2431 *branch_from);
2432 if (err)
2433 return err;
2434 save_lbr_cursor_node(thread, cursor, i);
2435 }
2436
2437 /* Add LBR ip from first entries.to */
2438 ip = entries[0].to;
2439 flags = &entries[0].flags;
2440 *branch_from = entries[0].from;
2441 err = add_callchain_ip(thread, cursor, parent,
2442 root_al, &cpumode, ip,
2443 true, flags, NULL,
2444 *branch_from);
2445 if (err)
2446 return err;
2447
2448 return 0;
2449 }
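
/*
 * Illustrative example (not from the original source): with
 * callchain_param.order == ORDER_CALLEE and an LBR call stack of three
 * entries
 *
 *   entries[0]: from C -> to D   (most recent call)
 *   entries[1]: from B -> to C
 *   entries[2]: from A -> to B
 *
 * the callee path above appends D, C, B, A to the cursor: entries[0].to
 * first, then every entries[i].from, walking the call chain from the
 * innermost function outwards. The caller-ordered path appends the same
 * addresses in reverse.
 */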
2450
2451 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2452 struct callchain_cursor *cursor)
2453 {
2454 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2455 struct callchain_cursor_node *cnode;
2456 struct stitch_list *stitch_node;
2457 int err;
2458
2459 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2460 cnode = &stitch_node->cursor;
2461
2462 err = callchain_cursor_append(cursor, cnode->ip,
2463 &cnode->ms,
2464 cnode->branch,
2465 &cnode->branch_flags,
2466 cnode->nr_loop_iter,
2467 cnode->iter_cycles,
2468 cnode->branch_from,
2469 cnode->srcline);
2470 if (err)
2471 return err;
2472 }
2473 return 0;
2474 }
2475
2476 static struct stitch_list *get_stitch_node(struct thread *thread)
2477 {
2478 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2479 struct stitch_list *stitch_node;
2480
2481 if (!list_empty(&lbr_stitch->free_lists)) {
2482 stitch_node = list_first_entry(&lbr_stitch->free_lists,
2483 struct stitch_list, node);
2484 list_del(&stitch_node->node);
2485
2486 return stitch_node;
2487 }
2488
2489 return malloc(sizeof(struct stitch_list));
2490 }
2491
2492 static bool has_stitched_lbr(struct thread *thread,
2493 struct perf_sample *cur,
2494 struct perf_sample *prev,
2495 unsigned int max_lbr,
2496 bool callee)
2497 {
2498 struct branch_stack *cur_stack = cur->branch_stack;
2499 struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2500 struct branch_stack *prev_stack = prev->branch_stack;
2501 struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2502 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2503 int i, j, nr_identical_branches = 0;
2504 struct stitch_list *stitch_node;
2505 u64 cur_base, distance;
2506
2507 if (!cur_stack || !prev_stack)
2508 return false;
2509
2510 /* Find the physical index of the base-of-stack for current sample. */
2511 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2512
2513 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2514 (max_lbr + prev_stack->hw_idx - cur_base);
2515 /* Previous sample has shorter stack. Nothing can be stitched. */
2516 if (distance + 1 > prev_stack->nr)
2517 return false;
2518
2519 /*
2520 * Check if there are identical LBRs between two samples.
2521 * Identical LBRs must have same from, to and flags values. Also,
2522 * they have to be saved in the same LBR registers (same physical
2523 * index).
2524 *
2525 * Starts from the base-of-stack of current sample.
2526 */
2527 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2528 if ((prev_entries[i].from != cur_entries[j].from) ||
2529 (prev_entries[i].to != cur_entries[j].to) ||
2530 (prev_entries[i].flags.value != cur_entries[j].flags.value))
2531 break;
2532 nr_identical_branches++;
2533 }
2534
2535 if (!nr_identical_branches)
2536 return false;
2537
2538 /*
2539 * Save the LBRs between the base-of-stack of previous sample
2540 * and the base-of-stack of current sample into lbr_stitch->lists.
2541 * These LBRs will be stitched later.
2542 */
2543 for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2544
2545 if (!lbr_stitch->prev_lbr_cursor[i].valid)
2546 continue;
2547
2548 stitch_node = get_stitch_node(thread);
2549 if (!stitch_node)
2550 return false;
2551
2552 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2553 sizeof(struct callchain_cursor_node));
2554
2555 if (callee)
2556 list_add(&stitch_node->node, &lbr_stitch->lists);
2557 else
2558 list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2559 }
2560
2561 return true;
2562 }
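
/*
 * Worked example with made-up numbers: assume max_lbr = 32 hardware LBR
 * registers. For the current sample, nr = 8 and hw_idx = 10, so
 * cur_base = 32 - 8 + 10 + 1 = 35, i.e. physical register 3 (mod 32), the
 * register that holds the current base-of-stack entry cur_entries[7]. If the
 * previous sample had hw_idx = 20, then distance = 32 + 20 - 35 = 17, meaning
 * prev_entries[17] was saved in that same physical register; stitching is
 * only possible if the previous stack has at least distance + 1 = 18 entries,
 * and the identical-branch check walks prev_entries[17..0] against
 * cur_entries[7..0] in lockstep.
 */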
2563
2564 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2565 {
2566 if (thread->lbr_stitch)
2567 return true;
2568
2569 thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
2570 if (!thread->lbr_stitch)
2571 goto err;
2572
2573 thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2574 if (!thread->lbr_stitch->prev_lbr_cursor)
2575 goto free_lbr_stitch;
2576
2577 INIT_LIST_HEAD(&thread->lbr_stitch->lists);
2578 INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
2579
2580 return true;
2581
2582 free_lbr_stitch:
2583 zfree(&thread->lbr_stitch);
2584 err:
2585 	pr_warning("Failed to allocate space for stitched LBRs. Disabling LBR stitching.\n");
2586 thread->lbr_stitch_enable = false;
2587 return false;
2588 }
2589
2590 /*
2591  * Resolve an LBR callstack chain sample
2592  * Return:
2593  *   1  on success (LBR callchain information was resolved)
2594  *   0  if no LBR callchain information is available; the caller should try fp
2595  *   a negative error code on other errors.
2596 */
2597 static int resolve_lbr_callchain_sample(struct thread *thread,
2598 struct callchain_cursor *cursor,
2599 struct perf_sample *sample,
2600 struct symbol **parent,
2601 struct addr_location *root_al,
2602 int max_stack,
2603 unsigned int max_lbr)
2604 {
2605 bool callee = (callchain_param.order == ORDER_CALLEE);
2606 struct ip_callchain *chain = sample->callchain;
2607 int chain_nr = min(max_stack, (int)chain->nr), i;
2608 struct lbr_stitch *lbr_stitch;
2609 bool stitched_lbr = false;
2610 u64 branch_from = 0;
2611 int err;
2612
2613 for (i = 0; i < chain_nr; i++) {
2614 if (chain->ips[i] == PERF_CONTEXT_USER)
2615 break;
2616 }
2617
2618 /* LBR only affects the user callchain */
2619 if (i == chain_nr)
2620 return 0;
2621
2622 if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
2623 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2624 lbr_stitch = thread->lbr_stitch;
2625
2626 stitched_lbr = has_stitched_lbr(thread, sample,
2627 &lbr_stitch->prev_sample,
2628 max_lbr, callee);
2629
2630 if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2631 list_replace_init(&lbr_stitch->lists,
2632 &lbr_stitch->free_lists);
2633 }
2634 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2635 }
2636
2637 if (callee) {
2638 /* Add kernel ip */
2639 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2640 parent, root_al, branch_from,
2641 true, i);
2642 if (err)
2643 goto error;
2644
2645 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2646 root_al, &branch_from, true);
2647 if (err)
2648 goto error;
2649
2650 if (stitched_lbr) {
2651 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2652 if (err)
2653 goto error;
2654 }
2655
2656 } else {
2657 if (stitched_lbr) {
2658 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2659 if (err)
2660 goto error;
2661 }
2662 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2663 root_al, &branch_from, false);
2664 if (err)
2665 goto error;
2666
2667 /* Add kernel ip */
2668 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2669 parent, root_al, branch_from,
2670 false, i);
2671 if (err)
2672 goto error;
2673 }
2674 return 1;
2675
2676 error:
2677 return (err < 0) ? err : 0;
2678 }
2679
2680 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2681 struct callchain_cursor *cursor,
2682 struct symbol **parent,
2683 struct addr_location *root_al,
2684 u8 *cpumode, int ent)
2685 {
2686 int err = 0;
2687
2688 while (--ent >= 0) {
2689 u64 ip = chain->ips[ent];
2690
2691 if (ip >= PERF_CONTEXT_MAX) {
2692 err = add_callchain_ip(thread, cursor, parent,
2693 root_al, cpumode, ip,
2694 false, NULL, NULL, 0);
2695 break;
2696 }
2697 }
2698 return err;
2699 }
2700
2701 static int thread__resolve_callchain_sample(struct thread *thread,
2702 struct callchain_cursor *cursor,
2703 struct evsel *evsel,
2704 struct perf_sample *sample,
2705 struct symbol **parent,
2706 struct addr_location *root_al,
2707 int max_stack)
2708 {
2709 struct branch_stack *branch = sample->branch_stack;
2710 struct branch_entry *entries = perf_sample__branch_entries(sample);
2711 struct ip_callchain *chain = sample->callchain;
2712 int chain_nr = 0;
2713 u8 cpumode = PERF_RECORD_MISC_USER;
2714 int i, j, err, nr_entries;
2715 int skip_idx = -1;
2716 int first_call = 0;
2717
2718 if (chain)
2719 chain_nr = chain->nr;
2720
2721 if (evsel__has_branch_callstack(evsel)) {
2722 struct perf_env *env = evsel__env(evsel);
2723
2724 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2725 root_al, max_stack,
2726 !env ? 0 : env->max_branches);
2727 if (err)
2728 return (err < 0) ? err : 0;
2729 }
2730
2731 /*
2732 * Based on DWARF debug information, some architectures skip
2733 * a callchain entry saved by the kernel.
2734 */
2735 skip_idx = arch_skip_callchain_idx(thread, chain);
2736
2737 /*
2738 * Add branches to call stack for easier browsing. This gives
2739 * more context for a sample than just the callers.
2740 *
2741 * This uses individual histograms of paths compared to the
2742 * aggregated histograms the normal LBR mode uses.
2743 *
2744 * Limitations for now:
2745 * - No extra filters
2746 * - No annotations (should annotate somehow)
2747 */
2748
2749 if (branch && callchain_param.branch_callstack) {
2750 int nr = min(max_stack, (int)branch->nr);
2751 struct branch_entry be[nr];
2752 struct iterations iter[nr];
2753
2754 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2755 pr_warning("corrupted branch chain. skipping...\n");
2756 goto check_calls;
2757 }
2758
2759 for (i = 0; i < nr; i++) {
2760 if (callchain_param.order == ORDER_CALLEE) {
2761 be[i] = entries[i];
2762
2763 if (chain == NULL)
2764 continue;
2765
2766 /*
2767 * Check for overlap into the callchain.
2768 				 * The return address in the callchain is one
2769 				 * instruction past the call site recorded in
2770 				 * the branch entry. To adjust for this, assume
2771 				 * the calling instruction is no longer than 8 bytes.
2772 */
2773 if (i == skip_idx ||
2774 chain->ips[first_call] >= PERF_CONTEXT_MAX)
2775 first_call++;
2776 else if (be[i].from < chain->ips[first_call] &&
2777 be[i].from >= chain->ips[first_call] - 8)
2778 first_call++;
2779 } else
2780 be[i] = entries[branch->nr - i - 1];
2781 }
2782
2783 memset(iter, 0, sizeof(struct iterations) * nr);
2784 nr = remove_loops(be, nr, iter);
2785
2786 for (i = 0; i < nr; i++) {
2787 err = add_callchain_ip(thread, cursor, parent,
2788 root_al,
2789 NULL, be[i].to,
2790 true, &be[i].flags,
2791 NULL, be[i].from);
2792
2793 if (!err)
2794 err = add_callchain_ip(thread, cursor, parent, root_al,
2795 NULL, be[i].from,
2796 true, &be[i].flags,
2797 &iter[i], 0);
2798 if (err == -EINVAL)
2799 break;
2800 if (err)
2801 return err;
2802 }
2803
2804 if (chain_nr == 0)
2805 return 0;
2806
2807 chain_nr -= nr;
2808 }
2809
2810 check_calls:
2811 if (chain && callchain_param.order != ORDER_CALLEE) {
2812 err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2813 &cpumode, chain->nr - first_call);
2814 if (err)
2815 return (err < 0) ? err : 0;
2816 }
2817 for (i = first_call, nr_entries = 0;
2818 i < chain_nr && nr_entries < max_stack; i++) {
2819 u64 ip;
2820
2821 if (callchain_param.order == ORDER_CALLEE)
2822 j = i;
2823 else
2824 j = chain->nr - i - 1;
2825
2826 #ifdef HAVE_SKIP_CALLCHAIN_IDX
2827 if (j == skip_idx)
2828 continue;
2829 #endif
2830 ip = chain->ips[j];
2831 if (ip < PERF_CONTEXT_MAX)
2832 ++nr_entries;
2833 else if (callchain_param.order != ORDER_CALLEE) {
2834 err = find_prev_cpumode(chain, thread, cursor, parent,
2835 root_al, &cpumode, j);
2836 if (err)
2837 return (err < 0) ? err : 0;
2838 continue;
2839 }
2840
2841 err = add_callchain_ip(thread, cursor, parent,
2842 root_al, &cpumode, ip,
2843 false, NULL, NULL, 0);
2844
2845 if (err)
2846 return (err < 0) ? err : 0;
2847 }
2848
2849 return 0;
2850 }
2851
2852 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
2853 {
2854 struct symbol *sym = ms->sym;
2855 struct map *map = ms->map;
2856 struct inline_node *inline_node;
2857 struct inline_list *ilist;
2858 u64 addr;
2859 int ret = 1;
2860
2861 if (!symbol_conf.inline_name || !map || !sym)
2862 return ret;
2863
2864 addr = map__map_ip(map, ip);
2865 addr = map__rip_2objdump(map, addr);
2866
2867 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2868 if (!inline_node) {
2869 inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2870 if (!inline_node)
2871 return ret;
2872 inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2873 }
2874
2875 list_for_each_entry(ilist, &inline_node->val, list) {
2876 struct map_symbol ilist_ms = {
2877 .maps = ms->maps,
2878 .map = map,
2879 .sym = ilist->symbol,
2880 };
2881 ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
2882 NULL, 0, 0, 0, ilist->srcline);
2883
2884 if (ret != 0)
2885 return ret;
2886 }
2887
2888 return ret;
2889 }
2890
2891 static int unwind_entry(struct unwind_entry *entry, void *arg)
2892 {
2893 struct callchain_cursor *cursor = arg;
2894 const char *srcline = NULL;
2895 u64 addr = entry->ip;
2896
2897 if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
2898 return 0;
2899
2900 if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
2901 return 0;
2902
2903 /*
2904 * Convert entry->ip from a virtual address to an offset in
2905 * its corresponding binary.
2906 */
2907 if (entry->ms.map)
2908 addr = map__map_ip(entry->ms.map, entry->ip);
2909
2910 srcline = callchain_srcline(&entry->ms, addr);
2911 return callchain_cursor_append(cursor, entry->ip, &entry->ms,
2912 false, NULL, 0, 0, 0, srcline);
2913 }
2914
2915 static int thread__resolve_callchain_unwind(struct thread *thread,
2916 struct callchain_cursor *cursor,
2917 struct evsel *evsel,
2918 struct perf_sample *sample,
2919 int max_stack)
2920 {
2921 /* Can we do dwarf post unwind? */
2922 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2923 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
2924 return 0;
2925
2926 /* Bail out if nothing was captured. */
2927 if ((!sample->user_regs.regs) ||
2928 (!sample->user_stack.size))
2929 return 0;
2930
2931 return unwind__get_entries(unwind_entry, cursor,
2932 thread, sample, max_stack);
2933 }
2934
2935 int thread__resolve_callchain(struct thread *thread,
2936 struct callchain_cursor *cursor,
2937 struct evsel *evsel,
2938 struct perf_sample *sample,
2939 struct symbol **parent,
2940 struct addr_location *root_al,
2941 int max_stack)
2942 {
2943 int ret = 0;
2944
2945 callchain_cursor_reset(cursor);
2946
2947 if (callchain_param.order == ORDER_CALLEE) {
2948 ret = thread__resolve_callchain_sample(thread, cursor,
2949 evsel, sample,
2950 parent, root_al,
2951 max_stack);
2952 if (ret)
2953 return ret;
2954 ret = thread__resolve_callchain_unwind(thread, cursor,
2955 evsel, sample,
2956 max_stack);
2957 } else {
2958 ret = thread__resolve_callchain_unwind(thread, cursor,
2959 evsel, sample,
2960 max_stack);
2961 if (ret)
2962 return ret;
2963 ret = thread__resolve_callchain_sample(thread, cursor,
2964 evsel, sample,
2965 parent, root_al,
2966 max_stack);
2967 }
2968
2969 return ret;
2970 }
2971
2972 int machine__for_each_thread(struct machine *machine,
2973 int (*fn)(struct thread *thread, void *p),
2974 void *priv)
2975 {
2976 struct threads *threads;
2977 struct rb_node *nd;
2978 struct thread *thread;
2979 int rc = 0;
2980 int i;
2981
2982 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2983 threads = &machine->threads[i];
2984 for (nd = rb_first_cached(&threads->entries); nd;
2985 nd = rb_next(nd)) {
2986 thread = rb_entry(nd, struct thread, rb_node);
2987 rc = fn(thread, priv);
2988 if (rc != 0)
2989 return rc;
2990 }
2991
2992 list_for_each_entry(thread, &threads->dead, node) {
2993 rc = fn(thread, priv);
2994 if (rc != 0)
2995 return rc;
2996 }
2997 }
2998 return rc;
2999 }
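
/*
 * Usage sketch (hypothetical, not part of the original file): counting every
 * thread known to a machine via machine__for_each_thread(). Returning a
 * non-zero value from the callback stops the iteration early.
 */
static int count_thread_cb(struct thread *thread __maybe_unused, void *priv)
{
	int *nr = priv;

	(*nr)++;
	return 0;
}

static int machine__example_nr_threads(struct machine *machine)
{
	int nr = 0;

	machine__for_each_thread(machine, count_thread_cb, &nr);
	return nr;
}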
3000
3001 int machines__for_each_thread(struct machines *machines,
3002 int (*fn)(struct thread *thread, void *p),
3003 void *priv)
3004 {
3005 struct rb_node *nd;
3006 int rc = 0;
3007
3008 rc = machine__for_each_thread(&machines->host, fn, priv);
3009 if (rc != 0)
3010 return rc;
3011
3012 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3013 struct machine *machine = rb_entry(nd, struct machine, rb_node);
3014
3015 rc = machine__for_each_thread(machine, fn, priv);
3016 if (rc != 0)
3017 return rc;
3018 }
3019 return rc;
3020 }
3021
3022 pid_t machine__get_current_tid(struct machine *machine, int cpu)
3023 {
3024 int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
3025
3026 if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
3027 return -1;
3028
3029 return machine->current_tid[cpu];
3030 }
3031
3032 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
3033 pid_t tid)
3034 {
3035 struct thread *thread;
3036 int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
3037
3038 if (cpu < 0)
3039 return -EINVAL;
3040
3041 if (!machine->current_tid) {
3042 int i;
3043
3044 machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
3045 if (!machine->current_tid)
3046 return -ENOMEM;
3047 for (i = 0; i < nr_cpus; i++)
3048 machine->current_tid[i] = -1;
3049 }
3050
3051 if (cpu >= nr_cpus) {
3052 pr_err("Requested CPU %d too large. ", cpu);
3053 pr_err("Consider raising MAX_NR_CPUS\n");
3054 return -EINVAL;
3055 }
3056
3057 machine->current_tid[cpu] = tid;
3058
3059 thread = machine__findnew_thread(machine, pid, tid);
3060 if (!thread)
3061 return -ENOMEM;
3062
3063 thread->cpu = cpu;
3064 thread__put(thread);
3065
3066 return 0;
3067 }
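
/*
 * Hypothetical usage sketch: remember which task a CPU was last seen running
 * (e.g. while processing a context-switch record) and read it back later.
 * The cpu/pid/tid values below are invented for illustration.
 */
static void machine__example_track_cpu(struct machine *machine)
{
	if (machine__set_current_tid(machine, 2, 1234, 1234) == 0)
		pr_debug("cpu 2 last ran tid %d\n",
			 machine__get_current_tid(machine, 2));
}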
3068
3069 /*
3070 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
3071 * normalized arch is needed.
3072 */
3073 bool machine__is(struct machine *machine, const char *arch)
3074 {
3075 return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3076 }
3077
3078 int machine__nr_cpus_avail(struct machine *machine)
3079 {
3080 return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3081 }
3082
3083 int machine__get_kernel_start(struct machine *machine)
3084 {
3085 struct map *map = machine__kernel_map(machine);
3086 int err = 0;
3087
3088 /*
3089 * The only addresses above 2^63 are kernel addresses of a 64-bit
3090 * kernel. Note that addresses are unsigned so that on a 32-bit system
3091 * all addresses including kernel addresses are less than 2^32. In
3092 * that case (32-bit system), if the kernel mapping is unknown, all
3093 * addresses will be assumed to be in user space - see
3094 * machine__kernel_ip().
3095 */
3096 machine->kernel_start = 1ULL << 63;
3097 if (map) {
3098 err = map__load(map);
3099 /*
3100 * On x86_64, PTI entry trampolines are less than the
3101 * start of kernel text, but still above 2^63. So leave
3102 * kernel_start = 1ULL << 63 for x86_64.
3103 */
3104 if (!err && !machine__is(machine, "x86_64"))
3105 machine->kernel_start = map->start;
3106 }
3107 return err;
3108 }
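
/*
 * Illustrative note: with kernel_start = 1ULL << 63, machine__kernel_ip()
 * treats e.g. 0xffffffff81000000 (a typical x86_64 kernel text address) as a
 * kernel address and 0x00007f0000000000 as a user address; on a 32-bit
 * system every address is below 2^32 and is considered user space until the
 * kernel map is loaded and kernel_start is lowered to the map start.
 */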
3109
3110 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3111 {
3112 u8 addr_cpumode = cpumode;
3113 bool kernel_ip;
3114
3115 if (!machine->single_address_space)
3116 goto out;
3117
3118 kernel_ip = machine__kernel_ip(machine, addr);
3119 switch (cpumode) {
3120 case PERF_RECORD_MISC_KERNEL:
3121 case PERF_RECORD_MISC_USER:
3122 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3123 PERF_RECORD_MISC_USER;
3124 break;
3125 case PERF_RECORD_MISC_GUEST_KERNEL:
3126 case PERF_RECORD_MISC_GUEST_USER:
3127 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3128 PERF_RECORD_MISC_GUEST_USER;
3129 break;
3130 default:
3131 break;
3132 }
3133 out:
3134 return addr_cpumode;
3135 }
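
/*
 * Example (illustrative): when the machine has a single address space, a
 * sample tagged PERF_RECORD_MISC_USER whose address is at or above
 * machine->kernel_start comes back from the helper above as
 * PERF_RECORD_MISC_KERNEL, and a kernel-tagged address below kernel_start
 * comes back as PERF_RECORD_MISC_USER; the guest variants are adjusted the
 * same way.
 */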
3136
3137 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3138 {
3139 return dsos__findnew_id(&machine->dsos, filename, id);
3140 }
3141
3142 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3143 {
3144 return machine__findnew_dso_id(machine, filename, NULL);
3145 }
3146
3147 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3148 {
3149 struct machine *machine = vmachine;
3150 struct map *map;
3151 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3152
3153 if (sym == NULL)
3154 return NULL;
3155
3156 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
3157 *addrp = map->unmap_ip(map, sym->start);
3158 return sym->name;
3159 }
3160
3161 int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
3162 {
3163 struct dso *pos;
3164 int err = 0;
3165
3166 list_for_each_entry(pos, &machine->dsos.head, node) {
3167 if (fn(pos, machine, priv))
3168 err = -1;
3169 }
3170 return err;
3171 }
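
/*
 * Usage sketch (hypothetical): printing the long name of every DSO known to
 * a machine through machine__for_each_dso(). Unlike machine__for_each_thread(),
 * the loop above keeps iterating when the callback returns non-zero and only
 * remembers that an error occurred.
 */
static int dump_dso_cb(struct dso *dso, struct machine *machine __maybe_unused,
		       void *priv)
{
	FILE *fp = priv;

	fprintf(fp, "%s\n", dso->long_name);
	return 0;
}

static void machine__example_dump_dsos(struct machine *machine)
{
	machine__for_each_dso(machine, dump_dso_cb, stdout);
}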
3172