xref: /linux/tools/perf/util/machine.c (revision 84b9b44b)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <inttypes.h>
5 #include <regex.h>
6 #include <stdlib.h>
7 #include "callchain.h"
8 #include "debug.h"
9 #include "dso.h"
10 #include "env.h"
11 #include "event.h"
12 #include "evsel.h"
13 #include "hist.h"
14 #include "machine.h"
15 #include "map.h"
16 #include "map_symbol.h"
17 #include "branch.h"
18 #include "mem-events.h"
19 #include "path.h"
20 #include "srcline.h"
21 #include "symbol.h"
22 #include "sort.h"
23 #include "strlist.h"
24 #include "target.h"
25 #include "thread.h"
26 #include "util.h"
27 #include "vdso.h"
28 #include <stdbool.h>
29 #include <sys/types.h>
30 #include <sys/stat.h>
31 #include <unistd.h>
32 #include "unwind.h"
33 #include "linux/hash.h"
34 #include "asm/bug.h"
35 #include "bpf-event.h"
36 #include <internal/lib.h> // page_size
37 #include "cgroup.h"
38 #include "arm64-frame-pointer-unwind-support.h"
39 
40 #include <linux/ctype.h>
41 #include <symbol/kallsyms.h>
42 #include <linux/mman.h>
43 #include <linux/string.h>
44 #include <linux/zalloc.h>
45 
46 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
47 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip);
48 
49 static struct dso *machine__kernel_dso(struct machine *machine)
50 {
51 	return map__dso(machine->vmlinux_map);
52 }
53 
54 static void dsos__init(struct dsos *dsos)
55 {
56 	INIT_LIST_HEAD(&dsos->head);
57 	dsos->root = RB_ROOT;
58 	init_rwsem(&dsos->lock);
59 }
60 
61 static void machine__threads_init(struct machine *machine)
62 {
63 	int i;
64 
65 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
66 		struct threads *threads = &machine->threads[i];
67 		threads->entries = RB_ROOT_CACHED;
68 		init_rwsem(&threads->lock);
69 		threads->nr = 0;
70 		INIT_LIST_HEAD(&threads->dead);
71 		threads->last_match = NULL;
72 	}
73 }
74 
75 static int machine__set_mmap_name(struct machine *machine)
76 {
77 	if (machine__is_host(machine))
78 		machine->mmap_name = strdup("[kernel.kallsyms]");
79 	else if (machine__is_default_guest(machine))
80 		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
81 	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
82 			  machine->pid) < 0)
83 		machine->mmap_name = NULL;
84 
85 	return machine->mmap_name ? 0 : -ENOMEM;
86 }
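/*
 * For example, with the logic above the host machine gets the mmap name
 * "[kernel.kallsyms]", the default guest gets "[guest.kernel.kallsyms]",
 * and a guest with pid 4242 would get "[guest.kernel.kallsyms.4242]".
 */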
87 
88 static void thread__set_guest_comm(struct thread *thread, pid_t pid)
89 {
90 	char comm[64];
91 
92 	snprintf(comm, sizeof(comm), "[guest/%d]", pid);
93 	thread__set_comm(thread, comm, 0);
94 }
95 
96 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
97 {
98 	int err = -ENOMEM;
99 
100 	memset(machine, 0, sizeof(*machine));
101 	machine->kmaps = maps__new(machine);
102 	if (machine->kmaps == NULL)
103 		return -ENOMEM;
104 
105 	RB_CLEAR_NODE(&machine->rb_node);
106 	dsos__init(&machine->dsos);
107 
108 	machine__threads_init(machine);
109 
110 	machine->vdso_info = NULL;
111 	machine->env = NULL;
112 
113 	machine->pid = pid;
114 
115 	machine->id_hdr_size = 0;
116 	machine->kptr_restrict_warned = false;
117 	machine->comm_exec = false;
118 	machine->kernel_start = 0;
119 	machine->vmlinux_map = NULL;
120 
121 	machine->root_dir = strdup(root_dir);
122 	if (machine->root_dir == NULL)
123 		goto out;
124 
125 	if (machine__set_mmap_name(machine))
126 		goto out;
127 
128 	if (pid != HOST_KERNEL_ID) {
129 		struct thread *thread = machine__findnew_thread(machine, -1,
130 								pid);
131 
132 		if (thread == NULL)
133 			goto out;
134 
135 		thread__set_guest_comm(thread, pid);
136 		thread__put(thread);
137 	}
138 
139 	machine->current_tid = NULL;
140 	err = 0;
141 
142 out:
143 	if (err) {
144 		zfree(&machine->kmaps);
145 		zfree(&machine->root_dir);
146 		zfree(&machine->mmap_name);
147 	}
148 	return err;
149 }
150 
151 struct machine *machine__new_host(void)
152 {
153 	struct machine *machine = malloc(sizeof(*machine));
154 
155 	if (machine != NULL) {
156 		machine__init(machine, "", HOST_KERNEL_ID);
157 
158 		if (machine__create_kernel_maps(machine) < 0)
159 			goto out_delete;
160 	}
161 
162 	return machine;
163 out_delete:
164 	free(machine);
165 	return NULL;
166 }
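/*
 * Minimal usage sketch (hypothetical caller, error handling elided): a
 * standalone host machine object is typically created, used and then
 * deleted, e.g.:
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine != NULL) {
 *		... resolve kernel symbols, process events ...
 *		machine__delete(machine);
 *	}
 */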
167 
168 struct machine *machine__new_kallsyms(void)
169 {
170 	struct machine *machine = machine__new_host();
171 	/*
172 	 * FIXME:
173 	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
174 	 *    ask for not using the kcore parsing code, once this one is fixed
175 	 *    to create a map per module.
176 	 */
177 	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
178 		machine__delete(machine);
179 		machine = NULL;
180 	}
181 
182 	return machine;
183 }
184 
185 static void dsos__purge(struct dsos *dsos)
186 {
187 	struct dso *pos, *n;
188 
189 	down_write(&dsos->lock);
190 
191 	list_for_each_entry_safe(pos, n, &dsos->head, node) {
192 		RB_CLEAR_NODE(&pos->rb_node);
193 		pos->root = NULL;
194 		list_del_init(&pos->node);
195 		dso__put(pos);
196 	}
197 
198 	up_write(&dsos->lock);
199 }
200 
201 static void dsos__exit(struct dsos *dsos)
202 {
203 	dsos__purge(dsos);
204 	exit_rwsem(&dsos->lock);
205 }
206 
207 void machine__delete_threads(struct machine *machine)
208 {
209 	struct rb_node *nd;
210 	int i;
211 
212 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
213 		struct threads *threads = &machine->threads[i];
214 		down_write(&threads->lock);
215 		nd = rb_first_cached(&threads->entries);
216 		while (nd) {
217 			struct thread *t = rb_entry(nd, struct thread, rb_node);
218 
219 			nd = rb_next(nd);
220 			__machine__remove_thread(machine, t, false);
221 		}
222 		up_write(&threads->lock);
223 	}
224 }
225 
226 void machine__exit(struct machine *machine)
227 {
228 	int i;
229 
230 	if (machine == NULL)
231 		return;
232 
233 	machine__destroy_kernel_maps(machine);
234 	maps__delete(machine->kmaps);
235 	dsos__exit(&machine->dsos);
236 	machine__exit_vdso(machine);
237 	zfree(&machine->root_dir);
238 	zfree(&machine->mmap_name);
239 	zfree(&machine->current_tid);
240 	zfree(&machine->kallsyms_filename);
241 
242 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
243 		struct threads *threads = &machine->threads[i];
244 		struct thread *thread, *n;
245 		/*
246 		 * Forget about the dead: at this point, whatever threads are
247 		 * left on the dead list had better have a reference count held
248 		 * by whoever is still using them. When those users drop their
249 		 * references and the count finally hits zero, thread__put() will
250 		 * see that the thread is no longer on the dead list and will not
251 		 * try to remove it from there, calling thread__delete() straight away.
252 		 */
253 		list_for_each_entry_safe(thread, n, &threads->dead, node)
254 			list_del_init(&thread->node);
255 
256 		exit_rwsem(&threads->lock);
257 	}
258 }
259 
260 void machine__delete(struct machine *machine)
261 {
262 	if (machine) {
263 		machine__exit(machine);
264 		free(machine);
265 	}
266 }
267 
268 void machines__init(struct machines *machines)
269 {
270 	machine__init(&machines->host, "", HOST_KERNEL_ID);
271 	machines->guests = RB_ROOT_CACHED;
272 }
273 
274 void machines__exit(struct machines *machines)
275 {
276 	machine__exit(&machines->host);
277 	/* XXX exit guest */
278 }
279 
280 struct machine *machines__add(struct machines *machines, pid_t pid,
281 			      const char *root_dir)
282 {
283 	struct rb_node **p = &machines->guests.rb_root.rb_node;
284 	struct rb_node *parent = NULL;
285 	struct machine *pos, *machine = malloc(sizeof(*machine));
286 	bool leftmost = true;
287 
288 	if (machine == NULL)
289 		return NULL;
290 
291 	if (machine__init(machine, root_dir, pid) != 0) {
292 		free(machine);
293 		return NULL;
294 	}
295 
296 	while (*p != NULL) {
297 		parent = *p;
298 		pos = rb_entry(parent, struct machine, rb_node);
299 		if (pid < pos->pid)
300 			p = &(*p)->rb_left;
301 		else {
302 			p = &(*p)->rb_right;
303 			leftmost = false;
304 		}
305 	}
306 
307 	rb_link_node(&machine->rb_node, parent, p);
308 	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
309 
310 	machine->machines = machines;
311 
312 	return machine;
313 }
314 
315 void machines__set_comm_exec(struct machines *machines, bool comm_exec)
316 {
317 	struct rb_node *nd;
318 
319 	machines->host.comm_exec = comm_exec;
320 
321 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
322 		struct machine *machine = rb_entry(nd, struct machine, rb_node);
323 
324 		machine->comm_exec = comm_exec;
325 	}
326 }
327 
328 struct machine *machines__find(struct machines *machines, pid_t pid)
329 {
330 	struct rb_node **p = &machines->guests.rb_root.rb_node;
331 	struct rb_node *parent = NULL;
332 	struct machine *machine;
333 	struct machine *default_machine = NULL;
334 
335 	if (pid == HOST_KERNEL_ID)
336 		return &machines->host;
337 
338 	while (*p != NULL) {
339 		parent = *p;
340 		machine = rb_entry(parent, struct machine, rb_node);
341 		if (pid < machine->pid)
342 			p = &(*p)->rb_left;
343 		else if (pid > machine->pid)
344 			p = &(*p)->rb_right;
345 		else
346 			return machine;
347 		if (!machine->pid)
348 			default_machine = machine;
349 	}
350 
351 	return default_machine;
352 }
353 
354 struct machine *machines__findnew(struct machines *machines, pid_t pid)
355 {
356 	char path[PATH_MAX];
357 	const char *root_dir = "";
358 	struct machine *machine = machines__find(machines, pid);
359 
360 	if (machine && (machine->pid == pid))
361 		goto out;
362 
363 	if ((pid != HOST_KERNEL_ID) &&
364 	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
365 	    (symbol_conf.guestmount)) {
366 		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
367 		if (access(path, R_OK)) {
368 			static struct strlist *seen;
369 
370 			if (!seen)
371 				seen = strlist__new(NULL, NULL);
372 
373 			if (!strlist__has_entry(seen, path)) {
374 				pr_err("Can't access file %s\n", path);
375 				strlist__add(seen, path);
376 			}
377 			machine = NULL;
378 			goto out;
379 		}
380 		root_dir = path;
381 	}
382 
383 	machine = machines__add(machines, pid, root_dir);
384 out:
385 	return machine;
386 }
387 
388 struct machine *machines__find_guest(struct machines *machines, pid_t pid)
389 {
390 	struct machine *machine = machines__find(machines, pid);
391 
392 	if (!machine)
393 		machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
394 	return machine;
395 }
396 
397 /*
398  * A common case for KVM test programs is that the test program acts as the
399  * hypervisor, creating, running and destroying the virtual machine, and
400  * providing the guest object code from its own object code. In this case,
401  * the VM is not running an OS, but only the functions loaded into it by the
402  * hypervisor test program, and conveniently, loaded at the same virtual
403  * addresses.
404  *
405  * Normally to resolve addresses, MMAP events are needed to map addresses
406  * back to the object code and debug symbols for that object code.
407  *
408  * Currently, there is no way to get such mapping information from guests
409  * but, in the scenario described above, the guest has the same mappings
410  * as the hypervisor, so support for that scenario can be achieved.
411  *
412  * To support that, copy the host thread's maps to the guest thread's maps.
413  * Note, we do not discover the guest until we encounter a guest event,
414  * which works well because it is not until then that we know that the host
415  * thread's maps have been set up.
416  *
417  * This function returns the guest thread. Apart from keeping the data
418  * structures sane, using a thread belonging to the guest machine, instead
419  * of the host thread, allows it to have its own comm (refer
420  * thread__set_guest_comm()).
421  */
422 static struct thread *findnew_guest_code(struct machine *machine,
423 					 struct machine *host_machine,
424 					 pid_t pid)
425 {
426 	struct thread *host_thread;
427 	struct thread *thread;
428 	int err;
429 
430 	if (!machine)
431 		return NULL;
432 
433 	thread = machine__findnew_thread(machine, -1, pid);
434 	if (!thread)
435 		return NULL;
436 
437 	/* Assume maps are set up if there are any */
438 	if (maps__nr_maps(thread->maps))
439 		return thread;
440 
441 	host_thread = machine__find_thread(host_machine, -1, pid);
442 	if (!host_thread)
443 		goto out_err;
444 
445 	thread__set_guest_comm(thread, pid);
446 
447 	/*
448 	 * Guest code can be found in hypervisor process at the same address
449 	 * so copy host maps.
450 	 */
451 	err = maps__clone(thread, host_thread->maps);
452 	thread__put(host_thread);
453 	if (err)
454 		goto out_err;
455 
456 	return thread;
457 
458 out_err:
459 	thread__zput(thread);
460 	return NULL;
461 }
462 
463 struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
464 {
465 	struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
466 	struct machine *machine = machines__findnew(machines, pid);
467 
468 	return findnew_guest_code(machine, host_machine, pid);
469 }
470 
471 struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
472 {
473 	struct machines *machines = machine->machines;
474 	struct machine *host_machine;
475 
476 	if (!machines)
477 		return NULL;
478 
479 	host_machine = machines__find(machines, HOST_KERNEL_ID);
480 
481 	return findnew_guest_code(machine, host_machine, pid);
482 }
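/*
 * Hypothetical usage sketch: a tool resolving a sample that hit guest code
 * provided by a hypervisor test program (see the comment above
 * findnew_guest_code()) might do something like:
 *
 *	struct thread *thread = machine__findnew_guest_code(guest_machine, sample_pid);
 *
 *	if (thread != NULL) {
 *		... resolve the sample ip against thread->maps ...
 *		thread__put(thread);
 *	}
 *
 * The names guest_machine and sample_pid are placeholders for whatever the
 * caller already has at hand.
 */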
483 
484 void machines__process_guests(struct machines *machines,
485 			      machine__process_t process, void *data)
486 {
487 	struct rb_node *nd;
488 
489 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
490 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
491 		process(pos, data);
492 	}
493 }
494 
495 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
496 {
497 	struct rb_node *node;
498 	struct machine *machine;
499 
500 	machines->host.id_hdr_size = id_hdr_size;
501 
502 	for (node = rb_first_cached(&machines->guests); node;
503 	     node = rb_next(node)) {
504 		machine = rb_entry(node, struct machine, rb_node);
505 		machine->id_hdr_size = id_hdr_size;
506 	}
507 
508 	return;
509 }
510 
511 static void machine__update_thread_pid(struct machine *machine,
512 				       struct thread *th, pid_t pid)
513 {
514 	struct thread *leader;
515 
516 	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
517 		return;
518 
519 	th->pid_ = pid;
520 
521 	if (th->pid_ == th->tid)
522 		return;
523 
524 	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
525 	if (!leader)
526 		goto out_err;
527 
528 	if (!leader->maps)
529 		leader->maps = maps__new(machine);
530 
531 	if (!leader->maps)
532 		goto out_err;
533 
534 	if (th->maps == leader->maps)
535 		goto out_put;
536 
537 	if (th->maps) {
538 		/*
539 		 * Maps are created from MMAP events which provide the pid and
540 		 * tid.  Consequently there never should be any maps on a thread
541 		 * with an unknown pid.  Just print an error if there are.
542 		 */
543 		if (!maps__empty(th->maps))
544 			pr_err("Discarding thread maps for %d:%d\n",
545 			       th->pid_, th->tid);
546 		maps__put(th->maps);
547 	}
548 
549 	th->maps = maps__get(leader->maps);
550 out_put:
551 	thread__put(leader);
552 	return;
553 out_err:
554 	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
555 	goto out_put;
556 }
557 
558 /*
559  * Front-end cache - TID lookups come in blocks,
560  * so most of the time we don't have to look up
561  * the full rbtree:
562  */
563 static struct thread*
564 __threads__get_last_match(struct threads *threads, struct machine *machine,
565 			  int pid, int tid)
566 {
567 	struct thread *th;
568 
569 	th = threads->last_match;
570 	if (th != NULL) {
571 		if (th->tid == tid) {
572 			machine__update_thread_pid(machine, th, pid);
573 			return thread__get(th);
574 		}
575 
576 		threads->last_match = NULL;
577 	}
578 
579 	return NULL;
580 }
581 
582 static struct thread*
583 threads__get_last_match(struct threads *threads, struct machine *machine,
584 			int pid, int tid)
585 {
586 	struct thread *th = NULL;
587 
588 	if (perf_singlethreaded)
589 		th = __threads__get_last_match(threads, machine, pid, tid);
590 
591 	return th;
592 }
593 
594 static void
595 __threads__set_last_match(struct threads *threads, struct thread *th)
596 {
597 	threads->last_match = th;
598 }
599 
600 static void
601 threads__set_last_match(struct threads *threads, struct thread *th)
602 {
603 	if (perf_singlethreaded)
604 		__threads__set_last_match(threads, th);
605 }
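/*
 * Illustration (hypothetical event stream): samples for one thread tend to
 * arrive in runs, e.g. tids 1234, 1234, 1234, 5678, 5678, ...  Only the
 * first lookup of each run walks the rbtree in
 * ____machine__findnew_thread(); the rest are served straight from
 * threads->last_match, so just the transitions between tids pay the
 * O(log n) cost.  The cache is only used in single threaded mode, as
 * checked above.
 */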
606 
607 /*
608  * Caller must eventually drop thread->refcnt returned with a successful
609  * lookup/new thread inserted.
610  */
611 static struct thread *____machine__findnew_thread(struct machine *machine,
612 						  struct threads *threads,
613 						  pid_t pid, pid_t tid,
614 						  bool create)
615 {
616 	struct rb_node **p = &threads->entries.rb_root.rb_node;
617 	struct rb_node *parent = NULL;
618 	struct thread *th;
619 	bool leftmost = true;
620 
621 	th = threads__get_last_match(threads, machine, pid, tid);
622 	if (th)
623 		return th;
624 
625 	while (*p != NULL) {
626 		parent = *p;
627 		th = rb_entry(parent, struct thread, rb_node);
628 
629 		if (th->tid == tid) {
630 			threads__set_last_match(threads, th);
631 			machine__update_thread_pid(machine, th, pid);
632 			return thread__get(th);
633 		}
634 
635 		if (tid < th->tid)
636 			p = &(*p)->rb_left;
637 		else {
638 			p = &(*p)->rb_right;
639 			leftmost = false;
640 		}
641 	}
642 
643 	if (!create)
644 		return NULL;
645 
646 	th = thread__new(pid, tid);
647 	if (th != NULL) {
648 		rb_link_node(&th->rb_node, parent, p);
649 		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
650 
651 		/*
652 		 * We have to initialize maps separately after rb tree is updated.
653 		 *
654 		 * The reason is that we call machine__findnew_thread
655 		 * within thread__init_maps to find the thread
656 		 * leader and that would screw up the rb tree.
657 		 */
658 		if (thread__init_maps(th, machine)) {
659 			rb_erase_cached(&th->rb_node, &threads->entries);
660 			RB_CLEAR_NODE(&th->rb_node);
661 			thread__put(th);
662 			return NULL;
663 		}
664 		/*
665 		 * It is now in the rbtree, get a ref
666 		 */
667 		thread__get(th);
668 		threads__set_last_match(threads, th);
669 		++threads->nr;
670 	}
671 
672 	return th;
673 }
674 
675 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
676 {
677 	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
678 }
679 
680 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
681 				       pid_t tid)
682 {
683 	struct threads *threads = machine__threads(machine, tid);
684 	struct thread *th;
685 
686 	down_write(&threads->lock);
687 	th = __machine__findnew_thread(machine, pid, tid);
688 	up_write(&threads->lock);
689 	return th;
690 }
691 
692 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
693 				    pid_t tid)
694 {
695 	struct threads *threads = machine__threads(machine, tid);
696 	struct thread *th;
697 
698 	down_read(&threads->lock);
699 	th =  ____machine__findnew_thread(machine, threads, pid, tid, false);
700 	up_read(&threads->lock);
701 	return th;
702 }
703 
704 /*
705  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
706  * So here a single thread is created for that, but actually there is a separate
707  * idle task per cpu, so there should be one 'struct thread' per cpu, but there
708  * is only 1. That causes problems for some tools, requiring workarounds. For
709  * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
710  */
711 struct thread *machine__idle_thread(struct machine *machine)
712 {
713 	struct thread *thread = machine__findnew_thread(machine, 0, 0);
714 
715 	if (!thread || thread__set_comm(thread, "swapper", 0) ||
716 	    thread__set_namespaces(thread, 0, NULL))
717 		pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
718 
719 	return thread;
720 }
721 
722 struct comm *machine__thread_exec_comm(struct machine *machine,
723 				       struct thread *thread)
724 {
725 	if (machine->comm_exec)
726 		return thread__exec_comm(thread);
727 	else
728 		return thread__comm(thread);
729 }
730 
731 int machine__process_comm_event(struct machine *machine, union perf_event *event,
732 				struct perf_sample *sample)
733 {
734 	struct thread *thread = machine__findnew_thread(machine,
735 							event->comm.pid,
736 							event->comm.tid);
737 	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
738 	int err = 0;
739 
740 	if (exec)
741 		machine->comm_exec = true;
742 
743 	if (dump_trace)
744 		perf_event__fprintf_comm(event, stdout);
745 
746 	if (thread == NULL ||
747 	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
748 		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
749 		err = -1;
750 	}
751 
752 	thread__put(thread);
753 
754 	return err;
755 }
756 
757 int machine__process_namespaces_event(struct machine *machine __maybe_unused,
758 				      union perf_event *event,
759 				      struct perf_sample *sample __maybe_unused)
760 {
761 	struct thread *thread = machine__findnew_thread(machine,
762 							event->namespaces.pid,
763 							event->namespaces.tid);
764 	int err = 0;
765 
766 	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
767 		  "\nWARNING: kernel seems to support more namespaces than perf"
768 		  " tool.\nTry updating the perf tool..\n\n");
769 
770 	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
771 		  "\nWARNING: perf tool seems to support more namespaces than"
772 		  " the kernel.\nTry updating the kernel..\n\n");
773 
774 	if (dump_trace)
775 		perf_event__fprintf_namespaces(event, stdout);
776 
777 	if (thread == NULL ||
778 	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
779 		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
780 		err = -1;
781 	}
782 
783 	thread__put(thread);
784 
785 	return err;
786 }
787 
788 int machine__process_cgroup_event(struct machine *machine,
789 				  union perf_event *event,
790 				  struct perf_sample *sample __maybe_unused)
791 {
792 	struct cgroup *cgrp;
793 
794 	if (dump_trace)
795 		perf_event__fprintf_cgroup(event, stdout);
796 
797 	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
798 	if (cgrp == NULL)
799 		return -ENOMEM;
800 
801 	return 0;
802 }
803 
804 int machine__process_lost_event(struct machine *machine __maybe_unused,
805 				union perf_event *event, struct perf_sample *sample __maybe_unused)
806 {
807 	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
808 		    event->lost.id, event->lost.lost);
809 	return 0;
810 }
811 
812 int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
813 					union perf_event *event, struct perf_sample *sample)
814 {
815 	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
816 		    sample->id, event->lost_samples.lost);
817 	return 0;
818 }
819 
820 static struct dso *machine__findnew_module_dso(struct machine *machine,
821 					       struct kmod_path *m,
822 					       const char *filename)
823 {
824 	struct dso *dso;
825 
826 	down_write(&machine->dsos.lock);
827 
828 	dso = __dsos__find(&machine->dsos, m->name, true);
829 	if (!dso) {
830 		dso = __dsos__addnew(&machine->dsos, m->name);
831 		if (dso == NULL)
832 			goto out_unlock;
833 
834 		dso__set_module_info(dso, m, machine);
835 		dso__set_long_name(dso, strdup(filename), true);
836 		dso->kernel = DSO_SPACE__KERNEL;
837 	}
838 
839 	dso__get(dso);
840 out_unlock:
841 	up_write(&machine->dsos.lock);
842 	return dso;
843 }
844 
845 int machine__process_aux_event(struct machine *machine __maybe_unused,
846 			       union perf_event *event)
847 {
848 	if (dump_trace)
849 		perf_event__fprintf_aux(event, stdout);
850 	return 0;
851 }
852 
853 int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
854 					union perf_event *event)
855 {
856 	if (dump_trace)
857 		perf_event__fprintf_itrace_start(event, stdout);
858 	return 0;
859 }
860 
861 int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
862 					    union perf_event *event)
863 {
864 	if (dump_trace)
865 		perf_event__fprintf_aux_output_hw_id(event, stdout);
866 	return 0;
867 }
868 
869 int machine__process_switch_event(struct machine *machine __maybe_unused,
870 				  union perf_event *event)
871 {
872 	if (dump_trace)
873 		perf_event__fprintf_switch(event, stdout);
874 	return 0;
875 }
876 
877 static int machine__process_ksymbol_register(struct machine *machine,
878 					     union perf_event *event,
879 					     struct perf_sample *sample __maybe_unused)
880 {
881 	struct symbol *sym;
882 	struct dso *dso;
883 	struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
884 	bool put_map = false;
885 	int err = 0;
886 
887 	if (!map) {
888 		dso = dso__new(event->ksymbol.name);
889 
890 		if (!dso) {
891 			err = -ENOMEM;
892 			goto out;
893 		}
894 		dso->kernel = DSO_SPACE__KERNEL;
895 		map = map__new2(0, dso);
896 		dso__put(dso);
897 		if (!map) {
898 			err = -ENOMEM;
899 			goto out;
900 		}
901 		/*
902 		 * The inserted map has a get on it, we need to put to release
903 		 * the reference count here, but do it after all accesses are
904 		 * done.
905 		 */
906 		put_map = true;
907 		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
908 			dso->binary_type = DSO_BINARY_TYPE__OOL;
909 			dso->data.file_size = event->ksymbol.len;
910 			dso__set_loaded(dso);
911 		}
912 
913 		map__set_start(map, event->ksymbol.addr);
914 		map__set_end(map, map__start(map) + event->ksymbol.len);
915 		err = maps__insert(machine__kernel_maps(machine), map);
916 		if (err) {
917 			err = -ENOMEM;
918 			goto out;
919 		}
920 
921 		dso__set_loaded(dso);
922 
923 		if (is_bpf_image(event->ksymbol.name)) {
924 			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
925 			dso__set_long_name(dso, "", false);
926 		}
927 	} else {
928 		dso = map__dso(map);
929 	}
930 
931 	sym = symbol__new(map__map_ip(map, map__start(map)),
932 			  event->ksymbol.len,
933 			  0, 0, event->ksymbol.name);
934 	if (!sym) {
935 		err = -ENOMEM;
936 		goto out;
937 	}
938 	dso__insert_symbol(dso, sym);
939 out:
940 	if (put_map)
941 		map__put(map);
942 	return err;
943 }
944 
945 static int machine__process_ksymbol_unregister(struct machine *machine,
946 					       union perf_event *event,
947 					       struct perf_sample *sample __maybe_unused)
948 {
949 	struct symbol *sym;
950 	struct map *map;
951 
952 	map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
953 	if (!map)
954 		return 0;
955 
956 	if (RC_CHK_ACCESS(map) != RC_CHK_ACCESS(machine->vmlinux_map))
957 		maps__remove(machine__kernel_maps(machine), map);
958 	else {
959 		struct dso *dso = map__dso(map);
960 
961 		sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
962 		if (sym)
963 			dso__delete_symbol(dso, sym);
964 	}
965 
966 	return 0;
967 }
968 
969 int machine__process_ksymbol(struct machine *machine __maybe_unused,
970 			     union perf_event *event,
971 			     struct perf_sample *sample)
972 {
973 	if (dump_trace)
974 		perf_event__fprintf_ksymbol(event, stdout);
975 
976 	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
977 		return machine__process_ksymbol_unregister(machine, event,
978 							   sample);
979 	return machine__process_ksymbol_register(machine, event, sample);
980 }
981 
982 int machine__process_text_poke(struct machine *machine, union perf_event *event,
983 			       struct perf_sample *sample __maybe_unused)
984 {
985 	struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
986 	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
987 	struct dso *dso = map ? map__dso(map) : NULL;
988 
989 	if (dump_trace)
990 		perf_event__fprintf_text_poke(event, machine, stdout);
991 
992 	if (!event->text_poke.new_len)
993 		return 0;
994 
995 	if (cpumode != PERF_RECORD_MISC_KERNEL) {
996 		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
997 		return 0;
998 	}
999 
1000 	if (dso) {
1001 		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
1002 		int ret;
1003 
1004 		/*
1005 		 * Kernel maps might be changed when loading symbols so loading
1006 		 * must be done prior to using kernel maps.
1007 		 */
1008 		map__load(map);
1009 		ret = dso__data_write_cache_addr(dso, map, machine,
1010 						 event->text_poke.addr,
1011 						 new_bytes,
1012 						 event->text_poke.new_len);
1013 		if (ret != event->text_poke.new_len)
1014 			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
1015 				 event->text_poke.addr);
1016 	} else {
1017 		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
1018 			 event->text_poke.addr);
1019 	}
1020 
1021 	return 0;
1022 }
1023 
1024 static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
1025 					      const char *filename)
1026 {
1027 	struct map *map = NULL;
1028 	struct kmod_path m;
1029 	struct dso *dso;
1030 	int err;
1031 
1032 	if (kmod_path__parse_name(&m, filename))
1033 		return NULL;
1034 
1035 	dso = machine__findnew_module_dso(machine, &m, filename);
1036 	if (dso == NULL)
1037 		goto out;
1038 
1039 	map = map__new2(start, dso);
1040 	if (map == NULL)
1041 		goto out;
1042 
1043 	err = maps__insert(machine__kernel_maps(machine), map);
1044 	/* If maps__insert failed, return NULL. */
1045 	if (err) {
1046 		map__put(map);
1047 		map = NULL;
1048 	}
1049 out:
1050 	/* put the dso here, corresponding to  machine__findnew_module_dso */
1051 	dso__put(dso);
1052 	zfree(&m.name);
1053 	return map;
1054 }
1055 
1056 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
1057 {
1058 	struct rb_node *nd;
1059 	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
1060 
1061 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1062 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1063 		ret += __dsos__fprintf(&pos->dsos.head, fp);
1064 	}
1065 
1066 	return ret;
1067 }
1068 
1069 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
1070 				     bool (skip)(struct dso *dso, int parm), int parm)
1071 {
1072 	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
1073 }
1074 
1075 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
1076 				     bool (skip)(struct dso *dso, int parm), int parm)
1077 {
1078 	struct rb_node *nd;
1079 	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
1080 
1081 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1082 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1083 		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
1084 	}
1085 	return ret;
1086 }
1087 
1088 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
1089 {
1090 	int i;
1091 	size_t printed = 0;
1092 	struct dso *kdso = machine__kernel_dso(machine);
1093 
1094 	if (kdso->has_build_id) {
1095 		char filename[PATH_MAX];
1096 		if (dso__build_id_filename(kdso, filename, sizeof(filename),
1097 					   false))
1098 			printed += fprintf(fp, "[0] %s\n", filename);
1099 	}
1100 
1101 	for (i = 0; i < vmlinux_path__nr_entries; ++i)
1102 		printed += fprintf(fp, "[%d] %s\n",
1103 				   i + kdso->has_build_id, vmlinux_path[i]);
1104 
1105 	return printed;
1106 }
1107 
1108 size_t machine__fprintf(struct machine *machine, FILE *fp)
1109 {
1110 	struct rb_node *nd;
1111 	size_t ret;
1112 	int i;
1113 
1114 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
1115 		struct threads *threads = &machine->threads[i];
1116 
1117 		down_read(&threads->lock);
1118 
1119 		ret = fprintf(fp, "Threads: %u\n", threads->nr);
1120 
1121 		for (nd = rb_first_cached(&threads->entries); nd;
1122 		     nd = rb_next(nd)) {
1123 			struct thread *pos = rb_entry(nd, struct thread, rb_node);
1124 
1125 			ret += thread__fprintf(pos, fp);
1126 		}
1127 
1128 		up_read(&threads->lock);
1129 	}
1130 	return ret;
1131 }
1132 
1133 static struct dso *machine__get_kernel(struct machine *machine)
1134 {
1135 	const char *vmlinux_name = machine->mmap_name;
1136 	struct dso *kernel;
1137 
1138 	if (machine__is_host(machine)) {
1139 		if (symbol_conf.vmlinux_name)
1140 			vmlinux_name = symbol_conf.vmlinux_name;
1141 
1142 		kernel = machine__findnew_kernel(machine, vmlinux_name,
1143 						 "[kernel]", DSO_SPACE__KERNEL);
1144 	} else {
1145 		if (symbol_conf.default_guest_vmlinux_name)
1146 			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
1147 
1148 		kernel = machine__findnew_kernel(machine, vmlinux_name,
1149 						 "[guest.kernel]",
1150 						 DSO_SPACE__KERNEL_GUEST);
1151 	}
1152 
1153 	if (kernel != NULL && (!kernel->has_build_id))
1154 		dso__read_running_kernel_build_id(kernel, machine);
1155 
1156 	return kernel;
1157 }
1158 
1159 void machine__get_kallsyms_filename(struct machine *machine, char *buf,
1160 				    size_t bufsz)
1161 {
1162 	if (machine__is_default_guest(machine))
1163 		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
1164 	else
1165 		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
1166 }
1167 
1168 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
1169 
1170 /* Figure out the start address of kernel map from /proc/kallsyms.
1171  * Returns the name of the start symbol in *symbol_name. Pass in NULL as
1172  * symbol_name if it's not that important.
1173  */
1174 static int machine__get_running_kernel_start(struct machine *machine,
1175 					     const char **symbol_name,
1176 					     u64 *start, u64 *end)
1177 {
1178 	char filename[PATH_MAX];
1179 	int i, err = -1;
1180 	const char *name;
1181 	u64 addr = 0;
1182 
1183 	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
1184 
1185 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1186 		return 0;
1187 
1188 	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
1189 		err = kallsyms__get_function_start(filename, name, &addr);
1190 		if (!err)
1191 			break;
1192 	}
1193 
1194 	if (err)
1195 		return -1;
1196 
1197 	if (symbol_name)
1198 		*symbol_name = name;
1199 
1200 	*start = addr;
1201 
1202 	err = kallsyms__get_function_start(filename, "_etext", &addr);
1203 	if (!err)
1204 		*end = addr;
1205 
1206 	return 0;
1207 }
1208 
1209 int machine__create_extra_kernel_map(struct machine *machine,
1210 				     struct dso *kernel,
1211 				     struct extra_kernel_map *xm)
1212 {
1213 	struct kmap *kmap;
1214 	struct map *map;
1215 	int err;
1216 
1217 	map = map__new2(xm->start, kernel);
1218 	if (!map)
1219 		return -ENOMEM;
1220 
1221 	map__set_end(map, xm->end);
1222 	map__set_pgoff(map, xm->pgoff);
1223 
1224 	kmap = map__kmap(map);
1225 
1226 	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1227 
1228 	err = maps__insert(machine__kernel_maps(machine), map);
1229 
1230 	if (!err) {
1231 		pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1232 			kmap->name, map__start(map), map__end(map));
1233 	}
1234 
1235 	map__put(map);
1236 
1237 	return err;
1238 }
1239 
1240 static u64 find_entry_trampoline(struct dso *dso)
1241 {
1242 	/* Duplicates are removed so lookup all aliases */
1243 	const char *syms[] = {
1244 		"_entry_trampoline",
1245 		"__entry_trampoline_start",
1246 		"entry_SYSCALL_64_trampoline",
1247 	};
1248 	struct symbol *sym = dso__first_symbol(dso);
1249 	unsigned int i;
1250 
1251 	for (; sym; sym = dso__next_symbol(sym)) {
1252 		if (sym->binding != STB_GLOBAL)
1253 			continue;
1254 		for (i = 0; i < ARRAY_SIZE(syms); i++) {
1255 			if (!strcmp(sym->name, syms[i]))
1256 				return sym->start;
1257 		}
1258 	}
1259 
1260 	return 0;
1261 }
1262 
1263 /*
1264  * These values can be used for kernels that do not have symbols for the entry
1265  * trampolines in kallsyms.
1266  */
1267 #define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
1268 #define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
1269 #define X86_64_ENTRY_TRAMPOLINE		0x6000
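/*
 * Worked example of the layout assumed above: with these constants CPU 0's
 * trampoline page is expected at 0xfffffe0000000000 + 0x6000 =
 * 0xfffffe0000006000, and CPU 1's one cpu_entry_area further on at
 * 0xfffffe0000006000 + 0x2c000 = 0xfffffe0000032000.
 */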
1270 
1271 /* Map x86_64 PTI entry trampolines */
1272 int machine__map_x86_64_entry_trampolines(struct machine *machine,
1273 					  struct dso *kernel)
1274 {
1275 	struct maps *kmaps = machine__kernel_maps(machine);
1276 	int nr_cpus_avail, cpu;
1277 	bool found = false;
1278 	struct map_rb_node *rb_node;
1279 	u64 pgoff;
1280 
1281 	/*
1282 	 * In the vmlinux case, pgoff is a virtual address which must now be
1283 	 * mapped to a vmlinux offset.
1284 	 */
1285 	maps__for_each_entry(kmaps, rb_node) {
1286 		struct map *dest_map, *map = rb_node->map;
1287 		struct kmap *kmap = __map__kmap(map);
1288 
1289 		if (!kmap || !is_entry_trampoline(kmap->name))
1290 			continue;
1291 
1292 		dest_map = maps__find(kmaps, map__pgoff(map));
1293 		if (dest_map != map)
1294 			map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
1295 		found = true;
1296 	}
1297 	if (found || machine->trampolines_mapped)
1298 		return 0;
1299 
1300 	pgoff = find_entry_trampoline(kernel);
1301 	if (!pgoff)
1302 		return 0;
1303 
1304 	nr_cpus_avail = machine__nr_cpus_avail(machine);
1305 
1306 	/* Add a 1 page map for each CPU's entry trampoline */
1307 	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1308 		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1309 			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1310 			 X86_64_ENTRY_TRAMPOLINE;
1311 		struct extra_kernel_map xm = {
1312 			.start = va,
1313 			.end   = va + page_size,
1314 			.pgoff = pgoff,
1315 		};
1316 
1317 		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1318 
1319 		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1320 			return -1;
1321 	}
1322 
1323 	machine->trampolines_mapped = nr_cpus_avail;
1324 
1325 	return 0;
1326 }
1327 
1328 int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1329 					     struct dso *kernel __maybe_unused)
1330 {
1331 	return 0;
1332 }
1333 
1334 static int
1335 __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1336 {
1337 	/* In case of renewing the kernel map, destroy the previous one */
1338 	machine__destroy_kernel_maps(machine);
1339 
1340 	map__put(machine->vmlinux_map);
1341 	machine->vmlinux_map = map__new2(0, kernel);
1342 	if (machine->vmlinux_map == NULL)
1343 		return -ENOMEM;
1344 
1345 	map__set_map_ip(machine->vmlinux_map, identity__map_ip);
1346 	map__set_unmap_ip(machine->vmlinux_map, identity__map_ip);
1347 	return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
1348 }
1349 
1350 void machine__destroy_kernel_maps(struct machine *machine)
1351 {
1352 	struct kmap *kmap;
1353 	struct map *map = machine__kernel_map(machine);
1354 
1355 	if (map == NULL)
1356 		return;
1357 
1358 	kmap = map__kmap(map);
1359 	maps__remove(machine__kernel_maps(machine), map);
1360 	if (kmap && kmap->ref_reloc_sym) {
1361 		zfree((char **)&kmap->ref_reloc_sym->name);
1362 		zfree(&kmap->ref_reloc_sym);
1363 	}
1364 
1365 	map__zput(machine->vmlinux_map);
1366 }
1367 
1368 int machines__create_guest_kernel_maps(struct machines *machines)
1369 {
1370 	int ret = 0;
1371 	struct dirent **namelist = NULL;
1372 	int i, items = 0;
1373 	char path[PATH_MAX];
1374 	pid_t pid;
1375 	char *endp;
1376 
1377 	if (symbol_conf.default_guest_vmlinux_name ||
1378 	    symbol_conf.default_guest_modules ||
1379 	    symbol_conf.default_guest_kallsyms) {
1380 		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1381 	}
1382 
1383 	if (symbol_conf.guestmount) {
1384 		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1385 		if (items <= 0)
1386 			return -ENOENT;
1387 		for (i = 0; i < items; i++) {
1388 			if (!isdigit(namelist[i]->d_name[0])) {
1389 				/* Filter out . and .. */
1390 				continue;
1391 			}
1392 			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1393 			if ((*endp != '\0') ||
1394 			    (endp == namelist[i]->d_name) ||
1395 			    (errno == ERANGE)) {
1396 				pr_debug("invalid directory (%s). Skipping.\n",
1397 					 namelist[i]->d_name);
1398 				continue;
1399 			}
1400 			sprintf(path, "%s/%s/proc/kallsyms",
1401 				symbol_conf.guestmount,
1402 				namelist[i]->d_name);
1403 			ret = access(path, R_OK);
1404 			if (ret) {
1405 				pr_debug("Can't access file %s\n", path);
1406 				goto failure;
1407 			}
1408 			machines__create_kernel_maps(machines, pid);
1409 		}
1410 failure:
1411 		free(namelist);
1412 	}
1413 
1414 	return ret;
1415 }
1416 
1417 void machines__destroy_kernel_maps(struct machines *machines)
1418 {
1419 	struct rb_node *next = rb_first_cached(&machines->guests);
1420 
1421 	machine__destroy_kernel_maps(&machines->host);
1422 
1423 	while (next) {
1424 		struct machine *pos = rb_entry(next, struct machine, rb_node);
1425 
1426 		next = rb_next(&pos->rb_node);
1427 		rb_erase_cached(&pos->rb_node, &machines->guests);
1428 		machine__delete(pos);
1429 	}
1430 }
1431 
1432 int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1433 {
1434 	struct machine *machine = machines__findnew(machines, pid);
1435 
1436 	if (machine == NULL)
1437 		return -1;
1438 
1439 	return machine__create_kernel_maps(machine);
1440 }
1441 
1442 int machine__load_kallsyms(struct machine *machine, const char *filename)
1443 {
1444 	struct map *map = machine__kernel_map(machine);
1445 	struct dso *dso = map__dso(map);
1446 	int ret = __dso__load_kallsyms(dso, filename, map, true);
1447 
1448 	if (ret > 0) {
1449 		dso__set_loaded(dso);
1450 		/*
1451 		 * Since /proc/kallsyms will have multiple sections for the
1452 		 * kernel, with modules between them, fixup the end of all
1453 		 * sections.
1454 		 */
1455 		maps__fixup_end(machine__kernel_maps(machine));
1456 	}
1457 
1458 	return ret;
1459 }
1460 
1461 int machine__load_vmlinux_path(struct machine *machine)
1462 {
1463 	struct map *map = machine__kernel_map(machine);
1464 	struct dso *dso = map__dso(map);
1465 	int ret = dso__load_vmlinux_path(dso, map);
1466 
1467 	if (ret > 0)
1468 		dso__set_loaded(dso);
1469 
1470 	return ret;
1471 }
1472 
1473 static char *get_kernel_version(const char *root_dir)
1474 {
1475 	char version[PATH_MAX];
1476 	FILE *file;
1477 	char *name, *tmp;
1478 	const char *prefix = "Linux version ";
1479 
1480 	sprintf(version, "%s/proc/version", root_dir);
1481 	file = fopen(version, "r");
1482 	if (!file)
1483 		return NULL;
1484 
1485 	tmp = fgets(version, sizeof(version), file);
1486 	fclose(file);
1487 	if (!tmp)
1488 		return NULL;
1489 
1490 	name = strstr(version, prefix);
1491 	if (!name)
1492 		return NULL;
1493 	name += strlen(prefix);
1494 	tmp = strchr(name, ' ');
1495 	if (tmp)
1496 		*tmp = '\0';
1497 
1498 	return strdup(name);
1499 }
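/*
 * For example, if <root_dir>/proc/version starts with
 * "Linux version 6.4.0-rc2 (gcc ...)", this returns the strdup'ed string
 * "6.4.0-rc2", which machine__set_modules_path() below turns into
 * "<root_dir>/lib/modules/6.4.0-rc2".  The version string here is only an
 * illustration.
 */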
1500 
1501 static bool is_kmod_dso(struct dso *dso)
1502 {
1503 	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1504 	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1505 }
1506 
1507 static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1508 {
1509 	char *long_name;
1510 	struct dso *dso;
1511 	struct map *map = maps__find_by_name(maps, m->name);
1512 
1513 	if (map == NULL)
1514 		return 0;
1515 
1516 	long_name = strdup(path);
1517 	if (long_name == NULL)
1518 		return -ENOMEM;
1519 
1520 	dso = map__dso(map);
1521 	dso__set_long_name(dso, long_name, true);
1522 	dso__kernel_module_get_build_id(dso, "");
1523 
1524 	/*
1525 	 * The full name can reveal kmod compression to us, so
1526 	 * update the symtab_type if needed.
1527 	 */
1528 	if (m->comp && is_kmod_dso(dso)) {
1529 		dso->symtab_type++;
1530 		dso->comp = m->comp;
1531 	}
1532 
1533 	return 0;
1534 }
1535 
1536 static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1537 {
1538 	struct dirent *dent;
1539 	DIR *dir = opendir(dir_name);
1540 	int ret = 0;
1541 
1542 	if (!dir) {
1543 		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1544 		return -1;
1545 	}
1546 
1547 	while ((dent = readdir(dir)) != NULL) {
1548 		char path[PATH_MAX];
1549 		struct stat st;
1550 
1551 		/* sshfs might return bad dent->d_type, so we have to stat */
1552 		path__join(path, sizeof(path), dir_name, dent->d_name);
1553 		if (stat(path, &st))
1554 			continue;
1555 
1556 		if (S_ISDIR(st.st_mode)) {
1557 			if (!strcmp(dent->d_name, ".") ||
1558 			    !strcmp(dent->d_name, ".."))
1559 				continue;
1560 
1561 			/* Do not follow top-level source and build symlinks */
1562 			if (depth == 0) {
1563 				if (!strcmp(dent->d_name, "source") ||
1564 				    !strcmp(dent->d_name, "build"))
1565 					continue;
1566 			}
1567 
1568 			ret = maps__set_modules_path_dir(maps, path, depth + 1);
1569 			if (ret < 0)
1570 				goto out;
1571 		} else {
1572 			struct kmod_path m;
1573 
1574 			ret = kmod_path__parse_name(&m, dent->d_name);
1575 			if (ret)
1576 				goto out;
1577 
1578 			if (m.kmod)
1579 				ret = maps__set_module_path(maps, path, &m);
1580 
1581 			zfree(&m.name);
1582 
1583 			if (ret)
1584 				goto out;
1585 		}
1586 	}
1587 
1588 out:
1589 	closedir(dir);
1590 	return ret;
1591 }
1592 
1593 static int machine__set_modules_path(struct machine *machine)
1594 {
1595 	char *version;
1596 	char modules_path[PATH_MAX];
1597 
1598 	version = get_kernel_version(machine->root_dir);
1599 	if (!version)
1600 		return -1;
1601 
1602 	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1603 		 machine->root_dir, version);
1604 	free(version);
1605 
1606 	return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
1607 }
1608 int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1609 				u64 *size __maybe_unused,
1610 				const char *name __maybe_unused)
1611 {
1612 	return 0;
1613 }
1614 
1615 static int machine__create_module(void *arg, const char *name, u64 start,
1616 				  u64 size)
1617 {
1618 	struct machine *machine = arg;
1619 	struct map *map;
1620 
1621 	if (arch__fix_module_text_start(&start, &size, name) < 0)
1622 		return -1;
1623 
1624 	map = machine__addnew_module_map(machine, start, name);
1625 	if (map == NULL)
1626 		return -1;
1627 	map__set_end(map, start + size);
1628 
1629 	dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
1630 	map__put(map);
1631 	return 0;
1632 }
1633 
1634 static int machine__create_modules(struct machine *machine)
1635 {
1636 	const char *modules;
1637 	char path[PATH_MAX];
1638 
1639 	if (machine__is_default_guest(machine)) {
1640 		modules = symbol_conf.default_guest_modules;
1641 	} else {
1642 		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1643 		modules = path;
1644 	}
1645 
1646 	if (symbol__restricted_filename(modules, "/proc/modules"))
1647 		return -1;
1648 
1649 	if (modules__parse(modules, machine, machine__create_module))
1650 		return -1;
1651 
1652 	if (!machine__set_modules_path(machine))
1653 		return 0;
1654 
1655 	pr_debug("Problems setting modules path maps, continuing anyway...\n");
1656 
1657 	return 0;
1658 }
1659 
1660 static void machine__set_kernel_mmap(struct machine *machine,
1661 				     u64 start, u64 end)
1662 {
1663 	map__set_start(machine->vmlinux_map, start);
1664 	map__set_end(machine->vmlinux_map, end);
1665 	/*
1666 	 * Be a bit paranoid here, some perf.data file came with
1667 	 * a zero sized synthesized MMAP event for the kernel.
1668 	 */
1669 	if (start == 0 && end == 0)
1670 		map__set_end(machine->vmlinux_map, ~0ULL);
1671 }
1672 
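/*
 * The kernel map is keyed by its start address in the kmaps tree, so rather
 * than editing it in place, take an extra reference, update the range, then
 * remove the old entry and insert the updated one so the tree stays
 * correctly ordered.
 */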
1673 static int machine__update_kernel_mmap(struct machine *machine,
1674 				     u64 start, u64 end)
1675 {
1676 	struct map *orig, *updated;
1677 	int err;
1678 
1679 	orig = machine->vmlinux_map;
1680 	updated = map__get(orig);
1681 
1682 	machine->vmlinux_map = updated;
1683 	machine__set_kernel_mmap(machine, start, end);
1684 	maps__remove(machine__kernel_maps(machine), orig);
1685 	err = maps__insert(machine__kernel_maps(machine), updated);
1686 	map__put(orig);
1687 
1688 	return err;
1689 }
1690 
1691 int machine__create_kernel_maps(struct machine *machine)
1692 {
1693 	struct dso *kernel = machine__get_kernel(machine);
1694 	const char *name = NULL;
1695 	u64 start = 0, end = ~0ULL;
1696 	int ret;
1697 
1698 	if (kernel == NULL)
1699 		return -1;
1700 
1701 	ret = __machine__create_kernel_maps(machine, kernel);
1702 	if (ret < 0)
1703 		goto out_put;
1704 
1705 	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1706 		if (machine__is_host(machine))
1707 			pr_debug("Problems creating module maps, "
1708 				 "continuing anyway...\n");
1709 		else
1710 			pr_debug("Problems creating module maps for guest %d, "
1711 				 "continuing anyway...\n", machine->pid);
1712 	}
1713 
1714 	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1715 		if (name &&
1716 		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1717 			machine__destroy_kernel_maps(machine);
1718 			ret = -1;
1719 			goto out_put;
1720 		}
1721 
1722 		/*
1723 		 * we have a real start address now, so re-order the kmaps
1724 		 * assume it's the last in the kmaps
1725 		 */
1726 		ret = machine__update_kernel_mmap(machine, start, end);
1727 		if (ret < 0)
1728 			goto out_put;
1729 	}
1730 
1731 	if (machine__create_extra_kernel_maps(machine, kernel))
1732 		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1733 
1734 	if (end == ~0ULL) {
1735 		/* update end address of the kernel map using adjacent module address */
1736 		struct map_rb_node *rb_node = maps__find_node(machine__kernel_maps(machine),
1737 							machine__kernel_map(machine));
1738 		struct map_rb_node *next = map_rb_node__next(rb_node);
1739 
1740 		if (next)
1741 			machine__set_kernel_mmap(machine, start, map__start(next->map));
1742 	}
1743 
1744 out_put:
1745 	dso__put(kernel);
1746 	return ret;
1747 }
1748 
1749 static bool machine__uses_kcore(struct machine *machine)
1750 {
1751 	struct dso *dso;
1752 
1753 	list_for_each_entry(dso, &machine->dsos.head, node) {
1754 		if (dso__is_kcore(dso))
1755 			return true;
1756 	}
1757 
1758 	return false;
1759 }
1760 
1761 static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1762 					     struct extra_kernel_map *xm)
1763 {
1764 	return machine__is(machine, "x86_64") &&
1765 	       is_entry_trampoline(xm->name);
1766 }
1767 
1768 static int machine__process_extra_kernel_map(struct machine *machine,
1769 					     struct extra_kernel_map *xm)
1770 {
1771 	struct dso *kernel = machine__kernel_dso(machine);
1772 
1773 	if (kernel == NULL)
1774 		return -1;
1775 
1776 	return machine__create_extra_kernel_map(machine, kernel, xm);
1777 }
1778 
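/*
 * Handle an MMAP/MMAP2 event recorded with a kernel cpumode: paths and
 * bracketed names other than the kernel itself become module maps, while a
 * name matching machine->mmap_name (or the host "[kernel.kallsyms]" for
 * guest data injected into a host perf.data file) re-creates the kernel map
 * and, when pgoff is usable, records the ref_reloc symbol.
 */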
1779 static int machine__process_kernel_mmap_event(struct machine *machine,
1780 					      struct extra_kernel_map *xm,
1781 					      struct build_id *bid)
1782 {
1783 	struct map *map;
1784 	enum dso_space_type dso_space;
1785 	bool is_kernel_mmap;
1786 	const char *mmap_name = machine->mmap_name;
1787 
1788 	/* If we have maps from kcore then we do not need or want any others */
1789 	if (machine__uses_kcore(machine))
1790 		return 0;
1791 
1792 	if (machine__is_host(machine))
1793 		dso_space = DSO_SPACE__KERNEL;
1794 	else
1795 		dso_space = DSO_SPACE__KERNEL_GUEST;
1796 
1797 	is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1798 	if (!is_kernel_mmap && !machine__is_host(machine)) {
1799 		/*
1800 		 * If the event was recorded inside the guest and injected into
1801 		 * the host perf.data file, then it will match a host mmap_name,
1802 		 * so try that - see machine__set_mmap_name().
1803 		 */
1804 		mmap_name = "[kernel.kallsyms]";
1805 		is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1806 	}
1807 	if (xm->name[0] == '/' ||
1808 	    (!is_kernel_mmap && xm->name[0] == '[')) {
1809 		map = machine__addnew_module_map(machine, xm->start,
1810 						 xm->name);
1811 		if (map == NULL)
1812 			goto out_problem;
1813 
1814 		map__set_end(map, map__start(map) + xm->end - xm->start);
1815 
1816 		if (build_id__is_defined(bid))
1817 			dso__set_build_id(map__dso(map), bid);
1818 
1819 	} else if (is_kernel_mmap) {
1820 		const char *symbol_name = xm->name + strlen(mmap_name);
1821 		/*
1822 		 * Should be there already, from the build-id table in
1823 		 * the header.
1824 		 */
1825 		struct dso *kernel = NULL;
1826 		struct dso *dso;
1827 
1828 		down_read(&machine->dsos.lock);
1829 
1830 		list_for_each_entry(dso, &machine->dsos.head, node) {
1831 
1832 			/*
1833 			 * The cpumode passed to is_kernel_module is not the
1834 			 * cpumode of *this* event. If we insist on passing
1835 			 * correct cpumode to is_kernel_module, we should
1836 			 * record the cpumode when we adding this dso to the
1837 			 * linked list.
1838 			 *
1839 			 * However we don't really need passing correct
1840 			 * cpumode.  We know the correct cpumode must be kernel
1841 			 * mode (if not, we should not link it onto kernel_dsos
1842 			 * list).
1843 			 *
1844 			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1845 			 * is_kernel_module() treats it as a kernel cpumode.
1846 			 */
1847 
1848 			if (!dso->kernel ||
1849 			    is_kernel_module(dso->long_name,
1850 					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1851 				continue;
1852 
1853 
1854 			kernel = dso;
1855 			break;
1856 		}
1857 
1858 		up_read(&machine->dsos.lock);
1859 
1860 		if (kernel == NULL)
1861 			kernel = machine__findnew_dso(machine, machine->mmap_name);
1862 		if (kernel == NULL)
1863 			goto out_problem;
1864 
1865 		kernel->kernel = dso_space;
1866 		if (__machine__create_kernel_maps(machine, kernel) < 0) {
1867 			dso__put(kernel);
1868 			goto out_problem;
1869 		}
1870 
1871 		if (strstr(kernel->long_name, "vmlinux"))
1872 			dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1873 
1874 		if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
1875 			dso__put(kernel);
1876 			goto out_problem;
1877 		}
1878 
1879 		if (build_id__is_defined(bid))
1880 			dso__set_build_id(kernel, bid);
1881 
1882 		/*
1883 		 * Avoid using a zero address (kptr_restrict) for the ref reloc
1884 		 * symbol. Effectively having zero here means that at record
1885 		 * time /proc/sys/kernel/kptr_restrict was non zero.
1886 		 */
1887 		if (xm->pgoff != 0) {
1888 			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1889 							symbol_name,
1890 							xm->pgoff);
1891 		}
1892 
1893 		if (machine__is_default_guest(machine)) {
1894 			/*
1895 			 * preload dso of guest kernel and modules
1896 			 */
1897 			dso__load(kernel, machine__kernel_map(machine));
1898 		}
1899 	} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
1900 		return machine__process_extra_kernel_map(machine, xm);
1901 	}
1902 	return 0;
1903 out_problem:
1904 	return -1;
1905 }
1906 
1907 int machine__process_mmap2_event(struct machine *machine,
1908 				 union perf_event *event,
1909 				 struct perf_sample *sample)
1910 {
1911 	struct thread *thread;
1912 	struct map *map;
1913 	struct dso_id dso_id = {
1914 		.maj = event->mmap2.maj,
1915 		.min = event->mmap2.min,
1916 		.ino = event->mmap2.ino,
1917 		.ino_generation = event->mmap2.ino_generation,
1918 	};
1919 	struct build_id __bid, *bid = NULL;
1920 	int ret = 0;
1921 
1922 	if (dump_trace)
1923 		perf_event__fprintf_mmap2(event, stdout);
1924 
1925 	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1926 		bid = &__bid;
1927 		build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1928 	}
1929 
1930 	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1931 	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1932 		struct extra_kernel_map xm = {
1933 			.start = event->mmap2.start,
1934 			.end   = event->mmap2.start + event->mmap2.len,
1935 			.pgoff = event->mmap2.pgoff,
1936 		};
1937 
1938 		strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1939 		ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1940 		if (ret < 0)
1941 			goto out_problem;
1942 		return 0;
1943 	}
1944 
1945 	thread = machine__findnew_thread(machine, event->mmap2.pid,
1946 					event->mmap2.tid);
1947 	if (thread == NULL)
1948 		goto out_problem;
1949 
1950 	map = map__new(machine, event->mmap2.start,
1951 			event->mmap2.len, event->mmap2.pgoff,
1952 			&dso_id, event->mmap2.prot,
1953 			event->mmap2.flags, bid,
1954 			event->mmap2.filename, thread);
1955 
1956 	if (map == NULL)
1957 		goto out_problem_map;
1958 
1959 	ret = thread__insert_map(thread, map);
1960 	if (ret)
1961 		goto out_problem_insert;
1962 
1963 	thread__put(thread);
1964 	map__put(map);
1965 	return 0;
1966 
1967 out_problem_insert:
1968 	map__put(map);
1969 out_problem_map:
1970 	thread__put(thread);
1971 out_problem:
1972 	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1973 	return 0;
1974 }
1975 
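/*
 * Handle a PERF_RECORD_MMAP event: the legacy variant of MMAP2 without
 * device/inode/build-id information. Kernel mmaps are forwarded to
 * machine__process_kernel_mmap_event(); user mmaps are treated as
 * PROT_EXEC mappings unless PERF_RECORD_MISC_MMAP_DATA is set.
 */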
1976 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1977 				struct perf_sample *sample)
1978 {
1979 	struct thread *thread;
1980 	struct map *map;
1981 	u32 prot = 0;
1982 	int ret = 0;
1983 
1984 	if (dump_trace)
1985 		perf_event__fprintf_mmap(event, stdout);
1986 
1987 	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1988 	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1989 		struct extra_kernel_map xm = {
1990 			.start = event->mmap.start,
1991 			.end   = event->mmap.start + event->mmap.len,
1992 			.pgoff = event->mmap.pgoff,
1993 		};
1994 
1995 		strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1996 		ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
1997 		if (ret < 0)
1998 			goto out_problem;
1999 		return 0;
2000 	}
2001 
2002 	thread = machine__findnew_thread(machine, event->mmap.pid,
2003 					 event->mmap.tid);
2004 	if (thread == NULL)
2005 		goto out_problem;
2006 
2007 	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
2008 		prot = PROT_EXEC;
2009 
2010 	map = map__new(machine, event->mmap.start,
2011 			event->mmap.len, event->mmap.pgoff,
2012 			NULL, prot, 0, NULL, event->mmap.filename, thread);
2013 
2014 	if (map == NULL)
2015 		goto out_problem_map;
2016 
2017 	ret = thread__insert_map(thread, map);
2018 	if (ret)
2019 		goto out_problem_insert;
2020 
2021 	thread__put(thread);
2022 	map__put(map);
2023 	return 0;
2024 
2025 out_problem_insert:
2026 	map__put(map);
2027 out_problem_map:
2028 	thread__put(thread);
2029 out_problem:
2030 	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
2031 	return 0;
2032 }
2033 
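/*
 * Unlink a thread from the machine's rb tree and move it to the dead
 * list; the final reference drop (thread__put()) below may free it.
 * 'lock' selects whether threads->lock is taken here or is already held
 * by the caller.
 */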
2034 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
2035 {
2036 	struct threads *threads = machine__threads(machine, th->tid);
2037 
2038 	if (threads->last_match == th)
2039 		threads__set_last_match(threads, NULL);
2040 
2041 	if (lock)
2042 		down_write(&threads->lock);
2043 
2044 	BUG_ON(refcount_read(&th->refcnt) == 0);
2045 
2046 	rb_erase_cached(&th->rb_node, &threads->entries);
2047 	RB_CLEAR_NODE(&th->rb_node);
2048 	--threads->nr;
2049 	/*
2050 	 * Move it first to the dead_threads list, then drop the reference.
2051 	 * If this is the last reference, the thread__delete destructor
2052 	 * will be called and we will remove it from the dead_threads list.
2053 	 */
2054 	list_add_tail(&th->node, &threads->dead);
2055 
2056 	/*
2057 	 * We need to do the put here because if this is the last refcount,
2058 	 * then we will be touching the threads->dead head when removing the
2059 	 * thread.
2060 	 */
2061 	thread__put(th);
2062 
2063 	if (lock)
2064 		up_write(&threads->lock);
2065 }
2066 
2067 void machine__remove_thread(struct machine *machine, struct thread *th)
2068 {
2069 	return __machine__remove_thread(machine, th, true);
2070 }
2071 
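/*
 * Handle a PERF_RECORD_FORK event: (re)create the child thread and clone
 * the parent's maps into it, unless the event carries
 * PERF_RECORD_MISC_FORK_EXEC (used internally for synthesized events), in
 * which case the map clone is elided (see the comment below).
 */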
2072 int machine__process_fork_event(struct machine *machine, union perf_event *event,
2073 				struct perf_sample *sample)
2074 {
2075 	struct thread *thread = machine__find_thread(machine,
2076 						     event->fork.pid,
2077 						     event->fork.tid);
2078 	struct thread *parent = machine__findnew_thread(machine,
2079 							event->fork.ppid,
2080 							event->fork.ptid);
2081 	bool do_maps_clone = true;
2082 	int err = 0;
2083 
2084 	if (dump_trace)
2085 		perf_event__fprintf_task(event, stdout);
2086 
2087 	/*
2088 	 * There may be an existing thread that is not actually the parent,
2089 	 * either because we are processing events out of order, or because the
2090 	 * (fork) event that would have removed the thread was lost. Assume the
2091 	 * latter case and continue on as best we can.
2092 	 */
2093 	if (parent->pid_ != (pid_t)event->fork.ppid) {
2094 		dump_printf("removing erroneous parent thread %d/%d\n",
2095 			    parent->pid_, parent->tid);
2096 		machine__remove_thread(machine, parent);
2097 		thread__put(parent);
2098 		parent = machine__findnew_thread(machine, event->fork.ppid,
2099 						 event->fork.ptid);
2100 	}
2101 
2102 	/* if a thread currently exists for the thread id remove it */
2103 	/* If a thread currently exists for the thread id, remove it. */
2104 		machine__remove_thread(machine, thread);
2105 		thread__put(thread);
2106 	}
2107 
2108 	thread = machine__findnew_thread(machine, event->fork.pid,
2109 					 event->fork.tid);
2110 	/*
2111 	 * When synthesizing FORK events, we are trying to create thread
2112 	 * objects for the already running tasks on the machine.
2113 	 *
2114 	 * Normally, for a kernel FORK event, we want to clone the parent's
2115 	 * maps because that is what the kernel just did.
2116 	 *
2117 	 * But when synthesizing, this should not be done.  If we do, we end up
2118 	 * with overlapping maps as we process the synthesized MMAP2 events that
2119 	 * get delivered shortly thereafter.
2120 	 *
2121 	 * Use the FORK event misc flags in an internal way to signal this
2122 	 * situation, so we can elide the map clone when appropriate.
2123 	 */
2124 	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
2125 		do_maps_clone = false;
2126 
2127 	if (thread == NULL || parent == NULL ||
2128 	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
2129 		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
2130 		err = -1;
2131 	}
2132 	thread__put(thread);
2133 	thread__put(parent);
2134 
2135 	return err;
2136 }
2137 
2138 int machine__process_exit_event(struct machine *machine, union perf_event *event,
2139 				struct perf_sample *sample __maybe_unused)
2140 {
2141 	struct thread *thread = machine__find_thread(machine,
2142 						     event->fork.pid,
2143 						     event->fork.tid);
2144 
2145 	if (dump_trace)
2146 		perf_event__fprintf_task(event, stdout);
2147 
2148 	if (thread != NULL) {
2149 		thread__exited(thread);
2150 		thread__put(thread);
2151 	}
2152 
2153 	return 0;
2154 }
2155 
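/*
 * Dispatch a single perf event to the machine__process_*() handler that
 * matches its header type; unknown types return -1.
 */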
2156 int machine__process_event(struct machine *machine, union perf_event *event,
2157 			   struct perf_sample *sample)
2158 {
2159 	int ret;
2160 
2161 	switch (event->header.type) {
2162 	case PERF_RECORD_COMM:
2163 		ret = machine__process_comm_event(machine, event, sample); break;
2164 	case PERF_RECORD_MMAP:
2165 		ret = machine__process_mmap_event(machine, event, sample); break;
2166 	case PERF_RECORD_NAMESPACES:
2167 		ret = machine__process_namespaces_event(machine, event, sample); break;
2168 	case PERF_RECORD_CGROUP:
2169 		ret = machine__process_cgroup_event(machine, event, sample); break;
2170 	case PERF_RECORD_MMAP2:
2171 		ret = machine__process_mmap2_event(machine, event, sample); break;
2172 	case PERF_RECORD_FORK:
2173 		ret = machine__process_fork_event(machine, event, sample); break;
2174 	case PERF_RECORD_EXIT:
2175 		ret = machine__process_exit_event(machine, event, sample); break;
2176 	case PERF_RECORD_LOST:
2177 		ret = machine__process_lost_event(machine, event, sample); break;
2178 	case PERF_RECORD_AUX:
2179 		ret = machine__process_aux_event(machine, event); break;
2180 	case PERF_RECORD_ITRACE_START:
2181 		ret = machine__process_itrace_start_event(machine, event); break;
2182 	case PERF_RECORD_LOST_SAMPLES:
2183 		ret = machine__process_lost_samples_event(machine, event, sample); break;
2184 	case PERF_RECORD_SWITCH:
2185 	case PERF_RECORD_SWITCH_CPU_WIDE:
2186 		ret = machine__process_switch_event(machine, event); break;
2187 	case PERF_RECORD_KSYMBOL:
2188 		ret = machine__process_ksymbol(machine, event, sample); break;
2189 	case PERF_RECORD_BPF_EVENT:
2190 		ret = machine__process_bpf(machine, event, sample); break;
2191 	case PERF_RECORD_TEXT_POKE:
2192 		ret = machine__process_text_poke(machine, event, sample); break;
2193 	case PERF_RECORD_AUX_OUTPUT_HW_ID:
2194 		ret = machine__process_aux_output_hw_id_event(machine, event); break;
2195 	default:
2196 		ret = -1;
2197 		break;
2198 	}
2199 
2200 	return ret;
2201 }
2202 
2203 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
2204 {
2205 	if (!regexec(regex, sym->name, 0, NULL, 0))
2206 		return true;
2207 	return false;
2208 }
2209 
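/*
 * Resolve a branch-stack address into an addr_map_symbol by probing each
 * cpumode in turn (see thread__find_cpumode_addr_location() below).
 */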
2210 static void ip__resolve_ams(struct thread *thread,
2211 			    struct addr_map_symbol *ams,
2212 			    u64 ip)
2213 {
2214 	struct addr_location al;
2215 
2216 	memset(&al, 0, sizeof(al));
2217 	/*
2218 	 * We cannot use the header.misc hint to determine whether a
2219 	 * branch stack address is user, kernel, guest, hypervisor.
2220 	 * Branches may straddle the kernel/user/hypervisor boundaries.
2221 	 * Thus, we have to try each cpumode consecutively until we find a
2222 	 * match, or else the symbol remains unknown.
2223 	 */
2224 	thread__find_cpumode_addr_location(thread, ip, &al);
2225 
2226 	ams->addr = ip;
2227 	ams->al_addr = al.addr;
2228 	ams->al_level = al.level;
2229 	ams->ms.maps = al.maps;
2230 	ams->ms.sym = al.sym;
2231 	ams->ms.map = al.map;
2232 	ams->phys_addr = 0;
2233 	ams->data_page_size = 0;
2234 }
2235 
2236 static void ip__resolve_data(struct thread *thread,
2237 			     u8 m, struct addr_map_symbol *ams,
2238 			     u64 addr, u64 phys_addr, u64 daddr_page_size)
2239 {
2240 	struct addr_location al;
2241 
2242 	memset(&al, 0, sizeof(al));
2243 
2244 	thread__find_symbol(thread, m, addr, &al);
2245 
2246 	ams->addr = addr;
2247 	ams->al_addr = al.addr;
2248 	ams->al_level = al.level;
2249 	ams->ms.maps = al.maps;
2250 	ams->ms.sym = al.sym;
2251 	ams->ms.map = al.map;
2252 	ams->phys_addr = phys_addr;
2253 	ams->data_page_size = daddr_page_size;
2254 }
2255 
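/*
 * Build a mem_info for a memory access sample: the instruction address is
 * resolved via ip__resolve_ams() and the data address (plus physical
 * address and data page size) via ip__resolve_data().
 */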
2256 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2257 				     struct addr_location *al)
2258 {
2259 	struct mem_info *mi = mem_info__new();
2260 
2261 	if (!mi)
2262 		return NULL;
2263 
2264 	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2265 	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2266 			 sample->addr, sample->phys_addr,
2267 			 sample->data_page_size);
2268 	mi->data_src.val = sample->data_src;
2269 
2270 	return mi;
2271 }
2272 
2273 static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2274 {
2275 	struct map *map = ms->map;
2276 	char *srcline = NULL;
2277 	struct dso *dso;
2278 
2279 	if (!map || callchain_param.key == CCKEY_FUNCTION)
2280 		return srcline;
2281 
2282 	dso = map__dso(map);
2283 	srcline = srcline__tree_find(&dso->srclines, ip);
2284 	if (!srcline) {
2285 		bool show_sym = false;
2286 		bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2287 
2288 		srcline = get_srcline(dso, map__rip_2objdump(map, ip),
2289 				      ms->sym, show_sym, show_addr, ip);
2290 		srcline__tree_insert(&dso->srclines, ip, srcline);
2291 	}
2292 
2293 	return srcline;
2294 }
2295 
2296 struct iterations {
2297 	int nr_loop_iter;
2298 	u64 cycles;
2299 };
2300 
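/*
 * Resolve one callchain entry and append it to the cursor. Context
 * markers (PERF_CONTEXT_*) only update *cpumode and are not appended;
 * an invalid context resets the cursor and returns 1.
 */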
2301 static int add_callchain_ip(struct thread *thread,
2302 			    struct callchain_cursor *cursor,
2303 			    struct symbol **parent,
2304 			    struct addr_location *root_al,
2305 			    u8 *cpumode,
2306 			    u64 ip,
2307 			    bool branch,
2308 			    struct branch_flags *flags,
2309 			    struct iterations *iter,
2310 			    u64 branch_from)
2311 {
2312 	struct map_symbol ms;
2313 	struct addr_location al;
2314 	int nr_loop_iter = 0, err;
2315 	u64 iter_cycles = 0;
2316 	const char *srcline = NULL;
2317 
2318 	al.filtered = 0;
2319 	al.sym = NULL;
2320 	al.srcline = NULL;
2321 	if (!cpumode) {
2322 		thread__find_cpumode_addr_location(thread, ip, &al);
2323 	} else {
2324 		if (ip >= PERF_CONTEXT_MAX) {
2325 			switch (ip) {
2326 			case PERF_CONTEXT_HV:
2327 				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
2328 				break;
2329 			case PERF_CONTEXT_KERNEL:
2330 				*cpumode = PERF_RECORD_MISC_KERNEL;
2331 				break;
2332 			case PERF_CONTEXT_USER:
2333 				*cpumode = PERF_RECORD_MISC_USER;
2334 				break;
2335 			default:
2336 				pr_debug("invalid callchain context: "
2337 					 "%"PRId64"\n", (s64) ip);
2338 				/*
2339 				 * It seems the callchain is corrupted.
2340 				 * Discard all.
2341 				 */
2342 				callchain_cursor_reset(cursor);
2343 				return 1;
2344 			}
2345 			return 0;
2346 		}
2347 		thread__find_symbol(thread, *cpumode, ip, &al);
2348 	}
2349 
2350 	if (al.sym != NULL) {
2351 		if (perf_hpp_list.parent && !*parent &&
2352 		    symbol__match_regex(al.sym, &parent_regex))
2353 			*parent = al.sym;
2354 		else if (have_ignore_callees && root_al &&
2355 		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
2356 			/* Treat this symbol as the root, forgetting its callees. */
2358 			*root_al = al;
2359 			callchain_cursor_reset(cursor);
2360 		}
2361 	}
2362 
2363 	if (symbol_conf.hide_unresolved && al.sym == NULL)
2364 		return 0;
2365 
2366 	if (iter) {
2367 		nr_loop_iter = iter->nr_loop_iter;
2368 		iter_cycles = iter->cycles;
2369 	}
2370 
2371 	ms.maps = al.maps;
2372 	ms.map = al.map;
2373 	ms.sym = al.sym;
2374 
2375 	if (!branch && append_inlines(cursor, &ms, ip) == 0)
2376 		return 0;
2377 
2378 	srcline = callchain_srcline(&ms, al.addr);
2379 	err = callchain_cursor_append(cursor, ip, &ms,
2380 				      branch, flags, nr_loop_iter,
2381 				      iter_cycles, branch_from, srcline);
2382 	map__put(al.map);
2383 	return err;
2384 }
2385 
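/*
 * Resolve every entry of the sample's branch stack into a branch_info
 * array (from/to addresses plus flags). The caller owns the returned
 * allocation.
 */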
2386 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2387 					   struct addr_location *al)
2388 {
2389 	unsigned int i;
2390 	const struct branch_stack *bs = sample->branch_stack;
2391 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2392 	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2393 
2394 	if (!bi)
2395 		return NULL;
2396 
2397 	for (i = 0; i < bs->nr; i++) {
2398 		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2399 		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2400 		bi[i].flags = entries[i].flags;
2401 	}
2402 	return bi;
2403 }
2404 
2405 static void save_iterations(struct iterations *iter,
2406 			    struct branch_entry *be, int nr)
2407 {
2408 	int i;
2409 
2410 	iter->nr_loop_iter++;
2411 	iter->cycles = 0;
2412 
2413 	for (i = 0; i < nr; i++)
2414 		iter->cycles += be[i].flags.cycles;
2415 }
2416 
2417 #define CHASHSZ 127
2418 #define CHASHBITS 7
2419 #define NO_ENTRY 0xff
2420 
2421 #define PERF_MAX_BRANCH_DEPTH 127
2422 
2423 /* Remove loops. */
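/*
 * Loops are detected by hashing each entry's 'from' address: when the
 * same 'from' shows up again and the entries in between repeat, the
 * repeated run is collapsed into a single occurrence while the iteration
 * count and the cycles accumulated over the removed run are recorded in
 * 'iter'. Hash collisions are not handled, so some loops may be missed.
 */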
2424 static int remove_loops(struct branch_entry *l, int nr,
2425 			struct iterations *iter)
2426 {
2427 	int i, j, off;
2428 	unsigned char chash[CHASHSZ];
2429 
2430 	memset(chash, NO_ENTRY, sizeof(chash));
2431 
2432 	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2433 
2434 	for (i = 0; i < nr; i++) {
2435 		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2436 
2437 		/* no collision handling for now */
2438 		if (chash[h] == NO_ENTRY) {
2439 			chash[h] = i;
2440 		} else if (l[chash[h]].from == l[i].from) {
2441 			bool is_loop = true;
2442 			/* check if it is a real loop */
2443 			off = 0;
2444 			for (j = chash[h]; j < i && i + off < nr; j++, off++)
2445 				if (l[j].from != l[i + off].from) {
2446 					is_loop = false;
2447 					break;
2448 				}
2449 			if (is_loop) {
2450 				j = nr - (i + off);
2451 				if (j > 0) {
2452 					save_iterations(iter + i + off,
2453 						l + i, off);
2454 
2455 					memmove(iter + i, iter + i + off,
2456 						j * sizeof(*iter));
2457 
2458 					memmove(l + i, l + i + off,
2459 						j * sizeof(*l));
2460 				}
2461 
2462 				nr -= off;
2463 			}
2464 		}
2465 	}
2466 	return nr;
2467 }
2468 
2469 static int lbr_callchain_add_kernel_ip(struct thread *thread,
2470 				       struct callchain_cursor *cursor,
2471 				       struct perf_sample *sample,
2472 				       struct symbol **parent,
2473 				       struct addr_location *root_al,
2474 				       u64 branch_from,
2475 				       bool callee, int end)
2476 {
2477 	struct ip_callchain *chain = sample->callchain;
2478 	u8 cpumode = PERF_RECORD_MISC_USER;
2479 	int err, i;
2480 
2481 	if (callee) {
2482 		for (i = 0; i < end + 1; i++) {
2483 			err = add_callchain_ip(thread, cursor, parent,
2484 					       root_al, &cpumode, chain->ips[i],
2485 					       false, NULL, NULL, branch_from);
2486 			if (err)
2487 				return err;
2488 		}
2489 		return 0;
2490 	}
2491 
2492 	for (i = end; i >= 0; i--) {
2493 		err = add_callchain_ip(thread, cursor, parent,
2494 				       root_al, &cpumode, chain->ips[i],
2495 				       false, NULL, NULL, branch_from);
2496 		if (err)
2497 			return err;
2498 	}
2499 
2500 	return 0;
2501 }
2502 
2503 static void save_lbr_cursor_node(struct thread *thread,
2504 				 struct callchain_cursor *cursor,
2505 				 int idx)
2506 {
2507 	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2508 
2509 	if (!lbr_stitch)
2510 		return;
2511 
2512 	if (cursor->pos == cursor->nr) {
2513 		lbr_stitch->prev_lbr_cursor[idx].valid = false;
2514 		return;
2515 	}
2516 
2517 	if (!cursor->curr)
2518 		cursor->curr = cursor->first;
2519 	else
2520 		cursor->curr = cursor->curr->next;
2521 	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2522 	       sizeof(struct callchain_cursor_node));
2523 
2524 	lbr_stitch->prev_lbr_cursor[idx].valid = true;
2525 	cursor->pos++;
2526 }
2527 
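/*
 * Append the LBR entries of the sample to the callchain cursor: for
 * callee order the first entry's 'to' address comes first, followed by
 * each entry's 'from' address; for caller order the walk is reversed.
 * When LBR stitching is enabled, each appended node is also saved in
 * prev_lbr_cursor so the next sample can reuse it.
 */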
2528 static int lbr_callchain_add_lbr_ip(struct thread *thread,
2529 				    struct callchain_cursor *cursor,
2530 				    struct perf_sample *sample,
2531 				    struct symbol **parent,
2532 				    struct addr_location *root_al,
2533 				    u64 *branch_from,
2534 				    bool callee)
2535 {
2536 	struct branch_stack *lbr_stack = sample->branch_stack;
2537 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2538 	u8 cpumode = PERF_RECORD_MISC_USER;
2539 	int lbr_nr = lbr_stack->nr;
2540 	struct branch_flags *flags;
2541 	int err, i;
2542 	u64 ip;
2543 
2544 	/*
2545 	 * curr and pos are not used during a writing session. They are cleared
2546 	 * in callchain_cursor_commit() when the writing session is closed.
2547 	 * Use curr and pos here to track the current cursor node.
2548 	 */
2549 	if (thread->lbr_stitch) {
2550 		cursor->curr = NULL;
2551 		cursor->pos = cursor->nr;
2552 		if (cursor->nr) {
2553 			cursor->curr = cursor->first;
2554 			for (i = 0; i < (int)(cursor->nr - 1); i++)
2555 				cursor->curr = cursor->curr->next;
2556 		}
2557 	}
2558 
2559 	if (callee) {
2560 		/* Add LBR ip from first entries.to */
2561 		ip = entries[0].to;
2562 		flags = &entries[0].flags;
2563 		*branch_from = entries[0].from;
2564 		err = add_callchain_ip(thread, cursor, parent,
2565 				       root_al, &cpumode, ip,
2566 				       true, flags, NULL,
2567 				       *branch_from);
2568 		if (err)
2569 			return err;
2570 
2571 		/*
2572 		 * The number of cursor nodes increases, so move the current
2573 		 * cursor node forward. There is no need to save the cursor node
2574 		 * for entry 0, because it is impossible to stitch the whole LBR
2575 		 * stack of the previous sample to it.
2576 		 */
2577 		if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
2578 			if (!cursor->curr)
2579 				cursor->curr = cursor->first;
2580 			else
2581 				cursor->curr = cursor->curr->next;
2582 			cursor->pos++;
2583 		}
2584 
2585 		/* Add LBR ip from entries.from one by one. */
2586 		for (i = 0; i < lbr_nr; i++) {
2587 			ip = entries[i].from;
2588 			flags = &entries[i].flags;
2589 			err = add_callchain_ip(thread, cursor, parent,
2590 					       root_al, &cpumode, ip,
2591 					       true, flags, NULL,
2592 					       *branch_from);
2593 			if (err)
2594 				return err;
2595 			save_lbr_cursor_node(thread, cursor, i);
2596 		}
2597 		return 0;
2598 	}
2599 
2600 	/* Add LBR ip from entries.from one by one. */
2601 	for (i = lbr_nr - 1; i >= 0; i--) {
2602 		ip = entries[i].from;
2603 		flags = &entries[i].flags;
2604 		err = add_callchain_ip(thread, cursor, parent,
2605 				       root_al, &cpumode, ip,
2606 				       true, flags, NULL,
2607 				       *branch_from);
2608 		if (err)
2609 			return err;
2610 		save_lbr_cursor_node(thread, cursor, i);
2611 	}
2612 
2613 	/* Add LBR ip from first entries.to */
2614 	ip = entries[0].to;
2615 	flags = &entries[0].flags;
2616 	*branch_from = entries[0].from;
2617 	err = add_callchain_ip(thread, cursor, parent,
2618 			       root_al, &cpumode, ip,
2619 			       true, flags, NULL,
2620 			       *branch_from);
2621 	if (err)
2622 		return err;
2623 
2624 	return 0;
2625 }
2626 
2627 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2628 					     struct callchain_cursor *cursor)
2629 {
2630 	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2631 	struct callchain_cursor_node *cnode;
2632 	struct stitch_list *stitch_node;
2633 	int err;
2634 
2635 	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2636 		cnode = &stitch_node->cursor;
2637 
2638 		err = callchain_cursor_append(cursor, cnode->ip,
2639 					      &cnode->ms,
2640 					      cnode->branch,
2641 					      &cnode->branch_flags,
2642 					      cnode->nr_loop_iter,
2643 					      cnode->iter_cycles,
2644 					      cnode->branch_from,
2645 					      cnode->srcline);
2646 		if (err)
2647 			return err;
2648 	}
2649 	return 0;
2650 }
2651 
2652 static struct stitch_list *get_stitch_node(struct thread *thread)
2653 {
2654 	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2655 	struct stitch_list *stitch_node;
2656 
2657 	if (!list_empty(&lbr_stitch->free_lists)) {
2658 		stitch_node = list_first_entry(&lbr_stitch->free_lists,
2659 					       struct stitch_list, node);
2660 		list_del(&stitch_node->node);
2661 
2662 		return stitch_node;
2663 	}
2664 
2665 	return malloc(sizeof(struct stitch_list));
2666 }
2667 
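/*
 * Compare the current sample's LBR stack with the previous one: if the
 * entries at the same physical LBR indices are identical, the older
 * entries that fell off the current stack are copied from
 * prev_lbr_cursor onto lbr_stitch->lists so they can be appended
 * ("stitched") to the callchain later. Returns true when the two stacks
 * share identical entries and stitching can be attempted.
 */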
2668 static bool has_stitched_lbr(struct thread *thread,
2669 			     struct perf_sample *cur,
2670 			     struct perf_sample *prev,
2671 			     unsigned int max_lbr,
2672 			     bool callee)
2673 {
2674 	struct branch_stack *cur_stack = cur->branch_stack;
2675 	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2676 	struct branch_stack *prev_stack = prev->branch_stack;
2677 	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2678 	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2679 	int i, j, nr_identical_branches = 0;
2680 	struct stitch_list *stitch_node;
2681 	u64 cur_base, distance;
2682 
2683 	if (!cur_stack || !prev_stack)
2684 		return false;
2685 
2686 	/* Find the physical index of the base-of-stack for current sample. */
2687 	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2688 
2689 	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2690 						     (max_lbr + prev_stack->hw_idx - cur_base);
2691 	/* Previous sample has shorter stack. Nothing can be stitched. */
2692 	if (distance + 1 > prev_stack->nr)
2693 		return false;
2694 
2695 	/*
2696 	 * Check if there are identical LBRs between two samples.
2697 	 * Identical LBRs must have the same from, to and flags values. Also,
2698 	 * they have to be saved in the same LBR registers (same physical
2699 	 * index).
2700 	 *
2701 	 * Start from the base-of-stack of the current sample.
2702 	 */
2703 	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2704 		if ((prev_entries[i].from != cur_entries[j].from) ||
2705 		    (prev_entries[i].to != cur_entries[j].to) ||
2706 		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
2707 			break;
2708 		nr_identical_branches++;
2709 	}
2710 
2711 	if (!nr_identical_branches)
2712 		return false;
2713 
2714 	/*
2715 	 * Save the LBRs between the base-of-stack of previous sample
2716 	 * and the base-of-stack of current sample into lbr_stitch->lists.
2717 	 * These LBRs will be stitched later.
2718 	 */
2719 	for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2720 
2721 		if (!lbr_stitch->prev_lbr_cursor[i].valid)
2722 			continue;
2723 
2724 		stitch_node = get_stitch_node(thread);
2725 		if (!stitch_node)
2726 			return false;
2727 
2728 		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2729 		       sizeof(struct callchain_cursor_node));
2730 
2731 		if (callee)
2732 			list_add(&stitch_node->node, &lbr_stitch->lists);
2733 		else
2734 			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2735 	}
2736 
2737 	return true;
2738 }
2739 
2740 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2741 {
2742 	if (thread->lbr_stitch)
2743 		return true;
2744 
2745 	thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
2746 	if (!thread->lbr_stitch)
2747 		goto err;
2748 
2749 	thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2750 	if (!thread->lbr_stitch->prev_lbr_cursor)
2751 		goto free_lbr_stitch;
2752 
2753 	INIT_LIST_HEAD(&thread->lbr_stitch->lists);
2754 	INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
2755 
2756 	return true;
2757 
2758 free_lbr_stitch:
2759 	zfree(&thread->lbr_stitch);
2760 err:
2761 	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2762 	thread->lbr_stitch_enable = false;
2763 	return false;
2764 }
2765 
2766 /*
2767  * Resolve an LBR callstack chain sample.
2768  * Return:
2769  * 1 on success, i.e. LBR callchain information was resolved
2770  * 0 when no LBR callchain information is available, the caller should try fp
2771  * negative error code on other errors.
2772  */
2773 static int resolve_lbr_callchain_sample(struct thread *thread,
2774 					struct callchain_cursor *cursor,
2775 					struct perf_sample *sample,
2776 					struct symbol **parent,
2777 					struct addr_location *root_al,
2778 					int max_stack,
2779 					unsigned int max_lbr)
2780 {
2781 	bool callee = (callchain_param.order == ORDER_CALLEE);
2782 	struct ip_callchain *chain = sample->callchain;
2783 	int chain_nr = min(max_stack, (int)chain->nr), i;
2784 	struct lbr_stitch *lbr_stitch;
2785 	bool stitched_lbr = false;
2786 	u64 branch_from = 0;
2787 	int err;
2788 
2789 	for (i = 0; i < chain_nr; i++) {
2790 		if (chain->ips[i] == PERF_CONTEXT_USER)
2791 			break;
2792 	}
2793 
2794 	/* LBR only affects the user callchain */
2795 	if (i == chain_nr)
2796 		return 0;
2797 
2798 	if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
2799 	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2800 		lbr_stitch = thread->lbr_stitch;
2801 
2802 		stitched_lbr = has_stitched_lbr(thread, sample,
2803 						&lbr_stitch->prev_sample,
2804 						max_lbr, callee);
2805 
2806 		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2807 			list_replace_init(&lbr_stitch->lists,
2808 					  &lbr_stitch->free_lists);
2809 		}
2810 		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2811 	}
2812 
2813 	if (callee) {
2814 		/* Add kernel ip */
2815 		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2816 						  parent, root_al, branch_from,
2817 						  true, i);
2818 		if (err)
2819 			goto error;
2820 
2821 		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2822 					       root_al, &branch_from, true);
2823 		if (err)
2824 			goto error;
2825 
2826 		if (stitched_lbr) {
2827 			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2828 			if (err)
2829 				goto error;
2830 		}
2831 
2832 	} else {
2833 		if (stitched_lbr) {
2834 			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2835 			if (err)
2836 				goto error;
2837 		}
2838 		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2839 					       root_al, &branch_from, false);
2840 		if (err)
2841 			goto error;
2842 
2843 		/* Add kernel ip */
2844 		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2845 						  parent, root_al, branch_from,
2846 						  false, i);
2847 		if (err)
2848 			goto error;
2849 	}
2850 	return 1;
2851 
2852 error:
2853 	return (err < 0) ? err : 0;
2854 }
2855 
2856 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2857 			     struct callchain_cursor *cursor,
2858 			     struct symbol **parent,
2859 			     struct addr_location *root_al,
2860 			     u8 *cpumode, int ent)
2861 {
2862 	int err = 0;
2863 
2864 	while (--ent >= 0) {
2865 		u64 ip = chain->ips[ent];
2866 
2867 		if (ip >= PERF_CONTEXT_MAX) {
2868 			err = add_callchain_ip(thread, cursor, parent,
2869 					       root_al, cpumode, ip,
2870 					       false, NULL, NULL, 0);
2871 			break;
2872 		}
2873 	}
2874 	return err;
2875 }
2876 
2877 static u64 get_leaf_frame_caller(struct perf_sample *sample,
2878 		struct thread *thread, int usr_idx)
2879 {
2880 	if (machine__normalized_is(maps__machine(thread->maps), "arm64"))
2881 		return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
2882 	else
2883 		return 0;
2884 }
2885 
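/*
 * Resolve the sample's callchain into the cursor: try the LBR call stack
 * first (if the evsel recorded one), optionally mix in raw branch stack
 * entries when callchain_param.branch_callstack is set, and then walk the
 * kernel-provided ip chain, honouring PERF_CONTEXT_* markers and the
 * architecture's skip index.
 */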
2886 static int thread__resolve_callchain_sample(struct thread *thread,
2887 					    struct callchain_cursor *cursor,
2888 					    struct evsel *evsel,
2889 					    struct perf_sample *sample,
2890 					    struct symbol **parent,
2891 					    struct addr_location *root_al,
2892 					    int max_stack)
2893 {
2894 	struct branch_stack *branch = sample->branch_stack;
2895 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2896 	struct ip_callchain *chain = sample->callchain;
2897 	int chain_nr = 0;
2898 	u8 cpumode = PERF_RECORD_MISC_USER;
2899 	int i, j, err, nr_entries, usr_idx;
2900 	int skip_idx = -1;
2901 	int first_call = 0;
2902 	u64 leaf_frame_caller;
2903 
2904 	if (chain)
2905 		chain_nr = chain->nr;
2906 
2907 	if (evsel__has_branch_callstack(evsel)) {
2908 		struct perf_env *env = evsel__env(evsel);
2909 
2910 		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2911 						   root_al, max_stack,
2912 						   !env ? 0 : env->max_branches);
2913 		if (err)
2914 			return (err < 0) ? err : 0;
2915 	}
2916 
2917 	/*
2918 	 * Based on DWARF debug information, some architectures skip
2919 	 * a callchain entry saved by the kernel.
2920 	 */
2921 	skip_idx = arch_skip_callchain_idx(thread, chain);
2922 
2923 	/*
2924 	 * Add branches to call stack for easier browsing. This gives
2925 	 * more context for a sample than just the callers.
2926 	 *
2927 	 * This uses individual histograms of paths compared to the
2928 	 * aggregated histograms the normal LBR mode uses.
2929 	 *
2930 	 * Limitations for now:
2931 	 * - No extra filters
2932 	 * - No annotations (should annotate somehow)
2933 	 */
2934 
2935 	if (branch && callchain_param.branch_callstack) {
2936 		int nr = min(max_stack, (int)branch->nr);
2937 		struct branch_entry be[nr];
2938 		struct iterations iter[nr];
2939 
2940 		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2941 			pr_warning("corrupted branch chain. skipping...\n");
2942 			goto check_calls;
2943 		}
2944 
2945 		for (i = 0; i < nr; i++) {
2946 			if (callchain_param.order == ORDER_CALLEE) {
2947 				be[i] = entries[i];
2948 
2949 				if (chain == NULL)
2950 					continue;
2951 
2952 				/*
2953 				 * Check for overlap into the callchain.
2954 				 * The return address is one off compared to
2955 				 * the branch entry. To adjust for this
2956 				 * assume the calling instruction is not longer
2957 				 * than 8 bytes.
2958 				 */
2959 				if (i == skip_idx ||
2960 				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
2961 					first_call++;
2962 				else if (be[i].from < chain->ips[first_call] &&
2963 				    be[i].from >= chain->ips[first_call] - 8)
2964 					first_call++;
2965 			} else
2966 				be[i] = entries[branch->nr - i - 1];
2967 		}
2968 
2969 		memset(iter, 0, sizeof(struct iterations) * nr);
2970 		nr = remove_loops(be, nr, iter);
2971 
2972 		for (i = 0; i < nr; i++) {
2973 			err = add_callchain_ip(thread, cursor, parent,
2974 					       root_al,
2975 					       NULL, be[i].to,
2976 					       true, &be[i].flags,
2977 					       NULL, be[i].from);
2978 
2979 			if (!err)
2980 				err = add_callchain_ip(thread, cursor, parent, root_al,
2981 						       NULL, be[i].from,
2982 						       true, &be[i].flags,
2983 						       &iter[i], 0);
2984 			if (err == -EINVAL)
2985 				break;
2986 			if (err)
2987 				return err;
2988 		}
2989 
2990 		if (chain_nr == 0)
2991 			return 0;
2992 
2993 		chain_nr -= nr;
2994 	}
2995 
2996 check_calls:
2997 	if (chain && callchain_param.order != ORDER_CALLEE) {
2998 		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2999 					&cpumode, chain->nr - first_call);
3000 		if (err)
3001 			return (err < 0) ? err : 0;
3002 	}
3003 	for (i = first_call, nr_entries = 0;
3004 	     i < chain_nr && nr_entries < max_stack; i++) {
3005 		u64 ip;
3006 
3007 		if (callchain_param.order == ORDER_CALLEE)
3008 			j = i;
3009 		else
3010 			j = chain->nr - i - 1;
3011 
3012 #ifdef HAVE_SKIP_CALLCHAIN_IDX
3013 		if (j == skip_idx)
3014 			continue;
3015 #endif
3016 		ip = chain->ips[j];
3017 		if (ip < PERF_CONTEXT_MAX)
3018 			++nr_entries;
3019 		else if (callchain_param.order != ORDER_CALLEE) {
3020 			err = find_prev_cpumode(chain, thread, cursor, parent,
3021 						root_al, &cpumode, j);
3022 			if (err)
3023 				return (err < 0) ? err : 0;
3024 			continue;
3025 		}
3026 
3027 		/*
3028 		 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
3029 		 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
3030 		 * the index will be different in order to add the missing frame
3031 		 * at the right place.
3032 		 */
3033 
3034 		usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;
3035 
3036 		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {
3037 
3038 			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
3039 
3040 			/*
3041 			 * Check that leaf_frame_caller != ip so that the same
3042 			 * value is not added twice.
3043 			 */
3044 
3045 			if (leaf_frame_caller && leaf_frame_caller != ip) {
3046 
3047 				err = add_callchain_ip(thread, cursor, parent,
3048 					       root_al, &cpumode, leaf_frame_caller,
3049 					       false, NULL, NULL, 0);
3050 				if (err)
3051 					return (err < 0) ? err : 0;
3052 			}
3053 		}
3054 
3055 		err = add_callchain_ip(thread, cursor, parent,
3056 				       root_al, &cpumode, ip,
3057 				       false, NULL, NULL, 0);
3058 
3059 		if (err)
3060 			return (err < 0) ? err : 0;
3061 	}
3062 
3063 	return 0;
3064 }
3065 
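/*
 * Expand an address into its inlined call frames (when
 * symbol_conf.inline_name is set) and append one cursor node per inline
 * frame. Returns 0 when the inline chain was appended, non-zero when the
 * caller should append the plain entry itself.
 */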
3066 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
3067 {
3068 	struct symbol *sym = ms->sym;
3069 	struct map *map = ms->map;
3070 	struct inline_node *inline_node;
3071 	struct inline_list *ilist;
3072 	struct dso *dso;
3073 	u64 addr;
3074 	int ret = 1;
3075 
3076 	if (!symbol_conf.inline_name || !map || !sym)
3077 		return ret;
3078 
3079 	addr = map__dso_map_ip(map, ip);
3080 	addr = map__rip_2objdump(map, addr);
3081 	dso = map__dso(map);
3082 
3083 	inline_node = inlines__tree_find(&dso->inlined_nodes, addr);
3084 	if (!inline_node) {
3085 		inline_node = dso__parse_addr_inlines(dso, addr, sym);
3086 		if (!inline_node)
3087 			return ret;
3088 		inlines__tree_insert(&dso->inlined_nodes, inline_node);
3089 	}
3090 
3091 	list_for_each_entry(ilist, &inline_node->val, list) {
3092 		struct map_symbol ilist_ms = {
3093 			.maps = ms->maps,
3094 			.map = map,
3095 			.sym = ilist->symbol,
3096 		};
3097 		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
3098 					      NULL, 0, 0, 0, ilist->srcline);
3099 
3100 		if (ret != 0)
3101 			return ret;
3102 	}
3103 
3104 	return ret;
3105 }
3106 
3107 static int unwind_entry(struct unwind_entry *entry, void *arg)
3108 {
3109 	struct callchain_cursor *cursor = arg;
3110 	const char *srcline = NULL;
3111 	u64 addr = entry->ip;
3112 
3113 	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
3114 		return 0;
3115 
3116 	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
3117 		return 0;
3118 
3119 	/*
3120 	 * Convert entry->ip from a virtual address to an offset in
3121 	 * its corresponding binary.
3122 	 */
3123 	if (entry->ms.map)
3124 		addr = map__dso_map_ip(entry->ms.map, entry->ip);
3125 
3126 	srcline = callchain_srcline(&entry->ms, addr);
3127 	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
3128 				       false, NULL, 0, 0, 0, srcline);
3129 }
3130 
3131 static int thread__resolve_callchain_unwind(struct thread *thread,
3132 					    struct callchain_cursor *cursor,
3133 					    struct evsel *evsel,
3134 					    struct perf_sample *sample,
3135 					    int max_stack)
3136 {
3137 	/* Can we do dwarf post unwind? */
3138 	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
3139 	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
3140 		return 0;
3141 
3142 	/* Bail out if nothing was captured. */
3143 	if ((!sample->user_regs.regs) ||
3144 	    (!sample->user_stack.size))
3145 		return 0;
3146 
3147 	return unwind__get_entries(unwind_entry, cursor,
3148 				   thread, sample, max_stack, false);
3149 }
3150 
3151 int thread__resolve_callchain(struct thread *thread,
3152 			      struct callchain_cursor *cursor,
3153 			      struct evsel *evsel,
3154 			      struct perf_sample *sample,
3155 			      struct symbol **parent,
3156 			      struct addr_location *root_al,
3157 			      int max_stack)
3158 {
3159 	int ret = 0;
3160 
3161 	callchain_cursor_reset(cursor);
3162 
3163 	if (callchain_param.order == ORDER_CALLEE) {
3164 		ret = thread__resolve_callchain_sample(thread, cursor,
3165 						       evsel, sample,
3166 						       parent, root_al,
3167 						       max_stack);
3168 		if (ret)
3169 			return ret;
3170 		ret = thread__resolve_callchain_unwind(thread, cursor,
3171 						       evsel, sample,
3172 						       max_stack);
3173 	} else {
3174 		ret = thread__resolve_callchain_unwind(thread, cursor,
3175 						       evsel, sample,
3176 						       max_stack);
3177 		if (ret)
3178 			return ret;
3179 		ret = thread__resolve_callchain_sample(thread, cursor,
3180 						       evsel, sample,
3181 						       parent, root_al,
3182 						       max_stack);
3183 	}
3184 
3185 	return ret;
3186 }
3187 
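/*
 * Iterate over every thread of the machine (both the live rb trees and
 * the dead lists), calling fn(thread, priv) for each; a non-zero return
 * from fn stops the walk and is propagated to the caller.
 *
 * Illustrative use only (count_thread is a hypothetical callback):
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		++*(int *)p;
 *		return 0;
 *	}
 *	...
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */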
3188 int machine__for_each_thread(struct machine *machine,
3189 			     int (*fn)(struct thread *thread, void *p),
3190 			     void *priv)
3191 {
3192 	struct threads *threads;
3193 	struct rb_node *nd;
3194 	struct thread *thread;
3195 	int rc = 0;
3196 	int i;
3197 
3198 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3199 		threads = &machine->threads[i];
3200 		for (nd = rb_first_cached(&threads->entries); nd;
3201 		     nd = rb_next(nd)) {
3202 			thread = rb_entry(nd, struct thread, rb_node);
3203 			rc = fn(thread, priv);
3204 			if (rc != 0)
3205 				return rc;
3206 		}
3207 
3208 		list_for_each_entry(thread, &threads->dead, node) {
3209 			rc = fn(thread, priv);
3210 			if (rc != 0)
3211 				return rc;
3212 		}
3213 	}
3214 	return rc;
3215 }
3216 
3217 int machines__for_each_thread(struct machines *machines,
3218 			      int (*fn)(struct thread *thread, void *p),
3219 			      void *priv)
3220 {
3221 	struct rb_node *nd;
3222 	int rc = 0;
3223 
3224 	rc = machine__for_each_thread(&machines->host, fn, priv);
3225 	if (rc != 0)
3226 		return rc;
3227 
3228 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3229 		struct machine *machine = rb_entry(nd, struct machine, rb_node);
3230 
3231 		rc = machine__for_each_thread(machine, fn, priv);
3232 		if (rc != 0)
3233 			return rc;
3234 	}
3235 	return rc;
3236 }
3237 
3238 pid_t machine__get_current_tid(struct machine *machine, int cpu)
3239 {
3240 	if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
3241 		return -1;
3242 
3243 	return machine->current_tid[cpu];
3244 }
3245 
3246 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
3247 			     pid_t tid)
3248 {
3249 	struct thread *thread;
3250 	const pid_t init_val = -1;
3251 
3252 	if (cpu < 0)
3253 		return -EINVAL;
3254 
3255 	if (realloc_array_as_needed(machine->current_tid,
3256 				    machine->current_tid_sz,
3257 				    (unsigned int)cpu,
3258 				    &init_val))
3259 		return -ENOMEM;
3260 
3261 	machine->current_tid[cpu] = tid;
3262 
3263 	thread = machine__findnew_thread(machine, pid, tid);
3264 	if (!thread)
3265 		return -ENOMEM;
3266 
3267 	thread->cpu = cpu;
3268 	thread__put(thread);
3269 
3270 	return 0;
3271 }
3272 
3273 /*
3274  * Compares the raw arch string. N.B. see instead perf_env__arch() or
3275  * machine__normalized_is() if a normalized arch is needed.
3276  */
3277 bool machine__is(struct machine *machine, const char *arch)
3278 {
3279 	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3280 }
3281 
3282 bool machine__normalized_is(struct machine *machine, const char *arch)
3283 {
3284 	return machine && !strcmp(perf_env__arch(machine->env), arch);
3285 }
3286 
3287 int machine__nr_cpus_avail(struct machine *machine)
3288 {
3289 	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3290 }
3291 
3292 int machine__get_kernel_start(struct machine *machine)
3293 {
3294 	struct map *map = machine__kernel_map(machine);
3295 	int err = 0;
3296 
3297 	/*
3298 	 * The only addresses above 2^63 are kernel addresses of a 64-bit
3299 	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
3300 	 * all addresses including kernel addresses are less than 2^32.  In
3301 	 * that case (32-bit system), if the kernel mapping is unknown, all
3302 	 * addresses will be assumed to be in user space - see
3303 	 * machine__kernel_ip().
3304 	 */
3305 	machine->kernel_start = 1ULL << 63;
3306 	if (map) {
3307 		err = map__load(map);
3308 		/*
3309 		 * On x86_64, PTI entry trampolines are less than the
3310 		 * start of kernel text, but still above 2^63. So leave
3311 		 * kernel_start = 1ULL << 63 for x86_64.
3312 		 */
3313 		if (!err && !machine__is(machine, "x86_64"))
3314 			machine->kernel_start = map__start(map);
3315 	}
3316 	return err;
3317 }
3318 
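/*
 * For machines with a single kernel/user address space, re-derive the
 * cpumode of an address from the address itself (machine__kernel_ip()),
 * keeping the guest/host distinction of the original cpumode.
 */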
3319 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3320 {
3321 	u8 addr_cpumode = cpumode;
3322 	bool kernel_ip;
3323 
3324 	if (!machine->single_address_space)
3325 		goto out;
3326 
3327 	kernel_ip = machine__kernel_ip(machine, addr);
3328 	switch (cpumode) {
3329 	case PERF_RECORD_MISC_KERNEL:
3330 	case PERF_RECORD_MISC_USER:
3331 		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3332 					   PERF_RECORD_MISC_USER;
3333 		break;
3334 	case PERF_RECORD_MISC_GUEST_KERNEL:
3335 	case PERF_RECORD_MISC_GUEST_USER:
3336 		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3337 					   PERF_RECORD_MISC_GUEST_USER;
3338 		break;
3339 	default:
3340 		break;
3341 	}
3342 out:
3343 	return addr_cpumode;
3344 }
3345 
3346 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3347 {
3348 	return dsos__findnew_id(&machine->dsos, filename, id);
3349 }
3350 
3351 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3352 {
3353 	return machine__findnew_dso_id(machine, filename, NULL);
3354 }
3355 
3356 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3357 {
3358 	struct machine *machine = vmachine;
3359 	struct map *map;
3360 	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3361 
3362 	if (sym == NULL)
3363 		return NULL;
3364 
3365 	*modp = __map__is_kmodule(map) ? (char *)map__dso(map)->short_name : NULL;
3366 	*addrp = map__unmap_ip(map, sym->start);
3367 	return sym->name;
3368 }
3369 
3370 int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
3371 {
3372 	struct dso *pos;
3373 	int err = 0;
3374 
3375 	list_for_each_entry(pos, &machine->dsos.head, node) {
3376 		if (fn(pos, machine, priv))
3377 			err = -1;
3378 	}
3379 	return err;
3380 }
3381 
3382 int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
3383 {
3384 	struct maps *maps = machine__kernel_maps(machine);
3385 	struct map_rb_node *pos;
3386 	int err = 0;
3387 
3388 	maps__for_each_entry(maps, pos) {
3389 		err = fn(pos->map, priv);
3390 		if (err != 0)
3391 			break;
3393 	}
3394 	return err;
3395 }
3396 
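/*
 * Report whether 'addr' falls inside the kernel's scheduler or locking
 * text sections. The section boundaries are looked up from kernel
 * symbols on first use and cached; sched.text_start == 1 marks a failed
 * lookup so it is not retried.
 */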
3397 bool machine__is_lock_function(struct machine *machine, u64 addr)
3398 {
3399 	if (!machine->sched.text_start) {
3400 		struct map *kmap;
3401 		struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);
3402 
3403 		if (!sym) {
3404 			/* to avoid retry */
3405 			machine->sched.text_start = 1;
3406 			return false;
3407 		}
3408 
3409 		machine->sched.text_start = map__unmap_ip(kmap, sym->start);
3410 
3411 		/* should not fail from here */
3412 		sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
3413 		machine->sched.text_end = map__unmap_ip(kmap, sym->start);
3414 
3415 		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
3416 		machine->lock.text_start = map__unmap_ip(kmap, sym->start);
3417 
3418 		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
3419 		machine->lock.text_end = map__unmap_ip(kmap, sym->start);
3420 	}
3421 
3422 	/* failed to get kernel symbols */
3423 	if (machine->sched.text_start == 1)
3424 		return false;
3425 
3426 	/* mutex and rwsem functions are in sched text section */
3427 	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
3428 		return true;
3429 
3430 	/* spinlock functions are in lock text section */
3431 	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
3432 		return true;
3433 
3434 	return false;
3435 }
3436