// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
#include <inttypes.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

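/* global handle for the opened lock_contention BPF skeleton */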
static struct lock_contention_bpf *skel;

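/*
 * Open and load the lock_contention BPF skeleton: size its maps for the
 * requested aggregation mode and filters, fill the filter maps, and
 * attach the tracing programs.
 */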
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

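	/*
	 * Map sizes are fixed at load time, so set them while the skeleton
	 * is only open.  Maps unused in the current mode are shrunk to a
	 * single entry.
	 */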
	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);

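	/* figure out how many entries each filter map needs */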
	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);
	if (con->filters->nr_types)
		ntypes = con->filters->nr_types;
	if (con->filters->nr_cgrps)
		ncgrps = con->filters->nr_cgrps;

	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
	}

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
	bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

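	/*
	 * The maps exist after load; fill the filter hash maps now.  Only
	 * the presence of a key matters, the value is a dummy byte.
	 */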
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

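	/* with no explicit target, follow the forked workload, if any */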
	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		skel->bss->has_type = 1;
		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		skel->bss->has_addr = 1;
		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	if (con->filters->nr_cgrps) {
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		for (i = 0; i < con->filters->nr_cgrps; i++)
			bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
	}

	/*
	 * These are written after the skeleton is loaded, so they cannot
	 * live in the rodata section, which is frozen at load time.
	 */
	skel->bss->stack_skip = con->stack_skip;
	skel->bss->aggr_mode = con->aggr_mode;
	skel->bss->needs_callstack = con->save_callstack;
	skel->bss->lock_owner = con->owner;

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		if (cgroup_is_v2("perf_event"))
			skel->bss->use_cgroup_v2 = 1;

		read_all_cgroups(&con->cgroups);
	}

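	/*
	 * collect_lock_syms is not attached to a hook; it is run on demand
	 * via bpf_prog_test_run_opts() in lock_contention_read().
	 */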
	bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

	lock_contention_bpf__attach(skel);
	return 0;
}

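/*
 * The tracing programs check the 'enabled' flag and record nothing while
 * it is zero, so start/stop is just a flag flip.
 */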
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}

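/*
 * Resolve a display name for a contention entry according to the
 * aggregation mode: task comm, lock symbol or well-known lock name,
 * cgroup name, or the first non-lock caller in the stack trace.
 */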
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace, u32 flags)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		int pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0))
				name = task.comm;
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		int lock_fd = bpf_map__fd(skel->maps.lock_syms);

		/* per-process locks set upper bits of the flags */
		if (flags & LCD_F_MMAP_LOCK)
			return "mmap_lock";
		if (flags & LCD_F_SIGHAND_LOCK)
			return "siglock";

		/* global locks with symbols */
		sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
		if (sym)
			return sym->name;

		/* try semi-global locks collected separately */
		if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
			if (flags == LOCK_CLASS_RQLOCK)
				return "rq_lock";
		}

		return "";
	}

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		u64 cgrp_id = key->lock_addr_or_cgroup;
		struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);

		if (cgrp)
			return cgrp->name;

		snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64, cgrp_id);
		return name_buf;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = map__map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}

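/*
 * Drain the BPF lock_stat map into perf's lock_stat entries, resolving
 * names and merging entries that map to the same key.  Returns 0 on
 * success, -1 on allocation failure.
 */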
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key = {};
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->fails.task = skel->bss->task_fail;
	con->fails.stack = skel->bss->stack_fail;
	con->fails.time = skel->bss->time_fail;
	con->fails.data = skel->bss->data_fail;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = __machine__findnew_thread(machine,
								/*pid=*/0,
								/*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}

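	/*
	 * Run collect_lock_syms once on a single CPU to snapshot addresses
	 * of locks that have no kallsyms entry, e.g. per-cpu rq locks, so
	 * they can be named in lock_contention_get_name().
	 */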
	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			.flags = BPF_F_TEST_RUN_ON_CPU,
		);
		int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

		bpf_prog_test_run_opts(prog_fd, &opts);
	}

	/* make sure the kernel map is loaded so symbols can be resolved */
	maps__load_first(machine->kmaps);

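	/* iterate over every key currently in the lock_stat map */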
	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace)) {
				con->nr_filtered += data.count;
				goto next;
			}
		}

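		/* the lock_stat key depends on the aggregation mode */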
		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
		case LOCK_AGGR_CGROUP:
			ls_key = key.lock_addr_or_cgroup;
			break;
		default:
			goto next;
		}

		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace, data.flags);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}

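/*
 * Tear down: disable tracing, destroy the skeleton, and release the
 * cgroup tree collected for LOCK_AGGR_CGROUP.
 */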
int lock_contention_finish(struct lock_contention *con)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	while (!RB_EMPTY_ROOT(&con->cgroups)) {
		struct rb_node *node = rb_first(&con->cgroups);
		struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

		rb_erase(node, &con->cgroups);
		cgroup__put(cgrp);
	}

	return 0;
}