// SPDX-License-Identifier: GPL-2.0
/*
 * bpf_kwork.c
 *
 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
 */

#include <time.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#include <linux/time64.h>

#include "util/debug.h"
#include "util/kwork.h"

#include <bpf/bpf.h>

#include "util/bpf_skel/kwork_trace.skel.h"

/*
 * This should be kept in sync with "util/bpf_skel/kwork_trace.bpf.c"
 */
#define MAX_KWORKNAME 128

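/*
 * Key identifying one work instance in the BPF maps: the kwork
 * class type, the CPU it ran on, and a class-specific identifier
 * (e.g. an IRQ number or softirq vector).
 */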
struct work_key {
	u32 type;
	u32 cpu;
	u64 id;
};

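/*
 * Per-work statistics aggregated on the BPF side: event count,
 * total time, and the longest single span with its start/end
 * timestamps. Interpreted as runtime or latency depending on the
 * report type (see add_work()).
 */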
struct report_data {
	u64 nr;
	u64 total_time;
	u64 max_time;
	u64 max_time_start;
	u64 max_time_end;
};

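/*
 * Binds a generic kwork_class to its BPF-specific hooks:
 * load_prepare() enables the BPF programs needed for the requested
 * report, get_work_name() resolves a work_key to a human-readable
 * name.
 */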
struct kwork_class_bpf {
	struct kwork_class *class;

	void (*load_prepare)(struct perf_kwork *kwork);
	int  (*get_work_name)(struct work_key *key, char **ret_name);
};

static struct kwork_trace_bpf *skel;

static struct timespec ts_start;
static struct timespec ts_end;

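/*
 * Tracing is gated by the BPF-side "enabled" flag; these two
 * functions toggle it and record the monotonic start/end times that
 * add_work() later stores in kwork->timestart/timeend.
 */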
void perf_kwork__trace_start(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_start);
	skel->bss->enabled = 1;
}

void perf_kwork__trace_finish(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_end);
	skel->bss->enabled = 0;
}

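/*
 * Look up the name the BPF programs recorded for @key in the
 * perf_kwork_names map. On success *ret_name is a strdup()ed string
 * owned by the caller; a missing entry is not an error.
 */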
static int get_work_name_from_map(struct work_key *key, char **ret_name)
{
	char name[MAX_KWORKNAME] = { 0 };
	int fd = bpf_map__fd(skel->maps.perf_kwork_names);

	*ret_name = NULL;

	if (fd < 0) {
		pr_debug("Invalid names map fd\n");
		return 0;
	}

	if ((bpf_map_lookup_elem(fd, key, name) == 0) && (strlen(name) != 0)) {
		*ret_name = strdup(name);
		if (*ret_name == NULL) {
			pr_err("Failed to copy work name\n");
			return -1;
		}
	}

	return 0;
}

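/*
 * Per-class load_prepare() hooks: autoload only the BPF programs
 * required for the requested report type, so unused tracepoint
 * programs are never loaded or attached.
 */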
static void irq_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_irq_handler_entry, true);
		bpf_program__set_autoload(skel->progs.report_irq_handler_exit, true);
	}
}

static struct kwork_class_bpf kwork_irq_bpf = {
	.load_prepare  = irq_load_prepare,
	.get_work_name = get_work_name_from_map,
};

static void softirq_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_softirq_entry, true);
		bpf_program__set_autoload(skel->progs.report_softirq_exit, true);
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		bpf_program__set_autoload(skel->progs.latency_softirq_raise, true);
		bpf_program__set_autoload(skel->progs.latency_softirq_entry, true);
	}
}

static struct kwork_class_bpf kwork_softirq_bpf = {
	.load_prepare  = softirq_load_prepare,
	.get_work_name = get_work_name_from_map,
};

static void workqueue_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_workqueue_execute_start, true);
		bpf_program__set_autoload(skel->progs.report_workqueue_execute_end, true);
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		bpf_program__set_autoload(skel->progs.latency_workqueue_activate_work, true);
		bpf_program__set_autoload(skel->progs.latency_workqueue_execute_start, true);
	}
}

static struct kwork_class_bpf kwork_workqueue_bpf = {
	.load_prepare  = workqueue_load_prepare,
	.get_work_name = get_work_name_from_map,
};

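/*
 * BPF support table indexed by kwork_class_type; a NULL slot means
 * the class cannot be traced with BPF.
 */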
static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ]       = &kwork_irq_bpf,
	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq_bpf,
	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue_bpf,
};

static bool valid_kwork_class_type(enum kwork_class_type type)
{
	return type >= 0 && type < KWORK_CLASS_MAX;
}

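/*
 * Program the optional CPU and name filters into the BPF filter
 * maps and flag their presence to the BPF side. Must run after the
 * skeleton is loaded (map fds are needed) and before tracing is
 * enabled.
 */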
static int setup_filters(struct perf_kwork *kwork)
{
	u8 val = 1;
	int i, nr_cpus, key, fd;
	struct perf_cpu_map *map;

	if (kwork->cpu_list != NULL) {
		fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
		if (fd < 0) {
			pr_debug("Invalid cpu filter fd\n");
			return -1;
		}

		map = perf_cpu_map__new(kwork->cpu_list);
		if (map == NULL) {
			pr_debug("Invalid cpu_list\n");
			return -1;
		}

		nr_cpus = libbpf_num_possible_cpus();
		for (i = 0; i < perf_cpu_map__nr(map); i++) {
			struct perf_cpu cpu = perf_cpu_map__cpu(map, i);

			if (cpu.cpu >= nr_cpus) {
				perf_cpu_map__put(map);
				pr_err("Requested cpu %d too large\n", cpu.cpu);
				return -1;
			}
			bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
		}
		perf_cpu_map__put(map);

		skel->bss->has_cpu_filter = 1;
	}

	if (kwork->profile_name != NULL) {
		if (strlen(kwork->profile_name) >= MAX_KWORKNAME) {
			pr_err("Requested name filter %s too large, limit to %d\n",
			       kwork->profile_name, MAX_KWORKNAME - 1);
			return -1;
		}

		fd = bpf_map__fd(skel->maps.perf_kwork_name_filter);
		if (fd < 0) {
			pr_debug("Invalid name filter fd\n");
			return -1;
		}

		key = 0;
		bpf_map_update_elem(fd, &key, kwork->profile_name, BPF_ANY);

		skel->bss->has_name_filter = 1;
	}

	return 0;
}

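/*
 * Open the skeleton, enable only the programs the requested report
 * needs, load it, program the filters, and attach. On any failure
 * the skeleton is destroyed and -1 is returned.
 */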
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork)
{
	struct bpf_program *prog;
	struct kwork_class *class;
	struct kwork_class_bpf *class_bpf;
	enum kwork_class_type type;

	skel = kwork_trace_bpf__open();
	if (!skel) {
		pr_debug("Failed to open kwork trace skeleton\n");
		return -1;
	}

	/*
	 * Set all programs to non-autoload, then enable only those
	 * required by the configured report type.
	 */
	bpf_object__for_each_program(prog, skel->obj)
		bpf_program__set_autoload(prog, false);

	list_for_each_entry(class, &kwork->class_list, list) {
		type = class->type;
		if (!valid_kwork_class_type(type) ||
		    (kwork_class_bpf_supported_list[type] == NULL)) {
			pr_err("Unsupported bpf trace class %s\n", class->name);
			goto out;
		}

		class_bpf = kwork_class_bpf_supported_list[type];
		class_bpf->class = class;

		if (class_bpf->load_prepare != NULL)
			class_bpf->load_prepare(kwork);
	}

	if (kwork_trace_bpf__load(skel)) {
		pr_debug("Failed to load kwork trace skeleton\n");
		goto out;
	}

	if (setup_filters(kwork))
		goto out;

	if (kwork_trace_bpf__attach(skel)) {
		pr_debug("Failed to attach kwork trace skeleton\n");
		goto out;
	}

	return 0;

out:
	kwork_trace_bpf__destroy(skel);
	return -1;
}

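/*
 * Convert one (work_key, report_data) pair from the BPF report map
 * into a kwork_work entry. The same report_data fields are stored
 * as runtime or latency statistics depending on the report type.
 */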
static int add_work(struct perf_kwork *kwork,
		    struct work_key *key,
		    struct report_data *data)
{
	struct kwork_work *work;
	struct kwork_class_bpf *bpf_trace;
	struct kwork_work tmp = {
		.id = key->id,
		.name = NULL,
		.cpu = key->cpu,
	};
	enum kwork_class_type type = key->type;

	if (!valid_kwork_class_type(type)) {
		pr_debug("Invalid class type %d to add work\n", type);
		return -1;
	}

	bpf_trace = kwork_class_bpf_supported_list[type];
	tmp.class = bpf_trace->class;

	if ((bpf_trace->get_work_name != NULL) &&
	    (bpf_trace->get_work_name(key, &tmp.name)))
		return -1;

	work = perf_kwork_add_work(kwork, tmp.class, &tmp);
	if (work == NULL)
		return -1;

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		work->nr_atoms = data->nr;
		work->total_runtime = data->total_time;
		work->max_runtime = data->max_time;
		work->max_runtime_start = data->max_time_start;
		work->max_runtime_end = data->max_time_end;
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		work->nr_atoms = data->nr;
		work->total_latency = data->total_time;
		work->max_latency = data->max_time;
		work->max_latency_start = data->max_time_start;
		work->max_latency_end = data->max_time_end;
	} else {
		pr_debug("Invalid bpf report type %d\n", kwork->report);
		return -1;
	}

	kwork->timestart = (u64)ts_start.tv_sec * NSEC_PER_SEC + ts_start.tv_nsec;
	kwork->timeend = (u64)ts_end.tv_sec * NSEC_PER_SEC + ts_end.tv_nsec;

	return 0;
}

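/*
 * Walk every entry of the BPF report map with bpf_map_get_next_key()
 * and add the non-empty ones to the kwork session.
 */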
int perf_kwork__report_read_bpf(struct perf_kwork *kwork)
{
	struct report_data data;
	struct work_key key = {
		.type = 0,
		.cpu  = 0,
		.id   = 0,
	};
	struct work_key prev = {
		.type = 0,
		.cpu  = 0,
		.id   = 0,
	};
	int fd = bpf_map__fd(skel->maps.perf_kwork_report);

	if (fd < 0) {
		pr_debug("Invalid report fd\n");
		return -1;
	}

	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		if ((bpf_map_lookup_elem(fd, &key, &data)) != 0) {
			pr_debug("Failed to lookup report elem\n");
			return -1;
		}

		if ((data.nr != 0) && (add_work(kwork, &key, &data) != 0))
			return -1;

		prev = key;
	}
	return 0;
}

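/* Release all BPF resources created by perf_kwork__trace_prepare_bpf(). */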
void perf_kwork__report_cleanup_bpf(void)
{
	kwork_trace_bpf__destroy(skel);
}