/* xref: /linux/tools/lib/perf/evsel.c (revision 908fc4c2) */
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <asm/bug.h>

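/*
 * Initialize a caller-allocated evsel: copy the event attributes,
 * record the index and make the evsel its own group leader until it
 * is placed in a group.
 */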
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
	evsel->idx  = idx;
	evsel->leader = evsel;
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, 0);

	return evsel;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}

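/*
 * FD()/MMAP() return the per-(cpu_map_idx, thread) slot in the fd and
 * mmap xyarrays, or NULL when the indices are out of range (MMAP()
 * also returns NULL when no mmap array has been allocated yet).
 */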
#define FD(_evsel, _cpu_map_idx, _thread)				\
	((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
#define MMAP(_evsel, _cpu_map_idx, _thread)				\
	(_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \
		      : NULL)

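/*
 * Allocate the ncpus x nthreads table of event fds and mark every
 * slot as unopened (-1) so later close/read paths can skip slots that
 * were never opened.
 */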
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int idx, thread;

		for (idx = 0; idx < ncpus; idx++) {
			for (thread = 0; thread < nthreads; thread++) {
				int *fd = FD(evsel, idx, thread);

				if (fd)
					*fd = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));

	return evsel->mmap != NULL ? 0 : -ENOMEM;
}

static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, struct perf_cpu cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
}

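/*
 * For a group member, perf_event_open() must be passed the fd of the
 * group leader opened on the same cpu/thread; a leader itself uses
 * group_fd == -1.
 */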
static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
{
	struct perf_evsel *leader = evsel->leader;
	int *fd;

	if (evsel == leader) {
		*group_fd = -1;
		return 0;
	}

	/*
	 * The group leader must already have been opened;
	 * if it hasn't, that's a bug.
	 */
	if (!leader->fd)
		return -ENOTCONN;

	fd = FD(leader, cpu_map_idx, thread);
	if (fd == NULL || *fd == -1)
		return -EBADF;

	*group_fd = *fd;

	return 0;
}

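/*
 * Open one event per (cpu, thread) pair.  A NULL cpu or thread map is
 * replaced by a lazily created single-entry dummy map, so the
 * corresponding perf_event_open() argument becomes -1.  On failure,
 * everything opened so far is closed and a negative errno is
 * returned.
 */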
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	struct perf_cpu cpu;
	int idx, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
		return -ENOMEM;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd, group_fd, *evsel_fd;

			evsel_fd = FD(evsel, idx, thread);
			if (evsel_fd == NULL) {
				err = -EINVAL;
				goto out;
			}

			err = get_group_fd(evsel, idx, thread, &group_fd);
			if (err < 0)
				goto out;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpu, group_fd, 0);

			if (fd < 0) {
				err = -errno;
				goto out;
			}

			*evsel_fd = fd;
		}
	}
out:
	if (err)
		perf_evsel__close(evsel);

	return err;
}

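/*
 * Close every open fd for one cpu map index and reset the slots to -1
 * so the table can be reused or closed again safely.
 */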
static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		int *fd = FD(evsel, cpu_map_idx, thread);

		if (fd && *fd >= 0) {
			close(*fd);
			*fd = -1;
		}
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)
		perf_evsel__close_fd_cpu(evsel, idx);
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu_map_idx);
}

void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int idx, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);

			if (fd == NULL || *fd < 0)
				continue;

			perf_mmap__munmap(MMAP(evsel, idx, thread));
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}

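/*
 * mmap a ring buffer of pages * page_size bytes (plus the header page
 * added by perf_mmap__mmap()) for every open fd.  The mask is used to
 * wrap ring-buffer offsets, so pages is expected to be a power of
 * two.  Fails with -EINVAL if the event is not open or was already
 * mmaped.
 */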
int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, idx, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);
			struct perf_mmap *map;
			struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);

			if (fd == NULL || *fd < 0)
				continue;

			map = MMAP(evsel, idx, thread);
			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, *fd, cpu);
			if (ret) {
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}

void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
{
	int *fd = FD(evsel, cpu_map_idx, thread);

	if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
		return NULL;

	return MMAP(evsel, cpu_map_idx, thread)->base;
}

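/*
 * Compute the size of one read(2) on the event fd.  The kernel's
 * layout (see PERF_FORMAT_* in uapi/linux/perf_event.h) is:
 *
 *	struct read_format {
 *		u64 value;
 *		{ u64 time_enabled; }	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		{ u64 time_running; }	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		{ u64 id; }		// if PERF_FORMAT_ID
 *	};
 *
 * or, if PERF_FORMAT_GROUP is set:
 *
 *	struct read_format {
 *		u64 nr;
 *		{ u64 time_enabled; }
 *		{ u64 time_running; }
 *		{ u64 value;
 *		  { u64 id; } }		cntr[nr];
 *	};
 */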
int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}

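/*
 * Read the current counter value(s).  If the event is mmaped, try the
 * userspace self-read fast path first (perf_mmap__read_self() reads
 * the count via the ring-buffer control page without a syscall); fall
 * back to read(2) on the event fd otherwise.
 */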
int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);
	int *fd = FD(evsel, cpu_map_idx, thread);

	memset(count, 0, sizeof(*count));

	if (fd == NULL || *fd < 0)
		return -EINVAL;

	if (MMAP(evsel, cpu_map_idx, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
		return 0;

	if (readn(*fd, count->values, size) <= 0)
		return -errno;

	return 0;
}

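/*
 * perf_evsel__ioctl() issues an ioctl on one (cpu, thread) fd;
 * perf_evsel__run_ioctl() applies it to every thread of one cpu map
 * index, stopping at the first failure.
 */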
static int perf_evsel__ioctl(struct perf_evsel *evsel, int ioc, void *arg,
			     int cpu_map_idx, int thread)
{
	int *fd = FD(evsel, cpu_map_idx, thread);

	if (fd == NULL || *fd < 0)
		return -1;

	return ioctl(*fd, ioc, arg);
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg,
				 int cpu_map_idx)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread);

		if (err)
			return err;
	}

	return 0;
}

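/*
 * The helpers below drive PERF_EVENT_IOC_ENABLE/DISABLE across all
 * fds, one cpu map index, or one thread, so counting can be started
 * and stopped without reopening the events.
 */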
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
}

int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
{
	struct perf_cpu cpu __maybe_unused;
	int idx;
	int err;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
		err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread);
		if (err)
			return err;
	}

	return 0;
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter, i);
	return err;
}

struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}

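/*
 * Allocate the per-(cpu, thread) sample_id table and the flat array
 * of sample IDs used to map IDs read from the kernel back to this
 * evsel.  System-wide events have one fd per cpu, so nthreads is
 * forced to 1.
 */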
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}

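/*
 * Scale a count from a multiplexed event: the kernel reports how long
 * the event was enabled (ena) and how long it actually ran on the PMU
 * (run); val * ena / run extrapolates the count to the full enabled
 * time.  *pscaled, if supplied, is set to -1 when the event never
 * ran, 1 when the count was scaled, and 0 otherwise.
 */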
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, __s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double)count->val * count->ena / count->run);
		}
	}

	if (pscaled)
		*pscaled = scaled;
}