// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/string.h>
/* For the CLR_() macros */
#include <pthread.h>

#include <sched.h>
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "record.h"
#include "tests.h"
#include "util/mmap.h"

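/*
 * Find the first CPU in @pid's affinity mask and clear every other bit
 * in *maskp, so the mask can later be used to pin the workload to that
 * single CPU. Returns the CPU number, or -1 on error.
 */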
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR(i, maskp);
		}
	}

	return cpu;
}

int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unused)
{
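	/*
	 * Record options for this test: no event buffering, a 256 page
	 * mmap ring buffer and a target that will be the workload forked
	 * below (uid == UINT_MAX means no uid filtering).
	 */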
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages   = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct evlist *evlist = perf_evlist__new_dummy();
	struct evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_coreutils_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = perf_evlist__new_default();

	if (evlist == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run: this forks it and then waits
	 * for perf_evlist__start_workload() to exec it. It is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = evlist__first(evlist);
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TID);
	perf_evsel__set_sample_bit(evsel, TIME);
	perf_evlist__config(evlist, &opts, NULL);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * Pin the workload to that single CPU, so that we can check
	 * perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events; they will
	 * count just on workload.pid, which will start...
	 */
	evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

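	/*
	 * Drain the mmap ring buffers, validating every event against the
	 * forked workload: timestamps must not go backwards and the cpu,
	 * pid and tid in each sample must match the pinned child.
	 */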
	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			union perf_event *event;
			struct mmap *md;

			md = &evlist->mmap[i];
			if (perf_mmap__read_init(md) < 0)
				continue;

			while ((event = perf_mmap__read_event(md)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose > 0)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose > 0) {
					pr_info("%" PRIu64 " %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_coreutils_mmap)
							found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_mmap__consume(md);
			}
			perf_mmap__read_done(md);
		}

		/*
		 * We don't use poll here because, at least as of 3.1 era
		 * kernels, the PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			evlist__poll(evlist, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
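	/*
	 * The workload should have generated exactly one PERF_RECORD_COMM
	 * (two if a "coreutils" mmap was seen) plus PERF_RECORD_MMAP/MMAP2
	 * events for the command (or coreutils), libc, ld and [vdso].
	 */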
	if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap && !found_coreutils_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}