/*	$OpenBSD: kcov.c,v 1.15 2020/10/03 07:35:07 anton Exp $	*/

/*
 * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/event.h>
#include <sys/ioctl.h>
#include <sys/kcov.h>
#include <sys/mman.h>
#include <sys/wait.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct context {
	int c_fd;			/* kcov(4) descriptor */
	int c_mode;			/* KCOV_MODE_TRACE_{PC,CMP} */
	unsigned long c_bufsize;	/* coverage buffer size, in entries */
};

static int test_close(struct context *);
static int test_coverage(struct context *);
static int test_dying(struct context *);
static int test_exec(struct context *);
static int test_fork(struct context *);
static int test_open(struct context *);
static int test_remote(struct context *);
static int test_remote_close(struct context *);
static int test_remote_interrupt(struct context *);
static int test_state(struct context *);

static int check_coverage(const unsigned long *, int, unsigned long, int);
static void do_syscall(void);
static void dump(const unsigned long *, int);
static void kcov_disable(int);
static void kcov_enable(int, int);
static int kcov_open(void);
static __dead void usage(void);

/* Path to this executable, used when re-executing in test_exec(). */
static const char *self;

int
main(int argc, char *argv[])
{
	struct {
		const char *name;
		int (*fn)(struct context *);
		int coverage;		/* test must produce coverage */
	} tests[] = {
		{ "close",		test_close,		0 },
		{ "coverage",		test_coverage,		1 },
		{ "dying",		test_dying,		1 },
		{ "exec",		test_exec,		1 },
		{ "fork",		test_fork,		1 },
		{ "open",		test_open,		0 },
		{ "remote",		test_remote,		1 },
		{ "remote-close",	test_remote_close,	0 },
		{ "remote-interrupt",	test_remote_interrupt,	-1 },
		{ "state",		test_state,		1 },
		{ NULL,			NULL,			0 },
	};
	struct context ctx;
	const char *errstr;
	unsigned long *cover, frac;
	int c, i;
	int error = 0;
	int prereq = 0;
	int reexec = 0;
	int verbose = 0;

	self = argv[0];

	memset(&ctx, 0, sizeof(ctx));
	ctx.c_bufsize = 256 << 10;	/* default buffer size, in entries */

	while ((c = getopt(argc, argv, "b:Em:pv")) != -1)
		switch (c) {
		case 'b':
			/* Shrink the coverage buffer by the given fraction. */
			frac = strtonum(optarg, 1, 100, &errstr);
			if (errstr != NULL)
				errx(1, "buffer size fraction %s", errstr);
			else if (frac > ctx.c_bufsize)
				errx(1, "buffer size fraction too large");
			ctx.c_bufsize /= frac;
			break;
		case 'E':
			reexec = 1;
			break;
		case 'm':
			if (strcmp(optarg, "pc") == 0)
				ctx.c_mode = KCOV_MODE_TRACE_PC;
			else if (strcmp(optarg, "cmp") == 0)
				ctx.c_mode = KCOV_MODE_TRACE_CMP;
			else
				errx(1, "unknown mode %s", optarg);
			break;
		case 'p':
			prereq = 1;
			break;
		case 'v':
			verbose = 1;
			break;
		default:
			usage();
		}
	argc -= optind;
	argv += optind;

	if (prereq) {
		ctx.c_fd = kcov_open();
		close(ctx.c_fd);
		return 0;
	}

	if (reexec) {
		do_syscall();
		return 0;
	}

	if (ctx.c_mode == 0 || argc != 1)
		usage();
	for (i = 0; tests[i].name != NULL; i++)
		if (strcmp(argv[0], tests[i].name) == 0)
			break;
	if (tests[i].name == NULL)
		errx(1, "%s: no such test", argv[0]);

	ctx.c_fd = kcov_open();
	if (ioctl(ctx.c_fd, KIOSETBUFSIZE, &ctx.c_bufsize) == -1)
		err(1, "ioctl: KIOSETBUFSIZE");
	cover = mmap(NULL, ctx.c_bufsize * sizeof(unsigned long),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ctx.c_fd, 0);
	if (cover == MAP_FAILED)
		err(1, "mmap");

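	/*
	 * The first element of the coverage buffer holds the number of
	 * entries recorded so far; clear it before running the test.
	 */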
	*cover = 0;
	error = tests[i].fn(&ctx);
	if (verbose)
		dump(cover, ctx.c_mode);
	if (check_coverage(cover, ctx.c_mode, ctx.c_bufsize, tests[i].coverage))
		error = 1;

	if (munmap(cover, ctx.c_bufsize * sizeof(unsigned long)) == -1)
		err(1, "munmap");
	if (ctx.c_fd != -1) {
		if (close(ctx.c_fd) == -1)
			err(1, "close");
	}

	return error;
}

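/*
 * Example invocations, assuming the regress binary is named kcov as in the
 * usage string below:
 *
 *     $ kcov -m pc coverage
 *     $ kcov -v -m cmp remote
 */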
static __dead void
usage(void)
{
	fprintf(stderr, "usage: kcov [-Epv] [-b fraction] -m mode test\n");
	exit(1);
}

/* Perform a cheap syscall in order to generate some kernel coverage. */
static void
do_syscall(void)
{
	getpid();
}

static int
check_coverage(const unsigned long *cover, int mode, unsigned long maxsize,
    int nonzero)
{
	unsigned long arg1, arg2, exp, i, pc, type;
	int error = 0;

	if (nonzero == -1) {
		return 0;
	} else if (nonzero && cover[0] == 0) {
		warnx("coverage empty (count=0)");
		return 1;
	} else if (!nonzero && cover[0] != 0) {
		warnx("coverage not empty (count=%lu)", *cover);
		return 1;
	} else if (cover[0] >= maxsize) {
		warnx("coverage overflow (count=%lu, max=%lu)",
		    *cover, maxsize);
		return 1;
	}

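	/*
	 * In KCOV_MODE_TRACE_CMP, each comparison is recorded as four
	 * words: the type, both comparison operands and the program
	 * counter. The least significant bit of the type is set if one
	 * of the operands is a compile-time constant; the remaining
	 * bits encode the log2 width of the operands and must not
	 * exceed 3, i.e. 8 bytes.
	 */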
	if (mode == KCOV_MODE_TRACE_CMP) {
		if (*cover * 4 >= maxsize) {
			warnx("coverage cmp overflow (count=%lu, max=%lu)",
			    *cover * 4, maxsize);
			return 1;
		}

		for (i = 0; i < cover[0]; i++) {
			type = cover[i * 4 + 1];
			arg1 = cover[i * 4 + 2];
			arg2 = cover[i * 4 + 3];
			pc = cover[i * 4 + 4];

			exp = type >> 1;
			if (exp <= 3)
				continue;

			warnx("coverage cmp invalid size (i=%lu, exp=%lx, "
			    "const=%ld, arg1=%lu, arg2=%lu, pc=%p)",
			    i, exp, type & 0x1, arg1, arg2, (void *)pc);
			error = 1;
		}
	}

	return error;
}

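/*
 * Print the collected program counters, one per line; in cmp mode, the
 * program counter is the last word of each record. The addresses can be
 * resolved into source locations using for instance addr2line(1) against
 * a kernel with debug symbols.
 */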
static void
dump(const unsigned long *cover, int mode)
{
	unsigned long i;
	int stride = 1;

	if (mode == KCOV_MODE_TRACE_CMP)
		stride = 4;

	for (i = 0; i < cover[0]; i++)
		printf("%p\n", (void *)cover[i * stride + stride]);
}

static int
kcov_open(void)
{
	int fd;

	fd = open("/dev/kcov", O_RDWR);
	if (fd == -1)
		err(1, "open: /dev/kcov");
	return fd;
}

static void
kcov_enable(int fd, int mode)
{
	if (ioctl(fd, KIOENABLE, &mode) == -1)
		err(1, "ioctl: KIOENABLE");
}

static void
kcov_disable(int fd)
{
	if (ioctl(fd, KIODISABLE) == -1)
		err(1, "ioctl: KIODISABLE");
}

/*
 * Close before mmap.
 */
static int
test_close(struct context *ctx)
{
	int fd;

	fd = kcov_open();
	close(fd);
	return 0;
}

/*
 * Coverage of current thread.
 */
static int
test_coverage(struct context *ctx)
{
	kcov_enable(ctx->c_fd, ctx->c_mode);
	do_syscall();
	kcov_disable(ctx->c_fd);
	return 0;
}

static void *
closer(void *arg)
{
	struct context *ctx = arg;

	close(ctx->c_fd);
	return NULL;
}

/*
 * Close the kcov descriptor in another thread during tracing. The descriptor
 * is expected to be gone afterwards, making a second close fail with EBADF.
 */
static int
test_dying(struct context *ctx)
{
	pthread_t th;
	int error;

	kcov_enable(ctx->c_fd, ctx->c_mode);

	if ((error = pthread_create(&th, NULL, closer, (void *)ctx)))
		errc(1, error, "pthread_create");
	if ((error = pthread_join(th, NULL)))
		errc(1, error, "pthread_join");

	error = 0;
	if (close(ctx->c_fd) == -1) {
		if (errno != EBADF)
			err(1, "close");
	} else {
		warnx("expected kcov descriptor to be closed");
		error = 1;
	}
	ctx->c_fd = -1;

	return error;
}

/*
 * Coverage of thread after exec.
 */
static int
test_exec(struct context *ctx)
{
	pid_t pid;
	int status;

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		kcov_enable(ctx->c_fd, ctx->c_mode);
		execlp(self, self, "-E", (char *)NULL);
		_exit(1);
	}

	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");
	if (WIFSIGNALED(status)) {
		warnx("terminated by signal (%d)", WTERMSIG(status));
		return 1;
	} else if (WEXITSTATUS(status) != 0) {
		warnx("non-zero exit (%d)", WEXITSTATUS(status));
		return 1;
	}

	/* Upon exit, the kcov descriptor must be reusable again. */
	kcov_enable(ctx->c_fd, ctx->c_mode);
	kcov_disable(ctx->c_fd);

	return 0;
}

/*
 * Coverage of thread after fork.
 */
static int
test_fork(struct context *ctx)
{
	pid_t pid;
	int status;

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		kcov_enable(ctx->c_fd, ctx->c_mode);
		do_syscall();
		_exit(0);
	}

	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");
	if (WIFSIGNALED(status)) {
		warnx("terminated by signal (%d)", WTERMSIG(status));
		return 1;
	} else if (WEXITSTATUS(status) != 0) {
		warnx("non-zero exit (%d)", WEXITSTATUS(status));
		return 1;
	}

	/* Upon exit, the kcov descriptor must be reusable again. */
	kcov_enable(ctx->c_fd, ctx->c_mode);
	kcov_disable(ctx->c_fd);

	return 0;
}

/*
 * Open /dev/kcov more than once.
 */
static int
test_open(struct context *ctx)
{
	unsigned long *cover;
	int fd;
	int error = 0;

	fd = kcov_open();
	if (ioctl(fd, KIOSETBUFSIZE, &ctx->c_bufsize) == -1)
		err(1, "ioctl: KIOSETBUFSIZE");
	cover = mmap(NULL, ctx->c_bufsize * sizeof(unsigned long),
	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		err(1, "mmap");

	kcov_enable(fd, ctx->c_mode);
	do_syscall();
	kcov_disable(fd);

	error = check_coverage(cover, ctx->c_mode, ctx->c_bufsize, 1);

	if (munmap(cover, ctx->c_bufsize * sizeof(unsigned long)) == -1)
		err(1, "munmap");
	close(fd);

	return error;
}

/*
 * Remote taskq coverage. One reliable way to trigger a task on behalf of the
 * running process is to monitor a kqueue file descriptor using another
 * kqueue: readiness of the inner kqueue is delivered to the outer one by a
 * task running in the kernel.
 */
static int
test_remote(struct context *ctx)
{
	struct kio_remote_attach remote = {
		.subsystem	= KCOV_REMOTE_COMMON,
		.id		= 0,
	};
	struct kevent kev;
	int kq1, kq2, pip[2];
	int x = 0;

	if (ioctl(ctx->c_fd, KIOREMOTEATTACH, &remote) == -1)
		err(1, "ioctl: KIOREMOTEATTACH");
	kcov_enable(ctx->c_fd, ctx->c_mode);

	kq1 = kqueue();
	if (kq1 == -1)
		err(1, "kqueue");
	kq2 = kqueue();
	if (kq2 == -1)
		err(1, "kqueue");
	EV_SET(&kev, kq2, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
	if (kevent(kq1, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");

	if (pipe(pip) == -1)
		err(1, "pipe");

	EV_SET(&kev, pip[0], EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
	if (kevent(kq2, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");
	/* Making the pipe readable in turn makes kq2 readable. */
	(void)write(pip[1], &x, sizeof(x));

	if (kevent(kq1, NULL, 0, &kev, 1, NULL) == -1)
		err(1, "kevent");

	kcov_disable(ctx->c_fd);

	return 0;
}

/*
 * Close with remote coverage enabled.
 */
static int
test_remote_close(struct context *ctx)
{
	struct kio_remote_attach remote = {
		.subsystem	= KCOV_REMOTE_COMMON,
		.id		= 0,
	};

	if (ioctl(ctx->c_fd, KIOREMOTEATTACH, &remote) == -1)
		err(1, "ioctl: KIOREMOTEATTACH");
	kcov_enable(ctx->c_fd, ctx->c_mode);
	if (close(ctx->c_fd) == -1)
		err(1, "close");
	ctx->c_fd = kcov_open();
	return 0;
}

/*
 * Remote interrupt coverage. There's no reliable way to enter a remote section
 * in interrupt context. This test can however be used to examine the coverage
 * collected in interrupt context:
 *
 *     $ until [ -s cov ]; do kcov -v -m pc remote-interrupt >cov; done
 */
static int
test_remote_interrupt(struct context *ctx)
{
	struct kio_remote_attach remote = {
		.subsystem	= KCOV_REMOTE_COMMON,
		.id		= 0,
	};
	int i;

	if (ioctl(ctx->c_fd, KIOREMOTEATTACH, &remote) == -1)
		err(1, "ioctl: KIOREMOTEATTACH");
	kcov_enable(ctx->c_fd, ctx->c_mode);

	for (i = 0; i < 100; i++)
		(void)getpid();

	kcov_disable(ctx->c_fd);

	return 0;
}

/*
 * State transitions. Once the buffer size has been configured, it cannot be
 * changed; enabling is only valid while disabled and vice versa.
 */
static int
test_state(struct context *ctx)
{
	if (ioctl(ctx->c_fd, KIOENABLE, &ctx->c_mode) == -1) {
		warn("KIOSETBUFSIZE -> KIOENABLE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIODISABLE) == -1) {
		warn("KIOENABLE -> KIODISABLE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIOSETBUFSIZE, 0) != -1) {
		warnx("KIOSETBUFSIZE -> KIOSETBUFSIZE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIODISABLE) != -1) {
		warnx("KIOSETBUFSIZE -> KIODISABLE");
		return 1;
	}

	kcov_enable(ctx->c_fd, ctx->c_mode);
	if (ioctl(ctx->c_fd, KIOENABLE, &ctx->c_mode) != -1) {
		warnx("KIOENABLE -> KIOENABLE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIOSETBUFSIZE, 0) != -1) {
		warnx("KIOENABLE -> KIOSETBUFSIZE");
		return 1;
	}
	kcov_disable(ctx->c_fd);

	return 0;
}
565