/*	$OpenBSD: kcov.c,v 1.17 2022/01/11 06:01:15 anton Exp $	*/

/*
 * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/event.h>
#include <sys/ioctl.h>
#include <sys/kcov.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/wait.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

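/*
 * Shared test state: the kcov(4) descriptor under test, the trace mode
 * (KCOV_MODE_TRACE_PC or KCOV_MODE_TRACE_CMP) and the coverage buffer
 * size expressed in unsigned longs.
 */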
struct context {
	int c_fd;
	int c_mode;
	unsigned long c_bufsize;
};

static int test_close(struct context *);
static int test_coverage(struct context *);
static int test_dying(struct context *);
static int test_exec(struct context *);
static int test_fdsend(struct context *);
static int test_fork(struct context *);
static int test_open(struct context *);
static int test_remote(struct context *);
static int test_remote_close(struct context *);
static int test_remote_interrupt(struct context *);
static int test_state(struct context *);

static int check_coverage(const unsigned long *, int, unsigned long, int);
static void do_syscall(void);
static void dump(const unsigned long *, int);
static void kcov_disable(int);
static void kcov_enable(int, int);
static int kcov_open(void);
static __dead void usage(void);

static const char *self;

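/*
 * Example invocation (a trace mode and a test name are required):
 *
 *     $ kcov -m pc coverage
 */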
int
main(int argc, char *argv[])
{
	struct {
		const char *name;
		int (*fn)(struct context *);
		int coverage;		/* test must produce coverage */
	} tests[] = {
		{ "close",		test_close,		0 },
		{ "coverage",		test_coverage,		1 },
		{ "dying",		test_dying,		-1 },
		{ "exec",		test_exec,		1 },
		{ "fdsend",		test_fdsend,		-1 },
		{ "fork",		test_fork,		1 },
		{ "open",		test_open,		0 },
		{ "remote",		test_remote,		1 },
		{ "remote-close",	test_remote_close,	0 },
		{ "remote-interrupt",	test_remote_interrupt,	-1 },
		{ "state",		test_state,		1 },
		{ NULL,			NULL,			0 },
	};
	struct context ctx;
	const char *errstr;
	unsigned long *cover, frac;
	int c, i;
	int error = 0;
	int prereq = 0;
	int reexec = 0;
	int verbose = 0;

	self = argv[0];

	memset(&ctx, 0, sizeof(ctx));
	ctx.c_bufsize = 256 << 10;

	while ((c = getopt(argc, argv, "b:Em:pv")) != -1)
		switch (c) {
		case 'b':
			frac = strtonum(optarg, 1, 100, &errstr);
			if (errstr != NULL)
				errx(1, "buffer size fraction %s", errstr);
			else if (frac > ctx.c_bufsize)
				errx(1, "buffer size fraction too large");
			ctx.c_bufsize /= frac;
			break;
		case 'E':
			reexec = 1;
			break;
		case 'm':
			if (strcmp(optarg, "pc") == 0)
				ctx.c_mode = KCOV_MODE_TRACE_PC;
			else if (strcmp(optarg, "cmp") == 0)
				ctx.c_mode = KCOV_MODE_TRACE_CMP;
			else
				errx(1, "unknown mode %s", optarg);
			break;
		case 'p':
			prereq = 1;
			break;
		case 'v':
			verbose = 1;
			break;
		default:
			usage();
		}
	argc -= optind;
	argv += optind;

	if (prereq) {
		ctx.c_fd = kcov_open();
		close(ctx.c_fd);
		return 0;
	}

	if (reexec) {
		do_syscall();
		return 0;
	}

	if (ctx.c_mode == 0 || argc != 1)
		usage();
	for (i = 0; tests[i].name != NULL; i++)
		if (strcmp(argv[0], tests[i].name) == 0)
			break;
	if (tests[i].name == NULL)
		errx(1, "%s: no such test", argv[0]);

	ctx.c_fd = kcov_open();
	if (ioctl(ctx.c_fd, KIOSETBUFSIZE, &ctx.c_bufsize) == -1)
		err(1, "ioctl: KIOSETBUFSIZE");
	cover = mmap(NULL, ctx.c_bufsize * sizeof(unsigned long),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ctx.c_fd, 0);
	if (cover == MAP_FAILED)
		err(1, "mmap");

	*cover = 0;
	error = tests[i].fn(&ctx);
	if (verbose)
		dump(cover, ctx.c_mode);
	if (check_coverage(cover, ctx.c_mode, ctx.c_bufsize, tests[i].coverage))
		error = 1;

	if (munmap(cover, ctx.c_bufsize * sizeof(unsigned long)) == -1)
		err(1, "munmap");
	if (ctx.c_fd != -1) {
		if (close(ctx.c_fd) == -1)
			err(1, "close");
	}

	return error;
}

static __dead void
usage(void)
{
	fprintf(stderr, "usage: kcov [-Epv] [-b fraction] -m mode test\n");
	exit(1);
}

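/*
 * Issue a cheap syscall; getpid() always enters the kernel, which is
 * enough to generate coverage while tracing is enabled.
 */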
static void
do_syscall(void)
{
	getpid();
}

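/*
 * Sanity check the coverage buffer. The first word holds the number of
 * recorded entries. In PC mode every entry is a single program counter;
 * in CMP mode every comparison occupies four words: the type (encoding
 * the operand size and a constant flag), both operands and the program
 * counter.
 */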
static int
check_coverage(const unsigned long *cover, int mode, unsigned long maxsize,
    int nonzero)
{
	unsigned long arg1, arg2, exp, i, pc, type;
	int error = 0;

	if (nonzero == -1) {
		return 0;
	} else if (nonzero && cover[0] == 0) {
		warnx("coverage empty (count=0)");
		return 1;
	} else if (!nonzero && cover[0] != 0) {
		warnx("coverage not empty (count=%lu)", *cover);
		return 1;
	} else if (cover[0] >= maxsize) {
		warnx("coverage overflow (count=%lu, max=%lu)",
		    *cover, maxsize);
		return 1;
	}

	if (mode == KCOV_MODE_TRACE_CMP) {
		if (*cover * 4 >= maxsize) {
			warnx("coverage cmp overflow (count=%lu, max=%lu)",
			    *cover * 4, maxsize);
			return 1;
		}

		for (i = 0; i < cover[0]; i++) {
			type = cover[i * 4 + 1];
			arg1 = cover[i * 4 + 2];
			arg2 = cover[i * 4 + 3];
			pc = cover[i * 4 + 4];

			/* The operand size exponent may not exceed 3. */
			exp = type >> 1;
			if (exp <= 3)
				continue;

			warnx("coverage cmp invalid size (i=%lu, exp=%lx, "
			    "const=%lu, arg1=%lu, arg2=%lu, pc=%p)",
			    i, exp, type & 0x1, arg1, arg2, (void *)pc);
			error = 1;
		}
	}

	return error;
}

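/*
 * Print the program counter of every recorded entry; in CMP mode the
 * program counter is the fourth word of each four-word entry.
 */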
static void
dump(const unsigned long *cover, int mode)
{
	unsigned long i;
	int stride = 1;

	if (mode == KCOV_MODE_TRACE_CMP)
		stride = 4;

	for (i = 0; i < cover[0]; i++)
		printf("%p\n", (void *)cover[i * stride + stride]);
}

static int
kcov_open(void)
{
	int fd;

	fd = open("/dev/kcov", O_RDWR);
	if (fd == -1)
		err(1, "open: /dev/kcov");
	return fd;
}

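/*
 * Wrappers around the KIOENABLE/KIODISABLE ioctls, which start and stop
 * tracing of the calling thread.
 */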
static void
kcov_enable(int fd, int mode)
{
	if (ioctl(fd, KIOENABLE, &mode) == -1)
		err(1, "ioctl: KIOENABLE");
}

static void
kcov_disable(int fd)
{
	if (ioctl(fd, KIODISABLE) == -1)
		err(1, "ioctl: KIODISABLE");
}

/*
 * Close before mmap.
 */
static int
test_close(struct context *ctx)
{
	int fd;

	fd = kcov_open();
	close(fd);
	return 0;
}

/*
 * Coverage of current thread.
 */
static int
test_coverage(struct context *ctx)
{
	kcov_enable(ctx->c_fd, ctx->c_mode);
	do_syscall();
	kcov_disable(ctx->c_fd);
	return 0;
}

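/*
 * Thread entry point for test_dying: closes the kcov descriptor while
 * the main thread still has tracing enabled.
 */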
static void *
closer(void *arg)
{
	struct context *ctx = arg;

	close(ctx->c_fd);
	return NULL;
}

/*
 * Close kcov descriptor in another thread during tracing.
 */
static int
test_dying(struct context *ctx)
{
	pthread_t th;
	int error;

	kcov_enable(ctx->c_fd, ctx->c_mode);

	if ((error = pthread_create(&th, NULL, closer, (void *)ctx)))
		errc(1, error, "pthread_create");
	if ((error = pthread_join(th, NULL)))
		errc(1, error, "pthread_join");

	error = 0;
	if (close(ctx->c_fd) == -1) {
		if (errno != EBADF)
			err(1, "close");
	} else {
		warnx("expected kcov descriptor to be closed");
		error = 1;
	}
	ctx->c_fd = -1;

	return error;
}

/*
 * Coverage of thread after exec.
 */
static int
test_exec(struct context *ctx)
{
	pid_t pid;
	int status;

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		kcov_enable(ctx->c_fd, ctx->c_mode);
		execlp(self, self, "-E", (char *)NULL);
		_exit(1);
	}

	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");
	if (WIFSIGNALED(status)) {
		warnx("terminated by signal (%d)", WTERMSIG(status));
		return 1;
	} else if (WEXITSTATUS(status) != 0) {
		warnx("non-zero exit (%d)", WEXITSTATUS(status));
		return 1;
	}

	/* Upon exit, the kcov descriptor must be reusable again. */
	kcov_enable(ctx->c_fd, ctx->c_mode);
	kcov_disable(ctx->c_fd);

	return 0;
}

/*
 * File descriptor send/receive is not allowed since remote coverage is tied to
 * the current process.
 */
static int
test_fdsend(struct context *ctx)
{
	struct msghdr msg;
	union {
		struct cmsghdr hdr;
		unsigned char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	struct cmsghdr *cmsg;
	int pair[2];

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, pair) == -1)
		err(1, "socketpair");

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = &cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	*(int *)CMSG_DATA(cmsg) = ctx->c_fd;
	if (sendmsg(pair[1], &msg, 0) != -1)
		errx(1, "sendmsg: expected error");

	close(pair[0]);
	close(pair[1]);
	return 0;
}

/*
 * Coverage of thread after fork.
 */
static int
test_fork(struct context *ctx)
{
	pid_t pid;
	int status;

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		kcov_enable(ctx->c_fd, ctx->c_mode);
		do_syscall();
		_exit(0);
	}

	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");
	if (WIFSIGNALED(status)) {
		warnx("terminated by signal (%d)", WTERMSIG(status));
		return 1;
	} else if (WEXITSTATUS(status) != 0) {
		warnx("non-zero exit (%d)", WEXITSTATUS(status));
		return 1;
	}

	/* Upon exit, the kcov descriptor must be reusable again. */
	kcov_enable(ctx->c_fd, ctx->c_mode);
	kcov_disable(ctx->c_fd);

	return 0;
}

/*
 * Open /dev/kcov more than once.
 */
static int
test_open(struct context *ctx)
{
	unsigned long *cover;
	int fd;
	int error = 0;

	fd = kcov_open();
	if (ioctl(fd, KIOSETBUFSIZE, &ctx->c_bufsize) == -1)
		err(1, "ioctl: KIOSETBUFSIZE");
	cover = mmap(NULL, ctx->c_bufsize * sizeof(unsigned long),
	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		err(1, "mmap");

	kcov_enable(fd, ctx->c_mode);
	do_syscall();
	kcov_disable(fd);

	error = check_coverage(cover, ctx->c_mode, ctx->c_bufsize, 1);

	if (munmap(cover, ctx->c_bufsize * sizeof(unsigned long)) == -1)
		err(1, "munmap");
	close(fd);

	return error;
}

/*
 * Remote taskq coverage. One reliable way to trigger a task on behalf of the
 * running process is to monitor a kqueue file descriptor using another
 * kqueue: the wakeup of the monitored kqueue is then delivered from a kernel
 * task.
 */
static int
test_remote(struct context *ctx)
{
	struct kio_remote_attach remote = {
		.subsystem	= KCOV_REMOTE_COMMON,
		.id		= 0,
	};
	struct kevent kev;
	int kq1, kq2, pip[2];
	int x = 0;

	if (ioctl(ctx->c_fd, KIOREMOTEATTACH, &remote) == -1)
		err(1, "ioctl: KIOREMOTEATTACH");
	kcov_enable(ctx->c_fd, ctx->c_mode);

	kq1 = kqueue();
	if (kq1 == -1)
		err(1, "kqueue");
	kq2 = kqueue();
	if (kq2 == -1)
		err(1, "kqueue");
	EV_SET(&kev, kq2, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
	if (kevent(kq1, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");

	if (pipe(pip) == -1)
		err(1, "pipe");

	/* Make pip[0] readable, which in turn makes kq2 readable. */
	EV_SET(&kev, pip[0], EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
	if (kevent(kq2, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent");
	(void)write(pip[1], &x, sizeof(x));

	if (kevent(kq1, NULL, 0, &kev, 1, NULL) == -1)
		err(1, "kevent");

	kcov_disable(ctx->c_fd);

	return 0;
}

/*
 * Close with remote coverage enabled.
 */
static int
test_remote_close(struct context *ctx)
{
	struct kio_remote_attach remote = {
		.subsystem	= KCOV_REMOTE_COMMON,
		.id		= 0,
	};

	if (ioctl(ctx->c_fd, KIOREMOTEATTACH, &remote) == -1)
		err(1, "ioctl: KIOREMOTEATTACH");
	kcov_enable(ctx->c_fd, ctx->c_mode);
	if (close(ctx->c_fd) == -1)
		err(1, "close");
	ctx->c_fd = kcov_open();
	return 0;
}

/*
 * Remote interrupt coverage. There's no reliable way to enter a remote section
 * in interrupt context. This test can however be used to examine the coverage
 * collected in interrupt context:
 *
 *     $ until [ -s cov ]; do kcov -v -m pc remote-interrupt >cov; done
 */
static int
test_remote_interrupt(struct context *ctx)
{
	struct kio_remote_attach remote = {
		.subsystem	= KCOV_REMOTE_COMMON,
		.id		= 0,
	};
	int i;

	if (ioctl(ctx->c_fd, KIOREMOTEATTACH, &remote) == -1)
		err(1, "ioctl: KIOREMOTEATTACH");
	kcov_enable(ctx->c_fd, ctx->c_mode);

	for (i = 0; i < 100; i++)
		(void)getpid();

	kcov_disable(ctx->c_fd);

	return 0;
}

/*
 * State transitions. The buffer size may only be set once, and while
 * tracing is enabled neither enabling again nor resizing the buffer is
 * permitted; disabling is only valid while tracing is enabled.
 */
static int
test_state(struct context *ctx)
{
	if (ioctl(ctx->c_fd, KIOENABLE, &ctx->c_mode) == -1) {
		warn("KIOSETBUFSIZE -> KIOENABLE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIODISABLE) == -1) {
		warn("KIOENABLE -> KIODISABLE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIOSETBUFSIZE, 0) != -1) {
		warnx("KIOSETBUFSIZE -> KIOSETBUFSIZE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIODISABLE) != -1) {
		warnx("KIOSETBUFSIZE -> KIODISABLE");
		return 1;
	}

	kcov_enable(ctx->c_fd, ctx->c_mode);
	if (ioctl(ctx->c_fd, KIOENABLE, &ctx->c_mode) != -1) {
		warnx("KIOENABLE -> KIOENABLE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIOSETBUFSIZE, 0) != -1) {
		warnx("KIOENABLE -> KIOSETBUFSIZE");
		return 1;
	}
	kcov_disable(ctx->c_fd);

	return 0;
}