xref: /openbsd/regress/sys/dev/kcov/kcov.c (revision e5dd7070)
1 /*	$OpenBSD: kcov.c,v 1.13 2020/08/01 08:44:57 anton Exp $	*/
2 
3 /*
4  * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/event.h>
21 #include <sys/ioctl.h>
22 #include <sys/kcov.h>
23 #include <sys/mman.h>
24 #include <sys/wait.h>
25 
26 #include <err.h>
27 #include <errno.h>
28 #include <fcntl.h>
29 #include <pthread.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <unistd.h>
34 
/* Per-test parameters, set up by main() and handed to each test function. */
struct context {
	int c_fd;			/* open kcov descriptor under test */
	int c_mode;			/* KCOV_MODE_TRACE_PC or KCOV_MODE_TRACE_CMP */
	unsigned long c_bufsize;	/* coverage buffer size, in unsigned-long entries */
};
40 
41 static int test_close(struct context *);
42 static int test_coverage(struct context *);
43 static int test_dying(struct context *);
44 static int test_exec(struct context *);
45 static int test_fork(struct context *);
46 static int test_open(struct context *);
47 static int test_remote(struct context *);
48 static int test_remote_close(struct context *);
49 static int test_state(struct context *);
50 
51 static int check_coverage(const unsigned long *, int, unsigned long, int);
52 static void do_syscall(void);
53 static void dump(const unsigned long *, int mode);
54 static void kcov_disable(int);
55 static void kcov_enable(int, int);
56 static int kcov_open(void);
57 static __dead void usage(void);
58 
59 static const char *self;
60 
61 int
62 main(int argc, char *argv[])
63 {
64 	struct {
65 		const char *name;
66 		int (*fn)(struct context *);
67 		int coverage;		/* test must produce coverage */
68 	} tests[] = {
69 		{ "close",		test_close,		0 },
70 		{ "coverage",		test_coverage,		1 },
71 		{ "dying",		test_dying,		1 },
72 		{ "exec",		test_exec,		1 },
73 		{ "fork",		test_fork,		1 },
74 		{ "open",		test_open,		0 },
75 		{ "remote",		test_remote,		1 },
76 		{ "remote-close",	test_remote_close,	0 },
77 		{ "state",		test_state,		1 },
78 		{ NULL,			NULL,			0 },
79 	};
80 	struct context ctx;
81 	const char *errstr;
82 	unsigned long *cover, frac;
83 	int c, i;
84 	int error = 0;
85 	int prereq = 0;
86 	int reexec = 0;
87 	int verbose = 0;
88 
89 	self = argv[0];
90 
91 	memset(&ctx, 0, sizeof(ctx));
92 	ctx.c_bufsize = 256 << 10;
93 
94 	while ((c = getopt(argc, argv, "b:Em:pv")) != -1)
95 		switch (c) {
96 		case 'b':
97 			frac = strtonum(optarg, 1, 100, &errstr);
98 			if (frac == 0)
99 				errx(1, "buffer size fraction %s", errstr);
100 			else if (frac > ctx.c_bufsize)
101 				errx(1, "buffer size fraction too large");
102 			ctx.c_bufsize /= frac;
103 			break;
104 		case 'E':
105 			reexec = 1;
106 			break;
107 		case 'm':
108 			if (strcmp(optarg, "pc") == 0)
109 				ctx.c_mode = KCOV_MODE_TRACE_PC;
110 			else if (strcmp(optarg, "cmp") == 0)
111 				ctx.c_mode = KCOV_MODE_TRACE_CMP;
112 			else
113 				errx(1, "unknown mode %s", optarg);
114 			break;
115 		case 'p':
116 			prereq = 1;
117 			break;
118 		case 'v':
119 			verbose = 1;
120 			break;
121 		default:
122 			usage();
123 		}
124 	argc -= optind;
125 	argv += optind;
126 
127 	if (prereq) {
128 		ctx.c_fd = kcov_open();
129 		close(ctx.c_fd);
130 		return 0;
131 	}
132 
133 	if (reexec) {
134 		do_syscall();
135 		return 0;
136 	}
137 
138 	if (ctx.c_mode == 0 || argc != 1)
139 		usage();
140 	for (i = 0; tests[i].name != NULL; i++)
141 		if (strcmp(argv[0], tests[i].name) == 0)
142 			break;
143 	if (tests[i].name == NULL)
144 		errx(1, "%s: no such test", argv[0]);
145 
146 	ctx.c_fd = kcov_open();
147 	if (ioctl(ctx.c_fd, KIOSETBUFSIZE, &ctx.c_bufsize) == -1)
148 		err(1, "ioctl: KIOSETBUFSIZE");
149 	cover = mmap(NULL, ctx.c_bufsize * sizeof(unsigned long),
150 	    PROT_READ | PROT_WRITE, MAP_SHARED, ctx.c_fd, 0);
151 	if (cover == MAP_FAILED)
152 		err(1, "mmap");
153 
154 	*cover = 0;
155 	error = tests[i].fn(&ctx);
156 	if (verbose)
157 		dump(cover, ctx.c_mode);
158 	if (check_coverage(cover, ctx.c_mode, ctx.c_bufsize, tests[i].coverage))
159 		error = 1;
160 
161 	if (munmap(cover, ctx.c_bufsize * sizeof(unsigned long)) == -1)
162 		err(1, "munmap");
163 	if (ctx.c_fd != -1) {
164 		if (close(ctx.c_fd) == -1)
165 			err(1, "close");
166 	}
167 
168 	return error;
169 }
170 
171 static __dead void
172 usage(void)
173 {
174 	fprintf(stderr, "usage: kcov [-Epv] [-b fraction] -t mode test\n");
175 	exit(1);
176 }
177 
/* Issue a cheap, always-succeeding syscall to generate kernel coverage. */
static void
do_syscall(void)
{
	(void)getpid();
}
183 
184 static int
185 check_coverage(const unsigned long *cover, int mode, unsigned long maxsize,
186     int nonzero)
187 {
188 	unsigned long arg1, arg2, exp, i, pc, type;
189 	int error = 0;
190 
191 	if (nonzero && cover[0] == 0) {
192 		warnx("coverage empty (count=0)\n");
193 		return 1;
194 	} else if (!nonzero && cover[0] != 0) {
195 		warnx("coverage not empty (count=%lu)\n", *cover);
196 		return 1;
197 	} else if (cover[0] >= maxsize) {
198 		warnx("coverage overflow (count=%lu, max=%lu)\n",
199 		    *cover, maxsize);
200 		return 1;
201 	}
202 
203 	if (mode == KCOV_MODE_TRACE_CMP) {
204 		if (*cover * 4 >= maxsize) {
205 			warnx("coverage cmp overflow (count=%lu, max=%lu)\n",
206 			    *cover * 4, maxsize);
207 			return 1;
208 		}
209 
210 		for (i = 0; i < cover[0]; i++) {
211 			type = cover[i * 4 + 1];
212 			arg1 = cover[i * 4 + 2];
213 			arg2 = cover[i * 4 + 3];
214 			pc = cover[i * 4 + 4];
215 
216 			exp = type >> 1;
217 			if (exp <= 3)
218 				continue;
219 
220 			warnx("coverage cmp invalid size (i=%lu, exp=%lx, "
221 			    "const=%ld, arg1=%lu, arg2=%lu, pc=%p)\n",
222 			    i, exp, type & 0x1, arg1, arg2, (void *)pc);
223 			error = 1;
224 		}
225 	}
226 
227 	return error;
228 }
229 
230 static void
231 dump(const unsigned long *cover, int mode)
232 {
233 	unsigned long i;
234 	int stride = 1;
235 
236 	if (mode == KCOV_MODE_TRACE_CMP)
237 		stride = 4;
238 
239 	for (i = 0; i < cover[0]; i++)
240 		printf("%p\n", (void *)cover[i * stride + stride]);
241 }
242 
/* Open the kcov device read/write; exits on failure, never returns -1. */
static int
kcov_open(void)
{
	int fd;

	if ((fd = open("/dev/kcov", O_RDWR)) == -1)
		err(1, "open: /dev/kcov");
	return fd;
}
253 
254 static void
255 kcov_enable(int fd, int mode)
256 {
257 	if (ioctl(fd, KIOENABLE, &mode) == -1)
258 		err(1, "ioctl: KIOENABLE");
259 }
260 
261 static void
262 kcov_disable(int fd)
263 {
264 	if (ioctl(fd, KIODISABLE) == -1)
265 		err(1, "ioctl: KIODISABLE");
266 }
267 
/*
 * Close before mmap: a kcov descriptor must be closeable even when no
 * coverage buffer was ever configured.
 */
static int
test_close(struct context *ctx)
{
	close(kcov_open());
	return 0;
}
280 
281 /*
282  * Coverage of current thread.
283  */
284 static int
285 test_coverage(struct context *ctx)
286 {
287 	kcov_enable(ctx->c_fd, ctx->c_mode);
288 	do_syscall();
289 	kcov_disable(ctx->c_fd);
290 	return 0;
291 }
292 
293 static void *
294 closer(void *arg)
295 {
296 	struct context *ctx = arg;
297 
298 	close(ctx->c_fd);
299 	return NULL;
300 }
301 
302 /*
303  * Close kcov descriptor in another thread during tracing.
304  */
305 static int
306 test_dying(struct context *ctx)
307 {
308 	pthread_t th;
309 	int error;
310 
311 	kcov_enable(ctx->c_fd, ctx->c_mode);
312 
313 	if ((error = pthread_create(&th, NULL, closer, (void *)ctx)))
314 		errc(1, error, "pthread_create");
315 	if ((error = pthread_join(th, NULL)))
316 		errc(1, error, "pthread_join");
317 
318 	error = 0;
319 	if (close(ctx->c_fd) == -1) {
320 		if (errno != EBADF)
321 			err(1, "close");
322 	} else {
323 		warnx("expected kcov descriptor to be closed");
324 		error = 1;
325 	}
326 	ctx->c_fd = -1;
327 
328 	return error;
329 }
330 
331 /*
332  * Coverage of thread after exec.
333  */
334 static int
335 test_exec(struct context *ctx)
336 {
337 	pid_t pid;
338 	int status;
339 
340 	pid = fork();
341 	if (pid == -1)
342 		err(1, "fork");
343 	if (pid == 0) {
344 		kcov_enable(ctx->c_fd, ctx->c_mode);
345 		execlp(self, self, "-E", NULL);
346 		_exit(1);
347 	}
348 
349 	if (waitpid(pid, &status, 0) == -1)
350 		err(1, "waitpid");
351 	if (WIFSIGNALED(status)) {
352 		warnx("terminated by signal (%d)", WTERMSIG(status));
353 		return 1;
354 	} else if (WEXITSTATUS(status) != 0) {
355 		warnx("non-zero exit (%d)", WEXITSTATUS(status));
356 		return 1;
357 	}
358 
359 	/* Upon exit, the kcov descriptor must be reusable again. */
360 	kcov_enable(ctx->c_fd, ctx->c_mode);
361 	kcov_disable(ctx->c_fd);
362 
363 	return 0;
364 }
365 
366 /*
367  * Coverage of thread after fork.
368  */
369 static int
370 test_fork(struct context *ctx)
371 {
372 	pid_t pid;
373 	int status;
374 
375 	pid = fork();
376 	if (pid == -1)
377 		err(1, "fork");
378 	if (pid == 0) {
379 		kcov_enable(ctx->c_fd, ctx->c_mode);
380 		do_syscall();
381 		_exit(0);
382 	}
383 
384 	if (waitpid(pid, &status, 0) == -1)
385 		err(1, "waitpid");
386 	if (WIFSIGNALED(status)) {
387 		warnx("terminated by signal (%d)", WTERMSIG(status));
388 		return 1;
389 	} else if (WEXITSTATUS(status) != 0) {
390 		warnx("non-zero exit (%d)", WEXITSTATUS(status));
391 		return 1;
392 	}
393 
394 	/* Upon exit, the kcov descriptor must be reusable again. */
395 	kcov_enable(ctx->c_fd, ctx->c_mode);
396 	kcov_disable(ctx->c_fd);
397 
398 	return 0;
399 }
400 
401 /*
402  * Open /dev/kcov more than once.
403  */
404 static int
405 test_open(struct context *ctx)
406 {
407 	unsigned long *cover;
408 	int fd;
409 	int error = 0;
410 
411 	fd = kcov_open();
412 	if (ioctl(fd, KIOSETBUFSIZE, &ctx->c_bufsize) == -1)
413 		err(1, "ioctl: KIOSETBUFSIZE");
414 	cover = mmap(NULL, ctx->c_bufsize * sizeof(unsigned long),
415 	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
416 	if (cover == MAP_FAILED)
417 		err(1, "mmap");
418 
419 	kcov_enable(fd, ctx->c_mode);
420 	do_syscall();
421 	kcov_disable(fd);
422 
423 	error = check_coverage(cover, ctx->c_mode, ctx->c_bufsize, 1);
424 
425 	if (munmap(cover, ctx->c_bufsize * sizeof(unsigned long)))
426 		err(1, "munmap");
427 	close(fd);
428 
429 	return error;
430 }
431 
432 /*
433  * Remote taskq coverage. One reliable way to trigger a task on behalf of the
434  * running process is to monitor a kqueue file descriptor using kqueue.
435  */
436 static int
437 test_remote(struct context *ctx)
438 {
439 	struct kio_remote_attach remote = {
440 		.subsystem	= KCOV_REMOTE_COMMON,
441 		.id		= 0,
442 	};
443 	struct kevent kev;
444 	int kq1, kq2, pip[2];
445 	int x = 0;
446 
447 	if (ioctl(ctx->c_fd, KIOREMOTEATTACH, &remote) == -1)
448 		err(1, "ioctl: KIOREMOTEATTACH");
449 	kcov_enable(ctx->c_fd, ctx->c_mode);
450 
451 	kq1 = kqueue();
452 	if (kq1 == -1)
453 		err(1, "kqueue");
454 	kq2 = kqueue();
455 	if (kq1 == -1)
456 		err(1, "kqueue");
457 	EV_SET(&kev, kq2, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
458 	if (kevent(kq1, &kev, 1, NULL, 0, NULL) == -1)
459 		err(1, "kqueue");
460 
461 	if (pipe(pip) == -1)
462 		err(1, "pipe");
463 
464 	EV_SET(&kev, pip[0], EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
465 	if (kevent(kq2, &kev, 1, NULL, 0, NULL) == -1)
466 		err(1, "kqueue");
467 	(void)write(pip[1], &x, sizeof(x));
468 
469 	if (kevent(kq1, NULL, 0, &kev, 1, NULL) == -1)
470 		err(1, "kevent");
471 
472 	kcov_disable(ctx->c_fd);
473 
474 	return 0;
475 }
476 
477 /*
478  * Close with remote coverage enabled.
479  */
480 static int
481 test_remote_close(struct context *ctx)
482 {
483 	struct kio_remote_attach remote = {
484 		.subsystem	= KCOV_REMOTE_COMMON,
485 		.id		= 0,
486 	};
487 
488 	if (ioctl(ctx->c_fd, KIOREMOTEATTACH, &remote) == -1)
489 		err(1, "ioctl: KIOREMOTEATTACH");
490 	kcov_enable(ctx->c_fd, ctx->c_mode);
491 	if (close(ctx->c_fd) == -1)
492 		err(1, "close");
493 	ctx->c_fd = kcov_open();
494 	return 0;
495 }
496 
/*
 * State transitions: each ioctl must only be accepted in the states where
 * it is valid. On entry, main() has already configured the buffer with
 * KIOSETBUFSIZE on ctx->c_fd.
 */
static int
test_state(struct context *ctx)
{
	/* Buffer configured: enabling and then disabling must succeed. */
	if (ioctl(ctx->c_fd, KIOENABLE, &ctx->c_mode) == -1) {
		warn("KIOSETBUFSIZE -> KIOENABLE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIODISABLE) == -1) {
		warn("KIOENABLE -> KIODISABLE");
		return 1;
	}
	/* The buffer size may only be configured once per descriptor. */
	if (ioctl(ctx->c_fd, KIOSETBUFSIZE, 0) != -1) {
		warnx("KIOSETBUFSIZE -> KIOSETBUFSIZE");
		return 1;
	}
	/* Disabling must be rejected while tracing is not enabled. */
	if (ioctl(ctx->c_fd, KIODISABLE) != -1) {
		warnx("KIOSETBUFSIZE -> KIODISABLE");
		return 1;
	}

	kcov_enable(ctx->c_fd, ctx->c_mode);
	/* While enabled, re-enabling and reconfiguring must be rejected. */
	if (ioctl(ctx->c_fd, KIOENABLE, &ctx->c_mode) != -1) {
		warnx("KIOENABLE -> KIOENABLE");
		return 1;
	}
	if (ioctl(ctx->c_fd, KIOSETBUFSIZE, 0) != -1) {
		warnx("KIOENABLE -> KIOSETBUFSIZE");
		return 1;
	}
	kcov_disable(ctx->c_fd);

	return 0;
}
533