xref: /openbsd/sys/dev/kcov.c (revision 274d7c50)
1 /*	$OpenBSD: kcov.c,v 1.15 2019/05/19 08:55:27 anton Exp $	*/
2 
3 /*
4  * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/proc.h>
22 #include <sys/kcov.h>
23 #include <sys/malloc.h>
24 #include <sys/stdint.h>
25 #include <sys/queue.h>
26 
27 #include <uvm/uvm_extern.h>
28 
29 #define KCOV_BUF_MEMB_SIZE	sizeof(uintptr_t)
30 
31 #define KCOV_CMP_CONST		0x1
32 #define KCOV_CMP_SIZE(x)	((x) << 1)
33 
34 /* #define KCOV_DEBUG */
35 #ifdef KCOV_DEBUG
36 #define DPRINTF(x...) do { if (kcov_debug) printf(x); } while (0)
37 #else
38 #define DPRINTF(x...)
39 #endif
40 
/*
 * Per-minor kcov descriptor.  Allocated in kcovopen(), freed in
 * kd_free(), and kept on the global kd_list for lookup by unit.
 */
struct kcov_dev {
	enum {
		KCOV_STATE_NONE,	/* opened, no buffer allocated yet */
		KCOV_STATE_READY,	/* buffer allocated, tracing disabled */
		KCOV_STATE_TRACE,	/* tracing enabled for one thread */
		KCOV_STATE_DYING,	/* closed while tracing; freed in kcov_exit() */
	}		 kd_state;
	int		 kd_mode;	/* KCOV_MODE_NONE/TRACE_PC/TRACE_CMP */
	int		 kd_unit;	/* device minor */
	uintptr_t	*kd_buf;	/* traced coverage */
	size_t		 kd_nmemb;	/* usable elements, excluding counter slot */
	size_t		 kd_size;	/* buffer size in bytes, page rounded */

	TAILQ_ENTRY(kcov_dev)	kd_entry;	/* kd_list linkage */
};
56 
void kcovattach(int);

int kd_init(struct kcov_dev *, unsigned long);
void kd_free(struct kcov_dev *);
struct kcov_dev *kd_lookup(int);

static inline int inintr(void);

/* All allocated kcov descriptors, keyed by device minor via kd_lookup(). */
TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);

/* Tracing is suppressed until the first kcovopen() clears this flag. */
int kcov_cold = 1;

#ifdef KCOV_DEBUG
int kcov_debug = 1;
#endif
72 
73 /*
74  * Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
75  * cause the following function to be called upon function entry and before
76  * each block instructions that maps to a single line in the original source
77  * code.
78  *
79  * If kcov is enabled for the current thread, the kernel program counter will
80  * be stored in its corresponding coverage buffer.
81  * The first element in the coverage buffer holds the index of next available
82  * element.
83  */
84 void
85 __sanitizer_cov_trace_pc(void)
86 {
87 	struct kcov_dev *kd;
88 	uint64_t idx;
89 
90 	/*
91 	 * Do not trace before kcovopen() has been called at least once.
92 	 * At this point, all secondary CPUs have booted and accessing curcpu()
93 	 * is safe.
94 	 */
95 	if (kcov_cold)
96 		return;
97 
98 	/* Do not trace in interrupts to prevent noisy coverage. */
99 	if (inintr())
100 		return;
101 
102 	kd = curproc->p_kd;
103 	if (kd == NULL || kd->kd_mode != KCOV_MODE_TRACE_PC)
104 		return;
105 
106 	idx = kd->kd_buf[0];
107 	if (idx + 1 <= kd->kd_nmemb) {
108 		kd->kd_buf[idx + 1] = (uintptr_t)__builtin_return_address(0);
109 		kd->kd_buf[0] = idx + 1;
110 	}
111 }
112 
113 /*
114  * Compiling the kernel with the `-fsanitize-coverage=trace-cmp' option will
115  * cause the following function to be called upon integer comparisons and switch
116  * statements.
117  *
118  * If kcov is enabled for the current thread, the comparison will be stored in
119  * its corresponding coverage buffer.
120  */
121 void
122 trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, uintptr_t pc)
123 {
124 	struct kcov_dev *kd;
125 	uint64_t idx;
126 
127 	/*
128 	 * Do not trace before kcovopen() has been called at least once.
129 	 * At this point, all secondary CPUs have booted and accessing curcpu()
130 	 * is safe.
131 	 */
132 	if (kcov_cold)
133 		return;
134 
135 	/* Do not trace in interrupts to prevent noisy coverage. */
136 	if (inintr())
137 		return;
138 
139 	kd = curproc->p_kd;
140 	if (kd == NULL || kd->kd_mode != KCOV_MODE_TRACE_CMP)
141 		return;
142 
143 	idx = kd->kd_buf[0];
144 	if (idx * 4 + 4 <= kd->kd_nmemb) {
145 		kd->kd_buf[idx * 4 + 1] = type;
146 		kd->kd_buf[idx * 4 + 2] = arg1;
147 		kd->kd_buf[idx * 4 + 3] = arg2;
148 		kd->kd_buf[idx * 4 + 4] = pc;
149 		kd->kd_buf[0] = idx + 1;
150 	}
151 }
152 
/*
 * Width-specific comparison hooks emitted by the compiler;
 * KCOV_CMP_SIZE(n) encodes an operand width of 2^n bytes.
 */
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}
180 
/*
 * Hooks for comparisons tagged with KCOV_CMP_CONST.
 * NOTE(review): presumably called when one operand is a compile-time
 * constant, per the sanitizer ABI — confirm against the compiler docs.
 */
void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}
208 
209 void
210 __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
211 {
212 	uint64_t i, nbits, ncases, type;
213 	uintptr_t pc;
214 
215 	pc = (uintptr_t)__builtin_return_address(0);
216 	ncases = cases[0];
217 	nbits = cases[1];
218 
219 	switch (nbits) {
220 	case 8:
221 		type = KCOV_CMP_SIZE(0);
222 		break;
223 	case 16:
224 		type = KCOV_CMP_SIZE(1);
225 		break;
226 	case 32:
227 		type = KCOV_CMP_SIZE(2);
228 		break;
229 	case 64:
230 		type = KCOV_CMP_SIZE(3);
231 		break;
232 	default:
233 		return;
234 	}
235 	type |= KCOV_CMP_CONST;
236 
237 	for (i = 0; i < ncases; i++)
238 		trace_cmp(type, cases[i + 2], val, pc);
239 }
240 
/*
 * Pseudo-device attach hook.  kcov keeps no attach-time state, so there
 * is nothing to do here.
 */
void
kcovattach(int count)
{
}
245 
246 int
247 kcovopen(dev_t dev, int flag, int mode, struct proc *p)
248 {
249 	struct kcov_dev *kd;
250 
251 	if (kd_lookup(minor(dev)) != NULL)
252 		return (EBUSY);
253 
254 	if (kcov_cold)
255 		kcov_cold = 0;
256 
257 	DPRINTF("%s: unit=%d\n", __func__, minor(dev));
258 
259 	kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
260 	kd->kd_unit = minor(dev);
261 	TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
262 	return (0);
263 }
264 
265 int
266 kcovclose(dev_t dev, int flag, int mode, struct proc *p)
267 {
268 	struct kcov_dev *kd;
269 
270 	kd = kd_lookup(minor(dev));
271 	if (kd == NULL)
272 		return (EINVAL);
273 
274 	DPRINTF("%s: unit=%d, state=%d, mode=%d\n",
275 	    __func__, kd->kd_unit, kd->kd_state, kd->kd_mode);
276 
277 	if (kd->kd_state == KCOV_STATE_TRACE) {
278 		kd->kd_state = KCOV_STATE_DYING;
279 		kd->kd_mode = KCOV_MODE_NONE;
280 	} else {
281 		kd_free(kd);
282 	}
283 
284 	return (0);
285 }
286 
/*
 * Device ioctl entry point.  KIOSETBUFSIZE allocates the coverage
 * buffer, KIOENABLE/KIODISABLE toggle tracing for the calling thread.
 */
int
kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kcov_dev *kd;
	int mode;
	int error = 0;

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		return (ENXIO);

	switch (cmd) {
	case KIOSETBUFSIZE:
		/* data is the requested number of buffer elements. */
		error = kd_init(kd, *((unsigned long *)data));
		break;
	case KIOENABLE:
		/* Only one kcov descriptor can be enabled per thread. */
		if (p->p_kd != NULL || kd->kd_state != KCOV_STATE_READY) {
			error = EBUSY;
			break;
		}
		mode = *((int *)data);
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}
		kd->kd_state = KCOV_STATE_TRACE;
		kd->kd_mode = mode;
		/* Attach the descriptor to the calling thread. */
		p->p_kd = kd;
		break;
	case KIODISABLE:
		/* Only the enabled thread may disable itself. */
		if (p->p_kd != kd || kd->kd_state != KCOV_STATE_TRACE) {
			error = EBUSY;
			break;
		}
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		p->p_kd = NULL;
		break;
	default:
		error = ENOTTY;
	}

	DPRINTF("%s: unit=%d, state=%d, mode=%d, error=%d\n",
	    __func__, kd->kd_unit, kd->kd_state, kd->kd_mode, error);

	return (error);
}
336 
337 paddr_t
338 kcovmmap(dev_t dev, off_t offset, int prot)
339 {
340 	struct kcov_dev *kd;
341 	paddr_t pa;
342 	vaddr_t va;
343 
344 	kd = kd_lookup(minor(dev));
345 	if (kd == NULL)
346 		return (paddr_t)(-1);
347 
348 	if (offset < 0 || offset >= kd->kd_nmemb * KCOV_BUF_MEMB_SIZE)
349 		return (paddr_t)(-1);
350 
351 	va = (vaddr_t)kd->kd_buf + offset;
352 	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
353 		return (paddr_t)(-1);
354 	return (pa);
355 }
356 
357 void
358 kcov_exit(struct proc *p)
359 {
360 	struct kcov_dev *kd;
361 
362 	kd = p->p_kd;
363 	if (kd == NULL)
364 		return;
365 
366 	DPRINTF("%s: unit=%d, state=%d, mode=%d\n",
367 	    __func__, kd->kd_unit, kd->kd_state, kd->kd_mode);
368 
369 	if (kd->kd_state == KCOV_STATE_DYING) {
370 		kd_free(kd);
371 	} else {
372 		kd->kd_state = KCOV_STATE_READY;
373 		kd->kd_mode = KCOV_MODE_NONE;
374 	}
375 	p->p_kd = NULL;
376 }
377 
378 struct kcov_dev *
379 kd_lookup(int unit)
380 {
381 	struct kcov_dev *kd;
382 
383 	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
384 		if (kd->kd_unit == unit)
385 			return (kd);
386 	}
387 	return (NULL);
388 }
389 
/*
 * Allocate a zeroed coverage buffer of nmemb elements for kd and move
 * it to the READY state.  nmemb is user-controlled (KIOSETBUFSIZE).
 * Returns 0 on success, EINVAL for a bad size, ENOMEM on allocation
 * failure or EBUSY if a buffer already exists.
 */
int
kd_init(struct kcov_dev *kd, unsigned long nmemb)
{
	void *buf;
	size_t size;

	KASSERT(kd->kd_buf == NULL);

	if (kd->kd_state != KCOV_STATE_NONE)
		return (EBUSY);

	if (nmemb == 0 || nmemb > KCOV_BUF_MAX_NMEMB)
		return (EINVAL);

	size = roundup(nmemb * KCOV_BUF_MEMB_SIZE, PAGE_SIZE);
	buf = km_alloc(size, &kv_any, &kp_zero, &kd_waitok);
	if (buf == NULL)
		return (ENOMEM);
	/* km_alloc() can sleep, ensure the race was won. */
	if (kd->kd_state != KCOV_STATE_NONE) {
		km_free(buf, size, &kv_any, &kp_zero);
		return (EBUSY);
	}
	kd->kd_buf = buf;
	/* The first element is reserved to hold the number of used elements. */
	kd->kd_nmemb = nmemb - 1;
	kd->kd_size = size;
	kd->kd_state = KCOV_STATE_READY;
	return (0);
}
420 
/*
 * Tear down a kcov descriptor: unlink it from kd_list, release the
 * coverage buffer (if one was allocated) and free the descriptor.
 */
void
kd_free(struct kcov_dev *kd)
{
	DPRINTF("%s: unit=%d, state=%d, mode=%d\n",
	    __func__, kd->kd_unit, kd->kd_state, kd->kd_mode);

	TAILQ_REMOVE(&kd_list, kd, kd_entry);
	if (kd->kd_buf != NULL)
		km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero);
	free(kd, M_SUBPROC, sizeof(*kd));
}
432 
/*
 * Return non-zero when executing in interrupt context.  Only
 * implemented for architectures exposing ci_idepth; elsewhere this
 * always reports 0, so interrupt coverage is not filtered there.
 */
static inline int
inintr(void)
{
#if defined(__amd64__) || defined(__arm__) || defined(__arm64__) || \
    defined(__i386__)
	return (curcpu()->ci_idepth > 0);
#else
	return (0);
#endif
}
443