xref: /openbsd/sys/dev/kcov.c (revision ba619c7d)
1 /*	$OpenBSD: kcov.c,v 1.50 2024/11/10 10:04:33 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/proc.h>
22 #include <sys/kcov.h>
23 #include <sys/malloc.h>
24 #include <sys/mutex.h>
25 #include <sys/pool.h>
26 #include <sys/stdint.h>
27 #include <sys/queue.h>
28 
29 /* kcov_vnode() */
30 #include <sys/conf.h>
31 #include <sys/vnode.h>
32 #include <sys/specdev.h>
33 
34 #include <uvm/uvm_extern.h>
35 
36 #define KCOV_BUF_MEMB_SIZE	sizeof(uintptr_t)
37 #define KCOV_BUF_MAX_NMEMB	(256 << 10)
38 
39 #define KCOV_CMP_CONST		0x1
40 #define KCOV_CMP_SIZE(x)	((x) << 1)
41 
42 #define KCOV_STATE_NONE		0
43 #define KCOV_STATE_READY	1
44 #define KCOV_STATE_TRACE	2
45 #define KCOV_STATE_DYING	3
46 
47 #define KCOV_STRIDE_TRACE_PC	1
48 #define KCOV_STRIDE_TRACE_CMP	4
49 
50 /*
51  * Coverage structure.
52  *
53  * Locking:
54  * 	I	immutable after creation
55  *	M	kcov_mtx
56  *	a	atomic operations
57  */
struct kcov_dev {
	int		 kd_state;	/* [M] KCOV_STATE_* */
	int		 kd_mode;	/* [M] KCOV_MODE_* */
	int		 kd_unit;	/* [I] D_CLONE unique device minor */
	int		 kd_intr;	/* [M] currently used in interrupt */
	uintptr_t	*kd_buf;	/* [a] traced coverage; [0] is the count */
	size_t		 kd_nmemb;	/* [I] usable elements, excl. counter */
	size_t		 kd_size;	/* [I] buffer size in bytes */

	struct kcov_remote *kd_kr;	/* [M] attached remote, if any */

	TAILQ_ENTRY(kcov_dev)	kd_entry;	/* [M] kd_list glue */
};
71 
72 /*
73  * Remote coverage structure.
74  *
75  * Locking:
76  * 	I	immutable after creation
77  *	M	kcov_mtx
78  */
struct kcov_remote {
	struct kcov_dev *kr_kd;	/* [M] attached kcov descriptor, if any */
	void *kr_id;		/* [I] caller-supplied identifier */
	int kr_subsystem;	/* [I] KCOV_REMOTE_* subsystem */
	int kr_nsections;	/* [M] # threads in remote section */
	int kr_state;		/* [M] KCOV_STATE_* */

	TAILQ_ENTRY(kcov_remote) kr_entry;	/* [M] kr_list glue */
};
88 
89 /*
90  * Per CPU coverage structure used to track coverage when executing in a remote
91  * interrupt context.
92  *
93  * Locking:
94  * 	I	immutable after creation
95  *	M	kcov_mtx
96  */
struct kcov_cpu {
	struct kcov_dev  kc_kd;		/* coverage gathered in interrupt */
	struct kcov_dev *kc_kd_save;	/* [M] previous kcov_dev */
	int kc_cpuid;			/* [I] cpu number */

	TAILQ_ENTRY(kcov_cpu) kc_entry;	/* [I] kc_list glue */
};
104 
void kcovattach(int);

/* Coverage descriptor handling. */
int kd_init(struct kcov_dev *, unsigned long);
void kd_free(struct kcov_dev *);
struct kcov_dev *kd_lookup(int);
void kd_copy(struct kcov_dev *, struct kcov_dev *);

/* Remote coverage handling. */
struct kcov_remote *kcov_remote_register_locked(int, void *);
int kcov_remote_attach(struct kcov_dev *, struct kio_remote_attach *);
void kcov_remote_detach(struct kcov_dev *, struct kcov_remote *);
void kr_free(struct kcov_remote *);
void kr_barrier(struct kcov_remote *);
struct kcov_remote *kr_lookup(int, void *);

static struct kcov_dev *kd_curproc(int);
static struct kcov_cpu *kd_curcpu(void);
static uint64_t kd_claim(struct kcov_dev *, int, int);

/* Global lists of descriptors, remotes and per-CPU state; all [M]. */
TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);
TAILQ_HEAD(, kcov_remote) kr_list = TAILQ_HEAD_INITIALIZER(kr_list);
TAILQ_HEAD(, kcov_cpu) kc_list = TAILQ_HEAD_INITIALIZER(kc_list);

int kcov_cold = 1;	/* cleared on first kcovopen() */
int kr_cold = 1;	/* cleared once kcovattach() initialized kr_pool */
struct mutex kcov_mtx = MUTEX_INITIALIZER(IPL_MPFLOOR);
struct pool kr_pool;
131 
132 static inline int
inintr(struct cpu_info * ci)133 inintr(struct cpu_info *ci)
134 {
135 	return (ci->ci_idepth > 0);
136 }
137 
138 /*
139  * Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
140  * cause the following function to be called upon function entry and before
141  * each block of instructions that maps to a single line in the original source
142  * code.
143  *
144  * If kcov is enabled for the current thread, the kernel program counter will
145  * be stored in its corresponding coverage buffer.
146  */
void
__sanitizer_cov_trace_pc(void)
{
	struct kcov_dev *kd;
	uint64_t idx;

	/* Bail out unless curproc is tracing in PC mode right now. */
	kd = kd_curproc(KCOV_MODE_TRACE_PC);
	if (kd == NULL)
		return;

	/* kd_claim() returns 0 when the buffer is full. */
	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_PC, 1)))
		kd->kd_buf[idx] = (uintptr_t)__builtin_return_address(0);
}
160 
161 /*
162  * Compiling the kernel with the `-fsanitize-coverage=trace-cmp' option will
163  * cause the following function to be called upon integer comparisons and switch
164  * statements.
165  *
166  * If kcov is enabled for the current thread, the comparison will be stored in
167  * its corresponding coverage buffer.
168  */
169 void
trace_cmp(struct kcov_dev * kd,uint64_t type,uint64_t arg1,uint64_t arg2,uintptr_t pc)170 trace_cmp(struct kcov_dev *kd, uint64_t type, uint64_t arg1, uint64_t arg2,
171     uintptr_t pc)
172 {
173 	uint64_t idx;
174 
175 	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_CMP, 1))) {
176 		kd->kd_buf[idx] = type;
177 		kd->kd_buf[idx + 1] = arg1;
178 		kd->kd_buf[idx + 2] = arg2;
179 		kd->kd_buf[idx + 3] = pc;
180 	}
181 }
182 
/*
 * Shared body of the comparison hooks below. Must be a macro: the `return'
 * has to leave the calling hook when tracing is disabled, and
 * __builtin_return_address(0) must capture the hook's own caller.
 */
#define TRACE_CMP(type, arg1, arg2) do {				\
	struct kcov_dev *kd;						\
	if ((kd = kd_curproc(KCOV_MODE_TRACE_CMP)) == NULL)		\
		return;							\
	trace_cmp(kd, (type), (arg1), (arg2),				\
	    (uintptr_t)__builtin_return_address(0));			\
} while (0)
190 
/* 8-bit comparison hook. */
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(0), arg1, arg2);
}
196 
/* 16-bit comparison hook. */
void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(1), arg1, arg2);
}
202 
/* 32-bit comparison hook. */
void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(2), arg1, arg2);
}
208 
/* 64-bit comparison hook. */
void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(3), arg1, arg2);
}
214 
/* 8-bit comparison hook, one operand is a compile-time constant. */
void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2);
}
220 
/* 16-bit comparison hook, one operand is a compile-time constant. */
void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2);
}
226 
/* 32-bit comparison hook, one operand is a compile-time constant. */
void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2);
}
232 
/* 64-bit comparison hook, one operand is a compile-time constant. */
void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2);
}
238 
/*
 * Switch statement hook: records one comparison per case value against the
 * switched-on value, as if each case were an individual constant comparison.
 */
void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	struct kcov_dev *kd;
	uint64_t i, nbits, ncases, type;
	uintptr_t pc;

	kd = kd_curproc(KCOV_MODE_TRACE_CMP);
	if (kd == NULL)
		return;

	pc = (uintptr_t)__builtin_return_address(0);
	/* cases[0] = number of case values, cases[1] = their width in bits. */
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		/* Unsupported operand width; ignore. */
		return;
	}
	/* Case labels are compile-time constants. */
	type |= KCOV_CMP_CONST;

	/* The case values start at cases[2]. */
	for (i = 0; i < ncases; i++)
		trace_cmp(kd, type, cases[i + 2], val, pc);
}
275 
/*
 * Boot-time attach: set up the remote pool and one preallocated coverage
 * descriptor per CPU, used when tracing remote sections in interrupt context.
 */
void
kcovattach(int count)
{
	struct kcov_cpu *kc;
	int error, i;

	pool_init(&kr_pool, sizeof(struct kcov_remote), 0, IPL_MPFLOOR, PR_WAITOK,
	    "kcovpl", NULL);

	kc = mallocarray(ncpusfound, sizeof(*kc), M_DEVBUF, M_WAITOK | M_ZERO);
	mtx_enter(&kcov_mtx);
	for (i = 0; i < ncpusfound; i++) {
		kc[i].kc_cpuid = i;
		/* Cannot fail: nmemb is within bounds and state is NONE. */
		error = kd_init(&kc[i].kc_kd, KCOV_BUF_MAX_NMEMB);
		KASSERT(error == 0);
		TAILQ_INSERT_TAIL(&kc_list, &kc[i], kc_entry);
	}
	mtx_leave(&kcov_mtx);

	/* Remote registrations are safe from this point on. */
	kr_cold = 0;
}
297 
/*
 * D_CLONE open: each open gets a unique minor. Allocate the backing coverage
 * descriptor and put it on the global list.
 */
int
kcovopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
	kd->kd_unit = minor(dev);
	mtx_enter(&kcov_mtx);
	KASSERT(kd_lookup(kd->kd_unit) == NULL);
	TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
	/* The first open ever enables the coverage hooks, see kd_curproc(). */
	if (kcov_cold)
		kcov_cold = 0;
	mtx_leave(&kcov_mtx);
	return (0);
}
313 
/*
 * Close the device: unlink the descriptor and free it, unless a thread is
 * still tracing with it, in which case freeing is deferred to kcov_exit().
 */
int
kcovclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (ENXIO);
	}

	TAILQ_REMOVE(&kd_list, kd, kd_entry);
	if (kd->kd_state == KCOV_STATE_TRACE && kd->kd_kr == NULL) {
		/*
		 * Another thread is currently using the kcov descriptor,
		 * postpone freeing to kcov_exit().
		 */
		kd->kd_state = KCOV_STATE_DYING;
		kd->kd_mode = KCOV_MODE_NONE;
	} else {
		kd_free(kd);
	}

	mtx_leave(&kcov_mtx);
	return (0);
}
342 
/*
 * ioctl entry point driving the descriptor state machine:
 * KIOSETBUFSIZE allocates the buffer (NONE -> READY), KIOENABLE starts
 * tracing (READY -> TRACE), KIODISABLE stops it (TRACE -> READY) and
 * KIOREMOTEATTACH ties the descriptor to a remote section.
 */
int
kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kcov_dev *kd;
	int mode;
	int error = 0;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (ENXIO);
	}

	switch (cmd) {
	case KIOSETBUFSIZE:
		/* Note: kd_init() temporarily drops kcov_mtx. */
		error = kd_init(kd, *((unsigned long *)data));
		break;
	case KIOENABLE:
		/* Only one kcov descriptor can be enabled per thread. */
		if (p->p_kd != NULL) {
			error = EBUSY;
			break;
		}
		if (kd->kd_state != KCOV_STATE_READY) {
			error = ENXIO;
			break;
		}
		mode = *((int *)data);
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}
		kd->kd_state = KCOV_STATE_TRACE;
		kd->kd_mode = mode;
		/* Remote coverage is mutually exclusive. */
		if (kd->kd_kr == NULL)
			p->p_kd = kd;
		break;
	case KIODISABLE:
		/* Only the enabled thread may disable itself. */
		if ((p->p_kd != kd && kd->kd_kr == NULL)) {
			error = EPERM;
			break;
		}
		if (kd->kd_state != KCOV_STATE_TRACE) {
			error = ENXIO;
			break;
		}
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		/* Wait for any remote sections still using this descriptor. */
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
		break;
	case KIOREMOTEATTACH:
		error = kcov_remote_attach(kd,
		    (struct kio_remote_attach *)data);
		break;
	default:
		error = ENOTTY;
	}
	mtx_leave(&kcov_mtx);

	return (error);
}
410 
/*
 * Translate an offset into the coverage buffer to the backing physical
 * address so the buffer can be mapped into user space. Returns -1 on any
 * failure (unknown minor, offset out of range, no mapping).
 */
paddr_t
kcovmmap(dev_t dev, off_t offset, int prot)
{
	struct kcov_dev *kd;
	paddr_t pa = -1;
	vaddr_t va;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		goto out;

	/* Reject offsets outside the usable part of the buffer. */
	if (offset < 0 || offset >= kd->kd_nmemb * KCOV_BUF_MEMB_SIZE)
		goto out;

	va = (vaddr_t)kd->kd_buf + offset;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		pa = -1;

out:
	mtx_leave(&kcov_mtx);
	return (pa);
}
435 
/*
 * Thread exit hook: detach the exiting thread's coverage descriptor.
 * If kcovclose() already ran (KCOV_STATE_DYING) the descriptor is freed
 * here; otherwise it is returned to the READY state.
 */
void
kcov_exit(struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = p->p_kd;
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return;
	}

	if (kd->kd_state == KCOV_STATE_DYING) {
		p->p_kd = NULL;
		kd_free(kd);
	} else {
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		/* Wait for any remote sections still using this descriptor. */
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
	}

	mtx_leave(&kcov_mtx);
}
462 
463 /*
464  * Returns non-zero if the given vnode refers to a kcov device.
465  */
466 int
kcov_vnode(struct vnode * vp)467 kcov_vnode(struct vnode *vp)
468 {
469 	return (vp->v_type == VCHR &&
470 	    cdevsw[major(vp->v_rdev)].d_open == kcovopen);
471 }
472 
473 struct kcov_dev *
kd_lookup(int unit)474 kd_lookup(int unit)
475 {
476 	struct kcov_dev *kd;
477 
478 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
479 
480 	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
481 		if (kd->kd_unit == unit)
482 			return (kd);
483 	}
484 	return (NULL);
485 }
486 
/*
 * Copy the used portion of the src coverage buffer into dst. Both
 * descriptors must be in the same trace mode. The data is silently dropped
 * if dst cannot accommodate it.
 */
void
kd_copy(struct kcov_dev *dst, struct kcov_dev *src)
{
	uint64_t idx, nmemb;
	int stride;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);
	KASSERT(dst->kd_mode == src->kd_mode);

	/* Element 0 holds the number of records stored in src. */
	nmemb = src->kd_buf[0];
	if (nmemb == 0)
		return;
	stride = src->kd_mode == KCOV_MODE_TRACE_CMP ? KCOV_STRIDE_TRACE_CMP :
	    KCOV_STRIDE_TRACE_PC;
	/* Reserve room in dst; kd_claim() returns 0 when full. */
	idx = kd_claim(dst, stride, nmemb);
	if (idx == 0)
		return;
	memcpy(&dst->kd_buf[idx], &src->kd_buf[1],
	    stride * nmemb * KCOV_BUF_MEMB_SIZE);
}
507 
/*
 * Allocate the coverage buffer for a descriptor and move it to the READY
 * state. Called with kcov_mtx held; the mutex is temporarily dropped around
 * the sleeping allocation and re-held on return (on both success and error).
 */
int
kd_init(struct kcov_dev *kd, unsigned long nmemb)
{
	void *buf;
	size_t size;
	int error;

	KASSERT(kd->kd_buf == NULL);

	if (kd->kd_state != KCOV_STATE_NONE)
		return (EBUSY);

	if (nmemb == 0 || nmemb > KCOV_BUF_MAX_NMEMB)
		return (EINVAL);

	size = roundup(nmemb * KCOV_BUF_MEMB_SIZE, PAGE_SIZE);
	mtx_leave(&kcov_mtx);
	buf = km_alloc(size, &kv_any, &kp_zero, &kd_waitok);
	if (buf == NULL) {
		error = ENOMEM;
		goto err;
	}
	/*
	 * km_alloc() can sleep, ensure the race was won.
	 * NOTE(review): this re-check runs before re-acquiring kcov_mtx, so
	 * kd_state is read without the mutex here — confirm this is intended.
	 */
	if (kd->kd_state != KCOV_STATE_NONE) {
		error = EBUSY;
		goto err;
	}
	mtx_enter(&kcov_mtx);
	kd->kd_buf = buf;
	/* The first element is reserved to hold the number of used elements. */
	kd->kd_nmemb = nmemb - 1;
	kd->kd_size = size;
	kd->kd_state = KCOV_STATE_READY;
	return (0);

err:
	if (buf != NULL)
		km_free(buf, size, &kv_any, &kp_zero);
	mtx_enter(&kcov_mtx);
	return (error);
}
549 
/*
 * Tear down a coverage descriptor: detach any remote, release the buffer
 * and free the descriptor itself. Called with kcov_mtx held; the mutex is
 * dropped around km_free() which may sleep.
 */
void
kd_free(struct kcov_dev *kd)
{
	struct kcov_remote *kr;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	kr = kd->kd_kr;
	if (kr != NULL)
		kcov_remote_detach(kd, kr);

	if (kd->kd_buf != NULL) {
		mtx_leave(&kcov_mtx);
		km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero);
		mtx_enter(&kcov_mtx);
	}
	free(kd, M_SUBPROC, sizeof(*kd));
}
568 
/*
 * Hot-path helper for the coverage hooks: return curproc's coverage
 * descriptor if it is enabled in the requested mode and tracing is
 * currently permitted, NULL otherwise. Runs without kcov_mtx.
 */
static struct kcov_dev *
kd_curproc(int mode)
{
	struct cpu_info *ci;
	struct kcov_dev *kd;

	/*
	 * Do not trace before kcovopen() has been called at least once.
	 * At this point, all secondary CPUs have booted and accessing curcpu()
	 * is safe.
	 */
	if (__predict_false(kcov_cold))
		return (NULL);

	ci = curcpu();
	kd = ci->ci_curproc->p_kd;
	if (__predict_true(kd == NULL) || kd->kd_mode != mode)
		return (NULL);

	/*
	 * Do not trace if the kernel has panicked. This could happen if curproc
	 * had kcov enabled while panicking.
	 */
	if (__predict_false(panicstr || db_active))
		return (NULL);

	/* Do not trace in interrupt context unless this is a remote section. */
	if (inintr(ci) && kd->kd_intr == 0)
		return (NULL);

	return (kd);

}
602 
603 static struct kcov_cpu *
kd_curcpu(void)604 kd_curcpu(void)
605 {
606 	struct kcov_cpu *kc;
607 	unsigned int cpuid = cpu_number();
608 
609 	TAILQ_FOREACH(kc, &kc_list, kc_entry) {
610 		if (kc->kc_cpuid == cpuid)
611 			return (kc);
612 	}
613 	return (NULL);
614 }
615 
/*
 * Claim stride times nmemb number of elements in the coverage buffer. Returns
 * the index of the first claimed element. If the claim cannot be fulfilled,
 * zero is returned.
 *
 * Lock-free: element 0 of the buffer is the record counter, advanced with a
 * compare-and-swap so concurrent hooks on other CPUs cannot claim the same
 * slot.
 */
static uint64_t
kd_claim(struct kcov_dev *kd, int stride, int nmemb)
{
	uint64_t idx, was;

	idx = kd->kd_buf[0];
	for (;;) {
		/* Buffer full: drop the record. */
		if (stride * (idx + nmemb) > kd->kd_nmemb)
			return (0);

		was = atomic_cas_ulong(&kd->kd_buf[0], idx, idx + nmemb);
		if (was == idx)
			/* +1 skips the counter element at index 0. */
			return (idx * stride + 1);
		/* Lost the race; retry with the updated counter. */
		idx = was;
	}
}
637 
/*
 * Enter a remote section for the given subsystem/id pair. If a traced
 * descriptor is attached, coverage for the current thread is redirected to
 * it — or, in interrupt context, to the per-CPU descriptor which is later
 * merged back in kcov_remote_leave().
 */
void
kcov_remote_enter(int subsystem, void *id)
{
	struct cpu_info *ci;
	struct kcov_cpu *kc;
	struct kcov_dev *kd;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	kr = kr_lookup(subsystem, id);
	if (kr == NULL || kr->kr_state != KCOV_STATE_READY)
		goto out;
	kd = kr->kr_kd;
	if (kd == NULL || kd->kd_state != KCOV_STATE_TRACE)
		goto out;
	ci = curcpu();
	p = ci->ci_curproc;
	if (inintr(ci)) {
		/*
		 * XXX we only expect to be called from softclock interrupts at
		 * this point.
		 */
		kc = kd_curcpu();
		/* Bail out if the per-CPU descriptor is already in use. */
		if (kc == NULL || kc->kc_kd.kd_intr == 1)
			goto out;
		kc->kc_kd.kd_state = KCOV_STATE_TRACE;
		kc->kc_kd.kd_mode = kd->kd_mode;
		kc->kc_kd.kd_intr = 1;
		/* Remember the interrupted thread's descriptor, if any. */
		kc->kc_kd_save = p->p_kd;
		kd = &kc->kc_kd;
		/* Reset coverage buffer. */
		kd->kd_buf[0] = 0;
	} else {
		KASSERT(p->p_kd == NULL);
	}
	kr->kr_nsections++;
	p->p_kd = kd;

out:
	mtx_leave(&kcov_mtx);
}
680 
/*
 * Leave a remote section: undo kcov_remote_enter(). In interrupt context
 * the per-CPU coverage is merged into the remote's descriptor and the
 * interrupted thread's descriptor is restored. Wakes up kr_barrier()
 * waiters once the last section has left.
 */
void
kcov_remote_leave(int subsystem, void *id)
{
	struct cpu_info *ci;
	struct kcov_cpu *kc;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	ci = curcpu();
	p = ci->ci_curproc;
	if (p->p_kd == NULL)
		goto out;
	kr = kr_lookup(subsystem, id);
	if (kr == NULL)
		goto out;
	if (inintr(ci)) {
		kc = kd_curcpu();
		if (kc == NULL || kc->kc_kd.kd_intr == 0)
			goto out;

		/*
		 * Stop writing to the coverage buffer associated with this CPU
		 * before copying its contents.
		 */
		p->p_kd = kc->kc_kd_save;
		kc->kc_kd_save = NULL;

		kd_copy(kr->kr_kd, &kc->kc_kd);
		kc->kc_kd.kd_state = KCOV_STATE_READY;
		kc->kc_kd.kd_mode = KCOV_MODE_NONE;
		kc->kc_kd.kd_intr = 0;
	} else {
		KASSERT(p->p_kd == kr->kr_kd);
		p->p_kd = NULL;
	}
	/* Last one out notifies kr_barrier(). */
	if (--kr->kr_nsections == 0)
		wakeup(kr);
out:
	mtx_leave(&kcov_mtx);
}
722 
723 void
kcov_remote_register(int subsystem,void * id)724 kcov_remote_register(int subsystem, void *id)
725 {
726 	mtx_enter(&kcov_mtx);
727 	kcov_remote_register_locked(subsystem, id);
728 	mtx_leave(&kcov_mtx);
729 }
730 
731 void
kcov_remote_unregister(int subsystem,void * id)732 kcov_remote_unregister(int subsystem, void *id)
733 {
734 	struct kcov_remote *kr;
735 
736 	mtx_enter(&kcov_mtx);
737 	kr = kr_lookup(subsystem, id);
738 	if (kr != NULL)
739 		kr_free(kr);
740 	mtx_leave(&kcov_mtx);
741 }
742 
/*
 * Allocate and insert a new remote for the given subsystem/id pair.
 * Returns NULL if an equivalent remote is already registered. If an
 * existing entry is being torn down (DYING), wait for it to disappear and
 * retry. Called with kcov_mtx held; the mutex is dropped around the
 * sleeping pool allocation.
 */
struct kcov_remote *
kcov_remote_register_locked(int subsystem, void *id)
{
	struct kcov_remote *kr, *tmp;

	/* Do not allow registrations before the pool is initialized. */
	KASSERT(kr_cold == 0);

	/*
	 * Temporarily release the mutex since the allocation could end up
	 * sleeping.
	 */
	mtx_leave(&kcov_mtx);
	kr = pool_get(&kr_pool, PR_WAITOK | PR_ZERO);
	kr->kr_subsystem = subsystem;
	kr->kr_id = id;
	kr->kr_state = KCOV_STATE_NONE;
	mtx_enter(&kcov_mtx);

	for (;;) {
		tmp = kr_lookup(subsystem, id);
		if (tmp == NULL)
			break;
		if (tmp->kr_state != KCOV_STATE_DYING) {
			/* Duplicate registration; give back the allocation. */
			pool_put(&kr_pool, kr);
			return (NULL);
		}
		/*
		 * The remote could already be deregistered while another
		 * thread is currently inside a kcov remote section.
		 */
		msleep_nsec(tmp, &kcov_mtx, PWAIT, "kcov", INFSLP);
	}
	TAILQ_INSERT_TAIL(&kr_list, kr, kr_entry);
	return (kr);
}
779 
/*
 * KIOREMOTEATTACH handler: tie the coverage descriptor to a remote.
 * Only KCOV_REMOTE_COMMON is supported, keyed on the calling process so
 * the registration is private to it.
 */
int
kcov_remote_attach(struct kcov_dev *kd, struct kio_remote_attach *arg)
{
	struct kcov_remote *kr = NULL;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	if (kd->kd_state != KCOV_STATE_READY)
		return (ENXIO);

	if (arg->subsystem == KCOV_REMOTE_COMMON) {
		kr = kcov_remote_register_locked(KCOV_REMOTE_COMMON,
		    curproc->p_p);
		if (kr == NULL)
			return (EBUSY);
	} else {
		return (EINVAL);
	}

	/* Link remote and descriptor in both directions. */
	kr->kr_state = KCOV_STATE_READY;
	kr->kr_kd = kd;
	kd->kd_kr = kr;
	return (0);
}
804 
805 void
kcov_remote_detach(struct kcov_dev * kd,struct kcov_remote * kr)806 kcov_remote_detach(struct kcov_dev *kd, struct kcov_remote *kr)
807 {
808 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
809 
810 	KASSERT(kd == kr->kr_kd);
811 	if (kr->kr_subsystem == KCOV_REMOTE_COMMON) {
812 		kr_free(kr);
813 	} else {
814 		kr->kr_state = KCOV_STATE_NONE;
815 		kr_barrier(kr);
816 		kd->kd_kr = NULL;
817 		kr->kr_kd = NULL;
818 	}
819 }
820 
/*
 * Destroy a remote: mark it DYING, drain in-flight sections, unlink it from
 * its descriptor and the global list, then return it to the pool. The final
 * wakeup releases threads blocked in kcov_remote_register_locked().
 */
void
kr_free(struct kcov_remote *kr)
{
	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	kr->kr_state = KCOV_STATE_DYING;
	kr_barrier(kr);
	if (kr->kr_kd != NULL)
		kr->kr_kd->kd_kr = NULL;
	kr->kr_kd = NULL;
	TAILQ_REMOVE(&kr_list, kr, kr_entry);
	/* Notify thread(s) waiting in kcov_remote_register(). */
	wakeup(kr);
	pool_put(&kr_pool, kr);
}
836 
837 void
kr_barrier(struct kcov_remote * kr)838 kr_barrier(struct kcov_remote *kr)
839 {
840 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
841 
842 	while (kr->kr_nsections > 0)
843 		msleep_nsec(kr, &kcov_mtx, PWAIT, "kcovbar", INFSLP);
844 }
845 
846 struct kcov_remote *
kr_lookup(int subsystem,void * id)847 kr_lookup(int subsystem, void *id)
848 {
849 	struct kcov_remote *kr;
850 
851 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
852 
853 	TAILQ_FOREACH(kr, &kr_list, kr_entry) {
854 		if (kr->kr_subsystem == subsystem && kr->kr_id == id)
855 			return (kr);
856 	}
857 	return (NULL);
858 }
859