/*	$OpenBSD: kcov.c,v 1.36 2020/10/10 07:07:46 anton Exp $	*/

/*
 * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kcov.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/stdint.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#define KCOV_BUF_MEMB_SIZE	sizeof(uintptr_t)
#define KCOV_BUF_MAX_NMEMB	(256 << 10)

#define KCOV_CMP_CONST		0x1
#define KCOV_CMP_SIZE(x)	((x) << 1)

#define KCOV_STATE_NONE		0
#define KCOV_STATE_READY	1
#define KCOV_STATE_TRACE	2
#define KCOV_STATE_DYING	3

#define KCOV_STRIDE_TRACE_PC	1
#define KCOV_STRIDE_TRACE_CMP	4

/*
 * Coverage structure.
 *
 * Locking:
 * 	I	immutable after creation
 *	M	kcov_mtx
 *	a	atomic operations
 */
struct kcov_dev {
	int		 kd_state;	/* [M] */
	int		 kd_mode;	/* [M] */
	int		 kd_unit;	/* [I] device minor */
	int		 kd_intr;	/* [M] currently used in interrupt */
	uintptr_t	*kd_buf;	/* [a] traced coverage */
	size_t		 kd_nmemb;	/* [I] */
	size_t		 kd_size;	/* [I] */

	struct kcov_remote *kd_kr;	/* [M] */

	TAILQ_ENTRY(kcov_dev)	kd_entry;	/* [M] */
};

/*
 * Remote coverage structure.
 *
 * Locking:
 * 	I	immutable after creation
 *	M	kcov_mtx
 */
struct kcov_remote {
	struct kcov_dev *kr_kd;	/* [M] */
	void *kr_id;		/* [I] */
	int kr_subsystem;	/* [I] */
	int kr_nsections;	/* [M] # threads in remote section */
	int kr_state;		/* [M] */

	TAILQ_ENTRY(kcov_remote) kr_entry;	/* [M] */
};

/*
 * Per CPU coverage structure used to track coverage when executing in a remote
 * interrupt context.
 *
 * Locking:
 * 	I	immutable after creation
 *	M	kcov_mtx
 */
struct kcov_cpu {
	struct kcov_dev  kc_kd;
	struct kcov_dev *kc_kd_save;	/* [M] previous kcov_dev */
	int kc_cpuid;			/* [I] cpu number */

	TAILQ_ENTRY(kcov_cpu) kc_entry;	/* [I] */
};

void kcovattach(int);

int kd_init(struct kcov_dev *, unsigned long);
void kd_free(struct kcov_dev *);
struct kcov_dev *kd_lookup(int);
void kd_put(struct kcov_dev *, struct kcov_dev *);

struct kcov_remote *kcov_remote_register_locked(int, void *);
int kcov_remote_attach(struct kcov_dev *, struct kio_remote_attach *);
void kcov_remote_detach(struct kcov_dev *, struct kcov_remote *);
void kr_free(struct kcov_remote *);
void kr_barrier(struct kcov_remote *);
struct kcov_remote *kr_lookup(int, void *);

static struct kcov_dev *kd_curproc(int);
static struct kcov_cpu *kd_curcpu(void);
static uint64_t kd_claim(struct kcov_dev *, int, int);
static inline int inintr(void);

TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);
TAILQ_HEAD(, kcov_remote) kr_list = TAILQ_HEAD_INITIALIZER(kr_list);
TAILQ_HEAD(, kcov_cpu) kc_list = TAILQ_HEAD_INITIALIZER(kc_list);

int kcov_cold = 1;
int kr_cold = 1;
struct mutex kcov_mtx = MUTEX_INITIALIZER(IPL_MPFLOOR);
struct pool kr_pool;

/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
 * cause the following function to be called upon function entry and before
 * each block of instructions that maps to a single line in the original source
 * code.
 *
 * If kcov is enabled for the current thread, the kernel program counter will
 * be stored in its corresponding coverage buffer.
 */
void
__sanitizer_cov_trace_pc(void)
{
	struct kcov_dev *kd;
	uint64_t idx;

	kd = kd_curproc(KCOV_MODE_TRACE_PC);
	if (kd == NULL)
		return;

	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_PC, 1)))
		kd->kd_buf[idx] = (uintptr_t)__builtin_return_address(0);
}

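/*
 * Illustration only, not part of the driver: under `-fsanitize-coverage=trace-pc'
 * a hypothetical function such as
 *
 *	int
 *	example(int a)
 *	{
 *		if (a > 0)
 *			return (1);
 *		return (0);
 *	}
 *
 * is compiled roughly as if it had been written as
 *
 *	int
 *	example(int a)
 *	{
 *		__sanitizer_cov_trace_pc();	// function entry
 *		if (a > 0) {
 *			__sanitizer_cov_trace_pc();	// branch taken
 *			return (1);
 *		}
 *		__sanitizer_cov_trace_pc();	// branch not taken
 *		return (0);
 *	}
 *
 * Each call records __builtin_return_address(0), i.e. the address of the
 * instrumented location, into the enabled thread's coverage buffer.
 */
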
/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-cmp' option will
 * cause the following function to be called upon integer comparisons and switch
 * statements.
 *
 * If kcov is enabled for the current thread, the comparison will be stored in
 * its corresponding coverage buffer.
 */
void
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, uintptr_t pc)
{
	struct kcov_dev *kd;
	uint64_t idx;

	kd = kd_curproc(KCOV_MODE_TRACE_CMP);
	if (kd == NULL)
		return;

	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_CMP, 1))) {
		kd->kd_buf[idx] = type;
		kd->kd_buf[idx + 1] = arg1;
		kd->kd_buf[idx + 2] = arg2;
		kd->kd_buf[idx + 3] = pc;
	}
}

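/*
 * Sketch of how a consumer could decode the records written above; the
 * variable `buf' stands for the mmap()ed coverage buffer and is an assumption
 * of this illustration, not part of the driver.  In KCOV_MODE_TRACE_CMP each
 * record occupies KCOV_STRIDE_TRACE_CMP (4) consecutive elements and buf[0]
 * holds the number of records:
 *
 *	unsigned long n = buf[0];
 *	for (unsigned long i = 0; i < n; i++) {
 *		const unsigned long *rec = &buf[1 + 4 * i];
 *		unsigned long type = rec[0], arg1 = rec[1], arg2 = rec[2];
 *		unsigned long pc = rec[3];
 *		int is_const = type & KCOV_CMP_CONST;	// one operand constant
 *		int size = 1 << (type >> 1);		// operand size in bytes
 *		...
 *	}
 */
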
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (uintptr_t)__builtin_return_address(0));
}

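/*
 * The cases table handed to the following function by the compiler is laid
 * out with cases[0] holding the number of case values, cases[1] the width of
 * the switch operand in bits and the case values themselves starting at
 * cases[2].  Every case value is reported as a constant comparison against
 * the switch argument.
 */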
void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	uint64_t i, nbits, ncases, type;
	uintptr_t pc;

	pc = (uintptr_t)__builtin_return_address(0);
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	type |= KCOV_CMP_CONST;

	for (i = 0; i < ncases; i++)
		trace_cmp(type, cases[i + 2], val, pc);
}

void
kcovattach(int count)
{
	struct kcov_cpu *kc;
	int error, i;

	pool_init(&kr_pool, sizeof(struct kcov_remote), 0, IPL_MPFLOOR, PR_WAITOK,
	    "kcovpl", NULL);

	kc = mallocarray(ncpusfound, sizeof(*kc), M_DEVBUF, M_WAITOK | M_ZERO);
	mtx_enter(&kcov_mtx);
	for (i = 0; i < ncpusfound; i++) {
		kc[i].kc_cpuid = i;
		error = kd_init(&kc[i].kc_kd, KCOV_BUF_MAX_NMEMB);
		KASSERT(error == 0);
		TAILQ_INSERT_TAIL(&kc_list, &kc[i], kc_entry);
	}
	mtx_leave(&kcov_mtx);

	kr_cold = 0;
}

int
kcovopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	if (kd_lookup(minor(dev)) != NULL) {
		mtx_leave(&kcov_mtx);
		return (EBUSY);
	}

	if (kcov_cold)
		kcov_cold = 0;

	mtx_leave(&kcov_mtx);
	kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
	kd->kd_unit = minor(dev);
	mtx_enter(&kcov_mtx);
	TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
	mtx_leave(&kcov_mtx);
	return (0);
}

int
kcovclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (EINVAL);
	}

	if (kd->kd_state == KCOV_STATE_TRACE && kd->kd_kr == NULL) {
		/*
		 * Another thread is currently using the kcov descriptor,
		 * postpone freeing to kcov_exit().
		 */
		kd->kd_state = KCOV_STATE_DYING;
		kd->kd_mode = KCOV_MODE_NONE;
	} else {
		kd_free(kd);
	}

	mtx_leave(&kcov_mtx);
	return (0);
}

int
kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kcov_dev *kd;
	int mode;
	int error = 0;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (ENXIO);
	}

	switch (cmd) {
	case KIOSETBUFSIZE:
		error = kd_init(kd, *((unsigned long *)data));
		break;
	case KIOENABLE:
		/* Only one kcov descriptor can be enabled per thread. */
		if (p->p_kd != NULL || kd->kd_state != KCOV_STATE_READY) {
			error = EBUSY;
			break;
		}
		mode = *((int *)data);
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}
		kd->kd_state = KCOV_STATE_TRACE;
		kd->kd_mode = mode;
		/* Remote coverage is mutually exclusive. */
		if (kd->kd_kr == NULL)
			p->p_kd = kd;
		break;
	case KIODISABLE:
		/* Only the enabled thread may disable itself. */
		if ((p->p_kd != kd && kd->kd_kr == NULL) ||
		    kd->kd_state != KCOV_STATE_TRACE) {
			error = EBUSY;
			break;
		}
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
		break;
	case KIOREMOTEATTACH:
		error = kcov_remote_attach(kd,
		    (struct kio_remote_attach *)data);
		break;
	default:
		error = ENOTTY;
	}
	mtx_leave(&kcov_mtx);

	return (error);
}

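/*
 * Example userspace consumer, loosely following kcov(4); illustration only
 * and not compiled as part of the kernel.  The exact error handling and the
 * traced system call are placeholders.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/kcov.h>
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		unsigned long *cover, i, n;
 *		unsigned long size = 1024;
 *		int fd, mode;
 *
 *		fd = open("/dev/kcov", O_RDWR);
 *		if (fd == -1)
 *			err(1, "open");
 *		if (ioctl(fd, KIOSETBUFSIZE, &size) == -1)
 *			err(1, "KIOSETBUFSIZE");
 *		cover = mmap(NULL, size * sizeof(unsigned long),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		if (cover == MAP_FAILED)
 *			err(1, "mmap");
 *		mode = KCOV_MODE_TRACE_PC;
 *		if (ioctl(fd, KIOENABLE, &mode) == -1)
 *			err(1, "KIOENABLE");
 *		cover[0] = 0;
 *		read(-1, NULL, 0);		// system call under test
 *		n = cover[0];
 *		if (ioctl(fd, KIODISABLE) == -1)
 *			err(1, "KIODISABLE");
 *		for (i = 0; i < n; i++)
 *			printf("%lx\n", cover[i + 1]);
 *		return (0);
 *	}
 */
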
paddr_t
kcovmmap(dev_t dev, off_t offset, int prot)
{
	struct kcov_dev *kd;
	paddr_t pa = -1;
	vaddr_t va;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		goto out;

	if (offset < 0 || offset >= kd->kd_nmemb * KCOV_BUF_MEMB_SIZE)
		goto out;

	va = (vaddr_t)kd->kd_buf + offset;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		pa = -1;

out:
	mtx_leave(&kcov_mtx);
	return (pa);
}

void
kcov_exit(struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = p->p_kd;
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return;
	}

	if (kd->kd_state == KCOV_STATE_DYING) {
		p->p_kd = NULL;
		kd_free(kd);
	} else {
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
	}

	mtx_leave(&kcov_mtx);
}

struct kcov_dev *
kd_lookup(int unit)
{
	struct kcov_dev *kd;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
		if (kd->kd_unit == unit)
			return (kd);
	}
	return (NULL);
}

void
kd_put(struct kcov_dev *dst, struct kcov_dev *src)
{
	uint64_t idx, nmemb;
	int stride;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);
	KASSERT(dst->kd_mode == src->kd_mode);

	nmemb = src->kd_buf[0];
	if (nmemb == 0)
		return;
	stride = src->kd_mode == KCOV_MODE_TRACE_CMP ? KCOV_STRIDE_TRACE_CMP :
	    KCOV_STRIDE_TRACE_PC;
	idx = kd_claim(dst, stride, nmemb);
	if (idx == 0)
		return;
	memcpy(&dst->kd_buf[idx], &src->kd_buf[1],
	    stride * nmemb * KCOV_BUF_MEMB_SIZE);
}

int
kd_init(struct kcov_dev *kd, unsigned long nmemb)
{
	void *buf;
	size_t size;
	int error;

	KASSERT(kd->kd_buf == NULL);

	if (kd->kd_state != KCOV_STATE_NONE)
		return (EBUSY);

	if (nmemb == 0 || nmemb > KCOV_BUF_MAX_NMEMB)
		return (EINVAL);

	size = roundup(nmemb * KCOV_BUF_MEMB_SIZE, PAGE_SIZE);
	mtx_leave(&kcov_mtx);
	buf = km_alloc(size, &kv_any, &kp_zero, &kd_waitok);
	if (buf == NULL) {
		error = ENOMEM;
		goto err;
	}
	/* km_alloc() can sleep, ensure the race was won. */
	if (kd->kd_state != KCOV_STATE_NONE) {
		error = EBUSY;
		goto err;
	}
	mtx_enter(&kcov_mtx);
	kd->kd_buf = buf;
	/* The first element is reserved to hold the number of used elements. */
	kd->kd_nmemb = nmemb - 1;
	kd->kd_size = size;
	kd->kd_state = KCOV_STATE_READY;
	return (0);

err:
	if (buf != NULL)
		km_free(buf, size, &kv_any, &kp_zero);
	mtx_enter(&kcov_mtx);
	return (error);
}

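/*
 * Sizing example for the function above, assuming a 64-bit platform with
 * 4096-byte pages and nmemb == 512: size = roundup(512 * 8, PAGE_SIZE) =
 * 4096 bytes and kd_nmemb = 511, since kd_buf[0] is reserved for the element
 * counter.
 */
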
void
kd_free(struct kcov_dev *kd)
{
	struct kcov_remote *kr;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	TAILQ_REMOVE(&kd_list, kd, kd_entry);

	kr = kd->kd_kr;
	if (kr != NULL)
		kcov_remote_detach(kd, kr);

	if (kd->kd_buf != NULL) {
		mtx_leave(&kcov_mtx);
		km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero);
		mtx_enter(&kcov_mtx);
	}
	free(kd, M_SUBPROC, sizeof(*kd));
}

static struct kcov_dev *
kd_curproc(int mode)
{
	struct kcov_dev *kd;

	/*
	 * Do not trace if the kernel has panicked. This could happen if curproc
	 * had kcov enabled while panicking.
	 */
	if (__predict_false(panicstr || db_active))
		return (NULL);

	/*
	 * Do not trace before kcovopen() has been called at least once.
	 * At this point, all secondary CPUs have booted and accessing curcpu()
	 * is safe.
	 */
	if (__predict_false(kcov_cold))
		return (NULL);

	kd = curproc->p_kd;
	if (__predict_true(kd == NULL) || kd->kd_mode != mode)
		return (NULL);
	if (inintr() && kd->kd_intr == 0)
		return (NULL);
	return (kd);
}

static struct kcov_cpu *
kd_curcpu(void)
{
	struct kcov_cpu *kc;
	unsigned int cpuid = cpu_number();

	TAILQ_FOREACH(kc, &kc_list, kc_entry) {
		if (kc->kc_cpuid == cpuid)
			return (kc);
	}
	return (NULL);
}

/*
 * Claim stride * nmemb elements in the coverage buffer. Returns the index of
 * the first claimed element. If the claim cannot be fulfilled, zero is
 * returned.
 */
static uint64_t
kd_claim(struct kcov_dev *kd, int stride, int nmemb)
{
	uint64_t idx, was;

	idx = kd->kd_buf[0];
	for (;;) {
		if (stride * (idx + nmemb) > kd->kd_nmemb)
			return (0);

		was = atomic_cas_ulong(&kd->kd_buf[0], idx, idx + nmemb);
		if (was == idx)
			return (idx * stride + 1);
		idx = was;
	}
}

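/*
 * Worked example of the claim above, for illustration: in KCOV_MODE_TRACE_CMP
 * (stride 4) the first successful claim observes kd_buf[0] == 0, advances it
 * to 1 and returns 0 * 4 + 1 = 1, so the record occupies kd_buf[1..4] while
 * kd_buf[0] keeps counting records.  A claim that would step past kd_nmemb
 * returns 0, which callers treat as "buffer full".
 */
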
static inline int
inintr(void)
{
#if defined(__amd64__) || defined(__arm__) || defined(__arm64__) || \
    defined(__i386__)
	return (curcpu()->ci_idepth > 0);
#else
	return (0);
#endif
}

void
kcov_remote_enter(int subsystem, void *id)
{
	struct kcov_cpu *kc;
	struct kcov_dev *kd;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	kr = kr_lookup(subsystem, id);
	if (kr == NULL || kr->kr_state != KCOV_STATE_READY)
		goto out;
	kd = kr->kr_kd;
	if (kd == NULL || kd->kd_state != KCOV_STATE_TRACE)
		goto out;
	p = curproc;
	if (inintr()) {
		/*
		 * XXX we only expect to be called from softclock interrupts at
		 * this point.
		 */
		kc = kd_curcpu();
		if (kc == NULL || kc->kc_kd.kd_intr == 1)
			goto out;
		kc->kc_kd.kd_state = KCOV_STATE_TRACE;
		kc->kc_kd.kd_mode = kd->kd_mode;
		kc->kc_kd.kd_intr = 1;
		kc->kc_kd_save = p->p_kd;
		kd = &kc->kc_kd;
		/* Reset coverage buffer. */
		kd->kd_buf[0] = 0;
	} else {
		KASSERT(p->p_kd == NULL);
	}
	kr->kr_nsections++;
	p->p_kd = kd;

out:
	mtx_leave(&kcov_mtx);
}

void
kcov_remote_leave(int subsystem, void *id)
{
	struct kcov_cpu *kc;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	p = curproc;
	if (p->p_kd == NULL)
		goto out;
	kr = kr_lookup(subsystem, id);
	if (kr == NULL)
		goto out;
	if (inintr()) {
		kc = kd_curcpu();
		if (kc == NULL || kc->kc_kd.kd_intr == 0)
			goto out;

		/*
		 * Stop writing to the coverage buffer associated with this CPU
		 * before copying its contents.
		 */
		p->p_kd = kc->kc_kd_save;
		kc->kc_kd_save = NULL;

		kd_put(kr->kr_kd, &kc->kc_kd);
		kc->kc_kd.kd_state = KCOV_STATE_READY;
		kc->kc_kd.kd_mode = KCOV_MODE_NONE;
		kc->kc_kd.kd_intr = 0;
	} else {
		KASSERT(p->p_kd == kr->kr_kd);
		p->p_kd = NULL;
	}
	if (--kr->kr_nsections == 0)
		wakeup(kr);
out:
	mtx_leave(&kcov_mtx);
}

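/*
 * Sketch of how a kernel subsystem would expose remote coverage; illustration
 * only, the `subsystem' and `id' values are hypothetical.  The subsystem
 * registers once and then brackets work performed on behalf of the traced
 * process with a remote section:
 *
 *	kcov_remote_register(subsystem, id);	// at setup time
 *
 *	kcov_remote_enter(subsystem, id);
 *	...					// work to be traced
 *	kcov_remote_leave(subsystem, id);
 *
 * Userspace attaches through KIOREMOTEATTACH (in this revision only
 * KCOV_REMOTE_COMMON, keyed on the attaching process) before enabling tracing
 * with KIOENABLE.
 */
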
705 
706 void
707 kcov_remote_register(int subsystem, void *id)
708 {
709 	mtx_enter(&kcov_mtx);
710 	kcov_remote_register_locked(subsystem, id);
711 	mtx_leave(&kcov_mtx);
712 }
713 
714 void
715 kcov_remote_unregister(int subsystem, void *id)
716 {
717 	struct kcov_remote *kr;
718 
719 	mtx_enter(&kcov_mtx);
720 	kr = kr_lookup(subsystem, id);
721 	if (kr != NULL)
722 		kr_free(kr);
723 	mtx_leave(&kcov_mtx);
724 }
725 
726 struct kcov_remote *
727 kcov_remote_register_locked(int subsystem, void *id)
728 {
729 	struct kcov_remote *kr, *tmp;
730 
731 	/* Do not allow registrations before the pool is initialized. */
732 	KASSERT(kr_cold == 0);
733 
734 	/*
735 	 * Temporarily release the mutex since the allocation could end up
736 	 * sleeping.
737 	 */
738 	mtx_leave(&kcov_mtx);
739 	kr = pool_get(&kr_pool, PR_WAITOK | PR_ZERO);
740 	kr->kr_subsystem = subsystem;
741 	kr->kr_id = id;
742 	kr->kr_state = KCOV_STATE_NONE;
743 	mtx_enter(&kcov_mtx);
744 
745 	for (;;) {
746 		tmp = kr_lookup(subsystem, id);
747 		if (tmp == NULL)
748 			break;
749 		if (tmp->kr_state != KCOV_STATE_DYING) {
750 			pool_put(&kr_pool, kr);
751 			return (NULL);
752 		}
753 		/*
754 		 * The remote could already be deregistered while another
755 		 * thread is currently inside a kcov remote section.
756 		 */
757 		KASSERT(tmp->kr_state == KCOV_STATE_DYING);
758 		msleep_nsec(tmp, &kcov_mtx, PWAIT, "kcov", INFSLP);
759 	}
760 	TAILQ_INSERT_TAIL(&kr_list, kr, kr_entry);
761 	return (kr);
762 }
763 
764 int
765 kcov_remote_attach(struct kcov_dev *kd, struct kio_remote_attach *arg)
766 {
767 	struct kcov_remote *kr = NULL;
768 
769 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
770 
771 	if (kd->kd_state != KCOV_STATE_READY)
772 		return (EBUSY);
773 
774 	if (arg->subsystem == KCOV_REMOTE_COMMON)
775 		kr = kcov_remote_register_locked(KCOV_REMOTE_COMMON,
776 		    curproc->p_p);
777 	if (kr == NULL)
778 		return (EINVAL);
779 	if (kr->kr_state != KCOV_STATE_NONE)
780 		return (EBUSY);
781 
782 	kr->kr_state = KCOV_STATE_READY;
783 	kr->kr_kd = kd;
784 	kd->kd_kr = kr;
785 	return (0);
786 }
787 
788 void
789 kcov_remote_detach(struct kcov_dev *kd, struct kcov_remote *kr)
790 {
791 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
792 
793 	KASSERT(kd == kr->kr_kd);
794 	if (kr->kr_subsystem == KCOV_REMOTE_COMMON) {
795 		kr_free(kr);
796 	} else {
797 		kr->kr_state = KCOV_STATE_NONE;
798 		kr_barrier(kr);
799 		kd->kd_kr = NULL;
800 		kr->kr_kd = NULL;
801 	}
802 }
803 
804 void
805 kr_free(struct kcov_remote *kr)
806 {
807 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
808 
809 	kr->kr_state = KCOV_STATE_DYING;
810 	kr_barrier(kr);
811 	if (kr->kr_kd != NULL)
812 		kr->kr_kd->kd_kr = NULL;
813 	kr->kr_kd = NULL;
814 	TAILQ_REMOVE(&kr_list, kr, kr_entry);
815 	/* Notify thread(s) waiting in kcov_remote_register(). */
816 	wakeup(kr);
817 	pool_put(&kr_pool, kr);
818 }
819 
820 void
821 kr_barrier(struct kcov_remote *kr)
822 {
823 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
824 
825 	while (kr->kr_nsections > 0)
826 		msleep_nsec(kr, &kcov_mtx, PWAIT, "kcovbar", INFSLP);
827 }
828 
829 struct kcov_remote *
830 kr_lookup(int subsystem, void *id)
831 {
832 	struct kcov_remote *kr;
833 
834 	MUTEX_ASSERT_LOCKED(&kcov_mtx);
835 
836 	TAILQ_FOREACH(kr, &kr_list, kr_entry) {
837 		if (kr->kr_subsystem == subsystem && kr->kr_id == id)
838 			return (kr);
839 	}
840 	return (NULL);
841 }
842