/*	$OpenBSD: kcov.c,v 1.49 2023/07/29 06:52:50 anton Exp $	*/

/*
 * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kcov.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/stdint.h>
#include <sys/queue.h>

/* kcov_vnode() */
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/specdev.h>

#include <uvm/uvm_extern.h>

#define KCOV_BUF_MEMB_SIZE	sizeof(uintptr_t)
#define KCOV_BUF_MAX_NMEMB	(256 << 10)

#define KCOV_CMP_CONST		0x1
#define KCOV_CMP_SIZE(x)	((x) << 1)

#define KCOV_STATE_NONE		0
#define KCOV_STATE_READY	1
#define KCOV_STATE_TRACE	2
#define KCOV_STATE_DYING	3

#define KCOV_STRIDE_TRACE_PC	1
#define KCOV_STRIDE_TRACE_CMP	4
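
/*
 * Userland drives this device roughly as follows (a minimal sketch based on
 * the kcov(4) manual; error handling omitted): open /dev/kcov, size the
 * coverage buffer with KIOSETBUFSIZE, mmap(2) it and bracket the code of
 * interest with KIOENABLE/KIODISABLE:
 *
 *	unsigned long size = 1024;
 *	unsigned long *cover, i;
 *	int fd, mode = KCOV_MODE_TRACE_PC;
 *
 *	fd = open("/dev/kcov", O_RDWR);
 *	ioctl(fd, KIOSETBUFSIZE, &size);
 *	cover = mmap(NULL, size * sizeof(unsigned long),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KIOENABLE, &mode);
 *	cover[0] = 0;
 *	...			// kernel code to trace, e.g. a syscall
 *	ioctl(fd, KIODISABLE);
 *	for (i = 0; i < cover[0]; i++)
 *		printf("%p\n", (void *)cover[i + 1]);
 */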

/*
 * Coverage structure.
 *
 * Locking:
 *	I	immutable after creation
 *	M	kcov_mtx
 *	a	atomic operations
 */
struct kcov_dev {
	int		 kd_state;	/* [M] */
	int		 kd_mode;	/* [M] */
	int		 kd_unit;	/* [I] D_CLONE unique device minor */
	int		 kd_intr;	/* [M] currently used in interrupt */
	uintptr_t	*kd_buf;	/* [a] traced coverage */
	size_t		 kd_nmemb;	/* [I] */
	size_t		 kd_size;	/* [I] */

	struct kcov_remote *kd_kr;	/* [M] */

	TAILQ_ENTRY(kcov_dev)	kd_entry;	/* [M] */
};

/*
 * Remote coverage structure.
 *
 * Locking:
 *	I	immutable after creation
 *	M	kcov_mtx
 */
struct kcov_remote {
	struct kcov_dev *kr_kd;	/* [M] */
	void *kr_id;		/* [I] */
	int kr_subsystem;	/* [I] */
	int kr_nsections;	/* [M] # threads in remote section */
	int kr_state;		/* [M] */

	TAILQ_ENTRY(kcov_remote) kr_entry;	/* [M] */
};

/*
 * Per CPU coverage structure used to track coverage when executing in a remote
 * interrupt context.
 *
 * Locking:
 *	I	immutable after creation
 *	M	kcov_mtx
 */
struct kcov_cpu {
	struct kcov_dev  kc_kd;
	struct kcov_dev *kc_kd_save;	/* [M] previous kcov_dev */
	int kc_cpuid;			/* [I] cpu number */

	TAILQ_ENTRY(kcov_cpu) kc_entry;	/* [I] */
};

void kcovattach(int);

int kd_init(struct kcov_dev *, unsigned long);
void kd_free(struct kcov_dev *);
struct kcov_dev *kd_lookup(int);
void kd_copy(struct kcov_dev *, struct kcov_dev *);

struct kcov_remote *kcov_remote_register_locked(int, void *);
int kcov_remote_attach(struct kcov_dev *, struct kio_remote_attach *);
void kcov_remote_detach(struct kcov_dev *, struct kcov_remote *);
void kr_free(struct kcov_remote *);
void kr_barrier(struct kcov_remote *);
struct kcov_remote *kr_lookup(int, void *);

static struct kcov_dev *kd_curproc(int);
static struct kcov_cpu *kd_curcpu(void);
static uint64_t kd_claim(struct kcov_dev *, int, int);

TAILQ_HEAD(, kcov_dev) kd_list = TAILQ_HEAD_INITIALIZER(kd_list);
TAILQ_HEAD(, kcov_remote) kr_list = TAILQ_HEAD_INITIALIZER(kr_list);
TAILQ_HEAD(, kcov_cpu) kc_list = TAILQ_HEAD_INITIALIZER(kc_list);

int kcov_cold = 1;
int kr_cold = 1;
struct mutex kcov_mtx = MUTEX_INITIALIZER(IPL_MPFLOOR);
struct pool kr_pool;

static inline int
inintr(struct cpu_info *ci)
{
#if defined(__amd64__) || defined(__arm__) || defined(__arm64__) || \
    defined(__i386__)
	return (ci->ci_idepth > 0);
#else
	return (0);
#endif
}

/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-pc' option will
 * cause the following function to be called upon function entry and before
 * each block of instructions that maps to a single line in the original source
 * code.
 *
 * If kcov is enabled for the current thread, the kernel program counter will
 * be stored in its corresponding coverage buffer.
 */
void
__sanitizer_cov_trace_pc(void)
{
	struct kcov_dev *kd;
	uint64_t idx;

	kd = kd_curproc(KCOV_MODE_TRACE_PC);
	if (kd == NULL)
		return;

	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_PC, 1)))
		kd->kd_buf[idx] = (uintptr_t)__builtin_return_address(0);
}
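
/*
 * Conceptually, the instrumentation emitted by the compiler is equivalent to
 * the following source transformation (illustrative only; the calls are
 * actually inserted by the compiler, not written by hand):
 *
 *	int
 *	example(int x)
 *	{
 *		__sanitizer_cov_trace_pc();
 *		if (x > 0) {
 *			__sanitizer_cov_trace_pc();
 *			return (1);
 *		}
 *		__sanitizer_cov_trace_pc();
 *		return (0);
 *	}
 */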

/*
 * Compiling the kernel with the `-fsanitize-coverage=trace-cmp' option will
 * cause the following function to be called upon integer comparisons and
 * switch statements.
 *
 * If kcov is enabled for the current thread, the comparison will be stored in
 * its corresponding coverage buffer.
 */
void
trace_cmp(struct kcov_dev *kd, uint64_t type, uint64_t arg1, uint64_t arg2,
    uintptr_t pc)
{
	uint64_t idx;

	if ((idx = kd_claim(kd, KCOV_STRIDE_TRACE_CMP, 1))) {
		kd->kd_buf[idx] = type;
		kd->kd_buf[idx + 1] = arg1;
		kd->kd_buf[idx + 2] = arg2;
		kd->kd_buf[idx + 3] = pc;
	}
}

#define TRACE_CMP(type, arg1, arg2) do {				\
	struct kcov_dev *kd;						\
	if ((kd = kd_curproc(KCOV_MODE_TRACE_CMP)) == NULL)		\
		return;							\
	trace_cmp(kd, (type), (arg1), (arg2),				\
	    (uintptr_t)__builtin_return_address(0));			\
} while (0)

void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(0), arg1, arg2);
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(1), arg1, arg2);
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(2), arg1, arg2);
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(3), arg1, arg2);
}

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2);
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2);
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2);
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
	TRACE_CMP(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2);
}

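/*
 * The cases array handed over by the compiler is laid out as follows:
 * cases[0] holds the number of case values, cases[1] the bit width of the
 * switched value and cases[2] onwards the case values themselves. Each case
 * value is traced as a constant comparison against val.
 */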
void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	struct kcov_dev *kd;
	uint64_t i, nbits, ncases, type;
	uintptr_t pc;

	kd = kd_curproc(KCOV_MODE_TRACE_CMP);
	if (kd == NULL)
		return;

	pc = (uintptr_t)__builtin_return_address(0);
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	type |= KCOV_CMP_CONST;

	for (i = 0; i < ncases; i++)
		trace_cmp(kd, type, cases[i + 2], val, pc);
}

void
kcovattach(int count)
{
	struct kcov_cpu *kc;
	int error, i;

	pool_init(&kr_pool, sizeof(struct kcov_remote), 0, IPL_MPFLOOR,
	    PR_WAITOK, "kcovpl", NULL);

	kc = mallocarray(ncpusfound, sizeof(*kc), M_DEVBUF, M_WAITOK | M_ZERO);
	mtx_enter(&kcov_mtx);
	for (i = 0; i < ncpusfound; i++) {
		kc[i].kc_cpuid = i;
		error = kd_init(&kc[i].kc_kd, KCOV_BUF_MAX_NMEMB);
		KASSERT(error == 0);
		TAILQ_INSERT_TAIL(&kc_list, &kc[i], kc_entry);
	}
	mtx_leave(&kcov_mtx);

	kr_cold = 0;
}

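/*
 * D_CLONE open: each open of /dev/kcov gets a unique minor and therefore a
 * dedicated kcov descriptor, so a freshly allocated descriptor can never
 * collide with an existing unit.
 */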
int
kcovopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO);
	kd->kd_unit = minor(dev);
	mtx_enter(&kcov_mtx);
	KASSERT(kd_lookup(kd->kd_unit) == NULL);
	TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry);
	if (kcov_cold)
		kcov_cold = 0;
	mtx_leave(&kcov_mtx);
	return (0);
}

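/*
 * Close the device. Freeing is deferred if the descriptor is still being
 * traced, in which case kcov_exit() performs the cleanup.
 */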
int
kcovclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (ENXIO);
	}

	TAILQ_REMOVE(&kd_list, kd, kd_entry);
	if (kd->kd_state == KCOV_STATE_TRACE && kd->kd_kr == NULL) {
		/*
		 * Another thread is currently using the kcov descriptor,
		 * postpone freeing to kcov_exit().
		 */
		kd->kd_state = KCOV_STATE_DYING;
		kd->kd_mode = KCOV_MODE_NONE;
	} else {
		kd_free(kd);
	}

	mtx_leave(&kcov_mtx);
	return (0);
}

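/*
 * Ioctl interface: KIOSETBUFSIZE sizes the coverage buffer, KIOENABLE and
 * KIODISABLE toggle tracing for the calling thread and KIOREMOTEATTACH
 * attaches the descriptor to a remote subsystem.
 */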
int
kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct kcov_dev *kd;
	int mode;
	int error = 0;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return (ENXIO);
	}

	switch (cmd) {
	case KIOSETBUFSIZE:
		error = kd_init(kd, *((unsigned long *)data));
		break;
	case KIOENABLE:
		/* Only one kcov descriptor can be enabled per thread. */
		if (p->p_kd != NULL) {
			error = EBUSY;
			break;
		}
		if (kd->kd_state != KCOV_STATE_READY) {
			error = ENXIO;
			break;
		}
		mode = *((int *)data);
		if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
			error = EINVAL;
			break;
		}
		kd->kd_state = KCOV_STATE_TRACE;
		kd->kd_mode = mode;
		/* Remote coverage is mutually exclusive. */
		if (kd->kd_kr == NULL)
			p->p_kd = kd;
		break;
	case KIODISABLE:
		/* Only the enabled thread may disable itself. */
		if (p->p_kd != kd && kd->kd_kr == NULL) {
			error = EPERM;
			break;
		}
		if (kd->kd_state != KCOV_STATE_TRACE) {
			error = ENXIO;
			break;
		}
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
		break;
	case KIOREMOTEATTACH:
		error = kcov_remote_attach(kd,
		    (struct kio_remote_attach *)data);
		break;
	default:
		error = ENOTTY;
	}
	mtx_leave(&kcov_mtx);

	return (error);
}

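/*
 * Translate a page offset within the coverage buffer into its physical
 * address, allowing userland to mmap(2) the buffer.
 */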
paddr_t
kcovmmap(dev_t dev, off_t offset, int prot)
{
	struct kcov_dev *kd;
	paddr_t pa = -1;
	vaddr_t va;

	mtx_enter(&kcov_mtx);

	kd = kd_lookup(minor(dev));
	if (kd == NULL)
		goto out;

	if (offset < 0 || offset >= kd->kd_nmemb * KCOV_BUF_MEMB_SIZE)
		goto out;

	va = (vaddr_t)kd->kd_buf + offset;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		pa = -1;

out:
	mtx_leave(&kcov_mtx);
	return (pa);
}

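/*
 * Called from the thread exit path: disables tracing for the exiting thread
 * and frees the descriptor if its device has already been closed.
 */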
void
kcov_exit(struct proc *p)
{
	struct kcov_dev *kd;

	mtx_enter(&kcov_mtx);

	kd = p->p_kd;
	if (kd == NULL) {
		mtx_leave(&kcov_mtx);
		return;
	}

	if (kd->kd_state == KCOV_STATE_DYING) {
		p->p_kd = NULL;
		kd_free(kd);
	} else {
		kd->kd_state = KCOV_STATE_READY;
		kd->kd_mode = KCOV_MODE_NONE;
		if (kd->kd_kr != NULL)
			kr_barrier(kd->kd_kr);
		p->p_kd = NULL;
	}

	mtx_leave(&kcov_mtx);
}

/*
 * Returns non-zero if the given vnode refers to a kcov device.
 */
int
kcov_vnode(struct vnode *vp)
{
	return (vp->v_type == VCHR &&
	    cdevsw[major(vp->v_rdev)].d_open == kcovopen);
}

struct kcov_dev *
kd_lookup(int unit)
{
	struct kcov_dev *kd;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	TAILQ_FOREACH(kd, &kd_list, kd_entry) {
		if (kd->kd_unit == unit)
			return (kd);
	}
	return (NULL);
}

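/*
 * Append the coverage recorded in src to dst. Used when leaving a remote
 * section in interrupt context to flush the per CPU buffer into the attached
 * descriptor.
 */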
void
kd_copy(struct kcov_dev *dst, struct kcov_dev *src)
{
	uint64_t idx, nmemb;
	int stride;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);
	KASSERT(dst->kd_mode == src->kd_mode);

	nmemb = src->kd_buf[0];
	if (nmemb == 0)
		return;
	stride = src->kd_mode == KCOV_MODE_TRACE_CMP ? KCOV_STRIDE_TRACE_CMP :
	    KCOV_STRIDE_TRACE_PC;
	idx = kd_claim(dst, stride, nmemb);
	if (idx == 0)
		return;
	memcpy(&dst->kd_buf[idx], &src->kd_buf[1],
	    stride * nmemb * KCOV_BUF_MEMB_SIZE);
}

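/*
 * Allocate the coverage buffer, sized to nmemb elements. Since the
 * allocation can sleep, the state must be re-examined afterwards to ensure
 * no concurrent initialization won the race.
 */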
int
kd_init(struct kcov_dev *kd, unsigned long nmemb)
{
	void *buf;
	size_t size;
	int error;

	KASSERT(kd->kd_buf == NULL);

	if (kd->kd_state != KCOV_STATE_NONE)
		return (EBUSY);

	if (nmemb == 0 || nmemb > KCOV_BUF_MAX_NMEMB)
		return (EINVAL);

	size = roundup(nmemb * KCOV_BUF_MEMB_SIZE, PAGE_SIZE);
	mtx_leave(&kcov_mtx);
	buf = km_alloc(size, &kv_any, &kp_zero, &kd_waitok);
	if (buf == NULL) {
		error = ENOMEM;
		goto err;
	}
	/* km_alloc() can sleep, ensure the race was won. */
	if (kd->kd_state != KCOV_STATE_NONE) {
		error = EBUSY;
		goto err;
	}
	mtx_enter(&kcov_mtx);
	kd->kd_buf = buf;
	/* The first element is reserved to hold the number of used elements. */
	kd->kd_nmemb = nmemb - 1;
	kd->kd_size = size;
	kd->kd_state = KCOV_STATE_READY;
	return (0);

err:
	if (buf != NULL)
		km_free(buf, size, &kv_any, &kp_zero);
	mtx_enter(&kcov_mtx);
	return (error);
}

void
kd_free(struct kcov_dev *kd)
{
	struct kcov_remote *kr;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	kr = kd->kd_kr;
	if (kr != NULL)
		kcov_remote_detach(kd, kr);

	if (kd->kd_buf != NULL) {
		mtx_leave(&kcov_mtx);
		km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero);
		mtx_enter(&kcov_mtx);
	}
	free(kd, M_SUBPROC, sizeof(*kd));
}

static struct kcov_dev *
kd_curproc(int mode)
{
	struct cpu_info *ci;
	struct kcov_dev *kd;

	/*
	 * Do not trace before kcovopen() has been called at least once.
	 * At this point, all secondary CPUs have booted and accessing curcpu()
	 * is safe.
	 */
	if (__predict_false(kcov_cold))
		return (NULL);

	ci = curcpu();
	kd = ci->ci_curproc->p_kd;
	if (__predict_true(kd == NULL) || kd->kd_mode != mode)
		return (NULL);

	/*
	 * Do not trace if the kernel has panicked. This could happen if
	 * curproc had kcov enabled while panicking.
	 */
	if (__predict_false(panicstr || db_active))
		return (NULL);

	/* Do not trace in interrupt context unless this is a remote section. */
	if (inintr(ci) && kd->kd_intr == 0)
		return (NULL);

	return (kd);
}

static struct kcov_cpu *
kd_curcpu(void)
{
	struct kcov_cpu *kc;
	unsigned int cpuid = cpu_number();

	TAILQ_FOREACH(kc, &kc_list, kc_entry) {
		if (kc->kc_cpuid == cpuid)
			return (kc);
	}
	return (NULL);
}

/*
 * Claim stride * nmemb elements in the coverage buffer. Returns the index of
 * the first claimed element. If the claim cannot be fulfilled, zero is
 * returned.
 */
static uint64_t
kd_claim(struct kcov_dev *kd, int stride, int nmemb)
{
	uint64_t idx, was;

	idx = kd->kd_buf[0];
	for (;;) {
		if (stride * (idx + nmemb) > kd->kd_nmemb)
			return (0);

		was = atomic_cas_ulong(&kd->kd_buf[0], idx, idx + nmemb);
		if (was == idx)
			return (idx * stride + 1);
		idx = was;
	}
}
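
/*
 * Resulting buffer layout, with element 0 acting as the record counter:
 *
 *	KCOV_MODE_TRACE_PC, stride 1:	buf[0] = n, buf[1..n] = pc
 *	KCOV_MODE_TRACE_CMP, stride 4:	buf[0] = n, followed by n records of
 *					{ type, arg1, arg2, pc }
 */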
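/*
 * Enter a remote section on behalf of the given subsystem and id. While
 * inside the section, coverage produced by the current thread is credited to
 * the attached remote kcov descriptor, using a per CPU buffer when running
 * in interrupt context.
 */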
void
kcov_remote_enter(int subsystem, void *id)
{
	struct cpu_info *ci;
	struct kcov_cpu *kc;
	struct kcov_dev *kd;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	kr = kr_lookup(subsystem, id);
	if (kr == NULL || kr->kr_state != KCOV_STATE_READY)
		goto out;
	kd = kr->kr_kd;
	if (kd == NULL || kd->kd_state != KCOV_STATE_TRACE)
		goto out;
	ci = curcpu();
	p = ci->ci_curproc;
	if (inintr(ci)) {
		/*
		 * XXX we only expect to be called from softclock interrupts at
		 * this point.
		 */
		kc = kd_curcpu();
		if (kc == NULL || kc->kc_kd.kd_intr == 1)
			goto out;
		kc->kc_kd.kd_state = KCOV_STATE_TRACE;
		kc->kc_kd.kd_mode = kd->kd_mode;
		kc->kc_kd.kd_intr = 1;
		kc->kc_kd_save = p->p_kd;
		kd = &kc->kc_kd;
		/* Reset coverage buffer. */
		kd->kd_buf[0] = 0;
	} else {
		KASSERT(p->p_kd == NULL);
	}
	kr->kr_nsections++;
	p->p_kd = kd;

out:
	mtx_leave(&kcov_mtx);
}

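/*
 * Leave a remote section: restore the previous descriptor and, in interrupt
 * context, flush the per CPU coverage into the attached descriptor. The last
 * thread out wakes up anybody waiting in kr_barrier().
 */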
void
kcov_remote_leave(int subsystem, void *id)
{
	struct cpu_info *ci;
	struct kcov_cpu *kc;
	struct kcov_remote *kr;
	struct proc *p;

	mtx_enter(&kcov_mtx);
	ci = curcpu();
	p = ci->ci_curproc;
	if (p->p_kd == NULL)
		goto out;
	kr = kr_lookup(subsystem, id);
	if (kr == NULL)
		goto out;
	if (inintr(ci)) {
		kc = kd_curcpu();
		if (kc == NULL || kc->kc_kd.kd_intr == 0)
			goto out;

		/*
		 * Stop writing to the coverage buffer associated with this CPU
		 * before copying its contents.
		 */
		p->p_kd = kc->kc_kd_save;
		kc->kc_kd_save = NULL;

		kd_copy(kr->kr_kd, &kc->kc_kd);
		kc->kc_kd.kd_state = KCOV_STATE_READY;
		kc->kc_kd.kd_mode = KCOV_MODE_NONE;
		kc->kc_kd.kd_intr = 0;
	} else {
		KASSERT(p->p_kd == kr->kr_kd);
		p->p_kd = NULL;
	}
	if (--kr->kr_nsections == 0)
		wakeup(kr);
out:
	mtx_leave(&kcov_mtx);
}

void
kcov_remote_register(int subsystem, void *id)
{
	mtx_enter(&kcov_mtx);
	kcov_remote_register_locked(subsystem, id);
	mtx_leave(&kcov_mtx);
}

void
kcov_remote_unregister(int subsystem, void *id)
{
	struct kcov_remote *kr;

	mtx_enter(&kcov_mtx);
	kr = kr_lookup(subsystem, id);
	if (kr != NULL)
		kr_free(kr);
	mtx_leave(&kcov_mtx);
}

struct kcov_remote *
kcov_remote_register_locked(int subsystem, void *id)
{
	struct kcov_remote *kr, *tmp;

	/* Do not allow registrations before the pool is initialized. */
	KASSERT(kr_cold == 0);

	/*
	 * Temporarily release the mutex since the allocation could end up
	 * sleeping.
	 */
	mtx_leave(&kcov_mtx);
	kr = pool_get(&kr_pool, PR_WAITOK | PR_ZERO);
	kr->kr_subsystem = subsystem;
	kr->kr_id = id;
	kr->kr_state = KCOV_STATE_NONE;
	mtx_enter(&kcov_mtx);

	for (;;) {
		tmp = kr_lookup(subsystem, id);
		if (tmp == NULL)
			break;
		if (tmp->kr_state != KCOV_STATE_DYING) {
			pool_put(&kr_pool, kr);
			return (NULL);
		}
		/*
		 * The remote could already be deregistered while another
		 * thread is currently inside a kcov remote section.
		 */
		msleep_nsec(tmp, &kcov_mtx, PWAIT, "kcov", INFSLP);
	}
	TAILQ_INSERT_TAIL(&kr_list, kr, kr_entry);
	return (kr);
}

int
kcov_remote_attach(struct kcov_dev *kd, struct kio_remote_attach *arg)
{
	struct kcov_remote *kr = NULL;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	if (kd->kd_state != KCOV_STATE_READY)
		return (ENXIO);

	if (arg->subsystem == KCOV_REMOTE_COMMON) {
		kr = kcov_remote_register_locked(KCOV_REMOTE_COMMON,
		    curproc->p_p);
		if (kr == NULL)
			return (EBUSY);
	} else {
		return (EINVAL);
	}

	kr->kr_state = KCOV_STATE_READY;
	kr->kr_kd = kd;
	kd->kd_kr = kr;
	return (0);
}

void
kcov_remote_detach(struct kcov_dev *kd, struct kcov_remote *kr)
{
	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	KASSERT(kd == kr->kr_kd);
	if (kr->kr_subsystem == KCOV_REMOTE_COMMON) {
		kr_free(kr);
	} else {
		kr->kr_state = KCOV_STATE_NONE;
		kr_barrier(kr);
		kd->kd_kr = NULL;
		kr->kr_kd = NULL;
	}
}

void
kr_free(struct kcov_remote *kr)
{
	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	kr->kr_state = KCOV_STATE_DYING;
	kr_barrier(kr);
	if (kr->kr_kd != NULL)
		kr->kr_kd->kd_kr = NULL;
	kr->kr_kd = NULL;
	TAILQ_REMOVE(&kr_list, kr, kr_entry);
	/* Notify thread(s) waiting in kcov_remote_register(). */
	wakeup(kr);
	pool_put(&kr_pool, kr);
}

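/*
 * Wait for all threads currently inside a remote section associated with kr
 * to leave before proceeding.
 */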
void
kr_barrier(struct kcov_remote *kr)
{
	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	while (kr->kr_nsections > 0)
		msleep_nsec(kr, &kcov_mtx, PWAIT, "kcovbar", INFSLP);
}

struct kcov_remote *
kr_lookup(int subsystem, void *id)
{
	struct kcov_remote *kr;

	MUTEX_ASSERT_LOCKED(&kcov_mtx);

	TAILQ_FOREACH(kr, &kr_list, kr_entry) {
		if (kr->kr_subsystem == subsystem && kr->kr_id == id)
			return (kr);
	}
	return (NULL);
}