/*	$OpenBSD: kern_ktrace.c,v 1.115 2024/12/27 11:57:16 mpi Exp $	*/
/*	$NetBSD: kern_ktrace.c,v 1.23 1996/02/09 18:59:36 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/pledge.h>

#include <sys/mount.h>
#include <sys/syscall.h>
#include <sys/syscallargs.h>

void	ktrinitheaderraw(struct ktr_header *, uint, pid_t, pid_t);
void	ktrinitheader(struct ktr_header *, struct proc *, int);
int	ktrstart(struct proc *, struct vnode *, struct ucred *);
int	ktrops(struct proc *, struct process *, int, int, struct vnode *,
	    struct ucred *);
int	ktrsetchildren(struct proc *, struct process *, int, int,
	    struct vnode *, struct ucred *);
int	ktrwrite(struct proc *, struct ktr_header *, const void *, size_t);
int	ktrwrite2(struct proc *, struct ktr_header *, const void *, size_t,
	    const void *, size_t);
int	ktrwriteraw(struct proc *, struct vnode *, struct ucred *,
	    struct ktr_header *, struct iovec *);
int	ktrcanset(struct proc *, struct process *);

/*
 * Clear the trace settings in a correct way (to avoid races).
 */
void
ktrcleartrace(struct process *pr)
{
	struct vnode *vp;
	struct ucred *cred;

	if (pr->ps_tracevp != NULL) {
		vp = pr->ps_tracevp;
		cred = pr->ps_tracecred;

		pr->ps_traceflag = 0;
		pr->ps_tracevp = NULL;
		pr->ps_tracecred = NULL;

		vp->v_writecount--;
		vrele(vp);
		crfree(cred);
	}
}

/*
 * Change the trace setting in a correct way (to avoid races).
 */
void
ktrsettrace(struct process *pr, int facs, struct vnode *newvp,
    struct ucred *newcred)
{
	struct vnode *oldvp;
	struct ucred *oldcred;

	KASSERT(newvp != NULL);
	KASSERT(newcred != NULL);

	pr->ps_traceflag |= facs;

	/* nothing to change about where the trace goes? */
	if (pr->ps_tracevp == newvp && pr->ps_tracecred == newcred)
		return;

	vref(newvp);
	crhold(newcred);
	newvp->v_writecount++;

	oldvp = pr->ps_tracevp;
	oldcred = pr->ps_tracecred;

	pr->ps_tracevp = newvp;
	pr->ps_tracecred = newcred;

	if (oldvp != NULL) {
		oldvp->v_writecount--;
		vrele(oldvp);
		crfree(oldcred);
	}
}

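/*
 * Fill in a trace record header from explicit type, pid and tid values.
 */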
void
ktrinitheaderraw(struct ktr_header *kth, uint type, pid_t pid, pid_t tid)
{
	memset(kth, 0, sizeof(struct ktr_header));
	kth->ktr_type = type;
	kth->ktr_pid = pid;
	kth->ktr_tid = tid;
}

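/*
 * Fill in a trace record header for the given thread, using its
 * process id, thread id and command name.
 */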
void
ktrinitheader(struct ktr_header *kth, struct proc *p, int type)
{
	struct process *pr = p->p_p;

	ktrinitheaderraw(kth, type, pr->ps_pid, p->p_tid + THREAD_PID_OFFSET);
	memcpy(kth->ktr_comm, pr->ps_comm, sizeof(kth->ktr_comm));
}

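/*
 * Write the start-of-trace marker record to the trace vnode.
 */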
int
ktrstart(struct proc *p, struct vnode *vp, struct ucred *cred)
{
	struct ktr_header kth;

	ktrinitheaderraw(&kth, htobe32(KTR_START), -1, -1);
	return (ktrwriteraw(p, vp, cred, &kth, NULL));
}

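/*
 * Record a system call and its arguments.  For sysctl(2) the mib[]
 * array is copied in from userland and appended to the record.
 */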
void
ktrsyscall(struct proc *p, register_t code, size_t argsize, register_t args[])
{
	struct ktr_header kth;
	struct ktr_syscall *ktp;
	size_t len = sizeof(struct ktr_syscall) + argsize;
	register_t *argp;
	u_int nargs = 0;
	int i;

	if (code == SYS_sysctl) {
		/*
		 * The sysctl encoding stores the mib[]
		 * array because it is interesting.
		 */
		if (args[1] > 0)
			nargs = lmin(args[1], CTL_MAXNAME);
		len += nargs * sizeof(int);
	}
	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_SYSCALL);
	ktp = malloc(len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_argsize = argsize;
	argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < (argsize / sizeof *argp); i++)
		*argp++ = args[i];
	if (nargs && copyin((void *)args[0], argp, nargs * sizeof(int)))
		memset(argp, 0, nargs * sizeof(int));
	KERNEL_LOCK();
	ktrwrite(p, &kth, ktp, len);
	KERNEL_UNLOCK();
	free(ktp, M_TEMP, len);
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

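/*
 * Record a system call return, including the error and return value.
 */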
void
ktrsysret(struct proc *p, register_t code, int error,
    const register_t retval[2])
{
	struct ktr_header kth;
	struct ktr_sysret ktp;
	int len;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	if (error)
		len = 0;
	else if (code == SYS_lseek)
		/* the one exception: lseek on ILP32 needs more */
		len = sizeof(long long);
	else
		len = sizeof(register_t);
	KERNEL_LOCK();
	ktrwrite2(p, &kth, &ktp, sizeof(ktp), retval, len);
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

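/*
 * Record a pathname looked up by namei.
 */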
void
ktrnamei(struct proc *p, char *path)
{
	struct ktr_header kth;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_NAMEI);
	KERNEL_LOCK();
	ktrwrite(p, &kth, path, strlen(path));
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

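/*
 * Record generic process I/O, copying the user data in chunks of at
 * most one page per record.
 */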
void
ktrgenio(struct proc *p, int fd, enum uio_rw rw, struct iovec *iov,
    ssize_t len)
{
	struct ktr_header kth;
	struct ktr_genio ktp;
	caddr_t cp;
	int count, error;
	int buflen;

	atomic_setbits_int(&p->p_flag, P_INKTR);

	/* beware overflow */
	if (len > PAGE_SIZE)
		buflen = PAGE_SIZE;
	else
		buflen = len + sizeof(struct ktr_genio);

	ktrinitheader(&kth, p, KTR_GENIO);
	ktp.ktr_fd = fd;
	ktp.ktr_rw = rw;

	cp = malloc(buflen, M_TEMP, M_WAITOK);
	while (len > 0) {
		/*
		 * Don't allow this process to hog the cpu when doing
		 * huge I/O.
		 */
		sched_pause(preempt);

		count = lmin(iov->iov_len, buflen);
		if (count > len)
			count = len;
		if (copyin(iov->iov_base, cp, count))
			break;

		KERNEL_LOCK();
		error = ktrwrite2(p, &kth, &ktp, sizeof(ktp), cp, count);
		KERNEL_UNLOCK();
		if (error != 0)
			break;

		iov->iov_len -= count;
		iov->iov_base = (caddr_t)iov->iov_base + count;

		if (iov->iov_len == 0)
			iov++;

		len -= count;
	}

	free(cp, M_TEMP, buflen);
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

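/*
 * Record a signal being delivered, along with its handler, mask,
 * code and siginfo.
 */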
void
ktrpsig(struct proc *p, int sig, sig_t action, int mask, int code,
    siginfo_t *si)
{
	struct ktr_header kth;
	struct ktr_psig kp;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kp.si = *si;

	KERNEL_LOCK();
	ktrwrite(p, &kth, &kp, sizeof(kp));
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

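/*
 * Record a named kernel structure supplied by the caller.
 */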
void
ktrstruct(struct proc *p, const char *name, const void *data, size_t datalen)
{
	struct ktr_header kth;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_STRUCT);

	if (data == NULL)
		datalen = 0;
	KERNEL_LOCK();
	ktrwrite2(p, &kth, name, strlen(name) + 1, data, datalen);
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

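/*
 * Record user-supplied data tagged with an identifier (KTR_USER records).
 */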
int
ktruser(struct proc *p, const char *id, const void *addr, size_t len)
{
	struct ktr_header kth;
	struct ktr_user ktp;
	int error;
	void *memp;
#define	STK_PARAMS	128
	long long stkbuf[STK_PARAMS / sizeof(long long)];

	if (!KTRPOINT(p, KTR_USER))
		return (0);
	if (len > KTR_USER_MAXLEN)
		return (EINVAL);

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_USER);
	memset(ktp.ktr_id, 0, KTR_USER_MAXIDLEN);
	error = copyinstr(id, ktp.ktr_id, KTR_USER_MAXIDLEN, NULL);
	if (error == 0) {
		if (len > sizeof(stkbuf))
			memp = malloc(len, M_TEMP, M_WAITOK);
		else
			memp = stkbuf;
		error = copyin(addr, memp, len);
		if (error == 0) {
			KERNEL_LOCK();
			ktrwrite2(p, &kth, &ktp, sizeof(ktp), memp, len);
			KERNEL_UNLOCK();
		}
		if (memp != stkbuf)
			free(memp, M_TEMP, len);
	}
	atomic_clearbits_int(&p->p_flag, P_INKTR);
	return (error);
}

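/*
 * Record exec argument or environment strings, written in chunks of
 * at most one page per record.
 */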
void
ktrexec(struct proc *p, int type, const char *data, ssize_t len)
{
	struct ktr_header kth;
	int count, error;
	int buflen;

	assert(type == KTR_EXECARGS || type == KTR_EXECENV);
	atomic_setbits_int(&p->p_flag, P_INKTR);

	/* beware overflow */
	if (len > PAGE_SIZE)
		buflen = PAGE_SIZE;
	else
		buflen = len;

	ktrinitheader(&kth, p, type);

	while (len > 0) {
		/*
		 * Don't allow this process to hog the cpu when doing
		 * huge I/O.
		 */
		sched_pause(preempt);

		count = lmin(len, buflen);
		KERNEL_LOCK();
		error = ktrwrite(p, &kth, data, count);
		KERNEL_UNLOCK();
		if (error != 0)
			break;

		len -= count;
		data += count;
	}

	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

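/*
 * Record a pledge failure: the error, the pledge code and the syscall.
 */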
void
ktrpledge(struct proc *p, int error, uint64_t code, int syscall)
{
	struct ktr_header kth;
	struct ktr_pledge kp;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_PLEDGE);
	kp.error = error;
	kp.code = code;
	kp.syscall = syscall;

	KERNEL_LOCK();
	ktrwrite(p, &kth, &kp, sizeof(kp));
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

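/*
 * Record a pinsyscall violation: the error, the syscall and the
 * address it was entered from.
 */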
void
ktrpinsyscall(struct proc *p, int error, int syscall, vaddr_t addr)
{
	struct ktr_header kth;
	struct ktr_pinsyscall kp;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_PINSYSCALL);
	kp.error = error;
	kp.syscall = syscall;
	kp.addr = addr;

	KERNEL_LOCK();
	ktrwrite(p, &kth, &kp, sizeof(kp));
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

/* Interface and common routines */

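/*
 * Guts of the ktrace request: validate the arguments, then set or
 * clear tracing on the target process(es).
 */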
int
doktrace(struct vnode *vp, int ops, int facs, pid_t pid, struct proc *p)
{
	struct process *pr = NULL;
	struct ucred *cred = NULL;
	struct pgrp *pg;
	int descend = ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;

	facs = facs & ~((unsigned)KTRFAC_ROOT);
	ops = KTROP(ops);

	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		cred = p->p_ucred;
		if (!vp) {
			error = EINVAL;
			goto done;
		}
		if (vp->v_type != VREG) {
			error = EACCES;
			goto done;
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		LIST_FOREACH(pr, &allprocess, ps_list) {
			if (pr->ps_tracevp == vp) {
				if (ktrcanset(p, pr))
					ktrcleartrace(pr);
				else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	if (ops == KTROP_SET) {
		if (suser(p) == 0)
			facs |= KTRFAC_ROOT;
		error = ktrstart(p, vp, cred);
		if (error != 0)
			goto done;
	}
	/*
	 * do it
	 */
	if (pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
			if (descend)
				ret |= ktrsetchildren(p, pr, ops, facs, vp,
				    cred);
			else
				ret |= ktrops(p, pr, ops, facs, vp, cred);
		}
	} else {
		/*
		 * by pid
		 */
		pr = prfind(pid);
		if (pr == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(p, pr, ops, facs, vp, cred);
		else
			ret |= ktrops(p, pr, ops, facs, vp, cred);
	}
	if (!ret)
		error = EPERM;
done:
	return (error);
}

/*
 * ktrace system call
 */
int
sys_ktrace(struct proc *p, void *v, register_t *retval)
{
	struct sys_ktrace_args /* {
		syscallarg(const char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(pid_t) pid;
	} */ *uap = v;
	struct vnode *vp = NULL;
	const char *fname = SCARG(uap, fname);
	struct ucred *cred = NULL;
	int error;

	if (fname) {
		struct nameidata nd;

		cred = p->p_ucred;
		NDINIT(&nd, 0, 0, UIO_USERSPACE, fname, p);
		nd.ni_pledge = PLEDGE_CPATH | PLEDGE_WPATH;
		nd.ni_unveil = UNVEIL_CREATE | UNVEIL_WRITE;
		if ((error = vn_open(&nd, FWRITE|O_NOFOLLOW, 0)) != 0)
			return error;
		vp = nd.ni_vp;

		VOP_UNLOCK(vp);
	}

	error = doktrace(vp, SCARG(uap, ops), SCARG(uap, facs),
	    SCARG(uap, pid), p);
	if (vp != NULL)
		(void)vn_close(vp, FWRITE, cred, p);

	return error;
}

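/*
 * Set or clear the requested trace facilities on a single process,
 * provided the caller may change its tracing state.
 */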
int
ktrops(struct proc *curp, struct process *pr, int ops, int facs,
    struct vnode *vp, struct ucred *cred)
{
	if (!ktrcanset(curp, pr))
		return (0);
	if (ops == KTROP_SET)
		ktrsettrace(pr, facs, vp, cred);
	else {
		/* KTROP_CLEAR */
		pr->ps_traceflag &= ~facs;
		if ((pr->ps_traceflag & KTRFAC_MASK) == 0) {
			/* cleared all the facility bits, so stop completely */
			ktrcleartrace(pr);
		}
	}

	return (1);
}

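/*
 * Apply ktrops() to a process and all of its descendants.
 */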
int
ktrsetchildren(struct proc *curp, struct process *top, int ops, int facs,
    struct vnode *vp, struct ucred *cred)
{
	struct process *pr;
	int ret = 0;

	pr = top;
	for (;;) {
		ret |= ktrops(curp, pr, ops, facs, vp, cred);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&pr->ps_children))
			pr = LIST_FIRST(&pr->ps_children);
		else for (;;) {
			if (pr == top)
				return (ret);
			if (LIST_NEXT(pr, ps_sibling) != NULL) {
				pr = LIST_NEXT(pr, ps_sibling);
				break;
			}
			pr = pr->ps_pptr;
		}
	}
	/*NOTREACHED*/
}

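/*
 * Write a trace record with a single payload buffer.
 */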
int
ktrwrite(struct proc *p, struct ktr_header *kth, const void *aux, size_t len)
{
	struct vnode *vp = p->p_p->ps_tracevp;
	struct ucred *cred = p->p_p->ps_tracecred;
	struct iovec data[2];
	int error;

	if (vp == NULL)
		return 0;
	crhold(cred);
	data[0].iov_base = (void *)aux;
	data[0].iov_len = len;
	data[1].iov_len = 0;
	kth->ktr_len = len;
	error = ktrwriteraw(p, vp, cred, kth, data);
	crfree(cred);
	return (error);
}

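/*
 * Write a trace record assembled from two payload buffers.
 */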
int
ktrwrite2(struct proc *p, struct ktr_header *kth, const void *aux1,
    size_t len1, const void *aux2, size_t len2)
{
	struct vnode *vp = p->p_p->ps_tracevp;
	struct ucred *cred = p->p_p->ps_tracecred;
	struct iovec data[2];
	int error;

	if (vp == NULL)
		return 0;
	crhold(cred);
	data[0].iov_base = (void *)aux1;
	data[0].iov_len = len1;
	data[1].iov_base = (void *)aux2;
	data[1].iov_len = len2;
	kth->ktr_len = len1 + len2;
	error = ktrwriteraw(p, vp, cred, kth, data);
	crfree(cred);
	return (error);
}

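/*
 * Write an assembled record to the trace vnode.  On failure, stop
 * tracing to that vnode for every process using it.
 */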
int
ktrwriteraw(struct proc *curp, struct vnode *vp, struct ucred *cred,
    struct ktr_header *kth, struct iovec *data)
{
	struct uio auio;
	struct iovec aiov[3];
	struct process *pr;
	int error;

	nanotime(&kth->ktr_time);

	KERNEL_ASSERT_LOCKED();

	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = curp;
	if (kth->ktr_len > 0) {
		aiov[1] = data[0];
		aiov[2] = data[1];
		auio.uio_iovcnt++;
		if (aiov[2].iov_len > 0)
			auio.uio_iovcnt++;
		auio.uio_resid += kth->ktr_len;
	}
	error = vget(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error)
		goto bad;
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, cred);
	vput(vp);
	if (error)
		goto bad;

	return (0);

bad:
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	LIST_FOREACH(pr, &allprocess, ps_list) {
		if (pr == curp->p_p)
			continue;
		if (pr->ps_tracevp == vp && pr->ps_tracecred == cred)
			ktrcleartrace(pr);
	}
	ktrcleartrace(curp->p_p);
	return (error);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
int
ktrcanset(struct proc *callp, struct process *targetpr)
{
	struct ucred *caller = callp->p_ucred;
	struct ucred *target = targetpr->ps_ucred;

	if ((caller->cr_uid == target->cr_ruid &&
	    target->cr_ruid == target->cr_svuid &&
	    caller->cr_rgid == target->cr_rgid &&	/* XXX */
	    target->cr_rgid == target->cr_svgid &&
	    (targetpr->ps_traceflag & KTRFAC_ROOT) == 0 &&
	    !ISSET(targetpr->ps_flags, PS_SUGID)) ||
	    caller->cr_uid == 0)
		return (1);

	return (0);
}