/*	$OpenBSD: kern_ktrace.c,v 1.114 2023/12/15 15:12:08 deraadt Exp $	*/
/*	$NetBSD: kern_ktrace.c,v 1.23 1996/02/09 18:59:36 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/pledge.h>

#include <sys/mount.h>
#include <sys/syscall.h>
#include <sys/syscallargs.h>

void	ktrinitheaderraw(struct ktr_header *, uint, pid_t, pid_t);
void	ktrinitheader(struct ktr_header *, struct proc *, int);
int	ktrstart(struct proc *, struct vnode *, struct ucred *);
int	ktrops(struct proc *, struct process *, int, int, struct vnode *,
	    struct ucred *);
int	ktrsetchildren(struct proc *, struct process *, int, int,
	    struct vnode *, struct ucred *);
int	ktrwrite(struct proc *, struct ktr_header *, const void *, size_t);
int	ktrwrite2(struct proc *, struct ktr_header *, const void *, size_t,
	    const void *, size_t);
int	ktrwriteraw(struct proc *, struct vnode *, struct ucred *,
	    struct ktr_header *, struct iovec *);
int	ktrcanset(struct proc *, struct process *);

/*
 * Clear the trace settings in a correct way (to avoid races).
 */
void
ktrcleartrace(struct process *pr)
{
	struct vnode *vp;
	struct ucred *cred;

	if (pr->ps_tracevp != NULL) {
		vp = pr->ps_tracevp;
		cred = pr->ps_tracecred;

		pr->ps_traceflag = 0;
		pr->ps_tracevp = NULL;
		pr->ps_tracecred = NULL;

		vp->v_writecount--;
		vrele(vp);
		crfree(cred);
	}
}

/*
 * Change the trace setting in a correct way (to avoid races).
 */
void
ktrsettrace(struct process *pr, int facs, struct vnode *newvp,
    struct ucred *newcred)
{
	struct vnode *oldvp;
	struct ucred *oldcred;

	KASSERT(newvp != NULL);
	KASSERT(newcred != NULL);

	pr->ps_traceflag |= facs;

	/* nothing to change about where the trace goes? */
	if (pr->ps_tracevp == newvp && pr->ps_tracecred == newcred)
		return;

	vref(newvp);
	crhold(newcred);
	newvp->v_writecount++;

	oldvp = pr->ps_tracevp;
	oldcred = pr->ps_tracecred;

	pr->ps_tracevp = newvp;
	pr->ps_tracecred = newcred;

	if (oldvp != NULL) {
		oldvp->v_writecount--;
		vrele(oldvp);
		crfree(oldcred);
	}
}

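/*
 * Fill in a trace record header with the given type and identifiers.
 * The caller supplies pid/tid explicitly, which is used for records
 * not tied to a particular thread, such as the KTR_START marker.
 */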
void
ktrinitheaderraw(struct ktr_header *kth, uint type, pid_t pid, pid_t tid)
{
	memset(kth, 0, sizeof(struct ktr_header));
	kth->ktr_type = type;
	kth->ktr_pid = pid;
	kth->ktr_tid = tid;
}

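/*
 * Fill in a trace record header for the current thread: process ID,
 * thread ID (with THREAD_PID_OFFSET applied) and command name.
 */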
void
ktrinitheader(struct ktr_header *kth, struct proc *p, int type)
{
	struct process *pr = p->p_p;

	ktrinitheaderraw(kth, type, pr->ps_pid, p->p_tid + THREAD_PID_OFFSET);
	memcpy(kth->ktr_comm, pr->ps_comm, sizeof(kth->ktr_comm));
}

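/*
 * Write an initial KTR_START record (type stored big-endian, pid and
 * tid of -1) marking the beginning of a trace on this vnode.
 */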
int
ktrstart(struct proc *p, struct vnode *vp, struct ucred *cred)
{
	struct ktr_header kth;

	ktrinitheaderraw(&kth, htobe32(KTR_START), -1, -1);
	return (ktrwriteraw(p, vp, cred, &kth, NULL));
}

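/*
 * Record a system call entry: syscall code, argument size and the
 * arguments themselves.  For sysctl(2) the mib[] array is also copied
 * in from userland and appended, since it is the interesting part.
 */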
void
ktrsyscall(struct proc *p, register_t code, size_t argsize, register_t args[])
{
	struct ktr_header kth;
	struct ktr_syscall *ktp;
	size_t len = sizeof(struct ktr_syscall) + argsize;
	register_t *argp;
	u_int nargs = 0;
	int i;

	if (code == SYS_sysctl) {
		/*
		 * The sysctl encoding stores the mib[]
		 * array because it is interesting.
		 */
		if (args[1] > 0)
			nargs = lmin(args[1], CTL_MAXNAME);
		len += nargs * sizeof(int);
	}
	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_SYSCALL);
	ktp = malloc(len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_argsize = argsize;
	argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < (argsize / sizeof *argp); i++)
		*argp++ = args[i];
	if (nargs && copyin((void *)args[0], argp, nargs * sizeof(int)))
		memset(argp, 0, nargs * sizeof(int));
	ktrwrite(p, &kth, ktp, len);
	free(ktp, M_TEMP, len);
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

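/*
 * Record a system call return: syscall code, error value and, on
 * success, the return value.  lseek(2) is the one call whose result
 * can be wider than a register on ILP32, so it gets a 64-bit slot.
 */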
void
ktrsysret(struct proc *p, register_t code, int error,
    const register_t retval[2])
{
	struct ktr_header kth;
	struct ktr_sysret ktp;
	int len;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	if (error)
		len = 0;
	else if (code == SYS_lseek)
		/* the one exception: lseek on ILP32 needs more */
		len = sizeof(long long);
	else
		len = sizeof(register_t);
	ktrwrite2(p, &kth, &ktp, sizeof(ktp), retval, len);
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

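/*
 * Record a pathname looked up by namei().
 */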
void
ktrnamei(struct proc *p, char *path)
{
	struct ktr_header kth;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_NAMEI);
	ktrwrite(p, &kth, path, strlen(path));
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

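/*
 * Record generic I/O: the data read or written on a file descriptor.
 * The user buffer is copied in and written out in chunks of at most a
 * page, yielding the CPU between chunks so huge transfers do not
 * monopolize it.
 */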
void
ktrgenio(struct proc *p, int fd, enum uio_rw rw, struct iovec *iov,
    ssize_t len)
{
	struct ktr_header kth;
	struct ktr_genio ktp;
	caddr_t cp;
	int count, error;
	int buflen;

	atomic_setbits_int(&p->p_flag, P_INKTR);

	/* beware overflow */
	if (len > PAGE_SIZE)
		buflen = PAGE_SIZE;
	else
		buflen = len + sizeof(struct ktr_genio);

	ktrinitheader(&kth, p, KTR_GENIO);
	ktp.ktr_fd = fd;
	ktp.ktr_rw = rw;

	cp = malloc(buflen, M_TEMP, M_WAITOK);
	while (len > 0) {
		/*
		 * Don't allow this process to hog the cpu when doing
		 * huge I/O.
		 */
		sched_pause(preempt);

		count = lmin(iov->iov_len, buflen);
		if (count > len)
			count = len;
		if (copyin(iov->iov_base, cp, count))
			break;

		KERNEL_LOCK();
		error = ktrwrite2(p, &kth, &ktp, sizeof(ktp), cp, count);
		KERNEL_UNLOCK();
		if (error != 0)
			break;

		iov->iov_len -= count;
		iov->iov_base = (caddr_t)iov->iov_base + count;

		if (iov->iov_len == 0)
			iov++;

		len -= count;
	}

	free(cp, M_TEMP, buflen);
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

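/*
 * Record a signal delivered to the process: signal number, handler,
 * blocked mask, code and siginfo.
 */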
void
ktrpsig(struct proc *p, int sig, sig_t action, int mask, int code,
    siginfo_t *si)
{
	struct ktr_header kth;
	struct ktr_psig kp;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kp.si = *si;

	KERNEL_LOCK();
	ktrwrite(p, &kth, &kp, sizeof(kp));
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

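/*
 * Record a kernel structure: a NUL-terminated name followed by the
 * raw structure contents.
 */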
void
ktrstruct(struct proc *p, const char *name, const void *data, size_t datalen)
{
	struct ktr_header kth;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_STRUCT);

	if (data == NULL)
		datalen = 0;
	KERNEL_LOCK();
	ktrwrite2(p, &kth, name, strlen(name) + 1, data, datalen);
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

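/*
 * Record a user-supplied trace record (KTR_USER, typically issued via
 * utrace(2)): an identifier string plus up to KTR_USER_MAXLEN bytes
 * copied in from userland.  Small payloads are staged in a stack
 * buffer to avoid a malloc.
 */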
int
ktruser(struct proc *p, const char *id, const void *addr, size_t len)
{
	struct ktr_header kth;
	struct ktr_user ktp;
	int error;
	void *memp;
#define	STK_PARAMS	128
	long long stkbuf[STK_PARAMS / sizeof(long long)];

	if (!KTRPOINT(p, KTR_USER))
		return (0);
	if (len > KTR_USER_MAXLEN)
		return (EINVAL);

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_USER);
	memset(ktp.ktr_id, 0, KTR_USER_MAXIDLEN);
	error = copyinstr(id, ktp.ktr_id, KTR_USER_MAXIDLEN, NULL);
	if (error == 0) {
		if (len > sizeof(stkbuf))
			memp = malloc(len, M_TEMP, M_WAITOK);
		else
			memp = stkbuf;
		error = copyin(addr, memp, len);
		if (error == 0) {
			KERNEL_LOCK();
			ktrwrite2(p, &kth, &ktp, sizeof(ktp), memp, len);
			KERNEL_UNLOCK();
		}
		if (memp != stkbuf)
			free(memp, M_TEMP, len);
	}
	atomic_clearbits_int(&p->p_flag, P_INKTR);
	return (error);
}

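/*
 * Record the argument or environment strings of an execve(2),
 * written out in page-sized chunks with a preemption point between
 * chunks.
 */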
void
ktrexec(struct proc *p, int type, const char *data, ssize_t len)
{
	struct ktr_header kth;
	int count;
	int buflen;

	assert(type == KTR_EXECARGS || type == KTR_EXECENV);
	atomic_setbits_int(&p->p_flag, P_INKTR);

	/* beware overflow */
	if (len > PAGE_SIZE)
		buflen = PAGE_SIZE;
	else
		buflen = len;

	ktrinitheader(&kth, p, type);

	while (len > 0) {
		/*
		 * Don't allow this process to hog the cpu when doing
		 * huge I/O.
		 */
		sched_pause(preempt);

		count = lmin(len, buflen);
		if (ktrwrite(p, &kth, data, count) != 0)
			break;

		len -= count;
		data += count;
	}

	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

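/*
 * Record a pledge(2) violation: the error returned, the promise bits
 * involved, and the offending syscall number.
 */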
void
ktrpledge(struct proc *p, int error, uint64_t code, int syscall)
{
	struct ktr_header kth;
	struct ktr_pledge kp;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_PLEDGE);
	kp.error = error;
	kp.code = code;
	kp.syscall = syscall;

	KERNEL_LOCK();
	ktrwrite(p, &kth, &kp, sizeof(kp));
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

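/*
 * Record a violation of the syscall pinning restrictions: the error,
 * the syscall number, and the address it was invoked from.
 */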
void
ktrpinsyscall(struct proc *p, int error, int syscall, vaddr_t addr)
{
	struct ktr_header kth;
	struct ktr_pinsyscall kp;

	atomic_setbits_int(&p->p_flag, P_INKTR);
	ktrinitheader(&kth, p, KTR_PINSYSCALL);
	kp.error = error;
	kp.syscall = syscall;
	kp.addr = addr;

	KERNEL_LOCK();
	ktrwrite(p, &kth, &kp, sizeof(kp));
	KERNEL_UNLOCK();
	atomic_clearbits_int(&p->p_flag, P_INKTR);
}

/* Interface and common routines */

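/*
 * Common implementation of the ktrace operations: validate the vnode
 * and facility set, then apply the requested operation to a single
 * process, a process group, or (with KTRFLAG_DESCEND) a whole
 * subtree of processes.
 */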
int
doktrace(struct vnode *vp, int ops, int facs, pid_t pid, struct proc *p)
{
	struct process *pr = NULL;
	struct ucred *cred = NULL;
	struct pgrp *pg;
	int descend = ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;

	facs = facs & ~((unsigned)KTRFAC_ROOT);
	ops = KTROP(ops);

	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		cred = p->p_ucred;
		if (!vp) {
			error = EINVAL;
			goto done;
		}
		if (vp->v_type != VREG) {
			error = EACCES;
			goto done;
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		LIST_FOREACH(pr, &allprocess, ps_list) {
			if (pr->ps_tracevp == vp) {
				if (ktrcanset(p, pr))
					ktrcleartrace(pr);
				else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	if (ops == KTROP_SET) {
		if (suser(p) == 0)
			facs |= KTRFAC_ROOT;
		error = ktrstart(p, vp, cred);
		if (error != 0)
			goto done;
	}
	/*
	 * do it
	 */
	if (pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
			if (descend)
				ret |= ktrsetchildren(p, pr, ops, facs, vp,
				    cred);
			else
				ret |= ktrops(p, pr, ops, facs, vp, cred);
		}
	} else {
		/*
		 * by pid
		 */
		pr = prfind(pid);
		if (pr == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(p, pr, ops, facs, vp, cred);
		else
			ret |= ktrops(p, pr, ops, facs, vp, cred);
	}
	if (!ret)
		error = EPERM;
done:
	return (error);
}

/*
 * ktrace system call
 */
int
sys_ktrace(struct proc *p, void *v, register_t *retval)
{
	struct sys_ktrace_args /* {
		syscallarg(const char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(pid_t) pid;
	} */ *uap = v;
	struct vnode *vp = NULL;
	const char *fname = SCARG(uap, fname);
	struct ucred *cred = NULL;
	int error;

	if (fname) {
		struct nameidata nd;

		cred = p->p_ucred;
		NDINIT(&nd, 0, 0, UIO_USERSPACE, fname, p);
		nd.ni_pledge = PLEDGE_CPATH | PLEDGE_WPATH;
		nd.ni_unveil = UNVEIL_CREATE | UNVEIL_WRITE;
		if ((error = vn_open(&nd, FWRITE|O_NOFOLLOW, 0)) != 0)
			return error;
		vp = nd.ni_vp;

		VOP_UNLOCK(vp);
	}

	error = doktrace(vp, SCARG(uap, ops), SCARG(uap, facs),
	    SCARG(uap, pid), p);
	if (vp != NULL)
		(void)vn_close(vp, FWRITE, cred, p);

	return error;
}

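/*
 * Apply a single trace operation (set or clear the given facilities)
 * to one process, if the caller is allowed to.  Returns 1 if the
 * permission check passed, 0 otherwise.
 */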
int
ktrops(struct proc *curp, struct process *pr, int ops, int facs,
    struct vnode *vp, struct ucred *cred)
{
	if (!ktrcanset(curp, pr))
		return (0);
	if (ops == KTROP_SET)
		ktrsettrace(pr, facs, vp, cred);
	else {
		/* KTROP_CLEAR */
		pr->ps_traceflag &= ~facs;
		if ((pr->ps_traceflag & KTRFAC_MASK) == 0) {
			/* cleared all the facility bits, so stop completely */
			ktrcleartrace(pr);
		}
	}

	return (1);
}

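/*
 * Apply a trace operation to a process and all of its descendants,
 * walking the process tree iteratively: children first, then
 * siblings, backing up towards top when a subtree is exhausted.
 */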
int
ktrsetchildren(struct proc *curp, struct process *top, int ops, int facs,
    struct vnode *vp, struct ucred *cred)
{
	struct process *pr;
	int ret = 0;

	pr = top;
	for (;;) {
		ret |= ktrops(curp, pr, ops, facs, vp, cred);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&pr->ps_children))
			pr = LIST_FIRST(&pr->ps_children);
		else for (;;) {
			if (pr == top)
				return (ret);
			if (LIST_NEXT(pr, ps_sibling) != NULL) {
				pr = LIST_NEXT(pr, ps_sibling);
				break;
			}
			pr = pr->ps_pptr;
		}
	}
	/*NOTREACHED*/
}

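/*
 * Emit one trace record to the current process's trace vnode.
 * ktrwrite() takes a single data buffer, ktrwrite2() a pair of
 * buffers written as one record; both are no-ops when tracing is off.
 */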
int
ktrwrite(struct proc *p, struct ktr_header *kth, const void *aux, size_t len)
{
	struct vnode *vp = p->p_p->ps_tracevp;
	struct ucred *cred = p->p_p->ps_tracecred;
	struct iovec data[2];
	int error;

	if (vp == NULL)
		return 0;
	crhold(cred);
	data[0].iov_base = (void *)aux;
	data[0].iov_len = len;
	data[1].iov_len = 0;
	kth->ktr_len = len;
	error = ktrwriteraw(p, vp, cred, kth, data);
	crfree(cred);
	return (error);
}

int
ktrwrite2(struct proc *p, struct ktr_header *kth, const void *aux1,
    size_t len1, const void *aux2, size_t len2)
{
	struct vnode *vp = p->p_p->ps_tracevp;
	struct ucred *cred = p->p_p->ps_tracecred;
	struct iovec data[2];
	int error;

	if (vp == NULL)
		return 0;
	crhold(cred);
	data[0].iov_base = (void *)aux1;
	data[0].iov_len = len1;
	data[1].iov_base = (void *)aux2;
	data[1].iov_len = len2;
	kth->ktr_len = len1 + len2;
	error = ktrwriteraw(p, vp, cred, kth, data);
	crfree(cred);
	return (error);
}

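/*
 * Timestamp the header and append it, plus up to two data iovecs, to
 * the trace vnode in a single VOP_WRITE.  On failure, tracing to this
 * vnode is disabled for every process using it.
 */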
int
ktrwriteraw(struct proc *curp, struct vnode *vp, struct ucred *cred,
    struct ktr_header *kth, struct iovec *data)
{
	struct uio auio;
	struct iovec aiov[3];
	struct process *pr;
	int error;

	nanotime(&kth->ktr_time);

	KERNEL_ASSERT_LOCKED();

	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = curp;
	if (kth->ktr_len > 0) {
		aiov[1] = data[0];
		aiov[2] = data[1];
		auio.uio_iovcnt++;
		if (aiov[2].iov_len > 0)
			auio.uio_iovcnt++;
		auio.uio_resid += kth->ktr_len;
	}
	error = vget(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error)
		goto bad;
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, cred);
	vput(vp);
	if (error)
		goto bad;

	return (0);

bad:
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	LIST_FOREACH(pr, &allprocess, ps_list) {
		if (pr == curp->p_p)
			continue;
		if (pr->ps_tracevp == vp && pr->ps_tracecred == cred)
			ktrcleartrace(pr);
	}
	ktrcleartrace(curp->p_p);
	return (error);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
int
ktrcanset(struct proc *callp, struct process *targetpr)
{
	struct ucred *caller = callp->p_ucred;
	struct ucred *target = targetpr->ps_ucred;

	if ((caller->cr_uid == target->cr_ruid &&
	    target->cr_ruid == target->cr_svuid &&
	    caller->cr_rgid == target->cr_rgid &&	/* XXX */
	    target->cr_rgid == target->cr_svgid &&
	    (targetpr->ps_traceflag & KTRFAC_ROOT) == 0 &&
	    !ISSET(targetpr->ps_flags, PS_SUGID)) ||
	    caller->cr_uid == 0)
		return (1);

	return (0);
}