xref: /freebsd/sys/kern/kern_ktrace.c (revision 3157ba21)
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user-generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, at
 * the next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (e.g., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
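
/*
 * Example (illustrative only, not part of this file): a userland process
 * can enable tracing of its own system calls to a file roughly as follows;
 * "trace.out" is an arbitrary name, and ktrace(2) and the KTROP_ and
 * KTRFAC_ constants are declared in <sys/ktrace.h>:
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <unistd.h>
 *
 *	ktrace("trace.out", KTROP_SET, KTRFAC_SYSCALL | KTRFAC_SYSRET,
 *	    getpid());
 *	...			(traced activity happens here)
 *	ktrace("trace.out", KTROP_CLEAR, KTRFAC_SYSCALL | KTRFAC_SYSRET,
 *	    getpid());
 *
 * The resulting file is normally decoded with kdump(1), which can also
 * de-interlace records using the pid/tid stored in each ktr_header.
 */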

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct	ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0,					/* KTR_USER */
	0,					/* KTR_STRUCT */
	0,					/* KTR_SYSCTL */
};
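
/*
 * Informational sketch (not code from this file): ktr_writerequest() below
 * emits each record as up to three back-to-back segments, so a KTR_SYSCALL
 * record, for example, is laid out in the trace file as:
 *
 *	struct ktr_header hdr;		header: type, pid, tid, comm, time
 *	struct ktr_syscall sc;		fixed part, per data_lengths[] above
 *	register_t args[sc.ktr_narg];	variable part, from ktr_buffer
 *
 * hdr.ktr_len counts everything after the header, i.e. the fixed part plus
 * the variable-length buffer.
 */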

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
struct mtx ktrace_mtx;
static struct sx ktrace_sx;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct vnode *);
static int ktrops(struct thread *, struct proc *, int, int, struct vnode *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");
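
/*
 * Tuning example (illustrative): the request pool can be inspected and
 * resized at runtime with sysctl(8), or seeded at boot via the
 * kern.ktrace.request_pool loader tunable:
 *
 *	# sysctl kern.ktrace.request_pool
 *	kern.ktrace.request_pool: 100
 *	# sysctl kern.ktrace.request_pool=200
 *
 * If the pool cannot be grown all the way to the requested size, the
 * handler above returns ENOSPC; a shrink simply stops early once the free
 * list is empty.
 */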

static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}

/* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    sizeof(((struct thread *)NULL)->td_name));

static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	ktrace_enter(td);	/* XXX: In caller instead? */
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		ktrace_exit(td);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(td->td_name, req->ktr_header.ktr_comm,
		    sizeof(req->ktr_header.ktr_comm));
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		ktrace_exit(td);
	}
	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under
 * these circumstances, queue the request on the process so that it can be
 * written asynchronously later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
}

/*
 * Drain any pending ktrace records from the per-process queue to disk.
 * This is used both internally before committing other records and on
 * system call return.  We drain all the ones we can find at the time the
 * drain is requested, but don't keep draining after that, as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been queued for the process.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);

	ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

void
ktrsyscall(int code, int narg, register_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;		/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a process exits, drain per-process asynchronous trace records.
 */
void
ktrprocexit(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

/*
 * When a thread returns to user mode, drain any asynchronous records
 * generated by the system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

void
ktrnamei(char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysctl(int *name, u_int namelen)
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/* Look up the name of the MIB. */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
	    NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}

void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct ktr_request *req;
	struct ktr_psig	*kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(curthread, req);
}

void
ktrcsw(int out, int user)
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(curthread, req);
}

void
ktrstruct(const char *name, size_t namelen, void *data, size_t datalen)
{
	struct ktr_request *req;
	char *buf = NULL;
	size_t buflen;

	if (data == NULL)
		datalen = 0;
	buflen = namelen + 1 + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	bcopy(name, buf, namelen);
	buf[namelen] = '\0';
	bcopy(data, buf + namelen + 1, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}
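
/*
 * Callers normally reach ktrstruct() through a small per-structure wrapper
 * macro; a hypothetical wrapper for stat(2) results might look like the
 * following (the stock wrappers live in <sys/ktrace.h>):
 *
 *	#define ktrstat(s) \
 *		ktrstruct("stat", sizeof("stat") - 1, (s), \
 *		    sizeof(struct stat))
 *
 * kdump(1) uses the NUL-terminated name embedded at the front of the
 * record's payload to decide how to decode the bytes that follow it.
 */
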
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0, vfslocked;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * An operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE,
		    uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error) {
			ktrace_exit(td);
			return (error);
		}
		vfslocked = NDHASGIANT(&nd);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			VFS_UNLOCK_GIANT(vfslocked);
			ktrace_exit(td);
			return (EACCES);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		int vrele_count;

		vrele_count = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					cred = p->p_tracecred;
					p->p_tracecred = NULL;
					p->p_tracevp = NULL;
					p->p_traceflag = 0;
					mtx_unlock(&ktrace_mtx);
					vrele_count++;
					crfree(cred);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (vrele_count > 0) {
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			while (vrele_count-- > 0)
				vrele(vp);
			VFS_UNLOCK_GIANT(vfslocked);
		}
		goto done;
	}
	/*
	 * Do the actual (un)trace operation.
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * By process group.
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele().  Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			PROC_UNLOCK(p);
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * By pid.
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		error = p_cansee(td, p);
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (error) {
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}
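
/*
 * Example (illustrative only): userland code can inject application-level
 * markers into an active trace with utrace(2); the payload is opaque to the
 * kernel and is simply copied into a KTR_USER record:
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *
 *	const char marker[] = "phase 2 start";
 *	utrace(marker, sizeof(marker));
 *
 * As enforced above, payloads longer than KTR_USER_MAXLEN fail with EINVAL.
 */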

#ifdef KTRACE
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs, struct vnode *vp)
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct vnode *vp)
{
	struct proc *p;
	int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error, vfslocked;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	cred = td->td_proc->p_tracecred;

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		mtx_unlock(&ktrace_mtx);
		return;
	}
	VREF(vp);
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	crhold(cred);
	mtx_unlock(&ktrace_mtx);

	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_DROP) <
	    sizeof(data_lengths) / sizeof(data_lengths[0]),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	crfree(cred);
	if (!error) {
		vrele(vp);
		VFS_UNLOCK_GIANT(vfslocked);
		return;
	}
	VFS_UNLOCK_GIANT(vfslocked);

	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 * NB: at this point we still hold the vnode reference that must
	 * not go away as we need the valid vnode to compare with.  Thus let
	 * vrele_count start at 1 and the reference will be freed
	 * by the loop at the end after our last use of vp.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 1;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?  Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	/*
	 * We can't clear any pending requests in threads that have cached
	 * them but not yet committed them, as those are per-thread.  The
	 * thread will have to clear it itself on system call return.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	while (vrele_count-- > 0)
		vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if ((targetp->p_traceflag & KTRFAC_ROOT) &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */