xref: /original-bsd/sys/kern/kern_ktrace.c (revision 0ac4996f)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)kern_ktrace.c	8.5 (Berkeley) 05/14/95
8  */
9 
10 #ifdef KTRACE
11 
12 #include <sys/param.h>
13 #include <sys/systm.h>
14 #include <sys/proc.h>
15 #include <sys/file.h>
16 #include <sys/namei.h>
17 #include <sys/vnode.h>
18 #include <sys/ktrace.h>
19 #include <sys/malloc.h>
20 #include <sys/syslog.h>
21 
22 #include <sys/mount.h>
23 #include <sys/syscallargs.h>
24 
25 struct ktr_header *
26 ktrgetheader(type)
27 	int type;
28 {
29 	register struct ktr_header *kth;
30 	struct proc *p = curproc;	/* XXX */
31 
32 	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
33 		M_TEMP, M_WAITOK);
34 	kth->ktr_type = type;
35 	microtime(&kth->ktr_time);
36 	kth->ktr_pid = p->p_pid;
37 	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
38 	return (kth);
39 }
40 
41 void
42 ktrsyscall(vp, code, argsize, args)
43 	struct vnode *vp;
44 	int code, argsize;
45 	register_t args[];
46 {
47 	struct	ktr_header *kth;
48 	struct	ktr_syscall *ktp;
49 	register len = sizeof(struct ktr_syscall) + argsize;
50 	struct proc *p = curproc;	/* XXX */
51 	register_t *argp;
52 	int i;
53 
54 	p->p_traceflag |= KTRFAC_ACTIVE;
55 	kth = ktrgetheader(KTR_SYSCALL);
56 	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
57 	ktp->ktr_code = code;
58 	ktp->ktr_argsize = argsize;
59 	argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall));
60 	for (i = 0; i < (argsize / sizeof *argp); i++)
61 		*argp++ = args[i];
62 	kth->ktr_buf = (caddr_t)ktp;
63 	kth->ktr_len = len;
64 	ktrwrite(vp, kth);
65 	FREE(ktp, M_TEMP);
66 	FREE(kth, M_TEMP);
67 	p->p_traceflag &= ~KTRFAC_ACTIVE;
68 }
69 
70 void
71 ktrsysret(vp, code, error, retval)
72 	struct vnode *vp;
73 	int code, error, retval;
74 {
75 	struct ktr_header *kth;
76 	struct ktr_sysret ktp;
77 	struct proc *p = curproc;	/* XXX */
78 
79 	p->p_traceflag |= KTRFAC_ACTIVE;
80 	kth = ktrgetheader(KTR_SYSRET);
81 	ktp.ktr_code = code;
82 	ktp.ktr_error = error;
83 	ktp.ktr_retval = retval;		/* what about val2 ? */
84 
85 	kth->ktr_buf = (caddr_t)&ktp;
86 	kth->ktr_len = sizeof(struct ktr_sysret);
87 
88 	ktrwrite(vp, kth);
89 	FREE(kth, M_TEMP);
90 	p->p_traceflag &= ~KTRFAC_ACTIVE;
91 }
92 
93 void
94 ktrnamei(vp, path)
95 	struct vnode *vp;
96 	char *path;
97 {
98 	struct ktr_header *kth;
99 	struct proc *p = curproc;	/* XXX */
100 
101 	p->p_traceflag |= KTRFAC_ACTIVE;
102 	kth = ktrgetheader(KTR_NAMEI);
103 	kth->ktr_len = strlen(path);
104 	kth->ktr_buf = path;
105 
106 	ktrwrite(vp, kth);
107 	FREE(kth, M_TEMP);
108 	p->p_traceflag &= ~KTRFAC_ACTIVE;
109 }
110 
/*
 * Emit a KTR_GENIO trace record containing the data transferred by a
 * read or write on descriptor `fd'.  Up to `len' bytes are gathered
 * from the user-space iovec list via copyin().  Nothing is recorded
 * for failed I/O (error != 0).
 */
void
ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
	int len, error;
{
	struct ktr_header *kth;
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;
	struct proc *p = curproc;	/* XXX */

	if (error)
		return;
	/* Mark tracing active so this record's own I/O is not traced. */
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_GENIO);
	/* Record buffer: ktr_genio header followed by the copied data. */
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
		M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	/* Copy up to `len' bytes out of the user iovecs, in order. */
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;	/* copy failed: drop the record */
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
152 
153 void
154 ktrpsig(vp, sig, action, mask, code)
155 	struct vnode *vp;
156 	int sig;
157 	sig_t action;
158 	int mask, code;
159 {
160 	struct ktr_header *kth;
161 	struct ktr_psig	kp;
162 	struct proc *p = curproc;	/* XXX */
163 
164 	p->p_traceflag |= KTRFAC_ACTIVE;
165 	kth = ktrgetheader(KTR_PSIG);
166 	kp.signo = (char)sig;
167 	kp.action = action;
168 	kp.mask = mask;
169 	kp.code = code;
170 	kth->ktr_buf = (caddr_t)&kp;
171 	kth->ktr_len = sizeof (struct ktr_psig);
172 
173 	ktrwrite(vp, kth);
174 	FREE(kth, M_TEMP);
175 	p->p_traceflag &= ~KTRFAC_ACTIVE;
176 }
177 
178 void
179 ktrcsw(vp, out, user)
180 	struct vnode *vp;
181 	int out, user;
182 {
183 	struct ktr_header *kth;
184 	struct	ktr_csw kc;
185 	struct proc *p = curproc;	/* XXX */
186 
187 	p->p_traceflag |= KTRFAC_ACTIVE;
188 	kth = ktrgetheader(KTR_CSW);
189 	kc.out = out;
190 	kc.user = user;
191 	kth->ktr_buf = (caddr_t)&kc;
192 	kth->ktr_len = sizeof (struct ktr_csw);
193 
194 	ktrwrite(vp, kth);
195 	FREE(kth, M_TEMP);
196 	p->p_traceflag &= ~KTRFAC_ACTIVE;
197 }
198 
199 /* Interface and common routines */
200 
201 /*
202  * ktrace system call
203  */
204 /* ARGSUSED */
205 int
206 ktrace(curp, uap, retval)
207 	struct proc *curp;
208 	register struct ktrace_args /* {
209 		syscallarg(char *) fname;
210 		syscallarg(int) ops;
211 		syscallarg(int) facs;
212 		syscallarg(int) pid;
213 	} */ *uap;
214 	register_t *retval;
215 {
216 	register struct vnode *vp = NULL;
217 	register struct proc *p;
218 	struct pgrp *pg;
219 	int facs = SCARG(uap, facs) & ~KTRFAC_ROOT;
220 	int ops = KTROP(SCARG(uap, ops));
221 	int descend = SCARG(uap, ops) & KTRFLAG_DESCEND;
222 	int ret = 0;
223 	int error = 0;
224 	struct nameidata nd;
225 
226 	curp->p_traceflag |= KTRFAC_ACTIVE;
227 	if (ops != KTROP_CLEAR) {
228 		/*
229 		 * an operation which requires a file argument.
230 		 */
231 		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname),
232 		    curp);
233 		if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
234 			curp->p_traceflag &= ~KTRFAC_ACTIVE;
235 			return (error);
236 		}
237 		vp = nd.ni_vp;
238 		VOP_UNLOCK(vp, 0, p);
239 		if (vp->v_type != VREG) {
240 			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
241 			curp->p_traceflag &= ~KTRFAC_ACTIVE;
242 			return (EACCES);
243 		}
244 	}
245 	/*
246 	 * Clear all uses of the tracefile
247 	 */
248 	if (ops == KTROP_CLEARFILE) {
249 		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
250 			if (p->p_tracep == vp) {
251 				if (ktrcanset(curp, p)) {
252 					p->p_tracep = NULL;
253 					p->p_traceflag = 0;
254 					(void) vn_close(vp, FREAD|FWRITE,
255 						p->p_ucred, p);
256 				} else
257 					error = EPERM;
258 			}
259 		}
260 		goto done;
261 	}
262 	/*
263 	 * need something to (un)trace (XXX - why is this here?)
264 	 */
265 	if (!facs) {
266 		error = EINVAL;
267 		goto done;
268 	}
269 	/*
270 	 * do it
271 	 */
272 	if (SCARG(uap, pid) < 0) {
273 		/*
274 		 * by process group
275 		 */
276 		pg = pgfind(-SCARG(uap, pid));
277 		if (pg == NULL) {
278 			error = ESRCH;
279 			goto done;
280 		}
281 		for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next)
282 			if (descend)
283 				ret |= ktrsetchildren(curp, p, ops, facs, vp);
284 			else
285 				ret |= ktrops(curp, p, ops, facs, vp);
286 
287 	} else {
288 		/*
289 		 * by pid
290 		 */
291 		p = pfind(SCARG(uap, pid));
292 		if (p == NULL) {
293 			error = ESRCH;
294 			goto done;
295 		}
296 		if (descend)
297 			ret |= ktrsetchildren(curp, p, ops, facs, vp);
298 		else
299 			ret |= ktrops(curp, p, ops, facs, vp);
300 	}
301 	if (!ret)
302 		error = EPERM;
303 done:
304 	if (vp != NULL)
305 		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
306 	curp->p_traceflag &= ~KTRFAC_ACTIVE;
307 	return (error);
308 }
309 
310 int
311 ktrops(curp, p, ops, facs, vp)
312 	struct proc *p, *curp;
313 	int ops, facs;
314 	struct vnode *vp;
315 {
316 
317 	if (!ktrcanset(curp, p))
318 		return (0);
319 	if (ops == KTROP_SET) {
320 		if (p->p_tracep != vp) {
321 			/*
322 			 * if trace file already in use, relinquish
323 			 */
324 			if (p->p_tracep != NULL)
325 				vrele(p->p_tracep);
326 			VREF(vp);
327 			p->p_tracep = vp;
328 		}
329 		p->p_traceflag |= facs;
330 		if (curp->p_ucred->cr_uid == 0)
331 			p->p_traceflag |= KTRFAC_ROOT;
332 	} else {
333 		/* KTROP_CLEAR */
334 		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
335 			/* no more tracing */
336 			p->p_traceflag = 0;
337 			if (p->p_tracep != NULL) {
338 				vrele(p->p_tracep);
339 				p->p_tracep = NULL;
340 			}
341 		}
342 	}
343 
344 	return (1);
345 }
346 
/*
 * Apply ktrops() to `top' and every one of its descendants.  The
 * process tree is walked iteratively: descend to the first child when
 * one exists, otherwise advance to the next sibling, backing up
 * toward `top' (but never past it) when a subtree is exhausted.
 * Returns non-zero if ktrops() succeeded for at least one process.
 * (Return type is implicit int, K&R style.)
 */
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_children.lh_first)
			p = p->p_children.lh_first;
		else for (;;) {
			if (p == top)
				return (ret);
			if (p->p_sibling.le_next) {
				p = p->p_sibling.le_next;
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}
377 
/*
 * Write one trace record to the trace vnode: the ktr_header plus,
 * when ktr_len > 0, the payload at ktr_buf, as a single appended
 * write (IO_UNIT|IO_APPEND) under an exclusive vnode lock.  If the
 * write fails, tracing is revoked for every process using this
 * vnode.  (Return type is implicit int, K&R style; no value is
 * returned.)
 */
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = curproc;	/* XXX */
	int error;

	/* Tracing may have been revoked since the caller checked. */
	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		/* Second iovec carries the record payload. */
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp, 0, p);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}
422 
423 /*
424  * Return true if caller has permission to set the ktracing state
425  * of target.  Essentially, the target can't possess any
426  * more permissions than the caller.  KTRFAC_ROOT signifies that
427  * root previously set the tracing status on the target process, and
428  * so, only root may further change it.
429  *
430  * TODO: check groups.  use caller effective gid.
431  */
432 ktrcanset(callp, targetp)
433 	struct proc *callp, *targetp;
434 {
435 	register struct pcred *caller = callp->p_cred;
436 	register struct pcred *target = targetp->p_cred;
437 
438 	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
439 	     target->p_ruid == target->p_svuid &&
440 	     caller->p_rgid == target->p_rgid &&	/* XXX */
441 	     target->p_rgid == target->p_svgid &&
442 	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
443 	     caller->pc_ucred->cr_uid == 0)
444 		return (1);
445 
446 	return (0);
447 }
448 
449 #endif
450