xref: /original-bsd/sys/kern/kern_ktrace.c (revision b6592f3d)
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_ktrace.c	8.3 (Berkeley) 08/22/94
 */

#ifdef KTRACE

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>

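/*
 * Trace point routines.  Each routine below constructs a ktr_header
 * (type, timestamp, pid, command name) plus a facility-specific payload
 * and hands the record to ktrwrite() for appending to the trace vnode.
 */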
/*
 * Allocate a trace record header and fill in the fields common to all
 * records: type, timestamp, and the pid and command name of the current
 * process.  The caller attaches the facility-specific data.
 */
struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

ktrsyscall(vp, code, narg, args)
	struct vnode *vp;
	int code, narg, args[];
{
	struct	ktr_header *kth;
	struct	ktr_syscall *ktp;
	register len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
	struct proc *p = curproc;	/* XXX */
	int 	*argp, i;

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSCALL);
	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error, retval;
{
	struct ktr_header *kth;
	struct ktr_sysret ktp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_NAMEI);
	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
	int len, error;
{
	struct ktr_header *kth;
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;
	struct proc *p = curproc;	/* XXX */

	if (error)
		return;
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_GENIO);
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
		M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrpsig(vp, sig, action, mask, code)
	struct vnode *vp;
	int sig;
	sig_t action;
	int mask, code;
{
	struct ktr_header *kth;
	struct ktr_psig	kp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrcsw(vp, out, user)
	struct vnode *vp;
	int out, user;
{
	struct ktr_header *kth;
	struct	ktr_csw kc;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth->ktr_buf = (caddr_t)&kc;
	kth->ktr_len = sizeof (struct ktr_csw);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

/*
 * The ktrace system call.
 *
 * fname names the trace file; ops selects KTROP_SET, KTROP_CLEAR or
 * KTROP_CLEARFILE, optionally or'ed with KTRFLAG_DESCEND to apply the
 * operation to all descendants as well; facs is the mask of KTRFAC_*
 * trace points affected; pid identifies the target process, or a
 * process group when negative.
 */
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
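/*
 * Illustrative sketch only (not part of the original source): the
 * ktrace(2) wrapper in userland passes these four arguments straight
 * through, so a process wanting to trace its own system calls and
 * their returns into a file might do roughly:
 *
 *	#include <sys/ktrace.h>
 *	#include <unistd.h>
 *
 *	ktrace("ktrace.out", KTROP_SET | KTRFLAG_DESCEND,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid());
 *
 * and later disable it with KTROP_CLEAR; kdump(1) decodes the file.
 */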
/* ARGSUSED */
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct ktrace_args *uap;
	int *retval;
{
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->fname, curp);
		if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_tracep = NULL;
					p->p_traceflag = 0;
					(void) vn_close(vp, FREAD|FWRITE,
						p->p_ucred, p);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

/*
 * Set or clear the requested trace points on a single process.
 * Returns 0 if the caller lacks permission to do so, 1 otherwise.
 */
int
ktrops(curp, p, ops, facs, vp)
	struct proc *p, *curp;
	int ops, facs;
	struct vnode *vp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return (1);
}

ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_children.lh_first)
			p = p->p_children.lh_first;
		else for (;;) {
			if (p == top)
				return (ret);
			if (p->p_sibling.le_next) {
				p = p->p_sibling.le_next;
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

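/*
 * Write a single trace record to the trace vnode: the header goes out
 * as the first iovec and, when ktr_len is nonzero, the facility buffer
 * as the second, in one atomic appending write (IO_UNIT|IO_APPEND).
 * If the write fails, tracing is shut down for every process that is
 * using this vnode.
 */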
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = curproc;	/* XXX */
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	register struct pcred *caller = callp->p_cred;
	register struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif