xref: /original-bsd/sys/kern/kern_ktrace.c (revision 086b3864)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)kern_ktrace.c	7.21 (Berkeley) 10/11/92
8  */
9 
10 #ifdef KTRACE
11 
12 #include <sys/param.h>
13 #include <sys/proc.h>
14 #include <sys/file.h>
15 #include <sys/namei.h>
16 #include <sys/vnode.h>
17 #include <sys/ktrace.h>
18 #include <sys/malloc.h>
19 #include <sys/syslog.h>
20 
21 struct ktr_header *
22 ktrgetheader(type)
23 {
24 	register struct ktr_header *kth;
25 	struct proc *p = curproc;	/* XXX */
26 
27 	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
28 		M_TEMP, M_WAITOK);
29 	kth->ktr_type = type;
30 	microtime(&kth->ktr_time);
31 	kth->ktr_pid = p->p_pid;
32 	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
33 	return (kth);
34 }
35 
36 ktrsyscall(vp, code, narg, args)
37 	struct vnode *vp;
38 	int code, narg, args[];
39 {
40 	struct	ktr_header *kth;
41 	struct	ktr_syscall *ktp;
42 	register len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
43 	struct proc *p = curproc;	/* XXX */
44 	int 	*argp, i;
45 
46 	p->p_traceflag |= KTRFAC_ACTIVE;
47 	kth = ktrgetheader(KTR_SYSCALL);
48 	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
49 	ktp->ktr_code = code;
50 	ktp->ktr_narg = narg;
51 	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
52 	for (i = 0; i < narg; i++)
53 		*argp++ = args[i];
54 	kth->ktr_buf = (caddr_t)ktp;
55 	kth->ktr_len = len;
56 	ktrwrite(vp, kth);
57 	FREE(ktp, M_TEMP);
58 	FREE(kth, M_TEMP);
59 	p->p_traceflag &= ~KTRFAC_ACTIVE;
60 }
61 
62 ktrsysret(vp, code, error, retval)
63 	struct vnode *vp;
64 	int code, error, retval;
65 {
66 	struct ktr_header *kth;
67 	struct ktr_sysret ktp;
68 	struct proc *p = curproc;	/* XXX */
69 
70 	p->p_traceflag |= KTRFAC_ACTIVE;
71 	kth = ktrgetheader(KTR_SYSRET);
72 	ktp.ktr_code = code;
73 	ktp.ktr_error = error;
74 	ktp.ktr_retval = retval;		/* what about val2 ? */
75 
76 	kth->ktr_buf = (caddr_t)&ktp;
77 	kth->ktr_len = sizeof(struct ktr_sysret);
78 
79 	ktrwrite(vp, kth);
80 	FREE(kth, M_TEMP);
81 	p->p_traceflag &= ~KTRFAC_ACTIVE;
82 }
83 
84 ktrnamei(vp, path)
85 	struct vnode *vp;
86 	char *path;
87 {
88 	struct ktr_header *kth;
89 	struct proc *p = curproc;	/* XXX */
90 
91 	p->p_traceflag |= KTRFAC_ACTIVE;
92 	kth = ktrgetheader(KTR_NAMEI);
93 	kth->ktr_len = strlen(path);
94 	kth->ktr_buf = path;
95 
96 	ktrwrite(vp, kth);
97 	FREE(kth, M_TEMP);
98 	p->p_traceflag &= ~KTRFAC_ACTIVE;
99 }
100 
101 ktrgenio(vp, fd, rw, iov, len, error)
102 	struct vnode *vp;
103 	int fd;
104 	enum uio_rw rw;
105 	register struct iovec *iov;
106 {
107 	struct ktr_header *kth;
108 	register struct ktr_genio *ktp;
109 	register caddr_t cp;
110 	register int resid = len, cnt;
111 	struct proc *p = curproc;	/* XXX */
112 
113 	if (error)
114 		return;
115 	p->p_traceflag |= KTRFAC_ACTIVE;
116 	kth = ktrgetheader(KTR_GENIO);
117 	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
118 		M_TEMP, M_WAITOK);
119 	ktp->ktr_fd = fd;
120 	ktp->ktr_rw = rw;
121 	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
122 	while (resid > 0) {
123 		if ((cnt = iov->iov_len) > resid)
124 			cnt = resid;
125 		if (copyin(iov->iov_base, cp, (unsigned)cnt))
126 			goto done;
127 		cp += cnt;
128 		resid -= cnt;
129 		iov++;
130 	}
131 	kth->ktr_buf = (caddr_t)ktp;
132 	kth->ktr_len = sizeof (struct ktr_genio) + len;
133 
134 	ktrwrite(vp, kth);
135 done:
136 	FREE(kth, M_TEMP);
137 	FREE(ktp, M_TEMP);
138 	p->p_traceflag &= ~KTRFAC_ACTIVE;
139 }
140 
141 ktrpsig(vp, sig, action, mask, code)
142 	struct	vnode *vp;
143 	sig_t	action;
144 {
145 	struct ktr_header *kth;
146 	struct ktr_psig	kp;
147 	struct proc *p = curproc;	/* XXX */
148 
149 	p->p_traceflag |= KTRFAC_ACTIVE;
150 	kth = ktrgetheader(KTR_PSIG);
151 	kp.signo = (char)sig;
152 	kp.action = action;
153 	kp.mask = mask;
154 	kp.code = code;
155 	kth->ktr_buf = (caddr_t)&kp;
156 	kth->ktr_len = sizeof (struct ktr_psig);
157 
158 	ktrwrite(vp, kth);
159 	FREE(kth, M_TEMP);
160 	p->p_traceflag &= ~KTRFAC_ACTIVE;
161 }
162 
163 ktrcsw(vp, out, user)
164 	struct	vnode *vp;
165 	int	out, user;
166 {
167 	struct ktr_header *kth;
168 	struct	ktr_csw kc;
169 	struct proc *p = curproc;	/* XXX */
170 
171 	p->p_traceflag |= KTRFAC_ACTIVE;
172 	kth = ktrgetheader(KTR_CSW);
173 	kc.out = out;
174 	kc.user = user;
175 	kth->ktr_buf = (caddr_t)&kc;
176 	kth->ktr_len = sizeof (struct ktr_csw);
177 
178 	ktrwrite(vp, kth);
179 	FREE(kth, M_TEMP);
180 	p->p_traceflag &= ~KTRFAC_ACTIVE;
181 }
182 
183 /* Interface and common routines */
184 
185 /*
186  * ktrace system call
187  */
/*
 * Argument structure for the ktrace system call.
 */
struct ktrace_args {
	char	*fname;		/* pathname of the trace log file */
	int	ops;		/* KTROP_* request, may have KTRFLAG_DESCEND set */
	int	facs;		/* KTRFAC_* facilities to trace */
	int	pid;		/* target pid, or negated process group id */
};
/*
 * ktrace system call: enable, disable, or clear kernel tracing of a
 * process, a process group, or (with KTRFLAG_DESCEND) a whole subtree
 * of processes.  Returns 0 or an errno value.
 */
/* ARGSUSED */
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct ktrace_args *uap;
	int *retval;
{
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;	/* callers may not request KTRFAC_ROOT */
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->fname, curp);
		if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		/* only regular files may serve as trace logs */
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		/* walk every process; detach any that trace to this vnode */
		for (p = (struct proc *)allproc; p != NULL; p = p->p_nxt) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_tracep = NULL;
					p->p_traceflag = 0;
					(void) vn_close(vp, FREAD|FWRITE,
						p->p_ucred, p);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_mem; p != NULL; p = p->p_pgrpnxt)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	/* ret is nonzero iff tracing state changed on at least one process */
	if (!ret)
		error = EPERM;
done:
	/* drop the reference acquired by vn_open above */
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}
292 
293 ktrops(curp, p, ops, facs, vp)
294 	struct proc *curp, *p;
295 	struct vnode *vp;
296 {
297 
298 	if (!ktrcanset(curp, p))
299 		return (0);
300 	if (ops == KTROP_SET) {
301 		if (p->p_tracep != vp) {
302 			/*
303 			 * if trace file already in use, relinquish
304 			 */
305 			if (p->p_tracep != NULL)
306 				vrele(p->p_tracep);
307 			VREF(vp);
308 			p->p_tracep = vp;
309 		}
310 		p->p_traceflag |= facs;
311 		if (curp->p_ucred->cr_uid == 0)
312 			p->p_traceflag |= KTRFAC_ROOT;
313 	} else {
314 		/* KTROP_CLEAR */
315 		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
316 			/* no more tracing */
317 			p->p_traceflag = 0;
318 			if (p->p_tracep != NULL) {
319 				vrele(p->p_tracep);
320 				p->p_tracep = NULL;
321 			}
322 		}
323 	}
324 
325 	return (1);
326 }
327 
328 ktrsetchildren(curp, top, ops, facs, vp)
329 	struct proc *curp, *top;
330 	struct vnode *vp;
331 {
332 	register struct proc *p;
333 	register int ret = 0;
334 
335 	p = top;
336 	for (;;) {
337 		ret |= ktrops(curp, p, ops, facs, vp);
338 		/*
339 		 * If this process has children, descend to them next,
340 		 * otherwise do any siblings, and if done with this level,
341 		 * follow back up the tree (but not past top).
342 		 */
343 		if (p->p_cptr)
344 			p = p->p_cptr;
345 		else if (p == top)
346 			return (ret);
347 		else if (p->p_osptr)
348 			p = p->p_osptr;
349 		else for (;;) {
350 			p = p->p_pptr;
351 			if (p == top)
352 				return (ret);
353 			if (p->p_osptr) {
354 				p = p->p_osptr;
355 				break;
356 			}
357 		}
358 	}
359 	/*NOTREACHED*/
360 }
361 
/*
 * Write one trace record (header plus optional payload buffer) to
 * the trace vnode.  If the write fails, tracing is shut down for
 * every process using this vnode.
 */
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = curproc;	/* XXX */
	int error;

	if (vp == NULL)
		return;
	/* first iovec is the header; second (optional) is the payload */
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	/* IO_APPEND keeps concurrent writers from interleaving records */
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = (struct proc *)allproc; p != NULL; p = p->p_nxt) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			/* drop the reference this process held on vp */
			vrele(vp);
		}
	}
}
406 
407 /*
408  * Return true if caller has permission to set the ktracing state
409  * of target.  Essentially, the target can't possess any
410  * more permissions than the caller.  KTRFAC_ROOT signifies that
411  * root previously set the tracing status on the target process, and
412  * so, only root may further change it.
413  *
414  * TODO: check groups.  use caller effective gid.
415  */
416 ktrcanset(callp, targetp)
417 	struct proc *callp, *targetp;
418 {
419 	register struct pcred *caller = callp->p_cred;
420 	register struct pcred *target = targetp->p_cred;
421 
422 	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
423 	     target->p_ruid == target->p_svuid &&
424 	     caller->p_rgid == target->p_rgid &&	/* XXX */
425 	     target->p_rgid == target->p_svgid &&
426 	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
427 	     caller->pc_ucred->cr_uid == 0)
428 		return (1);
429 
430 	return (0);
431 }
432 
433 #endif
434