xref: /original-bsd/sys/kern/kern_ktrace.c (revision 68549010)
1 /*
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)kern_ktrace.c	7.16 (Berkeley) 03/18/92
8  */
9 
10 #ifdef KTRACE
11 
12 #include "param.h"
13 #include "proc.h"
14 #include "file.h"
15 #include "namei.h"
16 #include "vnode.h"
17 #include "ktrace.h"
18 #include "malloc.h"
19 #include "syslog.h"
20 
21 struct ktr_header *
22 ktrgetheader(type)
23 {
24 	register struct ktr_header *kth;
25 	struct proc *p = curproc;	/* XXX */
26 
27 	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
28 		M_TEMP, M_WAITOK);
29 	kth->ktr_type = type;
30 	microtime(&kth->ktr_time);
31 	kth->ktr_pid = p->p_pid;
32 	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
33 	return (kth);
34 }
35 
36 ktrsyscall(vp, code, narg, args)
37 	struct vnode *vp;
38 	int code, narg, args[];
39 {
40 	struct	ktr_header *kth;
41 	struct	ktr_syscall *ktp;
42 	register len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
43 	struct proc *p = curproc;	/* XXX */
44 	int 	*argp, i;
45 
46 	p->p_traceflag |= KTRFAC_ACTIVE;
47 	kth = ktrgetheader(KTR_SYSCALL);
48 	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
49 	ktp->ktr_code = code;
50 	ktp->ktr_narg = narg;
51 	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
52 	for (i = 0; i < narg; i++)
53 		*argp++ = args[i];
54 	kth->ktr_buf = (caddr_t)ktp;
55 	kth->ktr_len = len;
56 	ktrwrite(vp, kth);
57 	FREE(ktp, M_TEMP);
58 	FREE(kth, M_TEMP);
59 	p->p_traceflag &= ~KTRFAC_ACTIVE;
60 }
61 
62 ktrsysret(vp, code, error, retval)
63 	struct vnode *vp;
64 	int code, error, retval;
65 {
66 	struct ktr_header *kth;
67 	struct ktr_sysret ktp;
68 	struct proc *p = curproc;	/* XXX */
69 
70 	p->p_traceflag |= KTRFAC_ACTIVE;
71 	kth = ktrgetheader(KTR_SYSRET);
72 	ktp.ktr_code = code;
73 	ktp.ktr_error = error;
74 	ktp.ktr_retval = retval;		/* what about val2 ? */
75 
76 	kth->ktr_buf = (caddr_t)&ktp;
77 	kth->ktr_len = sizeof(struct ktr_sysret);
78 
79 	ktrwrite(vp, kth);
80 	FREE(kth, M_TEMP);
81 	p->p_traceflag &= ~KTRFAC_ACTIVE;
82 }
83 
84 ktrnamei(vp, path)
85 	struct vnode *vp;
86 	char *path;
87 {
88 	struct ktr_header *kth;
89 	struct proc *p = curproc;	/* XXX */
90 
91 	p->p_traceflag |= KTRFAC_ACTIVE;
92 	kth = ktrgetheader(KTR_NAMEI);
93 	kth->ktr_len = strlen(path);
94 	kth->ktr_buf = path;
95 
96 	ktrwrite(vp, kth);
97 	FREE(kth, M_TEMP);
98 	p->p_traceflag &= ~KTRFAC_ACTIVE;
99 }
100 
101 ktrgenio(vp, fd, rw, iov, len, error)
102 	struct vnode *vp;
103 	int fd;
104 	enum uio_rw rw;
105 	register struct iovec *iov;
106 {
107 	struct ktr_header *kth;
108 	register struct ktr_genio *ktp;
109 	register caddr_t cp;
110 	register int resid = len, cnt;
111 	struct proc *p = curproc;	/* XXX */
112 
113 	if (error)
114 		return;
115 	p->p_traceflag |= KTRFAC_ACTIVE;
116 	kth = ktrgetheader(KTR_GENIO);
117 	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
118 		M_TEMP, M_WAITOK);
119 	ktp->ktr_fd = fd;
120 	ktp->ktr_rw = rw;
121 	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
122 	while (resid > 0) {
123 		if ((cnt = iov->iov_len) > resid)
124 			cnt = resid;
125 		if (copyin(iov->iov_base, cp, (unsigned)cnt))
126 			goto done;
127 		cp += cnt;
128 		resid -= cnt;
129 		iov++;
130 	}
131 	kth->ktr_buf = (caddr_t)ktp;
132 	kth->ktr_len = sizeof (struct ktr_genio) + len;
133 
134 	ktrwrite(vp, kth);
135 done:
136 	FREE(kth, M_TEMP);
137 	FREE(ktp, M_TEMP);
138 	p->p_traceflag &= ~KTRFAC_ACTIVE;
139 }
140 
141 ktrpsig(vp, sig, action, mask, code)
142 	struct	vnode *vp;
143 	sig_t	action;
144 {
145 	struct ktr_header *kth;
146 	struct ktr_psig	kp;
147 	struct proc *p = curproc;	/* XXX */
148 
149 	p->p_traceflag |= KTRFAC_ACTIVE;
150 	kth = ktrgetheader(KTR_PSIG);
151 	kp.signo = (char)sig;
152 	kp.action = action;
153 	kp.mask = mask;
154 	kp.code = code;
155 	kth->ktr_buf = (caddr_t)&kp;
156 	kth->ktr_len = sizeof (struct ktr_psig);
157 
158 	ktrwrite(vp, kth);
159 	FREE(kth, M_TEMP);
160 	p->p_traceflag &= ~KTRFAC_ACTIVE;
161 }
162 
163 ktrcsw(vp, out, user)
164 	struct	vnode *vp;
165 	int	out, user;
166 {
167 	struct ktr_header *kth;
168 	struct	ktr_csw kc;
169 	struct proc *p = curproc;	/* XXX */
170 
171 	p->p_traceflag |= KTRFAC_ACTIVE;
172 	kth = ktrgetheader(KTR_CSW);
173 	kc.out = out;
174 	kc.user = user;
175 	kth->ktr_buf = (caddr_t)&kc;
176 	kth->ktr_len = sizeof (struct ktr_csw);
177 
178 	ktrwrite(vp, kth);
179 	FREE(kth, M_TEMP);
180 	p->p_traceflag &= ~KTRFAC_ACTIVE;
181 }
182 
183 /* Interface and common routines */
184 
185 /*
186  * ktrace system call
187  */
188 /* ARGSUSED */
189 ktrace(curp, uap, retval)
190 	struct proc *curp;
191 	register struct args {
192 		char	*fname;
193 		int	ops;
194 		int	facs;
195 		int	pid;
196 	} *uap;
197 	int *retval;
198 {
199 	register struct vnode *vp = NULL;
200 	register struct proc *p;
201 	struct pgrp *pg;
202 	int facs = uap->facs & ~KTRFAC_ROOT;
203 	int ops = KTROP(uap->ops);
204 	int descend = uap->ops & KTRFLAG_DESCEND;
205 	int ret = 0;
206 	int error = 0;
207 	struct nameidata nd;
208 
209 	curp->p_traceflag |= KTRFAC_ACTIVE;
210 	if (ops != KTROP_CLEAR) {
211 		/*
212 		 * an operation which requires a file argument.
213 		 */
214 		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->fname, curp);
215 		if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
216 			curp->p_traceflag &= ~KTRFAC_ACTIVE;
217 			return (error);
218 		}
219 		vp = nd.ni_vp;
220 		VOP_UNLOCK(vp);
221 		if (vp->v_type != VREG) {
222 			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
223 			curp->p_traceflag &= ~KTRFAC_ACTIVE;
224 			return (EACCES);
225 		}
226 	}
227 	/*
228 	 * Clear all uses of the tracefile
229 	 */
230 	if (ops == KTROP_CLEARFILE) {
231 		for (p = allproc; p != NULL; p = p->p_nxt) {
232 			if (p->p_tracep == vp) {
233 				if (ktrcanset(curp, p)) {
234 					p->p_tracep = NULL;
235 					p->p_traceflag = 0;
236 					(void) vn_close(vp, FREAD|FWRITE,
237 						p->p_ucred, p);
238 				} else
239 					error = EPERM;
240 			}
241 		}
242 		goto done;
243 	}
244 	/*
245 	 * need something to (un)trace (XXX - why is this here?)
246 	 */
247 	if (!facs) {
248 		error = EINVAL;
249 		goto done;
250 	}
251 	/*
252 	 * do it
253 	 */
254 	if (uap->pid < 0) {
255 		/*
256 		 * by process group
257 		 */
258 		pg = pgfind(-uap->pid);
259 		if (pg == NULL) {
260 			error = ESRCH;
261 			goto done;
262 		}
263 		for (p = pg->pg_mem; p != NULL; p = p->p_pgrpnxt)
264 			if (descend)
265 				ret |= ktrsetchildren(curp, p, ops, facs, vp);
266 			else
267 				ret |= ktrops(curp, p, ops, facs, vp);
268 
269 	} else {
270 		/*
271 		 * by pid
272 		 */
273 		p = pfind(uap->pid);
274 		if (p == NULL) {
275 			error = ESRCH;
276 			goto done;
277 		}
278 		if (descend)
279 			ret |= ktrsetchildren(curp, p, ops, facs, vp);
280 		else
281 			ret |= ktrops(curp, p, ops, facs, vp);
282 	}
283 	if (!ret)
284 		error = EPERM;
285 done:
286 	if (vp != NULL)
287 		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
288 	curp->p_traceflag &= ~KTRFAC_ACTIVE;
289 	return (error);
290 }
291 
/*
 * Apply a single trace operation to one process.  KTROP_SET attaches
 * p to trace vnode vp (taking a reference, releasing any previous
 * trace vnode) and ors facs into its trace flags; KTROP_CLEAR removes
 * facs and, once no facilities remain, drops the trace vnode entirely.
 * Returns 1 on success, 0 if curp lacks permission (see ktrcanset()).
 */
ktrops(curp, p, ops, facs, vp)
	struct proc *curp, *p;
	struct vnode *vp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
		/* record that root set this; only root may change it later */
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing: clear flags and drop the vnode */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return (1);
}
326 
/*
 * Apply a trace operation (via ktrops()) to top and every process in
 * the subtree rooted at it, walking the child (p_cptr), sibling
 * (p_osptr), and parent (p_pptr) links without recursion.  Returns
 * nonzero if the operation succeeded on at least one process.
 */
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_cptr)
			p = p->p_cptr;
		else if (p == top)
			return (ret);
		else if (p->p_osptr)
			p = p->p_osptr;
		else for (;;) {
			/* climb until we find an unvisited sibling or top */
			p = p->p_pptr;
			if (p == top)
				return (ret);
			if (p->p_osptr) {
				p = p->p_osptr;
				break;
			}
		}
	}
	/*NOTREACHED*/
}
360 
/*
 * Write one trace record to the trace vnode: the fixed header first,
 * then the variable-length payload (kth->ktr_buf, ktr_len bytes) if
 * present, as a single appending write.  If the write fails, tracing
 * is permanently disabled on this vnode for every process using it.
 */
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = curproc;	/* XXX */
	int error;

	/* tracing may have been disabled since the caller checked */
	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	/* second iovec carries the optional record payload */
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	/* detach every process attached to this vnode, dropping each ref */
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}
405 
406 /*
407  * Return true if caller has permission to set the ktracing state
408  * of target.  Essentially, the target can't possess any
409  * more permissions than the caller.  KTRFAC_ROOT signifies that
410  * root previously set the tracing status on the target process, and
411  * so, only root may further change it.
412  *
413  * TODO: check groups.  use caller effective gid.
414  */
415 ktrcanset(callp, targetp)
416 	struct proc *callp, *targetp;
417 {
418 	register struct pcred *caller = callp->p_cred;
419 	register struct pcred *target = targetp->p_cred;
420 
421 	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
422 	     target->p_ruid == target->p_svuid &&
423 	     caller->p_rgid == target->p_rgid &&	/* XXX */
424 	     target->p_rgid == target->p_svgid &&
425 	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
426 	     caller->pc_ucred->cr_uid == 0)
427 		return (1);
428 
429 	return (0);
430 }
431 
432 #endif
433