/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_ktrace.c	7.15 (Berkeley) 06/21/91
 */

#ifdef KTRACE

#include "param.h"
#include "proc.h"
#include "file.h"
#include "namei.h"
#include "vnode.h"
#include "ktrace.h"
#include "malloc.h"
#include "syslog.h"

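/*
 * Allocate and initialize a trace record header of the given type,
 * stamped with the current time and the current process's pid and
 * command name.  The caller is responsible for freeing it.
 */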
struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

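/*
 * Record a system call entry: the syscall number and a copy of its
 * integer arguments.
 */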
ktrsyscall(vp, code, narg, args)
	struct vnode *vp;
	int code, narg, args[];
{
	struct	ktr_header *kth = ktrgetheader(KTR_SYSCALL);
	struct	ktr_syscall *ktp;
	register int len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
	int	*argp, i;

	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
}

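/*
 * Record a system call return: the syscall number, error status, and
 * first return value.
 */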
ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error, retval;
{
	struct ktr_header *kth = ktrgetheader(KTR_SYSRET);
	struct ktr_sysret ktp;

	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

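/*
 * Record a pathname handed to namei().  The path is written straight
 * from the caller's buffer; only the header is allocated here.
 */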
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth = ktrgetheader(KTR_NAMEI);

	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

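/*
 * Record generic process I/O: copy up to len bytes of the user's
 * iovec data into a temporary buffer and write it with the header.
 * Nothing is recorded if the I/O itself failed.
 */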
ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
	int len, error;
{
	struct ktr_header *kth = ktrgetheader(KTR_GENIO);
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;

	if (error) {
		FREE(kth, M_TEMP);
		return;
	}
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
		M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
}

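/*
 * Record delivery of a signal: signal number, handler, blocked-signal
 * mask, and code.
 */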
ktrpsig(vp, sig, action, mask, code)
	struct	vnode *vp;
	int	sig;
	sig_t	action;
	int	mask, code;
{
	struct ktr_header *kth = ktrgetheader(KTR_PSIG);
	struct ktr_psig	kp;

	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

/* Interface and common routines */

/*
 * ktrace system call
 */
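/*
 * A rough usage sketch from the user side (not part of this file;
 * constants from ktrace.h):
 *
 *	ktrace("trace.out", KTROP_SET | KTRFLAG_DESCEND,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid());
 *
 * starts tracing system call entry and exit for the calling process
 * and its descendants into trace.out.
 */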
/* ARGSUSED */
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct args {
		char	*fname;
		int	ops;
		int	facs;
		int	pid;
	} *uap;
	int *retval;
{
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		nd.ni_segflg = UIO_USERSPACE;
		nd.ni_dirp = uap->fname;
		if (error = vn_open(&nd, curp, FREAD|FWRITE, 0))
			return (error);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc; p != NULL; p = p->p_nxt) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_tracep = NULL;
					p->p_traceflag = 0;
					(void) vn_close(vp, FREAD|FWRITE,
						p->p_ucred, p);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_mem; p != NULL; p = p->p_pgrpnxt)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	return (error);
}

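/*
 * Apply a set or clear of the given trace facilities to one process,
 * switching its trace vnode if necessary.  Returns 1 if the caller had
 * permission to change the target, 0 otherwise.
 */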
ktrops(curp, p, ops, facs, vp)
	struct proc *curp, *p;
	int ops, facs;
	struct vnode *vp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return (1);
}

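/*
 * Apply ktrops() to a process and all of its descendants, walking the
 * child/sibling links below top.
 */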
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_cptr)
			p = p->p_cptr;
		else if (p == top)
			return (ret);
		else if (p->p_osptr)
			p = p->p_osptr;
		else for (;;) {
			p = p->p_pptr;
			if (p == top)
				return (ret);
			if (p->p_osptr) {
				p = p->p_osptr;
				break;
			}
		}
	}
	/*NOTREACHED*/
}

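/*
 * Append one trace record (header plus optional data buffer) to the
 * trace vnode.  On a write error, tracing to this vnode is disabled
 * for every process using it.
 */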
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = curproc;	/* XXX */
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	register struct pcred *caller = callp->p_cred;
	register struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif