xref: /original-bsd/sys/kern/kern_ktrace.c (revision 95a66346)
/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_ktrace.c	7.12 (Berkeley) 03/25/91
 */

#ifdef KTRACE

#include "param.h"
#include "proc.h"
#include "file.h"
#include "vnode.h"
#include "ktrace.h"
#include "malloc.h"
#include "syslog.h"

struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = curproc;

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

ktrsyscall(vp, code, narg, args)
	struct vnode *vp;
	int code, narg, args[];
{
	struct	ktr_header *kth = ktrgetheader(KTR_SYSCALL);
	struct	ktr_syscall *ktp;
	register int len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
	int	*argp, i;

	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
}
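
/*
 * The KTR_SYSCALL payload built above is a struct ktr_syscall followed
 * immediately by ktr_narg ints holding the argument words.  A minimal
 * decoding sketch, not part of the kernel: the helper name is
 * hypothetical and "buf" is assumed to hold one payload already read
 * back from the trace file.
 */
#ifdef notdef
static void
ktrdumpsyscall(buf)
	char *buf;
{
	struct ktr_syscall *sc = (struct ktr_syscall *)buf;
	int *scargs = (int *)(buf + sizeof(struct ktr_syscall));
	int n;

	printf("syscall %d, %d args:", sc->ktr_code, sc->ktr_narg);
	for (n = 0; n < sc->ktr_narg; n++)
		printf(" %#x", scargs[n]);
	printf("\n");
}
#endif /* notdef */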

ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error, retval;
{
	struct ktr_header *kth = ktrgetheader(KTR_SYSRET);
	struct ktr_sysret ktp;

	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth = ktrgetheader(KTR_NAMEI);

	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
	int len, error;
{
	struct ktr_header *kth;
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;

	if (error)
		return;
	/*
	 * Allocate the header only after the early-error check,
	 * so it is not leaked when error is set.
	 */
	kth = ktrgetheader(KTR_GENIO);
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
		M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
}

ktrpsig(vp, sig, action, mask, code)
	struct	vnode *vp;
	int	sig;
	sig_t	action;
	int	mask, code;
{
	struct ktr_header *kth = ktrgetheader(KTR_PSIG);
	struct ktr_psig	kp;

	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

/* Interface and common routines */

/*
 * ktrace system call
 */
/* ARGSUSED */
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct args {
		char	*fname;
		int	ops;
		int	facs;
		int	pid;
	} *uap;
	int *retval;
{
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		nd.ni_segflg = UIO_USERSPACE;
		nd.ni_dirp = uap->fname;
		if (error = vn_open(&nd, curp, FREAD|FWRITE, 0))
			return (error);
		vp = nd.ni_vp;
		if (vp->v_type != VREG) {
			vrele(vp);
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc; p != NULL; p = p->p_nxt) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_tracep = NULL;
					p->p_traceflag = 0;
					vrele(vp);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_mem; p != NULL; p = p->p_pgrpnxt)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		vrele(vp);
	return (error);
}
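
/*
 * Example (not part of the kernel): from user level the routine above
 * is reached through the ktrace(2) stub.  A minimal sketch of enabling
 * syscall entry/exit tracing for an existing process and all of its
 * descendants, assuming the usual <sys/ktrace.h> facility bits; the
 * traced pid and the error handling are placeholders:
 */
#ifdef notdef
	if (ktrace("trace.out", KTROP_SET | KTRFLAG_DESCEND,
	    KTRFAC_SYSCALL | KTRFAC_SYSRET, pid) < 0)
		perror("ktrace");
	/* ... later, turn tracing off again ... */
	(void) ktrace((char *)NULL, KTROP_CLEAR | KTRFLAG_DESCEND,
	    KTRFAC_SYSCALL | KTRFAC_SYSRET, pid);
#endif /* notdef */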

ktrops(curp, p, ops, facs, vp)
	struct proc *curp, *p;
	int ops, facs;
	struct vnode *vp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return (1);
}

ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_cptr)
			p = p->p_cptr;
		else if (p == top)
			return (ret);
		else if (p->p_osptr)
			p = p->p_osptr;
		else for (;;) {
			p = p->p_pptr;
			if (p == top)
				return (ret);
			if (p->p_osptr) {
				p = p->p_osptr;
				break;
			}
		}
	}
	/*NOTREACHED*/
}

ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	struct proc *p;
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, curproc->p_ucred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}
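
/*
 * Each ktrwrite() call thus appends one record to the trace file: a
 * struct ktr_header followed by ktr_len bytes of payload (the ktr_buf
 * pointer itself is only meaningful inside the kernel).  A sketch of
 * how a user-level reader (kdump-style; the variables and the minimal
 * read(2) error handling here are illustrative only) could walk the
 * file:
 */
#ifdef notdef
	struct ktr_header hdr;
	char *payload;

	while (read(fd, &hdr, sizeof(hdr)) == sizeof(hdr)) {
		payload = malloc(hdr.ktr_len);
		if (read(fd, payload, hdr.ktr_len) != hdr.ktr_len)
			break;
		/* dispatch on hdr.ktr_type: KTR_SYSCALL, KTR_SYSRET, ... */
		free(payload);
	}
#endif /* notdef */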

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	register struct pcred *caller = callp->p_cred;
	register struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif /* KTRACE */