xref: /original-bsd/sys/kern/kern_ktrace.c (revision e61f0abc)
/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_ktrace.c	7.9 (Berkeley) 06/28/90
 */

#ifdef KTRACE

#include "param.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "vnode.h"
#include "ktrace.h"
#include "malloc.h"

#include "syscalls.c"

extern int nsysent;
extern char *syscallnames[];

int ktrace_nocheck = 0;	/* set to 1 when security checks in place */

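/*
 * Allocate a trace record header of the given type and fill in the
 * timestamp, pid and command name of the current process.  The caller
 * frees the header once the record has been written.
 */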
struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = u.u_procp->p_pid;
	bcopy(u.u_procp->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

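/*
 * Write a KTR_SYSCALL record: the system call number followed by a
 * copy of its integer arguments.
 */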
ktrsyscall(vp, code, narg, args)
	struct vnode *vp;
	int code, narg, args[];
{
	struct	ktr_header *kth = ktrgetheader(KTR_SYSCALL);
	struct	ktr_syscall *ktp;
	register int len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
	int 	*argp, i;

	if (kth == NULL)
		return;
	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
}

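/*
 * Write a KTR_SYSRET record containing the system call number, its
 * error status and the (first) return value.
 */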
ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error, retval;
{
	struct ktr_header *kth = ktrgetheader(KTR_SYSRET);
	struct ktr_sysret ktp;

	if (kth == NULL)
		return;
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

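/*
 * Write a KTR_NAMEI record containing the pathname being looked up.
 * The path is written directly from the caller's buffer; the
 * terminating NUL is not recorded.
 */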
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth = ktrgetheader(KTR_NAMEI);

	if (kth == NULL)
		return;
	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

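/*
 * Write a KTR_GENIO record for an I/O operation: the descriptor, the
 * direction and a copy of the user data described by the iovec list.
 * Nothing is written if the I/O itself failed or the user data cannot
 * be copied in.
 */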
ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
	int len, error;
{
	struct ktr_header *kth = ktrgetheader(KTR_GENIO);
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;

	if (kth == NULL)
		return;
	if (error) {
		/* the I/O failed; free the header, there is nothing to record */
		FREE(kth, M_TEMP);
		return;
	}
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
		M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
}

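/*
 * Write a KTR_PSIG record describing a posted signal: the signal
 * number, its handler, the blocked signal mask and the signal code.
 */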
ktrpsig(vp, sig, action, mask, code)
	struct	vnode *vp;
	int	sig;
	sig_t	action;
	int	mask, code;
{
	struct ktr_header *kth = ktrgetheader(KTR_PSIG);
	struct ktr_psig	kp;

	if (kth == NULL)
		return;
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

/* Interface and common routines */

/*
 * ktrace system call
 */
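/*
 * A sketch of a typical invocation from user level (assuming the
 * operation and facility bits defined in ktrace.h; "trace.out" and
 * pid are placeholders):
 *
 *	ktrace("trace.out", KTROP_SET | KTRFLAG_DESCEND,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, pid);
 *
 * would trace system call entry/exit and name lookups for pid and its
 * descendants into trace.out; KTROP_CLEARFILE stops every process
 * tracing to the named file.
 */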
/* ARGSUSED */
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct args {
		char	*fname;
		int	ops;
		int	facs;
		int	pid;
	} *uap;
	int *retval;
{
	register struct vnode *vp = NULL;
	register struct nameidata *ndp = &u.u_nd;
	register struct proc *p;
	register int ops = KTROP(uap->ops);
	struct pgrp *pg;
	register int facs = uap->facs;
	register int ret = 0;
	int error = 0;

	/*
	 * Until security implications are thought through,
	 * limit tracing to root (unless ktrace_nocheck is set).
	 */
	if (!ktrace_nocheck && (error = suser(u.u_cred, &u.u_acflag)))
		return (error);
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		ndp->ni_segflg = UIO_USERSPACE;
		ndp->ni_dirp = uap->fname;
		if (error = vn_open(ndp, FREAD|FWRITE, 0))
			return (error);
		vp = ndp->ni_vp;
		if (vp->v_type != VREG) {
			vrele(vp);
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc; p != NULL; p = p->p_nxt) {
			if (p->p_tracep == vp) {
				p->p_tracep = NULL;
				p->p_traceflag = 0;
				vrele(vp);
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * Do it: a negative pid selects a process group,
	 * otherwise a single process.
	 */
	if (uap->pid < 0) {
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_mem; p != NULL; p = p->p_pgrpnxt)
			if (uap->ops&KTRFLAG_DESCEND)
				ret |= ktrsetchildren(p, ops, facs, vp);
			else
				ret |= ktrops(p, ops, facs, vp);

	} else {
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (uap->ops&KTRFLAG_DESCEND)
			ret |= ktrsetchildren(p, ops, facs, vp);
		else
			ret |= ktrops(p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		vrele(vp);
	return (error);
}

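/*
 * Set or clear the requested trace facilities on a single process,
 * gaining or releasing a reference on the trace vnode as needed.
 * Returns 1 on success, 0 if the caller may not trace the process
 * (not superuser and not the owner).
 */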
ktrops(p, ops, facs, vp)
	struct proc *p;
	int ops, facs;
	struct vnode *vp;
{

	if (u.u_uid && u.u_uid != p->p_uid)
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & ~KTRFAC_INHERIT) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return (1);
}

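/*
 * Apply ktrops to a process and all of its descendants, walking the
 * process tree iteratively through the child, sibling and parent links.
 * Returns the OR of the individual ktrops results.
 */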
ktrsetchildren(top, ops, facs, vp)
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_cptr)
			p = p->p_cptr;
		else if (p == top)
			return (ret);
		else if (p->p_osptr)
			p = p->p_osptr;
		else for (;;) {
			p = p->p_pptr;
			if (p == top)
				return (ret);
			if (p->p_osptr) {
				p = p->p_osptr;
				break;
			}
		}
	}
	/*NOTREACHED*/
}

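/*
 * Append a trace record (header plus optional data buffer) to the
 * trace vnode.  On a write error, give up tracing on that vnode for
 * every process using it.
 */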
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	struct proc *p;
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, u.u_cred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	uprintf("\ntrace write failed with errno %d, tracing stopped\n", error);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}
#endif /* KTRACE */