xref: /original-bsd/sys/kern/kern_ktrace.c (revision d24b829c)
/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)kern_ktrace.c	7.6 (Berkeley) 06/22/90
 */

#ifdef KTRACE

#include "param.h"
#include "syscontext.h"
#include "proc.h"
#include "file.h"
#include "vnode.h"
#include "ktrace.h"
#include "malloc.h"

#include "syscalls.c"

extern int nsysent;
extern char *syscallnames[];

int ktrace_nocheck = 1;

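/*
 * Allocate and fill in the header common to all trace records: the
 * record type, a timestamp, and the pid and command name of the
 * current process.  The caller is responsible for FREEing it.
 */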
struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = u.u_procp->p_pid;
	bcopy(u.u_procp->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

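/*
 * Record a system call entry: the call number, the argument count,
 * and the argument words themselves, packed after a ktr_syscall
 * structure in a temporary buffer.
 */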
ktrsyscall(vp, code, narg, args)
	struct vnode *vp;
	int code, narg, args[];
{
	struct	ktr_header *kth = ktrgetheader(KTR_SYSCALL);
	struct	ktr_syscall *ktp;
	register int len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
	int	*argp, i;

	if (kth == NULL)
		return;
	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
}

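/*
 * Record a system call return: the call number, the error status,
 * and the (first) return value.
 */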
ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error, retval;
{
	struct ktr_header *kth = ktrgetheader(KTR_SYSRET);
	struct ktr_sysret ktp;

	if (kth == NULL)
		return;
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

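/*
 * Record a pathname translated by namei.  The string is already in
 * kernel space, so it is written in place rather than copied.
 */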
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth = ktrgetheader(KTR_NAMEI);

	if (kth == NULL)
		return;
	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

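/*
 * Record the data transferred by a read or write on descriptor fd.
 * The user data described by the iovec list is copied in after a
 * ktr_genio structure; nothing is logged if the I/O itself failed
 * or if any copyin fails.
 */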
ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
	int len, error;
{
	struct ktr_header *kth;
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;

	if (error)		/* don't log failed I/O */
		return;
	kth = ktrgetheader(KTR_GENIO);
	if (kth == NULL)
		return;
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
		M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
}

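/*
 * Record delivery of a signal: the signal number, the action taken,
 * the signal mask in effect, and the signal-specific code.
 */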
ktrpsig(vp, sig, action, mask, code)
	struct	vnode *vp;
	sig_t	action;
	int	sig, mask, code;
{
	struct ktr_header *kth = ktrgetheader(KTR_PSIG);
	struct ktr_psig	kp;

	if (kth == NULL)
		return;
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

/* Interface and common routines */

/*
 * ktrace system call
 */
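/*
 * Usage sketch (illustrative only; the KTROP_*, KTRFAC_*, and
 * KTRFLAG_* definitions assumed below come from ktrace.h): a process
 * might trace its own system calls with
 *
 *	ktrace("ktrace.out", KTROP_SET,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid());
 *
 * and turn tracing off again with KTROP_CLEAR.  A negative pid names
 * a process group, and KTRFLAG_DESCEND applies the operation to the
 * target's descendants as well.
 */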
/* ARGSUSED */
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct args {
		char	*fname;
		int	ops;
		int	facs;
		int	pid;
	} *uap;
	int *retval;
{
	register struct vnode *vp = NULL;
	register struct nameidata *ndp = &u.u_nd;
	register struct proc *p;
	register int ops = KTROP(uap->ops);
	struct pgrp *pg;
	register int facs = uap->facs;
	register int ret = 0;
	int error = 0;

	/*
	 * Until security implications are thought through,
	 * limit tracing to root (unless ktrace_nocheck is set).
	 */
	if (!ktrace_nocheck && (error = suser(u.u_cred, &u.u_acflag)))
		RETURN (error);
	if (ops != KTROP_CLEAR) {
		/*
		 * An operation which requires a file argument.
		 */
		ndp->ni_segflg = UIO_USERSPACE;
		ndp->ni_dirp = uap->fname;
		if (error = vn_open(ndp, FREAD|FWRITE, 0))
			RETURN (error);
		vp = ndp->ni_vp;
		if (vp->v_type != VREG) {
			vrele(vp);
			RETURN (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc; p != NULL; p = p->p_nxt) {
			if (p->p_tracep == vp) {
				p->p_tracep = NULL;
				p->p_traceflag = 0;
				vrele(vp);
			}
		}
		goto done;
	}
	/*
	 * Need something to (un)trace.
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * Do it.
	 */
	if (uap->pid < 0) {
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_mem; p != NULL; p = p->p_pgrpnxt)
			if (uap->ops&KTRFLAG_DESCEND)
				ret |= ktrsetchildren(p, ops, facs, vp);
			else
				ret |= ktrops(p, ops, facs, vp);

	} else {
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (ops&KTRFLAG_DESCEND)
			ret |= ktrsetchildren(p, ops, facs, vp);
		else
			ret |= ktrops(p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		vrele(vp);
	RETURN (error);
}

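/*
 * Apply one set/clear operation to a single process.  Only root or
 * the owner of the process may change its trace state; returns 1 on
 * success, 0 if permission is denied.
 */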
ktrops(p, ops, facs, vp)
	struct proc *p;
	int ops, facs;
	struct vnode *vp;
{

	if (u.u_uid && u.u_uid != p->p_uid)
		return 0;
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & ~KTRFAC_INHERIT) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return 1;
}

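/*
 * Apply a set/clear operation to a process and all of its descendants,
 * walking the process tree iteratively: children first, then siblings,
 * then back up the tree, never above top.  Returns nonzero if any
 * process was affected.
 */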
ktrsetchildren(top, ops, facs, vp)
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_cptr)
			p = p->p_cptr;
		else if (p == top)
			return ret;
		else if (p->p_osptr)
			p = p->p_osptr;
		else for (;;) {
			p = p->p_pptr;
			if (p == top)
				return ret;
			if (p->p_osptr) {
				p = p->p_osptr;
				break;
			}
		}
	}
	/*NOTREACHED*/
}

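/*
 * Write one trace record (header plus optional data buffer) to the
 * trace vnode.  On a write error, tracing is shut off for every
 * process using this vnode.
 */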
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	struct proc *p;
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, u.u_cred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	uprintf("\ntrace write failed with errno %d, tracing stopped\n", error);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}
#endif /* KTRACE */