/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_ktrace.c	7.17 (Berkeley) 05/14/92
 */

/*
 * Kernel support for the ktrace(2) system-call tracing facility.
 *
 * The ktr* emitter functions each build a trace record (a common
 * struct ktr_header followed by a type-specific body) and append it
 * to the trace vnode via ktrwrite().  Every emitter brackets its work
 * with p->p_traceflag |= / &= ~KTRFAC_ACTIVE; presumably this keeps
 * trace points from firing recursively while a record is being
 * written -- confirm against the KTRPOINT() macro in ktrace.h.
 */

#ifdef KTRACE

#include "param.h"
#include "proc.h"
#include "file.h"
#include "namei.h"
#include "vnode.h"
#include "ktrace.h"
#include "malloc.h"
#include "syslog.h"

/*
 * Allocate and fill in the common trace-record header: record type,
 * current time, and the pid and command name of the current process.
 * The caller must set ktr_buf/ktr_len (if any payload follows) and
 * FREE() the header when done.
 */
struct ktr_header *
ktrgetheader(type)
{
	register struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

/*
 * Emit a KTR_SYSCALL record: the system call number, argument count,
 * and a copy of the arguments, stored immediately after the
 * struct ktr_syscall in a single malloc'd buffer.
 */
ktrsyscall(vp, code, narg, args)
	struct vnode *vp;
	int code, narg, args[];
{
	struct ktr_header *kth;
	struct ktr_syscall *ktp;
	/* total record length: fixed part plus one int per argument */
	register len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
	struct proc *p = curproc;	/* XXX */
	int *argp, i;

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSCALL);
	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	/* arguments are copied in directly behind the fixed header */
	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit a KTR_SYSRET record: system call number, error, and the first
 * return value.  The record body lives on the stack; that is safe
 * because ktrwrite() copies it out before this function returns.
 */
ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error, retval;
{
	struct ktr_header *kth;
	struct ktr_sysret ktp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit a KTR_NAMEI record carrying a pathname translated by namei.
 * The path is written with length strlen(path) -- no terminating NUL
 * is included in the record.
 */
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_NAMEI);
	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit a KTR_GENIO record for a generic I/O operation on descriptor
 * fd: the fixed ktr_genio part followed by a copy of the transferred
 * user data, gathered from the iovec array via copyin().  Nothing is
 * logged for an I/O that failed (error != 0), and if copyin() faults
 * partway through, the record is silently dropped.
 *
 * NOTE(review): `len' is an implicit-int K&R parameter (no
 * declaration line), as is the return type -- intentional for this
 * code base's vintage.
 */
ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
{
	struct ktr_header *kth;
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;
	struct proc *p = curproc;	/* XXX */

	if (error)
		return;
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_GENIO);
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
		M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	/* data area begins right after the fixed part of the record */
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;	/* user fault: drop the record */
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit a KTR_PSIG record describing a signal delivered to the
 * current process: signal number, handler, blocked-signal mask, and
 * code.  Like ktrsysret(), the body is a stack temporary copied out
 * by ktrwrite().
 */
ktrpsig(vp, sig, action, mask, code)
	struct vnode *vp;
	sig_t action;
{
	struct ktr_header *kth;
	struct ktr_psig kp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit a KTR_CSW (context switch) record: whether the process is
 * switching out (vs. resuming) and whether it was in user mode.
 */
ktrcsw(vp, out, user)
	struct vnode *vp;
	int out, user;
{
	struct ktr_header *kth;
	struct ktr_csw kc;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth->ktr_buf = (caddr_t)&kc;
	kth->ktr_len = sizeof (struct ktr_csw);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

/*
 * ktrace system call
 *
 * Enable, disable, or clear tracing according to uap->ops:
 *   - KTROP_SET / KTROP_CLEAR (possibly with KTRFLAG_DESCEND to walk
 *     the target's descendants) applied by pid, or by process group
 *     when uap->pid is negative;
 *   - KTROP_CLEARFILE, which detaches every process traced to the
 *     named file.
 * Any operation other than KTROP_CLEAR opens uap->fname for
 * read/write and requires it to be a regular file.
 */
/* ARGSUSED */
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct args {
		char	*fname;
		int	ops;
		int	facs;
		int	pid;
	} *uap;
	int *retval;
{
	USES_VOP_UNLOCK;
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	/* callers may never request the root-only marker bit directly */
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->fname, curp);
		if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc; p != NULL; p = p->p_nxt) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_tracep = NULL;
					p->p_traceflag = 0;
					/*
					 * drops the reference ktrops()
					 * took on behalf of p
					 */
					(void) vn_close(vp, FREAD|FWRITE,
						p->p_ucred, p);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_mem; p != NULL; p = p->p_pgrpnxt)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	/* ret is nonzero iff at least one target passed ktrcanset() */
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		/*
		 * NOTE(review): opened above with FREAD|FWRITE but
		 * closed here with FWRITE only -- confirm the flag
		 * asymmetry is intentional for vn_close().
		 */
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

/*
 * Apply a single SET or CLEAR operation to process p, on behalf of
 * curp.  On SET, point p at the trace vnode (taking a reference, and
 * releasing any previous trace vnode) and OR in the requested
 * facilities; a root caller also sets KTRFAC_ROOT so that only root
 * may later change p's tracing state.  On CLEAR, remove the
 * facilities and drop the vnode once no facilities remain.
 * Returns 1 if permitted, 0 if ktrcanset() refused.
 */
ktrops(curp, p, ops, facs, vp)
	struct proc *curp, *p;
	struct vnode *vp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return (1);
}

/*
 * Apply ktrops() to `top' and all of its descendants, using the
 * p_cptr (child) / p_osptr (older sibling) / p_pptr (parent) links
 * to do an iterative preorder walk without recursion.  Returns the
 * OR of the individual ktrops() results.
 */
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_cptr)
			p = p->p_cptr;
		else if (p == top)
			return (ret);
		else if (p->p_osptr)
			p = p->p_osptr;
		else for (;;) {
			p = p->p_pptr;
			if (p == top)
				return (ret);
			if (p->p_osptr) {
				p = p->p_osptr;
				break;
			}
		}
	}
	/*NOTREACHED*/
}

/*
 * Append one trace record to the trace vnode: the header, plus the
 * payload pointed to by kth->ktr_buf when ktr_len > 0, written as a
 * single locked IO_UNIT|IO_APPEND VOP_WRITE.  A NULL vnode is a
 * no-op.  If the write fails, tracing to this vnode is torn down for
 * every process in the system and the failure is logged.
 */
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	USES_VOP_LOCK;
	USES_VOP_UNLOCK;
	USES_VOP_WRITE;
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = curproc;	/* XXX */
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		/* gather the payload into a second iovec entry */
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = allproc; p != NULL; p = p->p_nxt) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	register struct pcred *caller = callp->p_cred;
	register struct pcred *target = targetp->p_cred;

	/*
	 * Non-root may trace only when the target's real/saved uid
	 * matches the caller's effective uid, the gids likewise
	 * agree, and root has not claimed the target (KTRFAC_ROOT).
	 */
	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif