/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Process-debugging support: the ptrace(2) system call and the helpers it
 * relies on (target-memory access, register access, VM map inspection).
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef COMPAT_FREEBSD32
#include <sys/procfs.h>

/*
 * 32-bit layout of struct ptrace_io_desc, used when a 32-bit client
 * issues PT_IO: pointers and sizes shrink to 32 bits.
 */
struct ptrace_io_desc32 {
	int		piod_op;	/* I/O operation (PIOD_*) */
	u_int32_t	piod_offs;	/* offset in the target process */
	u_int32_t	piod_addr;	/* buffer in the tracing process */
	u_int32_t	piod_len;	/* requested length */
};

/*
 * 32-bit layout of struct ptrace_vm_entry, used when a 32-bit client
 * issues PT_VM_ENTRY.  Mirrors the native structure field for field.
 */
struct ptrace_vm_entry32 {
	int		pve_entry;	/* entry number used for iteration */
	int		pve_timestamp;	/* map generation count */
	uint32_t	pve_start;	/* start VA of the range */
	uint32_t	pve_end;	/* last VA of the range (inclusive) */
	uint32_t	pve_offset;	/* offset into the backing object */
	u_int		pve_prot;	/* protection of the range */
	u_int		pve_pathlen;	/* in: buffer size; out: path length */
	int32_t		pve_fileid;	/* backing vnode's file id */
	u_int		pve_fsid;	/* backing vnode's fs id */
	uint32_t	pve_path;	/* user buffer for the path (pointer) */
};

#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

/*
 * Run the MD register accessor only if the target is resident; a swapped-out
 * process has no pcb to read or write, so report EIO in that case.
 * The caller must hold the process lock of td's process.
 */
#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_FREEBSD32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

/*
 * Transfer data between the address space of process p and the uio,
 * one page at a time.  Each page is faulted in, looked up, held while
 * the copy runs, then released.  Returns 0 on success or an errno;
 * on a short transfer uio->uio_resid tells the caller how much was
 * left undone.
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_object_t backing_object, object;
	vm_offset_t pageno;		/* page number */
	vm_prot_t reqprot;
	int error, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	/* Writes go through VM_PROT_COPY so COW mappings are broken first. */
	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;	/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			if (error == KERN_RESOURCE_SHORTAGE)
				error = ENOMEM;
			else
				error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry and wired
		 * aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		if (writing && m != NULL) {
			/*
			 * Mark the page dirty now so a later pageout cannot
			 * discard the data we are about to write into it.
			 */
			vm_page_dirty(m);
			vm_pager_page_unswapped(m);
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock(m);
		vm_page_hold(m);
		vm_page_unlock(m);

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/* Make the I-cache coherent for breakpoints. */
		if (!error && writing && (out_prot & VM_PROT_EXECUTE))
			vm_sync_icache(map, uva, len);

		/*
		 * Release the page.
		 */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * PT_VM_ENTRY backend: locate the pve->pve_entry'th map entry of process p
 * (skipping submap entries), fill *pve with its attributes, and, when the
 * caller supplied a path buffer, resolve the backing vnode's path, fileid
 * and fsid.  pve->pve_entry is advanced so repeated calls iterate the map.
 * Returns EINVAL for a bad index, ENOENT past the end, ENAMETOOLONG when
 * the path does not fit, or a copyout error.
 */
static int
ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
{
	struct vattr vattr;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj, tobj, lobj;
	struct vmspace *vm;
	struct vnode *vp;
	char *freepath, *fullpath;
	u_int pathlen;
	int error, index, vfslocked;

	error = 0;
	obj = NULL;

	vm = vmspace_acquire_ref(p);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	do {
		/* Walk forward to the requested entry index. */
		entry = map->header.next;
		index = 0;
		while (index < pve->pve_entry && entry != &map->header) {
			entry = entry->next;
			index++;
		}
		if (index != pve->pve_entry) {
			error = EINVAL;
			break;
		}
		/* Submap entries are not reported; skip past them. */
		while (entry != &map->header &&
		    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
			entry = entry->next;
			index++;
		}
		if (entry == &map->header) {
			error = ENOENT;
			break;
		}

		/* We got an entry. */
		pve->pve_entry = index + 1;
		pve->pve_timestamp = map->timestamp;
		pve->pve_start = entry->start;
		pve->pve_end = entry->end - 1;
		pve->pve_offset = entry->offset;
		pve->pve_prot = entry->protection;

		/* Backing object's path needed? */
		if (pve->pve_pathlen == 0)
			break;

		pathlen = pve->pve_pathlen;
		pve->pve_pathlen = 0;

		/*
		 * Keep the object locked across the map unlock below so
		 * its shadow chain stays stable while we descend it.
		 */
		obj = entry->object.vm_object;
		if (obj != NULL)
			VM_OBJECT_LOCK(obj);
	} while (0);

	vm_map_unlock_read(map);
	vmspace_free(vm);

	pve->pve_fsid = VNOVAL;
	pve->pve_fileid = VNOVAL;

	if (error == 0 && obj != NULL) {
		/*
		 * Descend to the bottom of the shadow chain; the lowest
		 * object (lobj) is the one that may be vnode-backed.
		 * Hand-over-hand locking: lock the next object before
		 * dropping the previous one.
		 */
		lobj = obj;
		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
			if (tobj != obj)
				VM_OBJECT_LOCK(tobj);
			if (lobj != obj)
				VM_OBJECT_UNLOCK(lobj);
			lobj = tobj;
			pve->pve_offset += tobj->backing_object_offset;
		}
		vp = (lobj->type == OBJT_VNODE) ? lobj->handle : NULL;
		if (vp != NULL)
			vref(vp);
		if (lobj != obj)
			VM_OBJECT_UNLOCK(lobj);
		VM_OBJECT_UNLOCK(obj);

		if (vp != NULL) {
			freepath = NULL;
			fullpath = NULL;
			/* Best effort: a failed lookup leaves fullpath NULL. */
			vn_fullpath(td, vp, &fullpath, &freepath);
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &vattr, td->td_ucred) == 0) {
				pve->pve_fileid = vattr.va_fileid;
				pve->pve_fsid = vattr.va_fsid;
			}
			vput(vp);
			VFS_UNLOCK_GIANT(vfslocked);

			if (fullpath != NULL) {
				pve->pve_pathlen = strlen(fullpath) + 1;
				if (pve->pve_pathlen <= pathlen) {
					error = copyout(fullpath, pve->pve_path,
					    pve->pve_pathlen);
				} else
					error = ENAMETOOLONG;
			}
			if (freepath != NULL)
				free(freepath, M_TEMP);
		}
	}

	return (error);
}

#ifdef COMPAT_FREEBSD32
/*
 * 32-bit wrapper for ptrace_vm_entry(): translate the request into a
 * native ptrace_vm_entry, run it, and truncate the results back into
 * the 32-bit layout.  pve_pathlen is copied back even on error so the
 * caller learns the required buffer size.
 */
static int
ptrace_vm_entry32(struct thread *td, struct proc *p,
    struct ptrace_vm_entry32 *pve32)
{
	struct ptrace_vm_entry pve;
	int error;

	pve.pve_entry = pve32->pve_entry;
	pve.pve_pathlen = pve32->pve_pathlen;
	pve.pve_path = (void *)(uintptr_t)pve32->pve_path;

	error = ptrace_vm_entry(td, p, &pve);
	if (error == 0) {
		pve32->pve_entry = pve.pve_entry;
		pve32->pve_timestamp = pve.pve_timestamp;
		pve32->pve_start = pve.pve_start;
		pve32->pve_end = pve.pve_end;
		pve32->pve_offset = pve.pve_offset;
		pve32->pve_prot = pve.pve_prot;
		pve32->pve_fileid = pve.pve_fileid;
		pve32->pve_fsid = pve.pve_fsid;
	}

	pve32->pve_pathlen = pve.pve_pathlen;
	return (error);
}
#endif /* COMPAT_FREEBSD32 */

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif

#ifdef COMPAT_FREEBSD32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *	COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *	copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *	copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
/*
 * ptrace(2) entry point: copy request-specific argument structures in
 * from userland, hand off to kern_ptrace(), and copy results back out.
 * Requests with no structured argument pass uap->addr straight through.
 */
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct ptrace_vm_entry pve;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_FREEBSD32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
		struct ptrace_vm_entry32 pve32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0;

	if (SV_CURPROC_FLAG(SV_ILP32))
		wrap32 = 1;
#endif
	AUDIT_ARG_PID(uap->pid);
	AUDIT_ARG_CMD(uap->req);
	AUDIT_ARG_VALUE(uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		/* Output-only requests: nothing to copy in. */
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	case PT_VM_ENTRY:
		error = COPYIN(uap->addr, &r.pve, sizeof r.pve);
		break;
	default:
		/* No structured argument: pass the raw pointer through. */
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_VM_ENTRY:
		error = COPYOUT(&r.pve, uap->addr, sizeof r.pve);
		break;
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		/*
		 * uap->data bytes are copied out; kern_ptrace() has already
		 * validated 0 < data <= sizeof(struct ptrace_lwpinfo).
		 */
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_FREEBSD32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif

/*
 * The in-kernel body of ptrace(2).  Finds the target process (or thread,
 * when pid is actually a tid > PID_MAX), performs the permission checks
 * appropriate to the request, holds the process for the duration, and
 * dispatches on req.  'addr' points at a kernel copy of the argument
 * structure (or is a raw user value for the simple requests).
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_FREEBSD32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* These requests may reparent or resume; take the tree lock. */
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				if (td2 != NULL)
					break;	/* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			/* The tid was given; operate on its process. */
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG_PROCESS(p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		/*
		 * No explicit tid: prefer the thread that reported the
		 * trace stop, else fall back to the first thread.
		 */
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (td2->td_proc->p_sysent->sv_flags & SV_ILP32)
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			/* Rate-limited diagnostic: stop was not trace-induced. */
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		td2->td_dbgflags |= TDB_SUSPEND;
		thread_lock(td2);
		td2->td_flags |= TDF_NEEDSUSPCHK;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		td2->td_dbgflags &= ~TDB_SUSPEND;
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_CONTINUE:
		case PT_TO_SCE:
		case PT_TO_SCX:
		case PT_SYSCALL:
			/* addr == (void *)1 means "resume where stopped". */
			if (addr != (void *)1) {
				error = ptrace_set_pc(td2,
				    (u_long)(uintfptr_t)addr);
				if (error)
					goto out;
			}
			switch (req) {
			case PT_TO_SCE:
				p->p_stops |= S_PT_SCE;
				break;
			case PT_TO_SCX:
				p->p_stops |= S_PT_SCX;
				break;
			case PT_SYSCALL:
				p->p_stops |= S_PT_SCE | S_PT_SCX;
				break;
			}
			break;
		case PT_DETACH:
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
			break;
		}

	sendsig:
		/* Deliver 'data' as a signal and let the process run. */
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			td2->td_dbgflags &= ~TDB_XSIG;
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3) {
					td3->td_dbgflags &= ~TDB_SUSPEND;
				}
			}
			/*
			 * unsuspend all threads, to not let a thread run,
			 * you should use PT_SUSPEND to suspend it before
			 * continuing process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		} else {
			if (data)
				psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		td2->td_dbgflags |= TDB_USERWR;
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_FREEBSD32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_FREEBSD32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			td2->td_dbgflags |= TDB_USERWR;
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
		/* Report the number of bytes actually transferred. */
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		td2->td_dbgflags |= TDB_USERWR;
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 || data > sizeof(*pl)) {
			error = EINVAL;
			break;
		}
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		if (td2->td_dbgflags & TDB_XSIG)
			pl->pl_event = PL_EVENT_SIGNAL;
		pl->pl_flags = 0;
		if (td2->td_dbgflags & TDB_SCE)
			pl->pl_flags |= PL_FLAG_SCE;
		else if (td2->td_dbgflags & TDB_SCX)
			pl->pl_flags |= PL_FLAG_SCX;
		if (td2->td_dbgflags & TDB_EXEC)
			pl->pl_flags |= PL_FLAG_EXEC;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		/*
		 * Drop the proc lock around the M_WAITOK allocation; the
		 * thread count may change meanwhile, so the copy below is
		 * bounded by 'num' and may be short.
		 */
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_VM_TIMESTAMP:
		td->td_retval[0] = p->p_vmspace->vm_map.timestamp;
		break;

	case PT_VM_ENTRY:
		PROC_UNLOCK(p);
#ifdef COMPAT_FREEBSD32
		if (wrap32)
			error = ptrace_vm_entry32(td, p, addr);
		else
#endif
		error = ptrace_vm_entry(td, p, addr);
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}