/*	$OpenBSD: kern_sig.c,v 1.212 2017/06/08 17:14:02 bluhm Exp $	*/
/*	$NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $	*/

/*
 * Copyright (c) 1997 Theo de Raadt. All rights reserved.
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#define	SIGPROP		/* include signal properties table */
#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/queue.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ptrace.h>
#include <sys/sched.h>
#include <sys/user.h>
#include <sys/syslog.h>
#include <sys/pledge.h>
#include <sys/witness.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

int	filt_sigattach(struct knote *kn);
void	filt_sigdetach(struct knote *kn);
int	filt_signal(struct knote *kn, long hint);

struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };

void proc_stop(struct proc *p, int);
void proc_stop_sweep(void *);
struct timeout proc_stop_to;

int cansignal(struct proc *, struct process *, int);

struct pool sigacts_pool;	/* memory pool for sigacts structures */

/*
 * Can thread p send the signal signum to process qr?
 */
int
cansignal(struct proc *p, struct process *qr, int signum)
{
	struct process *pr = p->p_p;
	struct ucred *uc = p->p_ucred;
	struct ucred *quc = qr->ps_ucred;

	if (uc->cr_uid == 0)
		return (1);		/* root can always signal */

	if (pr == qr)
		return (1);		/* process can always signal itself */

	/* optimization: if the same creds then the tests below will pass */
	if (uc == quc)
		return (1);

	if (signum == SIGCONT && qr->ps_session == pr->ps_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes
	 */
	if (qr->ps_flags & PS_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGALRM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			if (uc->cr_ruid == quc->cr_ruid ||
			    uc->cr_uid == quc->cr_ruid)
				return (1);
		}
		return (0);
	}

	if (uc->cr_ruid == quc->cr_ruid ||
	    uc->cr_ruid == quc->cr_svuid ||
	    uc->cr_uid == quc->cr_ruid ||
	    uc->cr_uid == quc->cr_svuid)
		return (1);
	return (0);
}
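
/*
 * For example (assuming an unprivileged caller whose real uid matches
 * the target's, and a target child that has changed uids so PS_SUGID
 * is set): kill(pid, SIGTERM) is permitted by the list above, while
 * kill(pid, SIGPROF) falls through the switch and fails with EPERM.
 */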

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{
	timeout_set(&proc_stop_to, proc_stop_sweep, NULL);

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, IPL_NONE,
	    PR_WAITOK, "sigapl", NULL);
}

/*
 * Create an initial sigacts structure, using the same signal state
 * as pr.
 */
struct sigacts *
sigactsinit(struct process *pr)
{
	struct sigacts *ps;

	ps = pool_get(&sigacts_pool, PR_WAITOK);
	memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
	ps->ps_refcnt = 1;
	return (ps);
}

/*
 * Share a sigacts structure.
 */
struct sigacts *
sigactsshare(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;

	ps->ps_refcnt++;
	return ps;
}

/*
 * Initialize a new sigaltstack structure.
 */
void
sigstkinit(struct sigaltstack *ss)
{
	ss->ss_flags = SS_DISABLE;
	ss->ss_size = 0;
	ss->ss_sp = 0;
}

/*
 * Make this process not share its sigacts, maintaining all
 * signal state.
 */
void
sigactsunshare(struct process *pr)
{
	struct sigacts *newps;

	if (pr->ps_sigacts->ps_refcnt == 1)
		return;

	newps = sigactsinit(pr);
	sigactsfree(pr);
	pr->ps_sigacts = newps;
}

/*
 * Release a sigacts structure.
 */
void
sigactsfree(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;

	if (--ps->ps_refcnt > 0)
		return;

	pr->ps_sigacts = NULL;

	pool_put(&sigacts_pool, ps);
}

int
sys_sigaction(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaction_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction vec;
#ifdef KTRACE
	struct sigaction ovec;
#endif
	struct sigaction *sa;
	const struct sigaction *nsa;
	struct sigaction *osa;
	struct sigacts *ps = p->p_p->ps_sigacts;
	int signum;
	int bit, error;

	signum = SCARG(uap, signum);
	nsa = SCARG(uap, nsa);
	osa = SCARG(uap, osa);

	if (signum <= 0 || signum >= NSIG ||
	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
		return (EINVAL);
	sa = &vec;
	if (osa) {
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if ((ps->ps_sigreset & bit) != 0)
			sa->sa_flags |= SA_RESETHAND;
		if ((ps->ps_siginfo & bit) != 0)
			sa->sa_flags |= SA_SIGINFO;
		if (signum == SIGCHLD) {
			if ((ps->ps_flags & SAS_NOCLDSTOP) != 0)
				sa->sa_flags |= SA_NOCLDSTOP;
			if ((ps->ps_flags & SAS_NOCLDWAIT) != 0)
				sa->sa_flags |= SA_NOCLDWAIT;
		}
		if ((sa->sa_mask & bit) == 0)
			sa->sa_flags |= SA_NODEFER;
		sa->sa_mask &= ~bit;
		error = copyout(sa, osa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ovec = vec;
#endif
	}
	if (nsa) {
		error = copyin(nsa, sa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrsigaction(p, sa);
#endif
		setsigvec(p, signum, sa);
	}
#ifdef KTRACE
	if (osa && KTRPOINT(p, KTR_STRUCT))
		ktrsigaction(p, &ovec);
#endif
	return (0);
}

void
setsigvec(struct proc *p, int signum, struct sigaction *sa)
{
	struct sigacts *ps = p->p_p->ps_sigacts;
	int bit;
	int s;

	bit = sigmask(signum);
	/*
	 * Change setting atomically.
	 */
	s = splhigh();
	ps->ps_sigact[signum] = sa->sa_handler;
	if ((sa->sa_flags & SA_NODEFER) == 0)
		sa->sa_mask |= sigmask(signum);
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDSTOP);
		/*
		 * If the SA_NOCLDWAIT flag is set or the handler
		 * is SIG_IGN we reparent the dying child to PID 1
		 * (init) which will reap the zombie.  Because we use
		 * init to do our dirty work we never set SAS_NOCLDWAIT
		 * for PID 1.
		 * XXX exit1 rework means this is unnecessary?
		 */
		if (initprocess->ps_sigacts != ps &&
		    ((sa->sa_flags & SA_NOCLDWAIT) ||
		    sa->sa_handler == SIG_IGN))
			atomic_setbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
		else
			atomic_clearbits_int(&ps->ps_flags, SAS_NOCLDWAIT);
	}
	if ((sa->sa_flags & SA_RESETHAND) != 0)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if ((sa->sa_flags & SA_SIGINFO) != 0)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if ((sa->sa_flags & SA_ONSTACK) != 0)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	/*
	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in ps_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		atomic_clearbits_int(&p->p_siglist, bit);
		if (signum != SIGCONT)
			ps->ps_sigignore |= bit;	/* easier in psignal */
		ps->ps_sigcatch &= ~bit;
	} else {
		ps->ps_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			ps->ps_sigcatch &= ~bit;
		else
			ps->ps_sigcatch |= bit;
	}
	splx(s);
}
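
/*
 * Example of the interplay above (userland sketch, error checks
 * omitted): installing SIG_IGN for SIGCHLD clears any pending SIGCHLD,
 * records it in ps_sigignore, and, via the SIGCHLD block, sets
 * SAS_NOCLDWAIT so dying children are reparented to init and reaped
 * there rather than left as zombies:
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = SIG_IGN;
 *	sigaction(SIGCHLD, &sa, NULL);
 */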

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct process *pr)
{
	struct sigacts *ps = pr->ps_sigacts;
	int i;

	for (i = 0; i < NSIG; i++)
		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
			ps->ps_sigignore |= sigmask(i);
	ps->ps_flags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
}

/*
 * Reset signals for an exec by the specified thread.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int nc, mask;

	sigactsunshare(p->p_p);
	ps = p->p_p->ps_sigacts;

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (ps->ps_sigcatch) {
		nc = ffs((long)ps->ps_sigcatch);
		mask = sigmask(nc);
		ps->ps_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				ps->ps_sigignore |= mask;
			atomic_clearbits_int(&p->p_siglist, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	sigstkinit(&p->p_sigstk);
	ps->ps_flags &= ~SAS_NOCLDWAIT;
	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
		ps->ps_sigact[SIGCHLD] = SIG_DFL;
}

/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sys_sigprocmask(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigprocmask_args /* {
		syscallarg(int) how;
		syscallarg(sigset_t) mask;
	} */ *uap = v;
	int error = 0;
	sigset_t mask;

	*retval = p->p_sigmask;
	mask = SCARG(uap, mask) &~ sigcantmask;

	switch (SCARG(uap, how)) {
	case SIG_BLOCK:
		atomic_setbits_int(&p->p_sigmask, mask);
		break;
	case SIG_UNBLOCK:
		atomic_clearbits_int(&p->p_sigmask, mask);
		break;
	case SIG_SETMASK:
		p->p_sigmask = mask;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
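
/*
 * The matching libc stub presumably bridges the POSIX pointer
 * interface to this by-value convention, along these lines (sketch;
 * "raw_sigprocmask" is a hypothetical name for the raw syscall, whose
 * old-mask return value lands in *retval above):
 *
 *	int
 *	sigprocmask(int how, const sigset_t *set, sigset_t *oset)
 *	{
 *		sigset_t old;
 *
 *		old = raw_sigprocmask(set ? how : SIG_BLOCK,
 *		    set ? *set : 0);
 *		if (oset)
 *			*oset = old;
 *		return (0);
 *	}
 */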

int
sys_sigpending(struct proc *p, void *v, register_t *retval)
{

	*retval = p->p_siglist;
	return (0);
}

/*
 * Temporarily replace calling proc's signal mask for the duration of a
 * system call.  Original signal mask will be restored by userret().
 */
void
dosigsuspend(struct proc *p, sigset_t newmask)
{
	KASSERT(p == curproc);

	p->p_oldmask = p->p_sigmask;
	atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
	p->p_sigmask = newmask;
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.  Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
int
sys_sigsuspend(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigsuspend_args /* {
		syscallarg(int) mask;
	} */ *uap = v;
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;

	dosigsuspend(p, SCARG(uap, mask) &~ sigcantmask);
	while (tsleep(ps, PPAUSE|PCATCH, "pause", 0) == 0)
		/* void */;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

int
sigonstack(size_t stack)
{
	const struct sigaltstack *ss = &curproc->p_sigstk;

	return (ss->ss_flags & SS_DISABLE ? 0 :
	    (stack - (size_t)ss->ss_sp < ss->ss_size));
}
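
/*
 * The subtraction above relies on unsigned wraparound: with
 * ss_sp == 0x2000 and ss_size == 0x1000, a stack pointer of 0x2800
 * yields 0x800 < 0x1000 (on stack), while 0x1fff wraps to a huge
 * value and compares false, so one comparison covers both bounds.
 */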

int
sys_sigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaltstack_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct sigaltstack ss;
	const struct sigaltstack *nss;
	struct sigaltstack *oss;
	int onstack = sigonstack(PROC_STACK(p));
	int error;

	nss = SCARG(uap, nss);
	oss = SCARG(uap, oss);

	if (oss != NULL) {
		ss = p->p_sigstk;
		if (onstack)
			ss.ss_flags |= SS_ONSTACK;
		if ((error = copyout(&ss, oss, sizeof(ss))))
			return (error);
	}
	if (nss == NULL)
		return (0);
	error = copyin(nss, &ss, sizeof(ss));
	if (error)
		return (error);
	if (onstack)
		return (EPERM);
	if (ss.ss_flags & ~SS_DISABLE)
		return (EINVAL);
	if (ss.ss_flags & SS_DISABLE) {
		p->p_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);
	p->p_sigstk = ss;
	return (0);
}
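
/*
 * Typical userland setup that lands here (sketch; error checks
 * omitted): allocate a stack of at least MINSIGSTKSZ bytes, register
 * it, then install handlers with SA_ONSTACK so delivery switches to it:
 *
 *	struct sigaltstack ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 */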

int
sys_kill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct process *pr;
	int pid = SCARG(uap, pid);
	int signum = SCARG(uap, signum);
	int error;
	int zombie = 0;

	if ((error = pledge_kill(cp, pid)) != 0)
		return (error);
	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (pid > 0) {
		if ((pr = prfind(pid)) == NULL) {
			if ((pr = zombiefind(pid)) == NULL)
				return (ESRCH);
			else
				zombie = 1;
		}
		if (!cansignal(cp, pr, signum))
			return (EPERM);

		/* kill single process */
		if (signum && !zombie)
			prsignal(pr, signum);
		return (0);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, signum, -pid, 0));
	}
}
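
/*
 * So from userland, kill(2)'s pid argument encodes the target set:
 *
 *	kill(1234, SIGTERM);	one process
 *	kill(0, SIGTERM);	caller's own process group
 *	kill(-1234, SIGTERM);	every member of process group 1234
 *	kill(-1, SIGTERM);	broadcast (see killpg1() below)
 *
 * and signum == 0 performs only the lookup and permission checks,
 * which is the usual "does this pid exist" probe.
 */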

int
sys_thrkill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_thrkill_args /* {
		syscallarg(pid_t) tid;
		syscallarg(int) signum;
		syscallarg(void *) tcb;
	} */ *uap = v;
	struct proc *p;
	int tid = SCARG(uap, tid);
	int signum = SCARG(uap, signum);
	void *tcb;

	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (tid > THREAD_PID_OFFSET) {
		if ((p = tfind(tid - THREAD_PID_OFFSET)) == NULL)
			return (ESRCH);

		/* can only kill threads in the same process */
		if (p->p_p != cp->p_p)
			return (ESRCH);
	} else if (tid == 0)
		p = cp;
	else
		return (EINVAL);

	/* optionally require the target thread to have the given tcb addr */
	tcb = SCARG(uap, tcb);
	if (tcb != NULL && tcb != TCB_GET(p))
		return (ESRCH);

	if (signum)
		ptsignal(p, signum, STHREAD);
	return (0);
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct process *pr;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all) {
		/*
		 * broadcast
		 */
		LIST_FOREACH(pr, &allprocess, ps_list) {
			if (pr->ps_pid <= 1 ||
			    pr->ps_flags & (PS_SYSTEM | PS_NOBROADCASTKILL) ||
			    pr == cp->p_p || !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_p->ps_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
			if (pr->ps_pid <= 1 || pr->ps_flags & PS_SYSTEM ||
			    !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}

#define CANDELIVER(uid, euid, pr) \
	(euid == 0 || \
	(uid) == (pr)->ps_ucred->cr_ruid || \
	(uid) == (pr)->ps_ucred->cr_svuid || \
	(uid) == (pr)->ps_ucred->cr_uid || \
	(euid) == (pr)->ps_ucred->cr_ruid || \
	(euid) == (pr)->ps_ucred->cr_svuid || \
	(euid) == (pr)->ps_ucred->cr_uid)
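
/*
 * CANDELIVER() is the uid-based permission test for deliveries that
 * don't originate from a full process context (see csignal() below):
 * root, or any overlap between the sender's real/effective uid and the
 * target's real, saved, or effective uid, permits delivery.
 */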

/*
 * Deliver signum to pgid, but first check uid/euid against each
 * process and see if it is permitted.
 */
void
csignal(pid_t pgid, int signum, uid_t uid, uid_t euid)
{
	struct pgrp *pgrp;
	struct process *pr;

	if (pgid == 0)
		return;
	if (pgid < 0) {
		pgid = -pgid;
		if ((pgrp = pgfind(pgid)) == NULL)
			return;
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (CANDELIVER(uid, euid, pr))
				prsignal(pr, signum);
	} else {
		if ((pr = prfind(pgid)) == NULL)
			return;
		if (CANDELIVER(uid, euid, pr))
			prsignal(pr, signum);
	}
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int signum)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		pgsignal(pgrp, signum, 0);
}

/*
 * Send a signal to a process group.  If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	struct process *pr;

	if (pgrp)
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
				prsignal(pr, signum);
}

/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct proc *p, int signum, u_long trapno, int code,
    union sigval sigval)
{
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	int mask;

	switch (signum) {
	case SIGILL:
	case SIGBUS:
	case SIGSEGV:
		pr->ps_acflag |= ATRAP;
		break;
	}

	mask = sigmask(signum);
	if ((pr->ps_flags & PS_TRACED) == 0 &&
	    (ps->ps_sigcatch & mask) != 0 &&
	    (p->p_sigmask & mask) == 0) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG)) {
			siginfo_t si;

			initsiginfo(&si, signum, trapno, code, sigval);
			ktrpsig(p, signum, ps->ps_sigact[signum],
			    p->p_sigmask, code, &si);
		}
#endif
		p->p_ru.ru_nsignals++;
		(*pr->ps_emul->e_sendsig)(ps->ps_sigact[signum], signum,
		    p->p_sigmask, trapno, code, sigval);
		atomic_setbits_int(&p->p_sigmask, ps->ps_catchmask[signum]);
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
	} else {
		p->p_sisig = signum;
		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
		p->p_sicode = code;
		p->p_sigval = sigval;

		/*
		 * Signals like SIGBUS and SIGSEGV should not, when
		 * generated by the kernel, be ignorable or blockable.
		 * If it is and we're not being traced, then just kill
		 * the process.
		 */
		if ((pr->ps_flags & PS_TRACED) == 0 &&
		    (sigprop[signum] & SA_KILL) &&
		    ((p->p_sigmask & mask) || (ps->ps_sigignore & mask)))
			sigexit(p, signum);
		ptsignal(p, signum, STHREAD);
	}
}
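
/*
 * Fast path vs. slow path: a caught, unmasked, untraced trap signal is
 * pushed straight onto the user frame via e_sendsig() without ever
 * appearing in p_siglist; everything else funnels through ptsignal()
 * and is discovered later by issignal()/postsig().
 */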

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	ptsignal(p, signum, SPROCESS);
}

/*
 * type = SPROCESS	process signal, can be diverted (sigwait())
 *	XXX if blocked in all threads, mark as pending in struct process
 * type = STHREAD	thread signal, but should be propagated if unhandled
 * type = SPROPAGATED	propagated to this thread, so don't propagate again
 */
void
ptsignal(struct proc *p, int signum, enum signal_type type)
{
	int s, prop;
	sig_t action;
	int mask;
	struct process *pr = p->p_p;
	struct proc *q;
	int wakeparent = 0;

#ifdef DIAGNOSTIC
	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
#endif

	/* Ignore signal if the target process is exiting */
	if (pr->ps_flags & PS_EXITING)
		return;

	mask = sigmask(signum);

	if (type == SPROCESS) {
		/* Accept SIGKILL to coredumping processes */
		if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
			if (pr->ps_single != NULL)
				p = pr->ps_single;
			atomic_setbits_int(&p->p_siglist, mask);
			return;
		}

		/*
		 * If the current thread can process the signal
		 * immediately (it's unblocked) then have it take it.
		 */
		q = curproc;
		if (q != NULL && q->p_p == pr && (q->p_flag & P_WEXIT) == 0 &&
		    (q->p_sigmask & mask) == 0)
			p = q;
		else {
			/*
			 * A process-wide signal can be diverted to a
			 * different thread that's in sigwait() for this
			 * signal.  If there isn't such a thread, then
			 * pick a thread that doesn't have it blocked so
			 * that the stop/kill consideration isn't
			 * delayed.  Otherwise, mark it pending on the
			 * main thread.
			 */
			TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
				/* ignore exiting threads */
				if (q->p_flag & P_WEXIT)
					continue;

				/* skip threads that have the signal blocked */
				if ((q->p_sigmask & mask) != 0)
					continue;

				/* okay, could send to this thread */
				p = q;

				/*
				 * sigsuspend, sigwait, ppoll/pselect, etc?
				 * Definitely go to this thread, as it's
				 * already blocked in the kernel.
				 */
				if (q->p_flag & P_SIGSUSPEND)
					break;
			}
		}
	}

	if (type != SPROPAGATED)
		KNOTE(&pr->ps_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (pr->ps_flags & PS_TRACED) {
		action = SIG_DFL;
		atomic_setbits_int(&p->p_siglist, mask);
	} else {
		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		if (pr->ps_sigacts->ps_sigignore & mask)
			return;
		if (p->p_sigmask & mask) {
			action = SIG_HOLD;
		} else if (pr->ps_sigacts->ps_sigcatch & mask) {
			action = SIG_CATCH;
		} else {
			action = SIG_DFL;

			if (prop & SA_KILL && pr->ps_nice > NZERO)
				pr->ps_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
				return;
		}

		atomic_setbits_int(&p->p_siglist, mask);
	}

	if (prop & SA_CONT)
		atomic_clearbits_int(&p->p_siglist, stopsigmask);

	if (prop & SA_STOP) {
		atomic_clearbits_int(&p->p_siglist, contsigmask);
		atomic_clearbits_int(&p->p_flag, P_CONTINUED);
	}

	/*
	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
	 */
	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED)
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
			if (q != p)
				ptsignal(q, signum, SPROPAGATED);

	/*
	 * Defer further processing for signals which are held,
	 * except that stopped processes must be continued by SIGCONT.
	 */
	if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
		return;

	SCHED_LOCK(s);

	switch (p->p_stat) {

	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((p->p_flag & P_SINTR) == 0)
			goto out;
		/*
		 * Process is sleeping and traced... make it runnable
		 * so it can discover the signal in issignal() and stop
		 * for the parent.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto run;
		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}
		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (pr->ps_flags & PS_PPWAIT)
				goto out;
			atomic_clearbits_int(&p->p_siglist, mask);
			p->p_xstat = signum;
			proc_stop(p, 0);
			goto out;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 */
		goto runfast;
		/*NOTREACHED*/

	case SSTOP:
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto out;

		/*
		 * Kill signal always sets processes running.
		 */
		if (signum == SIGKILL) {
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			goto runfast;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.  If it isn't waiting on
			 * an event, then it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			atomic_setbits_int(&p->p_flag, P_CONTINUED);
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			wakeparent = 1;
			if (action == SIG_DFL)
				atomic_clearbits_int(&p->p_siglist, mask);
			if (action == SIG_CATCH)
				goto runfast;
			if (p->p_wchan == 0)
				goto run;
			p->p_stat = SSLEEP;
			goto out;
		}

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			atomic_clearbits_int(&p->p_siglist, mask);
			goto out;
		}

		/*
		 * If process is sleeping interruptibly, then simulate a
		 * wakeup so that when it is continued, it will be made
		 * runnable and can look at the signal.  But don't make
		 * the process runnable, leave it stopped.
		 */
		if (p->p_wchan && p->p_flag & P_SINTR)
			unsleep(p);
		goto out;

	case SONPROC:
		signotify(p);
		/* FALLTHROUGH */
	default:
		/*
		 * SRUN, SIDL, SDEAD do nothing with the signal,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		goto out;
	}
	/*NOTREACHED*/

runfast:
	/*
	 * Raise priority to at least PUSER.
	 */
	if (p->p_priority > PUSER)
		p->p_priority = PUSER;
run:
	setrunnable(p);
out:
	SCHED_UNLOCK(s);
	if (wakeparent)
		wakeup(pr->ps_pptr);
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro.)  The normal call
 * sequence is
 *
 *	while (signum = CURSIG(curproc))
 *		postsig(signum);
 *
 * Assumes that if the P_SINTR flag is set, we're holding both the
 * kernel and scheduler locks.
 */
int
issignal(struct proc *p)
{
	struct process *pr = p->p_p;
	int signum, mask, prop;
	int dolock = (p->p_flag & P_SINTR) == 0;
	int s;

	for (;;) {
		mask = p->p_siglist & ~p->p_sigmask;
		if (pr->ps_flags & PS_PPWAIT)
			mask &= ~stopsigmask;
		if (mask == 0)		/* no signal to send */
			return (0);
		signum = ffs((long)mask);
		mask = sigmask(signum);
		atomic_clearbits_int(&p->p_siglist, mask);

		/*
		 * We should see pending but ignored signals
		 * only if PS_TRACED was on when they were posted.
		 */
		if (mask & pr->ps_sigacts->ps_sigignore &&
		    (pr->ps_flags & PS_TRACED) == 0)
			continue;

		if ((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) {
			/*
			 * If traced, always stop, and stay
			 * stopped until released by the debugger.
			 */
			p->p_xstat = signum;

			if (dolock)
				KERNEL_LOCK();
			single_thread_set(p, SINGLE_PTRACE, 0);
			if (dolock)
				KERNEL_UNLOCK();

			if (dolock)
				SCHED_LOCK(s);
			proc_stop(p, 1);
			if (dolock)
				SCHED_UNLOCK(s);

			if (dolock)
				KERNEL_LOCK();
			single_thread_clear(p, 0);
			if (dolock)
				KERNEL_UNLOCK();

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 || p->p_xstat == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			signum = p->p_xstat;
			mask = sigmask(signum);
			if ((p->p_sigmask & mask) != 0)
				continue;

			/* take the signal! */
			atomic_clearbits_int(&p->p_siglist, mask);
		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)pr->ps_sigacts->ps_sigact[signum]) {
		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (pr->ps_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal"
				    " %d\n", pr->ps_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (pr->ps_flags & PS_TRACED ||
				    (pr->ps_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				p->p_xstat = signum;
				if (dolock)
					SCHED_LOCK(s);
				proc_stop(p, 1);
				if (dolock)
					SCHED_UNLOCK(s);
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/*NOTREACHED*/
		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (pr->ps_flags & PS_TRACED) == 0)
				printf("issignal\n");
			break;		/* == ignore */
		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	atomic_setbits_int(&p->p_siglist, mask);	/* leave the signal for later */
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup.  Signals are handled elsewhere.  The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int sw)
{
	struct process *pr = p->p_p;
	extern void *softclock_si;

#ifdef MULTIPROCESSOR
	SCHED_ASSERT_LOCKED();
#endif

	p->p_stat = SSTOP;
	atomic_clearbits_int(&pr->ps_flags, PS_WAITED);
	atomic_setbits_int(&pr->ps_flags, PS_STOPPED);
	atomic_setbits_int(&p->p_flag, P_SUSPSIG);
	if (!timeout_pending(&proc_stop_to)) {
		timeout_add(&proc_stop_to, 0);
		/*
		 * We need this soft interrupt to be handled fast.
		 * Extra calls to softclock don't hurt.
		 */
		softintr_schedule(softclock_si);
	}
	if (sw)
		mi_switch();
}

/*
 * Called from a timeout to send signals to the parents of stopped processes.
 * We can't do this in proc_stop because it's called with nasty locks held
 * and we would need recursive scheduler lock to deal with that.
 */
void
proc_stop_sweep(void *v)
{
	struct process *pr;

	LIST_FOREACH(pr, &allprocess, ps_list) {
		if ((pr->ps_flags & PS_STOPPED) == 0)
			continue;
		atomic_clearbits_int(&pr->ps_flags, PS_STOPPED);

		if ((pr->ps_pptr->ps_sigacts->ps_flags & SAS_NOCLDSTOP) == 0)
			prsignal(pr->ps_pptr, SIGCHLD);
		wakeup(pr->ps_pptr);
	}
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(int signum)
{
	struct proc *p = curproc;
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	sig_t action;
	u_long trapno;
	int mask, returnmask;
	union sigval sigval;
	int s, code;

#ifdef DIAGNOSTIC
	if (signum == 0)
		panic("postsig");
#endif

	KERNEL_LOCK();

	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	action = ps->ps_sigact[signum];
	sigval.sival_ptr = 0;

	if (p->p_sisig != signum) {
		trapno = 0;
		code = SI_USER;
		sigval.sival_ptr = 0;
	} else {
		trapno = p->p_sitrapno;
		code = p->p_sicode;
		sigval = p->p_sigval;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		siginfo_t si;

		initsiginfo(&si, signum, trapno, code, sigval);
		ktrpsig(p, signum, action, p->p_flag & P_SIGSUSPEND ?
		    p->p_oldmask : p->p_sigmask, code, &si);
	}
#endif
	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
#ifdef MULTIPROCESSOR
		s = splsched();
#else
		s = splhigh();
#endif
		if (p->p_flag & P_SIGSUSPEND) {
			atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
			returnmask = p->p_oldmask;
		} else {
			returnmask = p->p_sigmask;
		}
		atomic_setbits_int(&p->p_sigmask, ps->ps_catchmask[signum]);
		if ((ps->ps_sigreset & mask) != 0) {
			ps->ps_sigcatch &= ~mask;
			if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
				ps->ps_sigignore |= mask;
			ps->ps_sigact[signum] = SIG_DFL;
		}
		splx(s);
		p->p_ru.ru_nsignals++;
		if (p->p_sisig == signum) {
			p->p_sisig = 0;
			p->p_sitrapno = 0;
			p->p_sicode = SI_USER;
			p->p_sigval.sival_ptr = NULL;
		}

		(*pr->ps_emul->e_sendsig)(action, signum, returnmask, trapno,
		    code, sigval);
	}

	KERNEL_UNLOCK();
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	p->p_p->ps_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sisig = signum;

		/* if there are other threads, pause them */
		if (P_HASSIBLING(p))
			single_thread_set(p, SINGLE_SUSPEND, 0);

		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, W_EXITCODE(0, signum), EXIT_NORMAL);
	/* NOTREACHED */
}

int nosuidcoredump = 1;

struct coredump_iostate {
	struct proc *io_proc;
	struct vnode *io_vp;
	struct ucred *io_cred;
	off_t io_offset;
};

/*
 * Dump core, into a file named "progname.core", unless the process was
 * setuid/setgid.
 */
int
coredump(struct proc *p)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct process *pr = p->p_p;
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	int error, len, incrash = 0;
	char name[MAXPATHLEN];
	const char *dir = "/var/crash";

	if (pr->ps_emul->e_coredump == NULL)
		return (EINVAL);

	pr->ps_flags |= PS_COREDUMP;

	/*
	 * If the process has inconsistent uids, nosuidcoredump
	 * determines coredump placement policy.
	 */
	if (((pr->ps_flags & PS_SUGID) && (error = suser(p, 0))) ||
	    ((pr->ps_flags & PS_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 3 || nosuidcoredump == 2)
			incrash = 1;
		else
			return (EPERM);
	}
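
	/*
	 * The name construction below follows the nosuidcoredump
	 * setting (the kern.nosuidcoredump sysctl): with the default
	 * of 1, sugid processes don't dump at all (EPERM above);
	 * 2 places the dump in /var/crash/<progname>.core and 3 in
	 * /var/crash/<progname>/<pid>.core; 0 treats sugid processes
	 * like any other.
	 */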

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur)
		return (EFBIG);

	if (incrash && nosuidcoredump == 3) {
		/*
		 * If the program directory does not exist, dumps of
		 * that core will silently fail.
		 */
		len = snprintf(name, sizeof(name), "%s/%s/%u.core",
		    dir, pr->ps_comm, pr->ps_pid);
	} else if (incrash && nosuidcoredump == 2)
		len = snprintf(name, sizeof(name), "%s/%s.core",
		    dir, pr->ps_comm);
	else
		len = snprintf(name, sizeof(name), "%s.core", pr->ps_comm);
	if (len >= sizeof(name))
		return (EACCES);

	/*
	 * Control the UID used to write out.  The normal case uses
	 * the real UID.  If the sugid case is going to write into the
	 * controlled directory, we do so as root.
	 */
	if (incrash == 0) {
		cred = crdup(cred);
		cred->cr_uid = cred->cr_ruid;
		cred->cr_gid = cred->cr_rgid;
	} else {
		if (p->p_fd->fd_rdir) {
			vrele(p->p_fd->fd_rdir);
			p->p_fd->fd_rdir = NULL;
		}
		p->p_ucred = crdup(p->p_ucred);
		crfree(cred);
		cred = p->p_ucred;
		crhold(cred);
		cred->cr_uid = 0;
		cred->cr_gid = 0;
	}

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR | S_IWUSR);

	if (error)
		goto out;

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
		VOP_UNLOCK(vp, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		VOP_UNLOCK(vp, p);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	pr->ps_acflag |= ACORE;

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;
	VOP_UNLOCK(vp, p);
	vref(vp);
	error = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = (*pr->ps_emul->e_coredump)(p, &io);
	vrele(vp);
out:
	crfree(cred);
	return (error);
#endif
}

#ifndef SMALL_KERNEL
int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len)
{
	struct coredump_iostate *io = cookie;
	off_t coffset = 0;
	size_t csize;
	int chunk, error;

	csize = len;
	do {
		if (io->io_proc->p_siglist & sigmask(SIGKILL))
			return (EINTR);

		/* Rest of the loop sleeps with lock held, so... */
		yield();

		chunk = MIN(csize, MAXPHYS);
		error = vn_rdwr(UIO_WRITE, io->io_vp,
		    (caddr_t)data + coffset, chunk,
		    io->io_offset + coffset, segflg,
		    IO_UNIT, io->io_cred, NULL, io->io_proc);
		if (error) {
			struct process *pr = io->io_proc->p_p;
			if (error == ENOSPC)
				log(LOG_ERR, "coredump of %s(%d) failed, filesystem full\n",
				    pr->ps_comm, pr->ps_pid);
			else
				log(LOG_ERR, "coredump of %s(%d), write failed: errno %d\n",
				    pr->ps_comm, pr->ps_pid, error);
			return (error);
		}

		coffset += chunk;
		csize -= chunk;
	} while (csize > 0);

	io->io_offset += len;
	return (0);
}

void
coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
{
	struct coredump_iostate *io = cookie;

	uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
}

#endif	/* !SMALL_KERNEL */

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or
 * ignored).
 */
int
sys_nosys(struct proc *p, void *v, register_t *retval)
{

	ptsignal(p, SIGSYS, STHREAD);
	return (ENOSYS);
}

int
sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
{
	static int sigwaitsleep;
	struct sys___thrsigdivert_args /* {
		syscallarg(sigset_t) sigmask;
		syscallarg(siginfo_t *) info;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct process *pr = p->p_p;
	sigset_t *m;
	sigset_t mask = SCARG(uap, sigmask) &~ sigcantmask;
	siginfo_t si;
	uint64_t to_ticks = 0;
	int timeinvalid = 0;
	int error = 0;

	memset(&si, 0, sizeof(si));

	if (SCARG(uap, timeout) != NULL) {
		struct timespec ts;
		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
			timeinvalid = 1;
		else {
			to_ticks = (uint64_t)hz * ts.tv_sec +
			    ts.tv_nsec / (tick * 1000);
			if (to_ticks > INT_MAX)
				to_ticks = INT_MAX;
			if (to_ticks == 0 && ts.tv_nsec)
				to_ticks = 1;
		}
	}

	dosigsuspend(p, p->p_sigmask &~ mask);
	for (;;) {
		si.si_signo = CURSIG(p);
		if (si.si_signo != 0) {
			sigset_t smask = sigmask(si.si_signo);
			if (smask & mask) {
				if (p->p_siglist & smask)
					m = &p->p_siglist;
				else if (pr->ps_mainproc->p_siglist & smask)
					m = &pr->ps_mainproc->p_siglist;
				else {
					/* signal got eaten by someone else? */
					continue;
				}
				atomic_clearbits_int(m, smask);
				error = 0;
				break;
			}
		}

		/* per-POSIX, delay this error until after the above */
		if (timeinvalid)
			error = EINVAL;

		if (SCARG(uap, timeout) != NULL && to_ticks == 0)
			error = EAGAIN;

		if (error != 0)
			break;

		error = tsleep(&sigwaitsleep, PPAUSE|PCATCH, "sigwait",
		    (int)to_ticks);
	}

	if (error == 0) {
		*retval = si.si_signo;
		if (SCARG(uap, info) != NULL)
			error = copyout(&si, SCARG(uap, info), sizeof(si));
	} else if (error == ERESTART && SCARG(uap, timeout) != NULL) {
		/*
		 * Restarting is wrong if there's a timeout, as it'll be
		 * for the same interval again
		 */
		error = EINTR;
	}

	return (error);
}

void
initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
{
	memset(si, 0, sizeof(*si));

	si->si_signo = sig;
	si->si_code = code;
	if (code == SI_USER) {
		si->si_value = val;
	} else {
		switch (sig) {
		case SIGSEGV:
		case SIGILL:
		case SIGBUS:
		case SIGFPE:
			si->si_addr = val.sival_ptr;
			si->si_trapno = trapno;
			break;
		case SIGXFSZ:
			break;
		}
	}
}

int
filt_sigattach(struct knote *kn)
{
	struct process *pr = curproc->p_p;

	if (kn->kn_id >= NSIG)
		return EINVAL;

	kn->kn_ptr.p_process = pr;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/* XXX lock the proc here while adding to the list? */
	SLIST_INSERT_HEAD(&pr->ps_klist, kn, kn_selnext);

	return (0);
}

void
filt_sigdetach(struct knote *kn)
{
	struct process *pr = kn->kn_ptr.p_process;

	SLIST_REMOVE(&pr->ps_klist, kn, knote, kn_selnext);
}

/*
 * signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
int
filt_signal(struct knote *kn, long hint)
{

	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}
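
/*
 * These three filter routines back EVFILT_SIGNAL.  A userland sketch
 * that counts SIGUSR1 deliveries via kqueue (illustrative only; error
 * handling omitted):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 *
 * after which kev.data carries the count accumulated by filt_signal()
 * above (EV_CLEAR, set in filt_sigattach(), resets it on retrieval).
 */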

void
userret(struct proc *p)
{
	int sig;

	/* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
	if (p->p_flag & P_PROFPEND) {
		atomic_clearbits_int(&p->p_flag, P_PROFPEND);
		KERNEL_LOCK();
		psignal(p, SIGPROF);
		KERNEL_UNLOCK();
	}
	if (p->p_flag & P_ALRMPEND) {
		atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
		KERNEL_LOCK();
		psignal(p, SIGVTALRM);
		KERNEL_UNLOCK();
	}

	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	/*
	 * If P_SIGSUSPEND is still set here, then we still need to restore
	 * the original sigmask before returning to userspace.  Also, this
	 * might unmask some pending signals, so we need to check a second
	 * time for signals to post.
	 */
	if (p->p_flag & P_SIGSUSPEND) {
		atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
		p->p_sigmask = p->p_oldmask;

		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	if (p->p_flag & P_SUSPSINGLE) {
		KERNEL_LOCK();
		single_thread_check(p, 0);
		KERNEL_UNLOCK();
	}

	WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}

int
single_thread_check(struct proc *p, int deep)
{
	struct process *pr = p->p_p;

	if (pr->ps_single != NULL && pr->ps_single != p) {
		do {
			int s;

			/* if we're in deep, we need to unwind to the edge */
			if (deep) {
				if (pr->ps_flags & PS_SINGLEUNWIND)
					return (ERESTART);
				if (pr->ps_flags & PS_SINGLEEXIT)
					return (EINTR);
			}

			if (--pr->ps_singlecount == 0)
				wakeup(&pr->ps_singlecount);
			if (pr->ps_flags & PS_SINGLEEXIT)
				exit1(p, 0, EXIT_THREAD_NOCHECK);

			/* not exiting and don't need to unwind, so suspend */
			SCHED_LOCK(s);
			p->p_stat = SSTOP;
			mi_switch();
			SCHED_UNLOCK(s);
		} while (pr->ps_single != NULL);
	}

	return (0);
}

/*
 * Stop other threads in the process.  The mode controls how and
 * where the other threads should stop:
 *  - SINGLE_SUSPEND: stop wherever they are, will later either be told to exit
 *    (by setting to SINGLE_EXIT) or be released (via single_thread_clear())
 *  - SINGLE_PTRACE: stop wherever they are, will wait for them to stop
 *    later (via single_thread_wait()) and released as with SINGLE_SUSPEND
 *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
 *    or released as with SINGLE_SUSPEND
 *  - SINGLE_EXIT: unwind to kernel boundary and exit
 */
int
single_thread_set(struct proc *p, enum single_thread_mode mode, int deep)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int error;

	KERNEL_ASSERT_LOCKED();

	if ((error = single_thread_check(p, deep)))
		return error;

	switch (mode) {
	case SINGLE_SUSPEND:
	case SINGLE_PTRACE:
		break;
	case SINGLE_UNWIND:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
	case SINGLE_EXIT:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
		atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("single_thread_mode = %d", mode);
#endif
	}
	pr->ps_single = p;
	pr->ps_singlecount = 0;
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p)
			continue;
		if (q->p_flag & P_WEXIT) {
			if (mode == SINGLE_EXIT) {
				SCHED_LOCK(s);
				if (q->p_stat == SSTOP) {
					setrunnable(q);
					pr->ps_singlecount++;
				}
				SCHED_UNLOCK(s);
			}
			continue;
		}
		SCHED_LOCK(s);
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
		case SIDL:
		case SRUN:
			pr->ps_singlecount++;
			break;
		case SSLEEP:
			/* if it's not interruptible, then just have to wait */
			if (q->p_flag & P_SINTR) {
				/* merely need to suspend?  just stop it */
				if (mode == SINGLE_SUSPEND ||
				    mode == SINGLE_PTRACE) {
					q->p_stat = SSTOP;
					break;
				}
				/* need to unwind or exit, so wake it */
				setrunnable(q);
			}
			pr->ps_singlecount++;
			break;
		case SSTOP:
			if (mode == SINGLE_EXIT) {
				setrunnable(q);
				pr->ps_singlecount++;
			}
			break;
		case SDEAD:
			break;
		case SONPROC:
			pr->ps_singlecount++;
			signotify(q);
			break;
		}
		SCHED_UNLOCK(s);
	}

	if (mode != SINGLE_PTRACE)
		single_thread_wait(pr);

	return 0;
}
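
/*
 * The counting protocol: each thread that cannot be stopped in place
 * bumps ps_singlecount above and decrements it in single_thread_check()
 * once it reaches a stopping point; single_thread_wait() below sleeps
 * until the count drains to zero.  A typical caller (e.g. sigexit()
 * above) therefore does:
 *
 *	single_thread_set(p, SINGLE_SUSPEND, 0);
 *	... work while all sibling threads are parked ...
 *	single_thread_clear(p, 0);
 */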

void
single_thread_wait(struct process *pr)
{
	/* wait until they're all suspended */
	while (pr->ps_singlecount > 0)
		tsleep(&pr->ps_singlecount, PUSER, "suspend", 0);
}

void
single_thread_clear(struct proc *p, int flag)
{
	struct process *pr = p->p_p;
	struct proc *q;

	KASSERT(pr->ps_single == p);
	KERNEL_ASSERT_LOCKED();

	pr->ps_single = NULL;
	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		int s;

		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
			continue;
		atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);

		/*
		 * if the thread was only stopped for single threading
		 * then clearing that either makes it runnable or puts
		 * it back into some sleep queue
		 */
		SCHED_LOCK(s);
		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
			if (q->p_wchan == 0)
				setrunnable(q);
			else
				q->p_stat = SSLEEP;
		}
		SCHED_UNLOCK(s);
	}
}