/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 * $FreeBSD: src/sys/kern/kern_sig.c,v 1.72.2.17 2003/05/16 16:34:34 obrien Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/pioctl.h>
#include <sys/acct.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/unistd.h>
#include <sys/kern_syscall.h>
#include <sys/vkernel.h>

#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/smp.h>

static int	coredump(struct lwp *, int);
static char	*expand_name(const char *, uid_t, pid_t);
static int	dokillpg(int sig, int pgid, int all);
static int	sig_ffs(sigset_t *set);
static int	sigprop(int sig);
static void	lwp_signotify(struct lwp *lp);
static void	lwp_signotify_remote(void *arg);
static int	kern_sigtimedwait(sigset_t set, siginfo_t *info,
		    struct timespec *timeout);
static void	proc_stopwait(struct proc *p);

static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);

struct filterops sig_filtops =
	{ FILTEROP_MPSAFE, filt_sigattach, filt_sigdetach, filt_signal };

static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

/*
 * Can the current process send the signal sig to process q?
 */
#define CANSIGNAL(q, sig) \
	(!p_trespass(curproc->p_ucred, (q)->p_ucred) || \
	((sig) == SIGCONT && (q)->p_session == curproc->p_session))

/*
 * Policy -- Can real uid ruid with ucred uc send a signal to process q?
 */
#define CANSIGIO(ruid, uc, q) \
	((uc)->cr_uid == 0 || \
	    (ruid) == (q)->p_ucred->cr_ruid || \
	    (uc)->cr_uid == (q)->p_ucred->cr_ruid || \
	    (ruid) == (q)->p_ucred->cr_uid || \
	    (uc)->cr_uid == (q)->p_ucred->cr_uid)

int sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
	&sugid_coredump, 0, "Enable coredumping set user/group ID processes");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
	&do_coredump, 0, "Enable/Disable coredumps");
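
/*
 * Illustrative sketch (not compiled; the helper name is hypothetical):
 * how the CANSIGIO policy above evaluates for SIGIO/SIGURG delivery.
 * root always passes; otherwise the sender's real or effective uid must
 * match the target's real or effective uid.
 */
#if 0
static int
cansigio_example(struct sigio *sigio, struct proc *q)
{
	if (sigio->sio_ucred->cr_uid == 0)	/* root may always signal */
		return (1);
	return (sigio->sio_ruid == q->p_ucred->cr_ruid ||
	    sigio->sio_ucred->cr_uid == q->p_ucred->cr_ruid ||
	    sigio->sio_ruid == q->p_ucred->cr_uid ||
	    sigio->sio_ucred->cr_uid == q->p_ucred->cr_uid);
}
#endif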

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define	SA_KILL		0x01		/* terminates process by default */
#define	SA_CORE		0x02		/* ditto and coredumps */
#define	SA_STOP		0x04		/* suspend process */
#define	SA_TTYSTOP	0x08		/* ditto, from tty */
#define	SA_IGNORE	0x10		/* ignore by default */
#define	SA_CONT		0x20		/* continue if suspended */
#define	SA_CANTMASK	0x40		/* non-maskable, catchable */
#define	SA_CKPT		0x80		/* checkpoint process */

static int sigproptbl[NSIG] = {
	SA_KILL,		/* SIGHUP */
	SA_KILL,		/* SIGINT */
	SA_KILL|SA_CORE,	/* SIGQUIT */
	SA_KILL|SA_CORE,	/* SIGILL */
	SA_KILL|SA_CORE,	/* SIGTRAP */
	SA_KILL|SA_CORE,	/* SIGABRT */
	SA_KILL|SA_CORE,	/* SIGEMT */
	SA_KILL|SA_CORE,	/* SIGFPE */
	SA_KILL,		/* SIGKILL */
	SA_KILL|SA_CORE,	/* SIGBUS */
	SA_KILL|SA_CORE,	/* SIGSEGV */
	SA_KILL|SA_CORE,	/* SIGSYS */
	SA_KILL,		/* SIGPIPE */
	SA_KILL,		/* SIGALRM */
	SA_KILL,		/* SIGTERM */
	SA_IGNORE,		/* SIGURG */
	SA_STOP,		/* SIGSTOP */
	SA_STOP|SA_TTYSTOP,	/* SIGTSTP */
	SA_IGNORE|SA_CONT,	/* SIGCONT */
	SA_IGNORE,		/* SIGCHLD */
	SA_STOP|SA_TTYSTOP,	/* SIGTTIN */
	SA_STOP|SA_TTYSTOP,	/* SIGTTOU */
	SA_IGNORE,		/* SIGIO */
	SA_KILL,		/* SIGXCPU */
	SA_KILL,		/* SIGXFSZ */
	SA_KILL,		/* SIGVTALRM */
	SA_KILL,		/* SIGPROF */
	SA_IGNORE,		/* SIGWINCH */
	SA_IGNORE,		/* SIGINFO */
	SA_KILL,		/* SIGUSR1 */
	SA_KILL,		/* SIGUSR2 */
	SA_IGNORE,		/* SIGTHR */
	SA_CKPT,		/* SIGCKPT */
	SA_KILL|SA_CKPT,	/* SIGCKPTEXIT */
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
};

static __inline int
sigprop(int sig)
{

	if (sig > 0 && sig < NSIG)
		return (sigproptbl[_SIG_IDX(sig)]);
	return (0);
}

static __inline int
sig_ffs(sigset_t *set)
{
	int i;

	for (i = 0; i < _SIG_WORDS; i++)
		if (set->__bits[i])
			return (ffs(set->__bits[i]) + (i * 32));
	return (0);
}
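
/*
 * Illustrative sketch (not compiled; assumes the usual <sys/signalvar.h>
 * set macros): sig_ffs() returns the lowest pending signal number.
 * Signal sig occupies bit (sig - 1) of the set and ffs() is 1-based,
 * so the two offsets cancel out.
 */
#if 0
static void
sig_ffs_example(void)
{
	sigset_t set;

	SIGEMPTYSET(set);
	SIGADDSET(set, SIGUSR1);		/* signal 30, bit 29 */
	SIGADDSET(set, SIGTERM);		/* signal 15, bit 14 */
	KKASSERT(sig_ffs(&set) == SIGTERM);	/* lowest number wins */
}
#endif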

/*
 * No requirements.
 */
int
kern_sigaction(int sig, struct sigaction *act, struct sigaction *oact)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp;
	struct sigacts *ps = p->p_sigacts;

	if (sig <= 0 || sig > _SIG_MAXSIG)
		return (EINVAL);

	lwkt_gettoken(&p->p_token);

	if (oact) {
		oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		oact->sa_flags = 0;
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig))
			oact->sa_flags |= SA_SIGINFO;
		if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		/*
		 * Check for invalid requests.  KILL and STOP cannot be
		 * caught.
		 */
		if (sig == SIGKILL || sig == SIGSTOP) {
			if (act->sa_handler != SIG_DFL) {
				lwkt_reltoken(&p->p_token);
				return (EINVAL);
			}
		}

		/*
		 * Change setting atomically.
		 */
		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (act->sa_flags & SA_SIGINFO) {
			ps->ps_sigact[_SIG_IDX(sig)] =
				(__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!(act->sa_flags & SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (act->sa_flags & SA_ONSTACK)
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (act->sa_flags & SA_RESETHAND)
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (act->sa_flags & SA_NODEFER)
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				p->p_sigacts->ps_flag |= PS_NOCLDSTOP;
			else
				p->p_sigacts->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT;
				else
					p->p_sigacts->ps_flag |= PS_NOCLDWAIT;
			} else {
				p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT;
			}
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in p_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore. However, don't put SIGCONT in p_sigignore, as we
		 * have to restart the process.
		 *
		 * Also remove the signal from the process and lwp signal
		 * list.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SA_IGNORE &&
		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			SIGDELSET(p->p_siglist, sig);
			FOREACH_LWP_IN_PROC(lp, p) {
				spin_lock(&lp->lwp_spin);
				SIGDELSET(lp->lwp_siglist, sig);
				spin_unlock(&lp->lwp_spin);
			}
			if (sig != SIGCONT) {
				/* easier in ksignal */
				SIGADDSET(p->p_sigignore, sig);
			}
			SIGDELSET(p->p_sigcatch, sig);
		} else {
			SIGDELSET(p->p_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(p->p_sigcatch, sig);
			else
				SIGADDSET(p->p_sigcatch, sig);
		}
	}
	lwkt_reltoken(&p->p_token);
	return (0);
}

int
sys_sigaction(struct sigaction_args *uap)
{
	struct sigaction act, oact;
	struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(uap->sig, actp, oactp);
	if (oactp && !error) {
		error = copyout(oactp, uap->oact, sizeof(oact));
	}
	return (error);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct proc *p)
{
	int i;

	for (i = 1; i <= NSIG; i++)
		if (sigprop(i) & SA_IGNORE && i != SIGCONT)
			SIGADDSET(p->p_sigignore, i);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps = p->p_sigacts;
	struct lwp *lp;
	int sig;

	lp = ONLY_LWP_IN_PROC(p);

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (SIGNOTEMPTY(p->p_sigcatch)) {
		sig = sig_ffs(&p->p_sigcatch);
		SIGDELSET(p->p_sigcatch, sig);
		if (sigprop(sig) & SA_IGNORE) {
			if (sig != SIGCONT)
				SIGADDSET(p->p_sigignore, sig);
			SIGDELSET(p->p_siglist, sig);
			/* don't need spinlock */
			SIGDELSET(lp->lwp_siglist, sig);
		}
		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	}

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	lp->lwp_sigstk.ss_flags = SS_DISABLE;
	lp->lwp_sigstk.ss_size = 0;
	lp->lwp_sigstk.ss_sp = NULL;
	lp->lwp_flags &= ~LWP_ALTSTACK;
	/*
	 * Reset no zombies if child dies flag as Solaris does.
	 */
	p->p_sigacts->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
}
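
/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * how the SA_* flags interpreted by kern_sigaction() above are set from
 * user code via sigaction(2).
 */
#if 0
#include <signal.h>

static void
handler(int sig, siginfo_t *info, void *ctx)
{
	/* ... */
}

static void
install_handler(void)
{
	struct sigaction sa;

	sa.sa_sigaction = handler;
	sigemptyset(&sa.sa_mask);
	/*
	 * SA_RESTART clears the signal's ps_sigintr bit above;
	 * SA_SIGINFO sets ps_siginfo so a siginfo_t is delivered.
	 */
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigaction(SIGUSR1, &sa, NULL);
}
#endif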

/*
 * kern_sigprocmask() - Manipulate the signal mask.
 *
 * This routine is MP SAFE *ONLY* if p == curproc.
 */
int
kern_sigprocmask(int how, sigset_t *set, sigset_t *oset)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	int error;

	lwkt_gettoken(&p->p_token);

	if (oset != NULL)
		*oset = lp->lwp_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			SIG_CANTMASK(*set);
			SIGSETOR(lp->lwp_sigmask, *set);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(lp->lwp_sigmask, *set);
			break;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			lp->lwp_sigmask = *set;
			break;
		default:
			error = EINVAL;
			break;
		}
	}

	lwkt_reltoken(&p->p_token);

	return (error);
}

/*
 * sigprocmask()
 *
 * MPSAFE
 */
int
sys_sigprocmask(struct sigprocmask_args *uap)
{
	sigset_t set, oset;
	sigset_t *setp, *osetp;
	int error;

	setp = (uap->set != NULL) ? &set : NULL;
	osetp = (uap->oset != NULL) ? &oset : NULL;
	if (setp) {
		error = copyin(uap->set, setp, sizeof(set));
		if (error)
			return (error);
	}
	error = kern_sigprocmask(uap->how, setp, osetp);
	if (osetp && !error) {
		error = copyout(osetp, uap->oset, sizeof(oset));
	}
	return (error);
}

/*
 * MPSAFE
 */
int
kern_sigpending(struct __sigset *set)
{
	struct lwp *lp = curthread->td_lwp;

	*set = lwp_sigpend(lp);

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigpending(struct sigpending_args *uap)
{
	sigset_t set;
	int error;

	error = kern_sigpending(&set);

	if (error == 0)
		error = copyout(&set, uap->set, sizeof(set));
	return (error);
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.
 *
 * MPSAFE
 */
int
kern_sigsuspend(struct __sigset *set)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	struct sigacts *ps = p->p_sigacts;

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	lp->lwp_oldsigmask = lp->lwp_sigmask;
	lp->lwp_flags |= LWP_OLDMASK;

	SIG_CANTMASK(*set);
	lp->lwp_sigmask = *set;
	while (tsleep(ps, PCATCH, "pause", 0) == 0)
		/* void */;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
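
/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * blocking a signal around a critical region, which maps onto the
 * SIG_BLOCK/SIG_SETMASK cases of kern_sigprocmask() above.
 */
#if 0
#include <signal.h>

static void
block_sigint_region(void)
{
	sigset_t set, oset;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigprocmask(SIG_BLOCK, &set, &oset);	/* lwp_sigmask |= set */
	/* ... critical region ... */
	sigprocmask(SIG_SETMASK, &oset, NULL);	/* restore saved mask */
}
#endif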

/*
 * Note nonstandard calling convention: libc stub passes mask, not
 * pointer, to save a copyin.
 *
 * MPSAFE
 */
int
sys_sigsuspend(struct sigsuspend_args *uap)
{
	sigset_t mask;
	int error;

	error = copyin(uap->sigmask, &mask, sizeof(mask));
	if (error)
		return (error);

	error = kern_sigsuspend(&mask);

	return (error);
}

/*
 * MPSAFE
 */
int
kern_sigaltstack(struct sigaltstack *ss, struct sigaltstack *oss)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;

	if ((lp->lwp_flags & LWP_ALTSTACK) == 0)
		lp->lwp_sigstk.ss_flags |= SS_DISABLE;

	if (oss)
		*oss = lp->lwp_sigstk;

	if (ss) {
		if (ss->ss_flags & ~SS_DISABLE)
			return (EINVAL);
		if (ss->ss_flags & SS_DISABLE) {
			if (lp->lwp_sigstk.ss_flags & SS_ONSTACK)
				return (EPERM);
			lp->lwp_flags &= ~LWP_ALTSTACK;
			lp->lwp_sigstk.ss_flags = ss->ss_flags;
		} else {
			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
				return (ENOMEM);
			lp->lwp_flags |= LWP_ALTSTACK;
			lp->lwp_sigstk = *ss;
		}
	}

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigaltstack(struct sigaltstack_args *uap)
{
	stack_t ss, oss;
	int error;

	if (uap->ss) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}

	error = kern_sigaltstack(uap->ss ? &ss : NULL,
				 uap->oss ? &oss : NULL);

	if (error == 0 && uap->oss)
		error = copyout(&oss, uap->oss, sizeof(*uap->oss));
	return (error);
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
struct killpg_info {
	int nfound;
	int sig;
};

static int killpg_all_callback(struct proc *p, void *data);

static int
dokillpg(int sig, int pgid, int all)
{
	struct killpg_info info;
	struct proc *cp = curproc;
	struct proc *p;
	struct pgrp *pgrp;

	info.nfound = 0;
	info.sig = sig;

	if (all) {
		/*
		 * broadcast
		 */
		allproc_scan(killpg_all_callback, &info);
	} else {
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
			pgref(pgrp);
		} else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}

		/*
		 * Must interlock all signals against fork
		 */
		lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 ||
			    p->p_stat == SZOMB ||
			    (p->p_flags & P_SYSTEM) ||
			    !CANSIGNAL(p, sig)) {
				continue;
			}
			++info.nfound;
			if (sig)
				ksignal(p, sig);
		}
		lockmgr(&pgrp->pg_lock, LK_RELEASE);
		pgrel(pgrp);
	}
	return (info.nfound ? 0 : ESRCH);
}

static int
killpg_all_callback(struct proc *p, void *data)
{
	struct killpg_info *info = data;

	if (p->p_pid <= 1 || (p->p_flags & P_SYSTEM) ||
	    p == curproc || !CANSIGNAL(p, info->sig)) {
		return (0);
	}
	++info->nfound;
	if (info->sig)
		ksignal(p, info->sig);
	return (0);
}

/*
 * Send a general signal to a process or LWPs within that process.
 *
 * Note that new signals cannot be sent if a process is exiting or already
 * a zombie, but we return success anyway as userland is likely to not handle
 * the race properly.
 *
 * No requirements.
 */
int
kern_kill(int sig, pid_t pid, lwpid_t tid)
{
	int t;

	if ((u_int)sig > _SIG_MAXSIG)
		return (EINVAL);

	if (pid > 0) {
		struct proc *p;
		struct lwp *lp = NULL;

		/*
		 * Send a signal to a single process.  If the kill() is
		 * racing an exiting process which has not yet been reaped
		 * act as though the signal was delivered successfully but
		 * don't actually try to deliver the signal.
		 */
		if ((p = pfind(pid)) == NULL) {
			if ((p = zpfind(pid)) == NULL)
				return (ESRCH);
			PRELE(p);
			return (0);
		}
		lwkt_gettoken(&p->p_token);
		if (!CANSIGNAL(p, sig)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (EPERM);
		}

		/*
		 * NOP if the process is exiting.  Note that lwpsignal() is
		 * called directly with P_WEXIT set to kill individual LWPs
		 * during exit, which is allowed.
		 */
		if (p->p_flags & P_WEXIT) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (0);
		}
		if (tid != -1) {
			lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, tid);
			if (lp == NULL) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return (ESRCH);
			}
		}
		if (sig)
			lwpsignal(p, lp, sig);
		lwkt_reltoken(&p->p_token);
		PRELE(p);

		return (0);
	}

	/*
	 * If we come here, pid is a special broadcast pid.
	 * This doesn't mix with a tid.
	 */
	if (tid != -1)
		return (EINVAL);

	switch (pid) {
	case -1:		/* broadcast signal */
		t = (dokillpg(sig, 0, 1));
		break;
	case 0:			/* signal own process group */
		t = (dokillpg(sig, 0, 0));
		break;
	default:		/* negative explicit process group */
		t = (dokillpg(sig, -pid, 0));
		break;
	}
	return t;
}

int
sys_kill(struct kill_args *uap)
{
	int error;

	error = kern_kill(uap->signum, uap->pid, -1);
	return (error);
}

int
sys_lwp_kill(struct lwp_kill_args *uap)
{
	int error;
	pid_t pid = uap->pid;

	/*
	 * A tid is mandatory for lwp_kill(), otherwise
	 * you could simply use kill().
	 */
	if (uap->tid == -1)
		return (EINVAL);

	/*
	 * To save on a getpid() function call for intra-process
	 * signals, pid == -1 means current process.
	 */
	if (pid == -1)
		pid = curproc->p_pid;

	error = kern_kill(uap->signum, pid, uap->tid);
	return (error);
}

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int sig)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		pgsignal(pgrp, sig, 0);
}

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 *
 * pg_lock interlocks against a fork that might be in progress, to
 * ensure that the new child process picks up the signal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	struct proc *p;

	/*
	 * Must interlock all signals against fork
	 */
	if (pgrp) {
		pgref(pgrp);
		lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (checkctty == 0 || p->p_flags & P_CONTROLT)
				ksignal(p, sig);
		}
		lockmgr(&pgrp->pg_lock, LK_RELEASE);
		pgrel(pgrp);
	}
}
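
/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * the pid encodings kern_kill() above accepts via kill(2).
 */
#if 0
#include <signal.h>

static void
kill_encodings_example(void)
{
	kill(1234, SIGTERM);	/* pid > 0: that single process */
	kill(0, SIGTERM);	/* pid == 0: caller's process group */
	kill(-1, SIGTERM);	/* pid == -1: broadcast */
	kill(-500, SIGTERM);	/* pid < -1: process group 500 */
}
#endif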

/*
 * Send a signal caused by a trap to the current lwp.  If it will be caught
 * immediately, deliver it with correct code.  Otherwise, post it normally.
 *
 * These signals may ONLY be delivered to the specified lwp and may never
 * be delivered to the process generically.
 */
void
trapsignal(struct lwp *lp, int sig, u_long code)
{
	struct proc *p = lp->lwp_proc;
	struct sigacts *ps = p->p_sigacts;

	/*
	 * If we are a virtual kernel running an emulated user process
	 * context, switch back to the virtual kernel context before
	 * trying to post the signal.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		struct trapframe *tf = lp->lwp_md.md_regs;
		tf->tf_trapno = 0;
		vkernel_trap(lp, tf);
	}

	if ((p->p_flags & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) &&
	    !SIGISMEMBER(lp->lwp_sigmask, sig)) {
		lp->lwp_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
			ktrpsig(lp, sig, ps->ps_sigact[_SIG_IDX(sig)],
				&lp->lwp_sigmask, code);
#endif
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
					   &lp->lwp_sigmask, code);
		SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(lp->lwp_sigmask, sig);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(p->p_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(p->p_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
	} else {
		lp->lwp_code = code;	/* XXX for core dump/debugger */
		lp->lwp_sig = sig;	/* XXX to verify code */
		lwpsignal(p, lp, sig);
	}
}

/*
 * Find a suitable lwp to deliver the signal to.  Returns NULL if all
 * lwps hold the signal blocked.
 *
 * Caller must hold p->p_token.
 *
 * Returns a lp or NULL.  If non-NULL the lp is held and its token is
 * acquired.
 */
static struct lwp *
find_lwp_for_signal(struct proc *p, int sig)
{
	struct lwp *lp;
	struct lwp *run, *sleep, *stop;

	/*
	 * If the running/preempted thread belongs to the proc to which
	 * the signal is being delivered and this thread does not block
	 * the signal, then we can avoid a context switch by delivering
	 * the signal to this thread, because it will return to userland
	 * soon anyways.
	 */
	lp = lwkt_preempted_proc();
	if (lp != NULL && lp->lwp_proc == p) {
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);
		if (!SIGISMEMBER(lp->lwp_sigmask, sig)) {
			/* return w/ token held */
			return (lp);
		}
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}

	run = sleep = stop = NULL;
	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * If the signal is being blocked by the lwp, then this
		 * lwp is not eligible for receiving the signal.
		 */
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);

		if (SIGISMEMBER(lp->lwp_sigmask, sig)) {
			lwkt_reltoken(&lp->lwp_token);
			LWPRELE(lp);
			continue;
		}

		switch (lp->lwp_stat) {
		case LSRUN:
			if (sleep) {
				lwkt_token_swap();
				lwkt_reltoken(&sleep->lwp_token);
				LWPRELE(sleep);
				sleep = NULL;
				run = lp;
			} else if (stop) {
				lwkt_token_swap();
				lwkt_reltoken(&stop->lwp_token);
				LWPRELE(stop);
				stop = NULL;
				run = lp;
			} else {
				run = lp;
			}
			break;
		case LSSLEEP:
			if (lp->lwp_flags & LWP_SINTR) {
				if (sleep) {
					lwkt_reltoken(&lp->lwp_token);
					LWPRELE(lp);
				} else if (stop) {
					lwkt_token_swap();
					lwkt_reltoken(&stop->lwp_token);
					LWPRELE(stop);
					stop = NULL;
					sleep = lp;
				} else {
					sleep = lp;
				}
			} else {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			}
			break;
		case LSSTOP:
			if (sleep) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			} else if (stop) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			} else {
				stop = lp;
			}
			break;
		}
		if (run)
			break;
	}

	if (run != NULL)
		return (run);
	else if (sleep != NULL)
		return (sleep);
	else
		return (stop);
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * If the caller wishes to call this function from a hard code section the
 * caller must already hold p->p_token (see kern_clock.c).
 *
 * No requirements.
 */
void
ksignal(struct proc *p, int sig)
{
	lwpsignal(p, NULL, sig);
}

/*
 * The core for ksignal.  lp may be NULL, then a suitable thread
 * will be chosen.  If not, lp MUST be a member of p.
 *
 * If the caller wishes to call this function from a hard code section the
 * caller must already hold p->p_token.
 *
 * No requirements.
 */
void
lwpsignal(struct proc *p, struct lwp *lp, int sig)
{
	struct proc *q;
	sig_t action;
	int prop;

	if (sig > _SIG_MAXSIG || sig <= 0) {
		kprintf("lwpsignal: signal %d\n", sig);
		panic("lwpsignal signal number");
	}

	KKASSERT(lp == NULL || lp->lwp_proc == p);

	/*
	 * We don't want to race... well, all sorts of things.  Get appropriate
	 * tokens.
	 *
	 * Don't try to deliver a generic signal to an exiting process,
	 * the signal structures could be in flux.  We check the LWP later
	 * on.
	 */
	PHOLD(p);
	lwkt_gettoken(&p->p_token);
	if (lp) {
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);
	} else if (p->p_flags & P_WEXIT) {
		goto out;
	}

	prop = sigprop(sig);

	/*
	 * If proc is traced, always give parent a chance;
	 * if signal event is tracked by procfs, give *that*
	 * a chance, as well.
	 */
	if ((p->p_flags & P_TRACED) || (p->p_stops & S_SIG)) {
		action = SIG_DFL;
	} else {
		/*
		 * Do not try to deliver signals to an exiting lwp.  Note
		 * that we must still deliver the signal if P_WEXIT is set
		 * in the process flags.
		 */
		if (lp && (lp->lwp_mpflags & LWP_MP_WEXIT)) {
			if (lp) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return;
		}

		/*
		 * If the signal is being ignored, then we forget about
		 * it immediately.  NOTE: We don't set SIGCONT in p_sigignore,
		 * and if it is set to SIG_IGN, action will be SIG_DFL here.
		 */
		if (SIGISMEMBER(p->p_sigignore, sig)) {
			/*
			 * Even if a signal is set SIG_IGN, it may still be
			 * lurking in a kqueue.
			 */
			KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
			if (lp) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return;
		}
		if (SIGISMEMBER(p->p_sigcatch, sig))
			action = SIG_CATCH;
		else
			action = SIG_DFL;
	}

	/*
	 * If continuing, clear any pending STOP signals.
	 */
	if (prop & SA_CONT)
		SIG_STOPSIGMASK(p->p_siglist);

	if (prop & SA_STOP) {
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
		    action == SIG_DFL) {
			if (lp) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return;
		}
		SIG_CONTSIGMASK(p->p_siglist);
		p->p_flags &= ~P_CONTINUED;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * Nobody can handle this signal, add it to the lwp or
		 * process pending list
		 */
		if (lp) {
			spin_lock(&lp->lwp_spin);
			SIGADDSET(lp->lwp_siglist, sig);
			spin_unlock(&lp->lwp_spin);
		} else {
			SIGADDSET(p->p_siglist, sig);
		}

		/*
		 * If the process is stopped and is being traced, then no
		 * further action is necessary.
		 */
		if (p->p_flags & P_TRACED)
			goto out;

		/*
		 * If the process is stopped and receives a KILL signal,
		 * make the process runnable.
		 */
		if (sig == SIGKILL) {
			proc_unstop(p, SSTOP);
			goto active_process;
		}

		/*
		 * If the process is stopped and receives a CONT signal,
		 * then try to make the process runnable again.
		 */
		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.
			 *
			 * XXX what if the signal is being held blocked?
			 *
			 * Token required to interlock kern_wait().
			 * Reparenting can also cause a race so we have to
			 * hold (q).
			 */
			q = p->p_pptr;
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			p->p_flags |= P_CONTINUED;
			wakeup(q);
			if (action == SIG_DFL)
				SIGDELSET(p->p_siglist, sig);
			proc_unstop(p, SSTOP);
			lwkt_reltoken(&q->p_token);
			PRELE(q);
			if (action == SIG_CATCH)
				goto active_process;
			goto out;
		}

		/*
		 * If the process is stopped and receives another STOP
		 * signal, we do not need to stop it again.  If we did
		 * the shell could get confused.
		 *
		 * However, if the current/preempted lwp is part of the
		 * process receiving the signal, we need to keep it,
		 * so that this lwp can stop in issignal() later, as
		 * we don't want to wait until it reaches userret!
		 */
		if (prop & SA_STOP) {
			if (lwkt_preempted_proc() == NULL ||
			    lwkt_preempted_proc()->lwp_proc != p)
				SIGDELSET(p->p_siglist, sig);
		}

		/*
		 * Otherwise the process is stopped and it received some
		 * signal, which does not change its stopped state.  When
		 * the process is continued a wakeup(p) will be issued which
		 * will wakeup any threads sleeping in tstop().
		 */
		if (lp == NULL) {
			/* NOTE: returns lp w/ token held */
			lp = find_lwp_for_signal(p, sig);
		}
		goto out;

		/* NOTREACHED */
	}
	/* else not stopped */
active_process:

	/*
	 * Never deliver a lwp-specific signal to a random lwp.
	 */
	if (lp == NULL) {
		/* NOTE: returns lp w/ token held */
		lp = find_lwp_for_signal(p, sig);
		if (lp) {
			if (SIGISMEMBER(lp->lwp_sigmask, sig)) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
				lp = NULL;
			}
		}
	}

	/*
	 * Deliver to the process generically if (1) the signal is being
	 * sent to any thread or (2) we could not find a thread to deliver
	 * it to.
	 */
	if (lp == NULL) {
		SIGADDSET(p->p_siglist, sig);
		goto out;
	}

	/*
	 * Deliver to a specific LWP whether it masks it or not.  It will
	 * not be dispatched if masked but we must still deliver it.
	 */
	if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
	    (p->p_flags & P_TRACED) == 0) {
		p->p_nice = NZERO;
	}

	/*
	 * If the process receives a STOP signal which indeed needs to
	 * stop the process, do so.  If the process chose to catch the
	 * signal, it will be treated like any other signal.
	 */
	if ((prop & SA_STOP) && action == SIG_DFL) {
		/*
		 * If a child holding parent blocked, stopping
		 * could cause deadlock.  Take no action at this
		 * time.
		 */
		if (p->p_flags & P_PPWAIT) {
			SIGADDSET(p->p_siglist, sig);
			goto out;
		}

		/*
		 * Do not actually try to manipulate the process, but simply
		 * stop it.  Lwps will stop as soon as they safely can.
		 *
		 * Ignore stop if the process is exiting.
		 */
		if ((p->p_flags & P_WEXIT) == 0) {
			p->p_xstat = sig;
			proc_stop(p, SSTOP);
		}
		goto out;
	}

	/*
	 * If it is a CONT signal with default action, just ignore it.
	 */
	if ((prop & SA_CONT) && action == SIG_DFL)
		goto out;

	/*
	 * Mark signal pending at this specific thread.
	 */
	spin_lock(&lp->lwp_spin);
	SIGADDSET(lp->lwp_siglist, sig);
	spin_unlock(&lp->lwp_spin);

	lwp_signotify(lp);

out:
	if (lp) {
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);
}

/*
 * Notify the LWP that a signal has arrived.  The LWP does not have to be
 * sleeping on the current cpu.
 *
 * p->p_token and lp->lwp_token must be held on call.
 *
 * We can only safely schedule the thread on its current cpu and only if
 * one of the SINTR flags is set.  If an SINTR flag is set AND we are on
 * the correct cpu we are properly interlocked, otherwise we could be
 * racing other thread transition states (or the lwp is on the user scheduler
 * runq but not scheduled) and must not do anything.
 *
 * Since we hold the lwp token we know the lwp cannot be ripped out from
 * under us so we can safely hold it to prevent it from being ripped out
 * from under us if we are forced to IPI another cpu to make the local
 * checks there.
 *
 * Adjustment of lp->lwp_stat can only occur when we hold the lwp_token,
 * which we won't in an IPI so any fixups have to be done here, effectively
 * replicating part of what setrunnable() does.
 */
static void
lwp_signotify(struct lwp *lp)
{
	thread_t dtd;

	ASSERT_LWKT_TOKEN_HELD(&lp->lwp_proc->p_token);
	dtd = lp->lwp_thread;

	crit_enter();
	if (lp == lwkt_preempted_proc()) {
		/*
		 * lwp is on the current cpu AND it is currently running
		 * (we preempted it).
		 */
		signotify();
	} else if (lp->lwp_flags & LWP_SINTR) {
		/*
		 * lwp is sitting in tsleep() with PCATCH set
		 */
		if (dtd->td_gd == mycpu) {
			setrunnable(lp);
		} else {
			/*
			 * We can only adjust lwp_stat while we hold the
			 * lwp_token, and we won't in the IPI function.
			 */
			LWPHOLD(lp);
			if (lp->lwp_stat == LSSTOP)
				lp->lwp_stat = LSSLEEP;
			lwkt_send_ipiq(dtd->td_gd, lwp_signotify_remote, lp);
		}
	} else if (dtd->td_flags & TDF_SINTR) {
		/*
		 * lwp is sitting in lwkt_sleep() with PCATCH set.
		 */
		if (dtd->td_gd == mycpu) {
			setrunnable(lp);
		} else {
			/*
			 * We can only adjust lwp_stat while we hold the
			 * lwp_token, and we won't in the IPI function.
			 */
			LWPHOLD(lp);
			if (lp->lwp_stat == LSSTOP)
				lp->lwp_stat = LSSLEEP;
			lwkt_send_ipiq(dtd->td_gd, lwp_signotify_remote, lp);
		}
	} else {
		/*
		 * Otherwise the lwp is either in some uninterruptible state
		 * or it is on the userland scheduler's runqueue waiting to
		 * be scheduled to a cpu, or it is running in userland.  We
		 * generally want to send an IPI so a running target gets the
		 * signal ASAP, otherwise a scheduler-tick worth of latency
		 * will occur.
		 *
		 * Issue an IPI to the remote cpu to knock it into the kernel,
		 * remote cpu will issue the cpu-local signotify() if the IPI
		 * preempts the desired thread.
		 */
		if (dtd->td_gd != mycpu) {
			LWPHOLD(lp);
			lwkt_send_ipiq(dtd->td_gd, lwp_signotify_remote, lp);
		}
	}
	crit_exit();
}

/*
 * This function is called via an IPI so we cannot call setrunnable() here
 * (because while we hold the lp we don't own its token, and can't get it
 * from an IPI).
 *
 * We are interlocked by virtue of being on the same cpu as the target.
 * If we still are and LWP_SINTR or TDF_SINTR is set we can safely schedule
 * the target thread.
 */
static void
lwp_signotify_remote(void *arg)
{
	struct lwp *lp = arg;
	thread_t td = lp->lwp_thread;

	if (lp == lwkt_preempted_proc()) {
		signotify();
		LWPRELE(lp);
	} else if (td->td_gd == mycpu) {
		if ((lp->lwp_flags & LWP_SINTR) ||
		    (td->td_flags & TDF_SINTR)) {
			lwkt_schedule(td);
		}
		LWPRELE(lp);
	} else {
		lwkt_send_ipiq(td->td_gd, lwp_signotify_remote, lp);
		/* LWPHOLD() is forwarded to the target cpu */
	}
}

/*
 * Caller must hold p->p_token
 */
void
proc_stop(struct proc *p, int sig)
{
	struct proc *q;
	struct lwp *lp;

	ASSERT_LWKT_TOKEN_HELD(&p->p_token);

	/*
	 * If somebody raced us, be happy with it.  SCORE overrides SSTOP.
	 */
	if (sig == SCORE) {
		if (p->p_stat == SCORE || p->p_stat == SZOMB)
			return;
	} else {
		if (p->p_stat == SSTOP || p->p_stat == SCORE ||
		    p->p_stat == SZOMB) {
			return;
		}
	}
	p->p_stat = sig;

	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);

		switch (lp->lwp_stat) {
		case LSSTOP:
			/*
			 * Do nothing, we are already counted in
			 * p_nstopped.
			 */
			break;

		case LSSLEEP:
			/*
			 * We're sleeping, but we will stop before
			 * returning to userspace, so count us
			 * as stopped as well.  We set LWP_MP_WSTOP
			 * to signal the lwp that it should not
			 * increase p_nstopped when reaching tstop().
			 *
			 * LWP_MP_WSTOP is protected by lp->lwp_token.
			 */
			if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
				atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
				++p->p_nstopped;
			}
			break;

		case LSRUN:
			/*
			 * We might notify ourself, but that's not
			 * a problem.
			 */
			lwp_signotify(lp);
			break;
		}
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}

	if (p->p_nstopped == p->p_nthreads) {
		/*
		 * Token required to interlock kern_wait().  Reparenting can
		 * also cause a race so we have to hold (q).
		 */
		q = p->p_pptr;
		PHOLD(q);
		lwkt_gettoken(&q->p_token);
		p->p_flags &= ~P_WAITED;
		wakeup(q);
		if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
			ksignal(p->p_pptr, SIGCHLD);
		lwkt_reltoken(&q->p_token);
		PRELE(q);
	}
}

/*
 * Caller must hold p_token
 */
void
proc_unstop(struct proc *p, int sig)
{
	struct lwp *lp;

	ASSERT_LWKT_TOKEN_HELD(&p->p_token);

	if (p->p_stat != sig)
		return;

	p->p_stat = SACTIVE;

	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);

		switch (lp->lwp_stat) {
		case LSRUN:
			/*
			 * Uh?  Not stopped?  Well, I guess that's okay.
			 */
			if (bootverbose)
				kprintf("proc_unstop: lwp %d/%d not sleeping\n",
					p->p_pid, lp->lwp_tid);
			break;

		case LSSLEEP:
			/*
			 * Still sleeping.  Don't bother waking it up.
			 * However, if this thread was counted as
			 * stopped, undo this.
			 *
			 * Nevertheless we call setrunnable() so that it
			 * will wake up in case a signal or timeout arrived
			 * in the meantime.
			 *
			 * LWP_MP_WSTOP is protected by lp->lwp_token.
			 */
			if (lp->lwp_mpflags & LWP_MP_WSTOP) {
				atomic_clear_int(&lp->lwp_mpflags,
						 LWP_MP_WSTOP);
				--p->p_nstopped;
			} else {
				if (bootverbose)
					kprintf("proc_unstop: lwp %d/%d sleeping, not stopped\n",
						p->p_pid, lp->lwp_tid);
			}
			/* FALLTHROUGH */

		case LSSTOP:
			/*
			 * This handles any lwps waiting in a tsleep with
			 * SIGCATCH.
			 */
			lwp_signotify(lp);
			break;

		}
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}

	/*
	 * This handles any lwps waiting in tstop().  We have interlocked
	 * the setting of p_stat by acquiring and releasing each lwp's
	 * token.
	 */
	wakeup(p);
}

/*
 * Wait for all threads except the current thread to stop.
 */
static void
proc_stopwait(struct proc *p)
{
	while ((p->p_stat == SSTOP || p->p_stat == SCORE) &&
	       p->p_nstopped < p->p_nthreads - 1) {
		tsleep_interlock(&p->p_nstopped, 0);
		if (p->p_nstopped < p->p_nthreads - 1) {
			tsleep(&p->p_nstopped, PINTERLOCKED, "stopwt", hz);
		}
	}
}

/*
 * No requirements.
 */
static int
kern_sigtimedwait(sigset_t waitset, siginfo_t *info, struct timespec *timeout)
{
	sigset_t savedmask, set;
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	int error, sig, hz, timevalid = 0;
	struct timespec rts, ets, ts;
	struct timeval tv;

	error = 0;
	sig = 0;
	ets.tv_sec = 0;		/* silence compiler warning */
	ets.tv_nsec = 0;	/* silence compiler warning */
	SIG_CANTMASK(waitset);
	savedmask = lp->lwp_sigmask;

	if (timeout) {
		if (timeout->tv_sec >= 0 && timeout->tv_nsec >= 0 &&
		    timeout->tv_nsec < 1000000000) {
			timevalid = 1;
			getnanouptime(&rts);
			ets = rts;
			timespecadd(&ets, timeout);
		}
	}

	for (;;) {
		set = lwp_sigpend(lp);
		SIGSETAND(set, waitset);
		if ((sig = sig_ffs(&set)) != 0) {
			SIGFILLSET(lp->lwp_sigmask);
			SIGDELSET(lp->lwp_sigmask, sig);
			SIG_CANTMASK(lp->lwp_sigmask);
			sig = issignal(lp, 1);
			/*
			 * It may be a STOP signal, in which case issignal
			 * returns 0 because we may stop there and a new
			 * signal can come in.  Restart if we got nothing.
			 */
			if (sig == 0)
				continue;
			else
				break;
		}

		/*
		 * Previous checking got nothing, and we retried but still
		 * got nothing, we should return the error status.
		 */
		if (error)
			break;

		/*
		 * POSIX says this must be checked after looking for pending
		 * signals.
		 */
		if (timeout) {
			if (timevalid == 0) {
				error = EINVAL;
				break;
			}
			getnanouptime(&rts);
			if (timespeccmp(&rts, &ets, >=)) {
				error = EAGAIN;
				break;
			}
			ts = ets;
			timespecsub(&ts, &rts);
			TIMESPEC_TO_TIMEVAL(&tv, &ts);
			hz = tvtohz_high(&tv);
		} else {
			hz = 0;
		}

		lp->lwp_sigmask = savedmask;
		SIGSETNAND(lp->lwp_sigmask, waitset);
		/*
		 * We won't ever be woken up.  Instead, our sleep will
		 * be broken in lwpsignal().
		 */
		error = tsleep(&p->p_sigacts, PCATCH, "sigwt", hz);
		if (timeout) {
			if (error == ERESTART) {
				/* can not restart a timeout wait. */
				error = EINTR;
			} else if (error == EAGAIN) {
				/* we will recalculate the timeout ourselves */
				error = 0;
			}
		}
		/* Retry ... */
	}

	lp->lwp_sigmask = savedmask;
	if (sig) {
		error = 0;
		bzero(info, sizeof(*info));
		info->si_signo = sig;
		spin_lock(&lp->lwp_spin);
		lwp_delsig(lp, sig);	/* take the signal! */
		spin_unlock(&lp->lwp_spin);

		if (sig == SIGKILL) {
			sigexit(lp, sig);
			/* NOT REACHED */
		}
	}

	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_sigtimedwait(struct sigtimedwait_args *uap)
{
	struct timespec ts;
	struct timespec *timeout;
	sigset_t set;
	siginfo_t info;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		timeout = &ts;
	} else {
		timeout = NULL;
	}
	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);
	error = kern_sigtimedwait(set, &info, timeout);
	if (error)
		return (error);
	if (uap->info)
		error = copyout(&info, uap->info, sizeof(info));
	/*
	 * Repost if we got an error.
	 *
	 * XXX lwp
	 *
	 * This could transform a thread-specific signal to another
	 * thread / process pending signal.
	 */
	if (error) {
		ksignal(curproc, info.si_signo);
	} else {
		uap->sysmsg_result = info.si_signo;
	}
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_sigwaitinfo(struct sigwaitinfo_args *uap)
{
	siginfo_t info;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);
	error = kern_sigtimedwait(set, &info, NULL);
	if (error)
		return (error);
	if (uap->info)
		error = copyout(&info, uap->info, sizeof(info));
	/*
	 * Repost if we got an error.
	 *
	 * XXX lwp
	 *
	 * This could transform a thread-specific signal to another
	 * thread / process pending signal.
	 */
	if (error) {
		ksignal(curproc, info.si_signo);
	} else {
		uap->sysmsg_result = info.si_signo;
	}
	return (error);
}

/*
 * If the current process has received a signal that would interrupt a
 * system call, return EINTR or ERESTART as appropriate.
 */
int
iscaught(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	if (p) {
		if ((sig = CURSIG(lp)) != 0) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (EWOULDBLOCK);
}
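
/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * synchronous signal handling via the sigtimedwait(2) path implemented
 * by kern_sigtimedwait() above.  The signal is blocked first so it stays
 * pending instead of being dispatched to a handler.
 */
#if 0
#include <signal.h>

static int
wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 5, 0 };		/* five second timeout */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	return (sigtimedwait(&set, &info, &ts));  /* -1/EAGAIN on timeout */
}
#endif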

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro).
 *
 * This routine is called via CURSIG/__cursig.  We will acquire and release
 * p->p_token but if the caller needs to interlock the test the caller must
 * also hold p->p_token.
 *
 *	while (sig = CURSIG(curproc))
 *		postsig(sig);
 *
 * MPSAFE
 */
int
issignal(struct lwp *lp, int maytrace)
{
	struct proc *p = lp->lwp_proc;
	sigset_t mask;
	int sig, prop;

	lwkt_gettoken(&p->p_token);

	for (;;) {
		int traced = (p->p_flags & P_TRACED) || (p->p_stops & S_SIG);

		/*
		 * If this process is supposed to stop, stop this thread.
		 */
		if (p->p_stat == SSTOP || p->p_stat == SCORE)
			tstop();

		mask = lwp_sigpend(lp);
		SIGSETNAND(mask, lp->lwp_sigmask);
		if (p->p_flags & P_PPWAIT)
			SIG_STOPSIGMASK(mask);
		if (SIGISEMPTY(mask)) {		/* no signal to send */
			lwkt_reltoken(&p->p_token);
			return (0);
		}
		sig = sig_ffs(&mask);

		STOPEVENT(p, S_SIG, sig);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) {
			spin_lock(&lp->lwp_spin);
			lwp_delsig(lp, sig);
			spin_unlock(&lp->lwp_spin);
			continue;
		}
		if (maytrace &&
		    (p->p_flags & P_TRACED) &&
		    (p->p_flags & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay stopped until
			 * released by the parent.
			 *
			 * NOTE: SSTOP may get cleared during the loop,
			 * but we do not re-notify the parent if we have
			 * to loop several times waiting for the parent
			 * to let us continue.
			 *
			 * XXX not sure if this is still true
			 */
			p->p_xstat = sig;
			proc_stop(p, SSTOP);
			do {
				tstop();
			} while (!trace_req(p) && (p->p_flags & P_TRACED));

			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			spin_lock(&lp->lwp_spin);
			lwp_delsig(lp, sig);	/* clear old signal */
			spin_unlock(&lp->lwp_spin);
			sig = p->p_xstat;
			if (sig == 0)
				continue;

			/*
			 * Put the new signal into p_siglist.  If the
			 * signal is being masked, look for other signals.
			 *
			 * XXX lwp might need a call to ksignal()
			 */
			SIGADDSET(p->p_siglist, sig);
			if (SIGISMEMBER(lp->lwp_sigmask, sig))
				continue;

			/*
			 * If the traced bit got turned off, go back up
			 * to the top to rescan signals.  This ensures
			 * that p_sig* and ps_sigact are consistent.
			 */
			if ((p->p_flags & P_TRACED) == 0)
				continue;
		}

		prop = sigprop(sig);

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
		case (intptr_t)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				kprintf("Process (pid %lu) got signal %d\n",
					(u_long)p->p_pid, sig);
#endif
				break;		/* == ignore */
			}

			/*
			 * Handle the in-kernel checkpoint action
			 */
			if (prop & SA_CKPT) {
				checkpoint_signal_handler(lp);
				break;
			}

			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flags & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				     prop & SA_TTYSTOP))
					break;	/* == ignore */
				if ((p->p_flags & P_WEXIT) == 0) {
					p->p_xstat = sig;
					proc_stop(p, SSTOP);
					tstop();
				}
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else {
				lwkt_reltoken(&p->p_token);
				return (sig);
			}

			/*NOTREACHED*/

		case (intptr_t)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flags & P_TRACED) == 0)
				kprintf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			lwkt_reltoken(&p->p_token);
			return (sig);
		}
		spin_lock(&lp->lwp_spin);
		lwp_delsig(lp, sig);		/* take the signal! */
		spin_unlock(&lp->lwp_spin);
	}
	/* NOTREACHED */
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 *
 * Caller must hold p->p_token
 */
void
postsig(int sig)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct sigacts *ps = p->p_sigacts;
	sig_t action;
	sigset_t returnmask;
	int code;

	KASSERT(sig != 0, ("postsig"));

	KNOTE(&p->p_klist, NOTE_SIGNAL | sig);

	/*
	 * If we are a virtual kernel running an emulated user process
	 * context, switch back to the virtual kernel context before
	 * trying to post the signal.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		struct trapframe *tf = lp->lwp_md.md_regs;
		tf->tf_trapno = 0;
		vkernel_trap(lp, tf);
	}

	spin_lock(&lp->lwp_spin);
	lwp_delsig(lp, sig);
	spin_unlock(&lp->lwp_spin);
	action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
		ktrpsig(lp, sig, action, lp->lwp_flags & LWP_OLDMASK ?
			&lp->lwp_oldsigmask : &lp->lwp_sigmask, 0);
#endif
	STOPEVENT(p, S_SIG, sig);

	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(lp, sig);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
		KASSERT(action != SIG_IGN && !SIGISMEMBER(lp->lwp_sigmask, sig),
			("postsig action"));

		/*
		 * Reset the signal handler if asked to
		 */
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(p->p_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(p->p_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}

		/*
		 * Set the signal mask and calculate the mask to restore
		 * when the signal function returns.
		 *
		 * Special case: user has done a sigsuspend.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigsuspend is what we want
		 * restored after the signal processing is completed.
		 */
2150 */ 2151 if (lp->lwp_flags & LWP_OLDMASK) { 2152 returnmask = lp->lwp_oldsigmask; 2153 lp->lwp_flags &= ~LWP_OLDMASK; 2154 } else { 2155 returnmask = lp->lwp_sigmask; 2156 } 2157 2158 SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); 2159 if (!SIGISMEMBER(ps->ps_signodefer, sig)) 2160 SIGADDSET(lp->lwp_sigmask, sig); 2161 2162 lp->lwp_ru.ru_nsignals++; 2163 if (lp->lwp_sig != sig) { 2164 code = 0; 2165 } else { 2166 code = lp->lwp_code; 2167 lp->lwp_code = 0; 2168 lp->lwp_sig = 0; 2169 } 2170 (*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code); 2171 } 2172 } 2173 2174 /* 2175 * Kill the current process for stated reason. 2176 */ 2177 void 2178 killproc(struct proc *p, char *why) 2179 { 2180 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", 2181 p->p_pid, p->p_comm, 2182 p->p_ucred ? p->p_ucred->cr_uid : -1, why); 2183 ksignal(p, SIGKILL); 2184 } 2185 2186 /* 2187 * Force the current process to exit with the specified signal, dumping core 2188 * if appropriate. We bypass the normal tests for masked and caught signals, 2189 * allowing unrecoverable failures to terminate the process without changing 2190 * signal state. Mark the accounting record with the signal termination. 2191 * If dumping core, save the signal number for the debugger. Calls exit and 2192 * does not return. 2193 * 2194 * This routine does not return. 2195 */ 2196 void 2197 sigexit(struct lwp *lp, int sig) 2198 { 2199 struct proc *p = lp->lwp_proc; 2200 2201 lwkt_gettoken(&p->p_token); 2202 p->p_acflag |= AXSIG; 2203 if (sigprop(sig) & SA_CORE) { 2204 lp->lwp_sig = sig; 2205 2206 /* 2207 * All threads must be stopped before we can safely coredump. 2208 * Stop threads using SCORE, which cannot be overridden. 2209 */ 2210 if (p->p_stat != SCORE) { 2211 proc_stop(p, SCORE); 2212 proc_stopwait(p); 2213 2214 if (coredump(lp, sig) == 0) 2215 sig |= WCOREFLAG; 2216 p->p_stat = SSTOP; 2217 } 2218 2219 /* 2220 * Log signals which would cause core dumps 2221 * (Log as LOG_INFO to appease those who don't want 2222 * these messages.) 2223 * XXX : Todo, as well as euid, write out ruid too 2224 */ 2225 if (kern_logsigexit) 2226 log(LOG_INFO, 2227 "pid %d (%s), uid %d: exited on signal %d%s\n", 2228 p->p_pid, p->p_comm, 2229 p->p_ucred ? p->p_ucred->cr_uid : -1, 2230 sig &~ WCOREFLAG, 2231 sig & WCOREFLAG ? " (core dumped)" : ""); 2232 } 2233 lwkt_reltoken(&p->p_token); 2234 exit1(W_EXITCODE(0, sig)); 2235 /* NOTREACHED */ 2236 } 2237 2238 static char corefilename[MAXPATHLEN+1] = {"%N.core"}; 2239 SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename, 2240 sizeof(corefilename), "process corefile name format string"); 2241 2242 /* 2243 * expand_name(name, uid, pid) 2244 * Expand the name described in corefilename, using name, uid, and pid. 2245 * corefilename is a kprintf-like string, with three format specifiers: 2246 * %N name of process ("name") 2247 * %P process id (pid) 2248 * %U user id (uid) 2249 * For example, "%N.core" is the default; they can be disabled completely 2250 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P". 2251 * This is controlled by the sysctl variable kern.corefile (see above). 

static char corefilename[MAXPATHLEN+1] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
	      sizeof(corefilename), "process corefile name format string");

/*
 * expand_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a kprintf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; core dumps can be disabled
 * completely by using "/dev/null", or all core files can be stored in
 * "/cores/%U/%N-%P".  This is controlled by the sysctl variable
 * kern.corefile (see above).
 */
static char *
expand_name(const char *name, uid_t uid, pid_t pid)
{
	char *temp;
	char buf[11];	/* pid/uid are at most 32 bits: 10 digits + NUL */
	int i, n;
	char *format = corefilename;
	size_t namelen;

	temp = kmalloc(MAXPATHLEN + 1, M_TEMP, M_NOWAIT);
	if (temp == NULL)
		return NULL;
	namelen = strlen(name);
	for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
		int l;

		switch (format[i]) {
		case '%':	/* Format character */
			i++;
			if (format[i] == '\0') {
				/*
				 * A trailing '%' would otherwise run the
				 * scan past the end of the format string;
				 * back up so the loop terminates normally.
				 */
				log(LOG_ERR, "Trailing %% in `%s'\n", format);
				i--;
				break;
			}
			switch (format[i]) {
			case '%':
				temp[n++] = '%';
				break;
			case 'N':	/* process name */
				if ((n + namelen) > MAXPATHLEN) {
					log(LOG_ERR, "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					kfree(temp, M_TEMP);
					return NULL;
				}
				memcpy(temp+n, name, namelen);
				n += namelen;
				break;
			case 'P':	/* process id */
				l = ksprintf(buf, "%u", pid);
				if ((n + l) > MAXPATHLEN) {
					log(LOG_ERR, "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					kfree(temp, M_TEMP);
					return NULL;
				}
				memcpy(temp+n, buf, l);
				n += l;
				break;
			case 'U':	/* user id */
				l = ksprintf(buf, "%u", uid);
				if ((n + l) > MAXPATHLEN) {
					log(LOG_ERR, "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					kfree(temp, M_TEMP);
					return NULL;
				}
				memcpy(temp+n, buf, l);
				n += l;
				break;
			default:
				log(LOG_ERR, "Unknown format character %c in `%s'\n",
				    format[i], format);
			}
			break;
		default:
			temp[n++] = format[i];
		}
	}
	temp[n] = '\0';
	return temp;
}
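
/*
 * Userland sketch (illustrative only, hence the #if 0): a worked
 * expansion and how the format string is set.  With kern.corefile set
 * to "/cores/%U/%N-%P.core", a crash of pid 724 running "vi" as uid
 * 1001 expands to "/cores/1001/vi-724.core".  set_corefile_format() is
 * a hypothetical helper around the standard sysctlbyname(3) call; the
 * same effect comes from `sysctl kern.corefile=/cores/%U/%N-%P.core'.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <string.h>

static int
set_corefile_format(const char *fmt)
{
	/* Write the new format string, including its NUL terminator. */
	return (sysctlbyname("kern.corefile", NULL, NULL,
	    fmt, strlen(fmt) + 1));
}
#endif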

/*
 * Dump a process' core.  The main routine does some
 * policy checking, and creates the name of the coredump;
 * then it passes on a vnode and a size limit to the process-specific
 * coredump routine if there is one; if there _is not_ one, it returns
 * ENOSYS; otherwise it returns the error from the process-specific routine.
 *
 * The parameter `lp' is the lwp which triggered the coredump.
 */
static int
coredump(struct lwp *lp, int sig)
{
	struct proc *p = lp->lwp_proc;
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct flock lf;
	struct nlookupdata nd;
	struct vattr vattr;
	int error, error1;
	char *name;			/* name of corefile */
	off_t limit;

	STOPEVENT(p, S_CORE, 0);

	if ((sugid_coredump == 0 && (p->p_flags & P_SUGID)) ||
	    do_coredump == 0) {
		return (EFAULT);
	}

	/*
	 * Note that the bulk of limit checking is done after
	 * the corefile is created.  The exception is if the limit
	 * for corefiles is 0, in which case we don't bother
	 * creating the corefile at all.  This layout means that
	 * a corefile is truncated instead of not being created,
	 * if it is larger than the limit.
	 */
	limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
	if (limit == 0)
		return (EFBIG);

	name = expand_name(p->p_comm, p->p_ucred->cr_uid, p->p_pid);
	if (name == NULL)
		return (EINVAL);
	error = nlookup_init(&nd, name, UIO_SYSSPACE, NLC_LOCKVP);
	if (error == 0) {
		error = vn_open(&nd, NULL, O_CREAT | FWRITE | O_NOFOLLOW,
				S_IRUSR | S_IWUSR);
	}
	kfree(name, M_TEMP);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}
	vp = nd.nl_open_vp;
	nd.nl_open_vp = NULL;
	nlookup_done(&nd);

	vn_unlock(vp);
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = F_WRLCK;
	error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, 0);
	if (error)
		goto out2;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr) || vattr.va_nlink != 1) {
		error = EFAULT;
		goto out1;
	}

	/* Don't dump to files the current user does not own */
	if (vattr.va_uid != p->p_ucred->cr_uid) {
		error = EFAULT;
		goto out1;
	}

	VATTR_NULL(&vattr);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred);
	p->p_acflag |= ACORE;
	vn_unlock(vp);

	error = p->p_sysent->sv_coredump ?
		p->p_sysent->sv_coredump(lp, sig, vp, limit) : ENOSYS;

out1:
	lf.l_type = F_UNLCK;
	VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, 0);
out2:
	error1 = vn_close(vp, FWRITE, NULL);
	if (error == 0)
		error = error1;
	return (error);
}

/*
 * Nonexistent system call -- signal the process (it may want to handle it).
 * Flag the error in case the process won't see the signal immediately
 * (blocked or ignored).
 *
 * MPALMOSTSAFE
 */
/* ARGSUSED */
int
sys_nosys(struct nosys_args *args)
{
	lwpsignal(curproc, curthread->td_lwp, SIGSYS);
	return (EINVAL);
}

/*
 * Send a SIGIO or SIGURG signal to a process or process group using
 * stored credentials rather than those of the current process.
 */
void
pgsigio(struct sigio *sigio, int sig, int checkctty)
{
	if (sigio == NULL)
		return;

	if (sigio->sio_pgid > 0) {
		if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred,
			     sigio->sio_proc))
			ksignal(sigio->sio_proc, sig);
	} else if (sigio->sio_pgid < 0) {
		struct proc *p;
		struct pgrp *pg = sigio->sio_pgrp;

		/*
		 * Must interlock all signals against fork
		 */
		pgref(pg);
		lockmgr(&pg->pg_lock, LK_EXCLUSIVE);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred, p) &&
			    (checkctty == 0 || (p->p_flags & P_CONTROLT)))
				ksignal(p, sig);
		}
		lockmgr(&pg->pg_lock, LK_RELEASE);
		pgrel(pg);
	}
}
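
/*
 * Userland sketch (illustrative only, hence the #if 0): pgsigio() is the
 * delivery end of async-I/O notification.  A process arms it by taking
 * ownership of a descriptor with F_SETOWN and enabling O_ASYNC; SIGURG
 * for sockets works the same way.  arm_sigio() is a hypothetical helper.
 */
#if 0
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void
arm_sigio(int fd, void (*handler)(int))
{
	int flags;

	signal(SIGIO, handler);
	/* Route SIGIO for this descriptor to our own pid. */
	fcntl(fd, F_SETOWN, getpid());
	/* With O_ASYNC set, readiness changes raise SIGIO. */
	flags = fcntl(fd, F_GETFL, 0);
	fcntl(fd, F_SETFL, flags | O_ASYNC);
}
#endif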

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/* XXX lock the proc here while adding to the list? */
	knote_insert(&p->p_klist, kn);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	knote_remove(&p->p_klist, kn);
}

/*
 * Signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{
	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}
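
/*
 * Userland sketch (illustrative only, hence the #if 0): the filterops
 * above back EVFILT_SIGNAL.  Because filt_sigattach() sets EV_CLEAR,
 * kn_data reports the number of deliveries since the last kevent(2)
 * retrieval.  watch_sigusr1() and on_usr1() are hypothetical names.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

static void
on_usr1(int sig)
{
	/* Empty handler; keeps SIGUSR1 from terminating the process. */
}

static void
watch_sigusr1(void)
{
	struct kevent kev;
	int kq;

	signal(SIGUSR1, on_usr1);
	kq = kqueue();
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);	/* register the filter */
	for (;;) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
			printf("SIGUSR1 x %jd\n", (intmax_t)kev.data);
	}
}
#endif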