/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 * $FreeBSD: src/sys/kern/kern_sig.c,v 1.72.2.17 2003/05/16 16:34:34 obrien Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/pioctl.h>
#include <sys/acct.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/unistd.h>
#include <sys/kern_syscall.h>
#include <sys/vkernel.h>

#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/smp.h>

static int	coredump(struct lwp *, int);
static char	*expand_name(const char *, uid_t, pid_t);
static int	dokillpg(int sig, int pgid, int all);
static int	sig_ffs(sigset_t *set);
static int	sigprop(int sig);
static void	lwp_signotify(struct lwp *lp);
static void	lwp_signotify_remote(void *arg);
static int	kern_sigtimedwait(sigset_t set, siginfo_t *info,
		    struct timespec *timeout);

static int	filt_sigattach(struct knote *kn);
static void	filt_sigdetach(struct knote *kn);
static int	filt_signal(struct knote *kn, long hint);

struct filterops sig_filtops =
	{ 0, filt_sigattach, filt_sigdetach, filt_signal };

static int	kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

/*
 * Can the current process send the signal sig to process q?
 */
#define CANSIGNAL(q, sig) \
	(!p_trespass(curproc->p_ucred, (q)->p_ucred) || \
	((sig) == SIGCONT && (q)->p_session == curproc->p_session))

/*
 * Policy -- Can real uid ruid with ucred uc send a signal to process q?
 */
#define CANSIGIO(ruid, uc, q) \
	((uc)->cr_uid == 0 || \
	    (ruid) == (q)->p_ucred->cr_ruid || \
	    (uc)->cr_uid == (q)->p_ucred->cr_ruid || \
	    (ruid) == (q)->p_ucred->cr_uid || \
	    (uc)->cr_uid == (q)->p_ucred->cr_uid)
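
/*
 * Illustrative reading of CANSIGIO (sketch, not authoritative): delivery
 * is permitted when the stored credentials are root, or when the sender's
 * real or effective uid matches the target's real or effective uid.  For
 * example, with ruid 1001 and a non-root ucred, CANSIGIO succeeds against
 * any process whose real or effective uid is 1001, and fails otherwise.
 */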
int sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
	&sugid_coredump, 0, "Enable coredumping set user/group ID processes");

static int	do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
	&do_coredump, 0, "Enable/Disable coredumps");

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define SA_KILL		0x01	/* terminates process by default */
#define SA_CORE		0x02	/* ditto and coredumps */
#define SA_STOP		0x04	/* suspend process */
#define SA_TTYSTOP	0x08	/* ditto, from tty */
#define SA_IGNORE	0x10	/* ignore by default */
#define SA_CONT		0x20	/* continue if suspended */
#define SA_CANTMASK	0x40	/* non-maskable, catchable */
#define SA_CKPT		0x80	/* checkpoint process */

static int sigproptbl[NSIG] = {
	SA_KILL,		/* SIGHUP */
	SA_KILL,		/* SIGINT */
	SA_KILL|SA_CORE,	/* SIGQUIT */
	SA_KILL|SA_CORE,	/* SIGILL */
	SA_KILL|SA_CORE,	/* SIGTRAP */
	SA_KILL|SA_CORE,	/* SIGABRT */
	SA_KILL|SA_CORE,	/* SIGEMT */
	SA_KILL|SA_CORE,	/* SIGFPE */
	SA_KILL,		/* SIGKILL */
	SA_KILL|SA_CORE,	/* SIGBUS */
	SA_KILL|SA_CORE,	/* SIGSEGV */
	SA_KILL|SA_CORE,	/* SIGSYS */
	SA_KILL,		/* SIGPIPE */
	SA_KILL,		/* SIGALRM */
	SA_KILL,		/* SIGTERM */
	SA_IGNORE,		/* SIGURG */
	SA_STOP,		/* SIGSTOP */
	SA_STOP|SA_TTYSTOP,	/* SIGTSTP */
	SA_IGNORE|SA_CONT,	/* SIGCONT */
	SA_IGNORE,		/* SIGCHLD */
	SA_STOP|SA_TTYSTOP,	/* SIGTTIN */
	SA_STOP|SA_TTYSTOP,	/* SIGTTOU */
	SA_IGNORE,		/* SIGIO */
	SA_KILL,		/* SIGXCPU */
	SA_KILL,		/* SIGXFSZ */
	SA_KILL,		/* SIGVTALRM */
	SA_KILL,		/* SIGPROF */
	SA_IGNORE,		/* SIGWINCH */
	SA_IGNORE,		/* SIGINFO */
	SA_KILL,		/* SIGUSR1 */
	SA_KILL,		/* SIGUSR2 */
	SA_IGNORE,		/* SIGTHR */
	SA_CKPT,		/* SIGCKPT */
	SA_KILL|SA_CKPT,	/* SIGCKPTEXIT */
	SA_IGNORE,		/* remaining signals default to ignore */
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
	SA_IGNORE,
};

static __inline int
sigprop(int sig)
{

	if (sig > 0 && sig < NSIG)
		return (sigproptbl[_SIG_IDX(sig)]);
	return (0);
}

static __inline int
sig_ffs(sigset_t *set)
{
	int i;

	for (i = 0; i < _SIG_WORDS; i++)
		if (set->__bits[i])
			return (ffs(set->__bits[i]) + (i * 32));
	return (0);
}
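
/*
 * Examples (illustrative sketch, not exercised by this file):
 *
 *	sigprop(SIGQUIT) == (SA_KILL | SA_CORE)    terminate and dump core
 *	sigprop(SIGTSTP) == (SA_STOP | SA_TTYSTOP) stop, generated from tty
 *	sig_ffs(&set)    returns the lowest signal number present in set
 *			 (e.g. 1 for SIGHUP), or 0 if the set is empty.
 */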

/*
 * No requirements.
 */
int
kern_sigaction(int sig, struct sigaction *act, struct sigaction *oact)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp;
	struct sigacts *ps = p->p_sigacts;

	if (sig <= 0 || sig > _SIG_MAXSIG)
		return (EINVAL);

	lwkt_gettoken(&p->p_token);

	if (oact) {
		oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
		oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
		oact->sa_flags = 0;
		if (SIGISMEMBER(ps->ps_sigonstack, sig))
			oact->sa_flags |= SA_ONSTACK;
		if (!SIGISMEMBER(ps->ps_sigintr, sig))
			oact->sa_flags |= SA_RESTART;
		if (SIGISMEMBER(ps->ps_sigreset, sig))
			oact->sa_flags |= SA_RESETHAND;
		if (SIGISMEMBER(ps->ps_signodefer, sig))
			oact->sa_flags |= SA_NODEFER;
		if (SIGISMEMBER(ps->ps_siginfo, sig))
			oact->sa_flags |= SA_SIGINFO;
		if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDSTOP)
			oact->sa_flags |= SA_NOCLDSTOP;
		if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDWAIT)
			oact->sa_flags |= SA_NOCLDWAIT;
	}
	if (act) {
		/*
		 * Check for invalid requests.  KILL and STOP cannot be
		 * caught.
		 */
		if (sig == SIGKILL || sig == SIGSTOP) {
			if (act->sa_handler != SIG_DFL) {
				lwkt_reltoken(&p->p_token);
				return (EINVAL);
			}
		}

		/*
		 * Change setting atomically.
		 */
		ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
		SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
		if (act->sa_flags & SA_SIGINFO) {
			ps->ps_sigact[_SIG_IDX(sig)] =
			    (__sighandler_t *)act->sa_sigaction;
			SIGADDSET(ps->ps_siginfo, sig);
		} else {
			ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
			SIGDELSET(ps->ps_siginfo, sig);
		}
		if (!(act->sa_flags & SA_RESTART))
			SIGADDSET(ps->ps_sigintr, sig);
		else
			SIGDELSET(ps->ps_sigintr, sig);
		if (act->sa_flags & SA_ONSTACK)
			SIGADDSET(ps->ps_sigonstack, sig);
		else
			SIGDELSET(ps->ps_sigonstack, sig);
		if (act->sa_flags & SA_RESETHAND)
			SIGADDSET(ps->ps_sigreset, sig);
		else
			SIGDELSET(ps->ps_sigreset, sig);
		if (act->sa_flags & SA_NODEFER)
			SIGADDSET(ps->ps_signodefer, sig);
		else
			SIGDELSET(ps->ps_signodefer, sig);
		if (sig == SIGCHLD) {
			if (act->sa_flags & SA_NOCLDSTOP)
				p->p_sigacts->ps_flag |= PS_NOCLDSTOP;
			else
				p->p_sigacts->ps_flag &= ~PS_NOCLDSTOP;
			if (act->sa_flags & SA_NOCLDWAIT) {
				/*
				 * Paranoia: since SA_NOCLDWAIT is implemented
				 * by reparenting the dying child to PID 1 (and
				 * trust it to reap the zombie), PID 1 itself
				 * is forbidden to set SA_NOCLDWAIT.
				 */
				if (p->p_pid == 1)
					p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT;
				else
					p->p_sigacts->ps_flag |= PS_NOCLDWAIT;
			} else {
				p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT;
			}
			if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
				ps->ps_flag |= PS_CLDSIGIGN;
			else
				ps->ps_flag &= ~PS_CLDSIGIGN;
		}
		/*
		 * Set bit in p_sigignore for signals that are set to SIG_IGN,
		 * and for signals set to SIG_DFL where the default is to
		 * ignore. However, don't put SIGCONT in p_sigignore, as we
		 * have to restart the process.
		 *
		 * Also remove the signal from the process and lwp signal
		 * list.
		 */
		if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
		    (sigprop(sig) & SA_IGNORE &&
		     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
			SIGDELSET(p->p_siglist, sig);
			FOREACH_LWP_IN_PROC(lp, p) {
				spin_lock(&lp->lwp_spin);
				SIGDELSET(lp->lwp_siglist, sig);
				spin_unlock(&lp->lwp_spin);
			}
			if (sig != SIGCONT) {
				/* easier in ksignal */
				SIGADDSET(p->p_sigignore, sig);
			}
			SIGDELSET(p->p_sigcatch, sig);
		} else {
			SIGDELSET(p->p_sigignore, sig);
			if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
				SIGDELSET(p->p_sigcatch, sig);
			else
				SIGADDSET(p->p_sigcatch, sig);
		}
	}
	lwkt_reltoken(&p->p_token);
	return (0);
}
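
/*
 * Userland view (illustrative sketch, not part of the kernel path): the
 * sigaction(2) stub reaches kern_sigaction() via sys_sigaction() below.
 *
 *	struct sigaction sa;
 *	sa.sa_handler = handler;	  handler is hypothetical
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * With SA_RESTART set, kern_sigaction() clears SIGUSR1 from ps_sigintr,
 * so interrupted system calls are restarted rather than failing with
 * EINTR.
 */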

int
sys_sigaction(struct sigaction_args *uap)
{
	struct sigaction act, oact;
	struct sigaction *actp, *oactp;
	int error;

	actp = (uap->act != NULL) ? &act : NULL;
	oactp = (uap->oact != NULL) ? &oact : NULL;
	if (actp) {
		error = copyin(uap->act, actp, sizeof(act));
		if (error)
			return (error);
	}
	error = kern_sigaction(uap->sig, actp, oactp);
	if (oactp && !error) {
		error = copyout(oactp, uap->oact, sizeof(oact));
	}
	return (error);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct proc *p)
{
	int i;

	for (i = 1; i <= NSIG; i++)
		if (sigprop(i) & SA_IGNORE && i != SIGCONT)
			SIGADDSET(p->p_sigignore, i);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps = p->p_sigacts;
	struct lwp *lp;
	int sig;

	lp = ONLY_LWP_IN_PROC(p);

	/*
	 * Reset caught signals.  Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (SIGNOTEMPTY(p->p_sigcatch)) {
		sig = sig_ffs(&p->p_sigcatch);
		SIGDELSET(p->p_sigcatch, sig);
		if (sigprop(sig) & SA_IGNORE) {
			if (sig != SIGCONT)
				SIGADDSET(p->p_sigignore, sig);
			SIGDELSET(p->p_siglist, sig);
			/* don't need spinlock */
			SIGDELSET(lp->lwp_siglist, sig);
		}
		ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
	}

	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	lp->lwp_sigstk.ss_flags = SS_DISABLE;
	lp->lwp_sigstk.ss_size = 0;
	lp->lwp_sigstk.ss_sp = NULL;
	lp->lwp_flags &= ~LWP_ALTSTACK;
	/*
	 * Reset no zombies if child dies flag as Solaris does.
	 */
	p->p_sigacts->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
	if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
		ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
}

/*
 * kern_sigprocmask() - MP SAFE ONLY IF p == curproc
 *
 * Manipulate signal mask.  This routine is MP SAFE *ONLY* if
 * p == curproc.
 */
int
kern_sigprocmask(int how, sigset_t *set, sigset_t *oset)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	int error;

	lwkt_gettoken(&p->p_token);

	if (oset != NULL)
		*oset = lp->lwp_sigmask;

	error = 0;
	if (set != NULL) {
		switch (how) {
		case SIG_BLOCK:
			SIG_CANTMASK(*set);
			SIGSETOR(lp->lwp_sigmask, *set);
			break;
		case SIG_UNBLOCK:
			SIGSETNAND(lp->lwp_sigmask, *set);
			break;
		case SIG_SETMASK:
			SIG_CANTMASK(*set);
			lp->lwp_sigmask = *set;
			break;
		default:
			error = EINVAL;
			break;
		}
	}

	lwkt_reltoken(&p->p_token);

	return (error);
}

/*
 * sigprocmask()
 *
 * MPSAFE
 */
int
sys_sigprocmask(struct sigprocmask_args *uap)
{
	sigset_t set, oset;
	sigset_t *setp, *osetp;
	int error;

	setp = (uap->set != NULL) ? &set : NULL;
	osetp = (uap->oset != NULL) ? &oset : NULL;
	if (setp) {
		error = copyin(uap->set, setp, sizeof(set));
		if (error)
			return (error);
	}
	error = kern_sigprocmask(uap->how, setp, osetp);
	if (osetp && !error) {
		error = copyout(osetp, uap->oset, sizeof(oset));
	}
	return (error);
}
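
/*
 * Userland view (illustrative sketch, not part of the kernel path):
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	adds SIGINT to lwp_sigmask
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	removes it again
 *
 * SIG_CANTMASK above guarantees that SIGKILL and SIGSTOP can never be
 * blocked this way.
 */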

/*
 * MPSAFE
 */
int
kern_sigpending(struct __sigset *set)
{
	struct lwp *lp = curthread->td_lwp;

	*set = lwp_sigpend(lp);

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigpending(struct sigpending_args *uap)
{
	sigset_t set;
	int error;

	error = kern_sigpending(&set);

	if (error == 0)
		error = copyout(&set, uap->set, sizeof(set));
	return (error);
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.
 *
 * MPSAFE
 */
int
kern_sigsuspend(struct __sigset *set)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;
	struct sigacts *ps = p->p_sigacts;

	/*
	 * When returning from sigsuspend, we want
	 * the old mask to be restored after the
	 * signal handler has finished.  Thus, we
	 * save it here and mark the sigacts structure
	 * to indicate this.
	 */
	lp->lwp_oldsigmask = lp->lwp_sigmask;
	lp->lwp_flags |= LWP_OLDMASK;

	SIG_CANTMASK(*set);
	lp->lwp_sigmask = *set;
	while (tsleep(ps, PCATCH, "pause", 0) == 0)
		/* void */;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}
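
/*
 * Userland view (illustrative sketch, not part of the kernel path): the
 * classic race-free wait for a signal.
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	...				check or modify shared state
 *	sigsuspend(&old);		atomically unblock and sleep
 *
 * LWP_OLDMASK set above makes postsig() restore lwp_oldsigmask once the
 * handler has run.
 */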

/*
 * Note nonstandard calling convention: libc stub passes mask, not
 * pointer, to save a copyin.
 *
 * MPSAFE
 */
int
sys_sigsuspend(struct sigsuspend_args *uap)
{
	sigset_t mask;
	int error;

	error = copyin(uap->sigmask, &mask, sizeof(mask));
	if (error)
		return (error);

	error = kern_sigsuspend(&mask);

	return (error);
}

/*
 * MPSAFE
 */
int
kern_sigaltstack(struct sigaltstack *ss, struct sigaltstack *oss)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = td->td_proc;

	if ((lp->lwp_flags & LWP_ALTSTACK) == 0)
		lp->lwp_sigstk.ss_flags |= SS_DISABLE;

	if (oss)
		*oss = lp->lwp_sigstk;

	if (ss) {
		if (ss->ss_flags & SS_DISABLE) {
			if (lp->lwp_sigstk.ss_flags & SS_ONSTACK)
				return (EINVAL);
			lp->lwp_flags &= ~LWP_ALTSTACK;
			lp->lwp_sigstk.ss_flags = ss->ss_flags;
		} else {
			if (ss->ss_size < p->p_sysent->sv_minsigstksz)
				return (ENOMEM);
			lp->lwp_flags |= LWP_ALTSTACK;
			lp->lwp_sigstk = *ss;
		}
	}

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigaltstack(struct sigaltstack_args *uap)
{
	stack_t ss, oss;
	int error;

	if (uap->ss) {
		error = copyin(uap->ss, &ss, sizeof(ss));
		if (error)
			return (error);
	}

	error = kern_sigaltstack(uap->ss ? &ss : NULL,
	    uap->oss ? &oss : NULL);

	if (error == 0 && uap->oss)
		error = copyout(&oss, uap->oss, sizeof(*uap->oss));
	return (error);
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
struct killpg_info {
	int nfound;
	int sig;
};

static int killpg_all_callback(struct proc *p, void *data);

static int
dokillpg(int sig, int pgid, int all)
{
	struct killpg_info info;
	struct proc *cp = curproc;
	struct proc *p;
	struct pgrp *pgrp;

	info.nfound = 0;
	info.sig = sig;

	if (all) {
		/*
		 * broadcast
		 */
		allproc_scan(killpg_all_callback, &info);
	} else {
		if (pgid == 0) {
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_pgrp;
			pgref(pgrp);
		} else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}

		/*
		 * Must interlock all signals against fork
		 */
		lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (p->p_pid <= 1 ||
			    p->p_stat == SZOMB ||
			    (p->p_flags & P_SYSTEM) ||
			    !CANSIGNAL(p, sig)) {
				continue;
			}
			++info.nfound;
			if (sig)
				ksignal(p, sig);
		}
		lockmgr(&pgrp->pg_lock, LK_RELEASE);
		pgrel(pgrp);
	}
	return (info.nfound ? 0 : ESRCH);
}

static int
killpg_all_callback(struct proc *p, void *data)
{
	struct killpg_info *info = data;

	if (p->p_pid <= 1 || (p->p_flags & P_SYSTEM) ||
	    p == curproc || !CANSIGNAL(p, info->sig)) {
		return (0);
	}
	++info->nfound;
	if (info->sig)
		ksignal(p, info->sig);
	return (0);
}

/*
 * Send a general signal to a process or LWPs within that process.
 *
 * Note that new signals cannot be sent if a process is exiting or already
 * a zombie, but we return success anyway as userland is likely to not handle
 * the race properly.
 *
 * No requirements.
 */
int
kern_kill(int sig, pid_t pid, lwpid_t tid)
{
	int t;

	if ((u_int)sig > _SIG_MAXSIG)
		return (EINVAL);

	lwkt_gettoken(&proc_token);

	if (pid > 0) {
		struct proc *p;
		struct lwp *lp = NULL;

		/*
		 * Send a signal to a single process.  If the kill() is
		 * racing an exiting process which has not yet been reaped
		 * act as though the signal was delivered successfully but
		 * don't actually try to deliver the signal.
		 */
		if ((p = pfind(pid)) == NULL) {
			if ((p = zpfind(pid)) == NULL) {
				lwkt_reltoken(&proc_token);
				return (ESRCH);
			}
			lwkt_reltoken(&proc_token);
			PRELE(p);
			return (0);
		}
		lwkt_gettoken(&p->p_token);
		if (!CANSIGNAL(p, sig)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			lwkt_reltoken(&proc_token);
			return (EPERM);
		}

		/*
		 * NOP if the process is exiting.  Note that lwpsignal() is
		 * called directly with P_WEXIT set to kill individual LWPs
		 * during exit, which is allowed.
		 */
		if (p->p_flags & P_WEXIT) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			lwkt_reltoken(&proc_token);
			return (0);
		}
		if (tid != -1) {
			lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, tid);
			if (lp == NULL) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				lwkt_reltoken(&proc_token);
				return (ESRCH);
			}
		}
		if (sig)
			lwpsignal(p, lp, sig);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		lwkt_reltoken(&proc_token);
		return (0);
	}

	/*
	 * If we come here, pid is a special broadcast pid.
	 * This doesn't mix with a tid.
	 */
	if (tid != -1) {
		lwkt_reltoken(&proc_token);
		return (EINVAL);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		t = (dokillpg(sig, 0, 1));
		break;
	case 0:			/* signal own process group */
		t = (dokillpg(sig, 0, 0));
		break;
	default:		/* negative explicit process group */
		t = (dokillpg(sig, -pid, 0));
		break;
	}
	lwkt_reltoken(&proc_token);
	return t;
}

int
sys_kill(struct kill_args *uap)
{
	int error;

	error = kern_kill(uap->signum, uap->pid, -1);
	return (error);
}

int
sys_lwp_kill(struct lwp_kill_args *uap)
{
	int error;
	pid_t pid = uap->pid;

	/*
	 * A tid is mandatory for lwp_kill(), otherwise
	 * you could simply use kill().
	 */
	if (uap->tid == -1)
		return (EINVAL);

	/*
	 * To save on a getpid() function call for intra-process
	 * signals, pid == -1 means current process.
	 */
	if (pid == -1)
		pid = curproc->p_pid;

	error = kern_kill(uap->signum, pid, uap->tid);
	return (error);
}
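
/*
 * Illustrative pid/tid encodings handled above (sketch):
 *
 *	kern_kill(sig, 1234, -1)	signal process 1234
 *	kern_kill(sig, 1234, tid)	signal one lwp of process 1234
 *	kern_kill(sig, 0, -1)		signal the caller's process group
 *	kern_kill(sig, -5, -1)		signal process group 5
 *	kern_kill(sig, -1, -1)		broadcast to all signalable processes
 */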

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int sig)
{
	struct pgrp *pgrp;

	if (pgid && (pgrp = pgfind(pgid)))
		pgsignal(pgrp, sig, 0);
}

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 *
 * pg_lock interlocks against a fork that might be in progress, to
 * ensure that the new child process picks up the signal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
	struct proc *p;

	/*
	 * Must interlock all signals against fork
	 */
	if (pgrp) {
		pgref(pgrp);
		lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
		LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
			if (checkctty == 0 || p->p_flags & P_CONTROLT)
				ksignal(p, sig);
		}
		lockmgr(&pgrp->pg_lock, LK_RELEASE);
		pgrel(pgrp);
	}
}

/*
 * Send a signal caused by a trap to the current lwp.  If it will be caught
 * immediately, deliver it with correct code.  Otherwise, post it normally.
 *
 * These signals may ONLY be delivered to the specified lwp and may never
 * be delivered to the process generically.
 */
void
trapsignal(struct lwp *lp, int sig, u_long code)
{
	struct proc *p = lp->lwp_proc;
	struct sigacts *ps = p->p_sigacts;

	/*
	 * If we are a virtual kernel running an emulated user process
	 * context, switch back to the virtual kernel context before
	 * trying to post the signal.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		struct trapframe *tf = lp->lwp_md.md_regs;
		tf->tf_trapno = 0;
		vkernel_trap(lp, tf);
	}

	if ((p->p_flags & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) &&
	    !SIGISMEMBER(lp->lwp_sigmask, sig)) {
		lp->lwp_ru.ru_nsignals++;
#ifdef KTRACE
		if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
			ktrpsig(lp, sig, ps->ps_sigact[_SIG_IDX(sig)],
			    &lp->lwp_sigmask, code);
#endif
		(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
		    &lp->lwp_sigmask, code);
		SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(lp->lwp_sigmask, sig);
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(p->p_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(p->p_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}
	} else {
		lp->lwp_code = code;	/* XXX for core dump/debugger */
		lp->lwp_sig = sig;	/* XXX to verify code */
		lwpsignal(p, lp, sig);
	}
}
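
/*
 * Illustrative path (sketch): an unresolvable user-mode page fault reaches
 * trapsignal(lp, SIGSEGV, code) from the trap handler.  If the process
 * catches SIGSEGV and does not block it, the handler frame is pushed
 * immediately via sv_sendsig(); otherwise the signal is posted through
 * lwpsignal() and handled on the normal return-to-userland path.
 */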

/*
 * Find a suitable lwp to deliver the signal to.  Returns NULL if all
 * lwps hold the signal blocked.
 *
 * Caller must hold p->p_token.
 *
 * Returns a lp or NULL.  If non-NULL the lp is held and its token is
 * acquired.
 */
static struct lwp *
find_lwp_for_signal(struct proc *p, int sig)
{
	struct lwp *lp;
	struct lwp *run, *sleep, *stop;

	/*
	 * If the running/preempted thread belongs to the proc to which
	 * the signal is being delivered and this thread does not block
	 * the signal, then we can avoid a context switch by delivering
	 * the signal to this thread, because it will return to userland
	 * soon anyways.
	 */
	lp = lwkt_preempted_proc();
	if (lp != NULL && lp->lwp_proc == p) {
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);
		if (!SIGISMEMBER(lp->lwp_sigmask, sig)) {
			/* return w/ token held */
			return (lp);
		}
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}

	run = sleep = stop = NULL;
	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * If the signal is being blocked by the lwp, then this
		 * lwp is not eligible for receiving the signal.
		 */
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);

		if (SIGISMEMBER(lp->lwp_sigmask, sig)) {
			lwkt_reltoken(&lp->lwp_token);
			LWPRELE(lp);
			continue;
		}

		switch (lp->lwp_stat) {
		case LSRUN:
			if (sleep) {
				lwkt_token_swap();
				lwkt_reltoken(&sleep->lwp_token);
				LWPRELE(sleep);
				sleep = NULL;
				run = lp;
			} else if (stop) {
				lwkt_token_swap();
				lwkt_reltoken(&stop->lwp_token);
				LWPRELE(stop);
				stop = NULL;
				run = lp;
			} else {
				run = lp;
			}
			break;
		case LSSLEEP:
			if (lp->lwp_flags & LWP_SINTR) {
				if (sleep) {
					lwkt_reltoken(&lp->lwp_token);
					LWPRELE(lp);
				} else if (stop) {
					lwkt_token_swap();
					lwkt_reltoken(&stop->lwp_token);
					LWPRELE(stop);
					stop = NULL;
					sleep = lp;
				} else {
					sleep = lp;
				}
			} else {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			}
			break;
		case LSSTOP:
			if (sleep) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			} else if (stop) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			} else {
				stop = lp;
			}
			break;
		}
		if (run)
			break;
	}

	if (run != NULL)
		return (run);
	else if (sleep != NULL)
		return (sleep);
	else
		return (stop);
}

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * If the caller wishes to call this function from a hard code section the
 * caller must already hold p->p_token (see kern_clock.c).
 *
 * No requirements.
 */
void
ksignal(struct proc *p, int sig)
{
	lwpsignal(p, NULL, sig);
}
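
/*
 * Typical use (illustrative sketch):
 *
 *	ksignal(p, SIGHUP);	post SIGHUP to process p, letting
 *				lwpsignal() pick a suitable lwp
 *
 * Callers that already know the target thread, such as trapsignal(),
 * call lwpsignal() directly instead.
 */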

/*
 * The core for ksignal.  lp may be NULL, then a suitable thread
 * will be chosen.  If not, lp MUST be a member of p.
 *
 * If the caller wishes to call this function from a hard code section the
 * caller must already hold p->p_token.
 *
 * No requirements.
 */
void
lwpsignal(struct proc *p, struct lwp *lp, int sig)
{
	struct proc *q;
	sig_t action;
	int prop;

	if (sig > _SIG_MAXSIG || sig <= 0) {
		kprintf("lwpsignal: signal %d\n", sig);
		panic("lwpsignal signal number");
	}

	KKASSERT(lp == NULL || lp->lwp_proc == p);

	/*
	 * We don't want to race... well, all sorts of things.  Get appropriate
	 * tokens.
	 *
	 * Don't try to deliver a generic signal to an exiting process,
	 * the signal structures could be in flux.  We check the LWP later
	 * on.
	 */
	PHOLD(p);
	lwkt_gettoken(&p->p_token);
	if (lp) {
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);
	} else if (p->p_flags & P_WEXIT) {
		goto out;
	}

	prop = sigprop(sig);

	/*
	 * If proc is traced, always give parent a chance;
	 * if signal event is tracked by procfs, give *that*
	 * a chance, as well.
	 */
	if ((p->p_flags & P_TRACED) || (p->p_stops & S_SIG)) {
		action = SIG_DFL;
	} else {
		/*
		 * Do not try to deliver signals to an exiting lwp.  Note
		 * that we must still deliver the signal if P_WEXIT is set
		 * in the process flags.
		 */
		if (lp && (lp->lwp_mpflags & LWP_MP_WEXIT)) {
			if (lp) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return;
		}

		/*
		 * If the signal is being ignored, then we forget about
		 * it immediately.  NOTE: We don't set SIGCONT in p_sigignore,
		 * and if it is set to SIG_IGN, action will be SIG_DFL here.
		 */
		if (SIGISMEMBER(p->p_sigignore, sig)) {
			/*
			 * Even if a signal is set SIG_IGN, it may still be
			 * lurking in a kqueue.
			 */
			KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
			if (lp) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return;
		}
		if (SIGISMEMBER(p->p_sigcatch, sig))
			action = SIG_CATCH;
		else
			action = SIG_DFL;
	}

	/*
	 * If continuing, clear any pending STOP signals.
	 */
	if (prop & SA_CONT)
		SIG_STOPSIGMASK(p->p_siglist);

	if (prop & SA_STOP) {
		/*
		 * If sending a tty stop signal to a member of an orphaned
		 * process group, discard the signal here if the action
		 * is default; don't stop the process below if sleeping,
		 * and don't clear any pending SIGCONT.
		 */
		if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
		    action == SIG_DFL) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return;
		}
		SIG_CONTSIGMASK(p->p_siglist);
		p->p_flags &= ~P_CONTINUED;
	}

	if (p->p_stat == SSTOP) {
		/*
		 * Nobody can handle this signal, add it to the lwp or
		 * process pending list
		 */
		if (lp) {
			spin_lock(&lp->lwp_spin);
			SIGADDSET(lp->lwp_siglist, sig);
			spin_unlock(&lp->lwp_spin);
		} else {
			SIGADDSET(p->p_siglist, sig);
		}

		/*
		 * If the process is stopped and is being traced, then no
		 * further action is necessary.
		 */
		if (p->p_flags & P_TRACED)
			goto out;

		/*
		 * If the process is stopped and receives a KILL signal,
		 * make the process runnable.
		 */
		if (sig == SIGKILL) {
			proc_unstop(p);
			goto active_process;
		}

		/*
		 * If the process is stopped and receives a CONT signal,
		 * then try to make the process runnable again.
		 */
		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action.  If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist.  If the process catches SIGCONT, let it
			 * handle the signal itself.
			 *
			 * XXX what if the signal is being held blocked?
			 *
			 * Token required to interlock kern_wait().
			 * Reparenting can also cause a race so we have to
			 * hold (q).
			 */
			q = p->p_pptr;
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			p->p_flags |= P_CONTINUED;
			wakeup(q);
			if (action == SIG_DFL)
				SIGDELSET(p->p_siglist, sig);
			proc_unstop(p);
			lwkt_reltoken(&q->p_token);
			PRELE(q);
			if (action == SIG_CATCH)
				goto active_process;
			goto out;
		}

		/*
		 * If the process is stopped and receives another STOP
		 * signal, we do not need to stop it again.  If we did
		 * the shell could get confused.
		 *
		 * However, if the current/preempted lwp is part of the
		 * process receiving the signal, we need to keep it,
		 * so that this lwp can stop in issignal() later, as
		 * we don't want to wait until it reaches userret!
		 */
		if (prop & SA_STOP) {
			if (lwkt_preempted_proc() == NULL ||
			    lwkt_preempted_proc()->lwp_proc != p)
				SIGDELSET(p->p_siglist, sig);
		}

		/*
		 * Otherwise the process is stopped and it received some
		 * signal, which does not change its stopped state.  When
		 * the process is continued a wakeup(p) will be issued which
		 * will wakeup any threads sleeping in tstop().
		 */
		if (lp == NULL) {
			/* NOTE: returns lp w/ token held */
			lp = find_lwp_for_signal(p, sig);
		}
		goto out;

		/* NOTREACHED */
	}
	/* else not stopped */
active_process:

	/*
	 * Never deliver a lwp-specific signal to a random lwp.
	 */
	if (lp == NULL) {
		/* NOTE: returns lp w/ token held */
		lp = find_lwp_for_signal(p, sig);
		if (lp) {
			if (SIGISMEMBER(lp->lwp_sigmask, sig)) {
				lwkt_reltoken(&lp->lwp_token);
				LWPRELE(lp);
				lp = NULL;
			}
		}
	}

	/*
	 * Deliver to the process generically if (1) the signal is being
	 * sent to any thread or (2) we could not find a thread to deliver
	 * it to.
	 */
	if (lp == NULL) {
		SIGADDSET(p->p_siglist, sig);
		goto out;
	}

	/*
	 * Deliver to a specific LWP whether it masks it or not.  It will
	 * not be dispatched if masked but we must still deliver it.
	 */
	if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
	    (p->p_flags & P_TRACED) == 0) {
		p->p_nice = NZERO;
	}

	/*
	 * If the process receives a STOP signal which indeed needs to
	 * stop the process, do so.  If the process chose to catch the
	 * signal, it will be treated like any other signal.
	 */
	if ((prop & SA_STOP) && action == SIG_DFL) {
		/*
		 * If a child holding parent blocked, stopping
		 * could cause deadlock.  Take no action at this
		 * time.
		 */
		if (p->p_flags & P_PPWAIT) {
			SIGADDSET(p->p_siglist, sig);
			goto out;
		}

		/*
		 * Do not actually try to manipulate the process, but simply
		 * stop it.  Lwps will stop as soon as they safely can.
		 *
		 * Ignore stop if the process is exiting.
		 */
		if ((p->p_flags & P_WEXIT) == 0) {
			p->p_xstat = sig;
			proc_stop(p);
		}
		goto out;
	}

	/*
	 * If it is a CONT signal with default action, just ignore it.
	 */
	if ((prop & SA_CONT) && action == SIG_DFL)
		goto out;

	/*
	 * Mark signal pending at this specific thread.
	 */
	spin_lock(&lp->lwp_spin);
	SIGADDSET(lp->lwp_siglist, sig);
	spin_unlock(&lp->lwp_spin);

	lwp_signotify(lp);

out:
	if (lp) {
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);
}

/*
 * Notify the LWP that a signal has arrived.  The LWP does not have to be
 * sleeping on the current cpu.
 *
 * p->p_token and lp->lwp_token must be held on call.
 *
 * We can only safely schedule the thread on its current cpu and only if
 * one of the SINTR flags is set.  If an SINTR flag is set AND we are on
 * the correct cpu we are properly interlocked, otherwise we could be
 * racing other thread transition states (or the lwp is on the user scheduler
 * runq but not scheduled) and must not do anything.
 *
 * Since we hold the lwp token we know the lwp cannot be ripped out from
 * under us so we can safely hold it to prevent it from being ripped out
 * from under us if we are forced to IPI another cpu to make the local
 * checks there.
 *
 * Adjustment of lp->lwp_stat can only occur when we hold the lwp_token,
 * which we won't in an IPI so any fixups have to be done here, effectively
 * replicating part of what setrunnable() does.
 */
static void
lwp_signotify(struct lwp *lp)
{
	ASSERT_LWKT_TOKEN_HELD(&lp->lwp_proc->p_token);

	crit_enter();
	if (lp == lwkt_preempted_proc()) {
		/*
		 * lwp is on the current cpu AND it is currently running
		 * (we preempted it).
		 */
		signotify();
	} else if (lp->lwp_flags & LWP_SINTR) {
		/*
		 * lwp is sitting in tsleep() with PCATCH set
		 */
		if (lp->lwp_thread->td_gd == mycpu) {
			setrunnable(lp);
		} else {
			/*
			 * We can only adjust lwp_stat while we hold the
			 * lwp_token, and we won't in the IPI function.
			 */
			LWPHOLD(lp);
			if (lp->lwp_stat == LSSTOP)
				lp->lwp_stat = LSSLEEP;
			lwkt_send_ipiq(lp->lwp_thread->td_gd,
			    lwp_signotify_remote, lp);
		}
	} else if (lp->lwp_thread->td_flags & TDF_SINTR) {
		/*
		 * lwp is sitting in lwkt_sleep() with PCATCH set.
		 */
		if (lp->lwp_thread->td_gd == mycpu) {
			setrunnable(lp);
		} else {
			/*
			 * We can only adjust lwp_stat while we hold the
			 * lwp_token, and we won't in the IPI function.
			 */
			LWPHOLD(lp);
			if (lp->lwp_stat == LSSTOP)
				lp->lwp_stat = LSSLEEP;
			lwkt_send_ipiq(lp->lwp_thread->td_gd,
			    lwp_signotify_remote, lp);
		}
	} else {
		/*
		 * Otherwise the lwp is either in some uninterruptable state
		 * or it is on the userland scheduler's runqueue waiting to
		 * be scheduled to a cpu.
		 */
	}
	crit_exit();
}

/*
 * This function is called via an IPI so we cannot call setrunnable() here
 * (because while we hold the lp we don't own its token, and can't get it
 * from an IPI).
 *
 * We are interlocked by virtue of being on the same cpu as the target.  If
 * we still are and LWP_SINTR or TDF_SINTR is set we can safely schedule
 * the target thread.
 */
static void
lwp_signotify_remote(void *arg)
{
	struct lwp *lp = arg;
	thread_t td = lp->lwp_thread;

	if (lp == lwkt_preempted_proc()) {
		signotify();
		LWPRELE(lp);
	} else if (td->td_gd == mycpu) {
		if ((lp->lwp_flags & LWP_SINTR) ||
		    (td->td_flags & TDF_SINTR)) {
			lwkt_schedule(td);
		}
		LWPRELE(lp);
	} else {
		lwkt_send_ipiq(td->td_gd, lwp_signotify_remote, lp);
		/* LWPHOLD() is forwarded to the target cpu */
	}
}

/*
 * Caller must hold p->p_token
 */
void
proc_stop(struct proc *p)
{
	struct proc *q;
	struct lwp *lp;

	ASSERT_LWKT_TOKEN_HELD(&p->p_token);

	/* If somebody raced us, be happy with it */
	if (p->p_stat == SSTOP || p->p_stat == SZOMB) {
		return;
	}
	p->p_stat = SSTOP;

	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);

		switch (lp->lwp_stat) {
		case LSSTOP:
			/*
			 * Do nothing, we are already counted in
			 * p_nstopped.
			 */
			break;

		case LSSLEEP:
			/*
			 * We're sleeping, but we will stop before
			 * returning to userspace, so count us
			 * as stopped as well.  We set LWP_MP_WSTOP
			 * to signal the lwp that it should not
			 * increase p_nstopped when reaching tstop().
			 *
			 * LWP_MP_WSTOP is protected by lp->lwp_token.
			 */
			if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
				atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
				++p->p_nstopped;
			}
			break;

		case LSRUN:
			/*
			 * We might notify ourself, but that's not
			 * a problem.
			 */
			lwp_signotify(lp);
			break;
		}
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}

	if (p->p_nstopped == p->p_nthreads) {
		/*
		 * Token required to interlock kern_wait().  Reparenting can
		 * also cause a race so we have to hold (q).
		 */
		q = p->p_pptr;
		PHOLD(q);
		lwkt_gettoken(&q->p_token);
		p->p_flags &= ~P_WAITED;
		wakeup(q);
		if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
			ksignal(p->p_pptr, SIGCHLD);
		lwkt_reltoken(&q->p_token);
		PRELE(q);
	}
}

/*
 * Caller must hold p->p_token
 */
void
proc_unstop(struct proc *p)
{
	struct lwp *lp;

	ASSERT_LWKT_TOKEN_HELD(&p->p_token);

	if (p->p_stat != SSTOP)
		return;

	p->p_stat = SACTIVE;

	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		lwkt_gettoken(&lp->lwp_token);

		switch (lp->lwp_stat) {
		case LSRUN:
			/*
			 * Uh?  Not stopped?  Well, I guess that's okay.
			 */
			if (bootverbose)
				kprintf("proc_unstop: lwp %d/%d not sleeping\n",
				    p->p_pid, lp->lwp_tid);
			break;

		case LSSLEEP:
			/*
			 * Still sleeping.  Don't bother waking it up.
			 * However, if this thread was counted as
			 * stopped, undo this.
			 *
			 * Nevertheless we call setrunnable() so that it
			 * will wake up in case a signal or timeout arrived
			 * in the meantime.
			 *
			 * LWP_MP_WSTOP is protected by lp->lwp_token.
			 */
			if (lp->lwp_mpflags & LWP_MP_WSTOP) {
				atomic_clear_int(&lp->lwp_mpflags,
				    LWP_MP_WSTOP);
				--p->p_nstopped;
			} else {
				if (bootverbose)
					kprintf("proc_unstop: lwp %d/%d sleeping, not stopped\n",
					    p->p_pid, lp->lwp_tid);
			}
			/* FALLTHROUGH */

		case LSSTOP:
			/*
			 * This handles any lwp's waiting in a tsleep with
			 * SIGCATCH.
			 */
			lwp_signotify(lp);
			break;

		}
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}

	/*
	 * This handles any lwp's waiting in tstop().  We have interlocked
	 * the setting of p_stat by acquiring and releasing each lwp's
	 * token.
	 */
	wakeup(p);
}

/*
 * No requirements.
 */
static int
kern_sigtimedwait(sigset_t waitset, siginfo_t *info, struct timespec *timeout)
{
	sigset_t savedmask, set;
	struct proc *p = curproc;
	struct lwp *lp = curthread->td_lwp;
	int error, sig, hz, timevalid = 0;
	struct timespec rts, ets, ts;
	struct timeval tv;

	error = 0;
	sig = 0;
	ets.tv_sec = 0;		/* silence compiler warning */
	ets.tv_nsec = 0;	/* silence compiler warning */
	SIG_CANTMASK(waitset);
	savedmask = lp->lwp_sigmask;

	if (timeout) {
		if (timeout->tv_sec >= 0 && timeout->tv_nsec >= 0 &&
		    timeout->tv_nsec < 1000000000) {
			timevalid = 1;
			getnanouptime(&rts);
			ets = rts;
			timespecadd(&ets, timeout);
		}
	}

	for (;;) {
		set = lwp_sigpend(lp);
		SIGSETAND(set, waitset);
		if ((sig = sig_ffs(&set)) != 0) {
			SIGFILLSET(lp->lwp_sigmask);
			SIGDELSET(lp->lwp_sigmask, sig);
			SIG_CANTMASK(lp->lwp_sigmask);
			sig = issignal(lp, 1);
			/*
			 * It may be a STOP signal, in which case issignal
			 * returns 0, because we may stop there and a new
			 * signal can come in; we should restart if we got
			 * nothing.
			 */
			if (sig == 0)
				continue;
			else
				break;
		}

		/*
		 * Previous checking got nothing, and we retried but still
		 * got nothing, we should return the error status.
		 */
		if (error)
			break;

		/*
		 * POSIX says this must be checked after looking for pending
		 * signals.
		 */
		if (timeout) {
			if (timevalid == 0) {
				error = EINVAL;
				break;
			}
			getnanouptime(&rts);
			if (timespeccmp(&rts, &ets, >=)) {
				error = EAGAIN;
				break;
			}
			ts = ets;
			timespecsub(&ts, &rts);
			TIMESPEC_TO_TIMEVAL(&tv, &ts);
			hz = tvtohz_high(&tv);
		} else {
			hz = 0;
		}

		lp->lwp_sigmask = savedmask;
		SIGSETNAND(lp->lwp_sigmask, waitset);
		/*
		 * We won't ever be woken up.  Instead, our sleep will
		 * be broken in lwpsignal().
		 */
		error = tsleep(&p->p_sigacts, PCATCH, "sigwt", hz);
		if (timeout) {
			if (error == ERESTART) {
				/* can not restart a timeout wait. */
				error = EINTR;
			} else if (error == EAGAIN) {
				/* will calculate timeout by ourself. */
				error = 0;
			}
		}
		/* Retry ... */
	}

	lp->lwp_sigmask = savedmask;
	if (sig) {
		error = 0;
		bzero(info, sizeof(*info));
		info->si_signo = sig;
		spin_lock(&lp->lwp_spin);
		lwp_delsig(lp, sig);	/* take the signal! */
		spin_unlock(&lp->lwp_spin);

		if (sig == SIGKILL) {
			sigexit(lp, sig);
			/* NOT REACHED */
		}
	}

	return (error);
}
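
/*
 * Userland view (illustrative sketch, not part of the kernel path):
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { 5, 0 };
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigtimedwait(&set, &info, &ts);	waits up to 5 seconds, then
 *					fails with EAGAIN
 *
 * The signals being waited for should normally be blocked beforehand,
 * or they may be delivered to a handler instead of being returned here.
 */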

/*
 * MPALMOSTSAFE
 */
int
sys_sigtimedwait(struct sigtimedwait_args *uap)
{
	struct timespec ts;
	struct timespec *timeout;
	sigset_t set;
	siginfo_t info;
	int error;

	if (uap->timeout) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		timeout = &ts;
	} else {
		timeout = NULL;
	}
	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);
	error = kern_sigtimedwait(set, &info, timeout);
	if (error)
		return (error);
	if (uap->info)
		error = copyout(&info, uap->info, sizeof(info));
	/* Repost if we got an error. */
	/*
	 * XXX lwp
	 *
	 * This could transform a thread-specific signal to another
	 * thread / process pending signal.
	 */
	if (error) {
		ksignal(curproc, info.si_signo);
	} else {
		uap->sysmsg_result = info.si_signo;
	}
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_sigwaitinfo(struct sigwaitinfo_args *uap)
{
	siginfo_t info;
	sigset_t set;
	int error;

	error = copyin(uap->set, &set, sizeof(set));
	if (error)
		return (error);
	error = kern_sigtimedwait(set, &info, NULL);
	if (error)
		return (error);
	if (uap->info)
		error = copyout(&info, uap->info, sizeof(info));
	/* Repost if we got an error. */
	/*
	 * XXX lwp
	 *
	 * This could transform a thread-specific signal to another
	 * thread / process pending signal.
	 */
	if (error) {
		ksignal(curproc, info.si_signo);
	} else {
		uap->sysmsg_result = info.si_signo;
	}
	return (error);
}

/*
 * If the current process has received a signal that would interrupt a
 * system call, return EINTR or ERESTART as appropriate.
 */
int
iscaught(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	int sig;

	if (p) {
		if ((sig = CURSIG(lp)) != 0) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (EWOULDBLOCK);
}

/*
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned.  This is checked after each entry to the system for
 * a syscall or trap (though this can usually be done without calling issignal
 * by checking the pending signal masks in the CURSIG macro).
 *
 * This routine is called via CURSIG/__cursig.  We will acquire and release
 * p->p_token but if the caller needs to interlock the test the caller must
 * also hold p->p_token.
 *
 *	while (sig = CURSIG(curproc))
 *		postsig(sig);
 *
 * MPSAFE
 */
int
issignal(struct lwp *lp, int maytrace)
{
	struct proc *p = lp->lwp_proc;
	sigset_t mask;
	int sig, prop;

	lwkt_gettoken(&p->p_token);

	for (;;) {
		int traced = (p->p_flags & P_TRACED) || (p->p_stops & S_SIG);

		/*
		 * If this process is supposed to stop, stop this thread.
		 */
		if (p->p_stat == SSTOP)
			tstop();

		mask = lwp_sigpend(lp);
		SIGSETNAND(mask, lp->lwp_sigmask);
		if (p->p_flags & P_PPWAIT)
			SIG_STOPSIGMASK(mask);
		if (SIGISEMPTY(mask)) {		/* no signal to send */
			lwkt_reltoken(&p->p_token);
			return (0);
		}
		sig = sig_ffs(&mask);

		STOPEVENT(p, S_SIG, sig);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) {
			spin_lock(&lp->lwp_spin);
			lwp_delsig(lp, sig);
			spin_unlock(&lp->lwp_spin);
			continue;
		}
		if (maytrace &&
		    (p->p_flags & P_TRACED) &&
		    (p->p_flags & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay stopped until
			 * released by the parent.
			 *
			 * NOTE: SSTOP may get cleared during the loop,
			 * but we do not re-notify the parent if we have
			 * to loop several times waiting for the parent
			 * to let us continue.
			 *
			 * XXX not sure if this is still true
			 */
			p->p_xstat = sig;
			proc_stop(p);
			do {
				tstop();
			} while (!trace_req(p) && (p->p_flags & P_TRACED));

			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			spin_lock(&lp->lwp_spin);
			lwp_delsig(lp, sig);	/* clear old signal */
			spin_unlock(&lp->lwp_spin);
			sig = p->p_xstat;
			if (sig == 0)
				continue;

			/*
			 * Put the new signal into p_siglist.  If the
			 * signal is being masked, look for other signals.
			 *
			 * XXX lwp might need a call to ksignal()
			 */
			SIGADDSET(p->p_siglist, sig);
			if (SIGISMEMBER(lp->lwp_sigmask, sig))
				continue;

			/*
			 * If the traced bit got turned off, go back up
			 * to the top to rescan signals.  This ensures
			 * that p_sig* and ps_sigact are consistent.
			 */
			if ((p->p_flags & P_TRACED) == 0)
				continue;
		}

		prop = sigprop(sig);

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
		case (intptr_t)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				kprintf("Process (pid %lu) got signal %d\n",
				    (u_long)p->p_pid, sig);
#endif
				break;		/* == ignore */
			}

			/*
			 * Handle the in-kernel checkpoint action
			 */
			if (prop & SA_CKPT) {
				checkpoint_signal_handler(lp);
				break;
			}

			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if process is member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (p->p_flags & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP))
					break;	/* == ignore */
				if ((p->p_flags & P_WEXIT) == 0) {
					p->p_xstat = sig;
					proc_stop(p);
					tstop();
				}
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else {
				lwkt_reltoken(&p->p_token);
				return (sig);
			}

			/*NOTREACHED*/

		case (intptr_t)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flags & P_TRACED) == 0)
				kprintf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			lwkt_reltoken(&p->p_token);
			return (sig);
		}
		spin_lock(&lp->lwp_spin);
		lwp_delsig(lp, sig);	/* take the signal! */
		spin_unlock(&lp->lwp_spin);
	}
	/* NOTREACHED */
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 *
 * Caller must hold p->p_token
 */
void
postsig(int sig)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct sigacts *ps = p->p_sigacts;
	sig_t action;
	sigset_t returnmask;
	int code;

	KASSERT(sig != 0, ("postsig"));

	KNOTE(&p->p_klist, NOTE_SIGNAL | sig);

	/*
	 * If we are a virtual kernel running an emulated user process
	 * context, switch back to the virtual kernel context before
	 * trying to post the signal.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		struct trapframe *tf = lp->lwp_md.md_regs;
		tf->tf_trapno = 0;
		vkernel_trap(lp, tf);
	}

	spin_lock(&lp->lwp_spin);
	lwp_delsig(lp, sig);
	spin_unlock(&lp->lwp_spin);
	action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
		ktrpsig(lp, sig, action, lp->lwp_flags & LWP_OLDMASK ?
		    &lp->lwp_oldsigmask : &lp->lwp_sigmask, 0);
#endif
	STOPEVENT(p, S_SIG, sig);

	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(lp, sig);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
		KASSERT(action != SIG_IGN && !SIGISMEMBER(lp->lwp_sigmask, sig),
		    ("postsig action"));

		/*
		 * Reset the signal handler if asked to
		 */
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(p->p_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(p->p_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}

		/*
		 * Set the signal mask and calculate the mask to restore
		 * when the signal function returns.
		 *
		 * Special case: user has done a sigsuspend.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigsuspend is what we want
		 * restored after the signal processing is completed.
		 */
		if (lp->lwp_flags & LWP_OLDMASK) {
			returnmask = lp->lwp_oldsigmask;
			lp->lwp_flags &= ~LWP_OLDMASK;
		} else {
			returnmask = lp->lwp_sigmask;
		}

		SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(lp->lwp_sigmask, sig);

		lp->lwp_ru.ru_nsignals++;
		if (lp->lwp_sig != sig) {
			code = 0;
		} else {
			code = lp->lwp_code;
			lp->lwp_code = 0;
			lp->lwp_sig = 0;
		}
		(*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code);
	}
}

/*
 * Kill the current process for stated reason.
 */
void
killproc(struct proc *p, char *why)
{
	log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n",
	    p->p_pid, p->p_comm,
	    p->p_ucred ? p->p_ucred->cr_uid : -1, why);
	ksignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  Calls exit and
 * does not return.
 *
 * This routine does not return.
 */
void
sigexit(struct lwp *lp, int sig)
{
	struct proc *p = lp->lwp_proc;

	lwkt_gettoken(&p->p_token);
	p->p_acflag |= AXSIG;
	if (sigprop(sig) & SA_CORE) {
		lp->lwp_sig = sig;
		/*
		 * Log signals which would cause core dumps
		 * (Log as LOG_INFO to appease those who don't want
		 * these messages.)
		 * XXX : Todo, as well as euid, write out ruid too
		 */
		if (coredump(lp, sig) == 0)
			sig |= WCOREFLAG;
		if (kern_logsigexit)
			log(LOG_INFO,
			    "pid %d (%s), uid %d: exited on signal %d%s\n",
			    p->p_pid, p->p_comm,
			    p->p_ucred ? p->p_ucred->cr_uid : -1,
			    sig &~ WCOREFLAG,
			    sig & WCOREFLAG ? " (core dumped)" : "");
	}
	lwkt_reltoken(&p->p_token);
	exit1(W_EXITCODE(0, sig));
	/* NOTREACHED */
}

static char corefilename[MAXPATHLEN+1] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
    sizeof(corefilename), "process corefile name format string");

/*
 * expand_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a kprintf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; they can be disabled completely
 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
 * This is controlled by the sysctl variable kern.corefile (see above).
 */
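
/*
 * Example expansion (illustrative): with kern.corefile set to
 * "/cores/%U/%N-%P", a crash of pid 4321, uid 1001, command "vi"
 * yields "/cores/1001/vi-4321".
 */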
static char corefilename[MAXPATHLEN+1] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
	      sizeof(corefilename), "process corefile name format string");

/*
 * expand_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a kprintf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; core dumps can be disabled
 * completely by using "/dev/null", or all core files can be stored in
 * "/cores/%U/%N-%P".  This is controlled by the sysctl variable
 * kern.corefile (see above).
 */
static char *
expand_name(const char *name, uid_t uid, pid_t pid)
{
	char *temp;
	char buf[11];	/* pid/uid buffer; 32 bit value: 10 digits + NUL */
	int i, n;
	char *format = corefilename;
	size_t namelen;

	temp = kmalloc(MAXPATHLEN + 1, M_TEMP, M_NOWAIT);
	if (temp == NULL)
		return (NULL);
	namelen = strlen(name);
	for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
		int l;

		switch (format[i]) {
		case '%':	/* Format character */
			i++;
			switch (format[i]) {
			case '%':
				temp[n++] = '%';
				break;
			case 'N':	/* process name */
				if ((n + namelen) > MAXPATHLEN) {
					log(LOG_ERR,
					    "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					kfree(temp, M_TEMP);
					return (NULL);
				}
				memcpy(temp + n, name, namelen);
				n += namelen;
				break;
			case 'P':	/* process id */
				l = ksprintf(buf, "%u", pid);
				if ((n + l) > MAXPATHLEN) {
					log(LOG_ERR,
					    "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					kfree(temp, M_TEMP);
					return (NULL);
				}
				memcpy(temp + n, buf, l);
				n += l;
				break;
			case 'U':	/* user id */
				l = ksprintf(buf, "%u", uid);
				if ((n + l) > MAXPATHLEN) {
					log(LOG_ERR,
					    "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					kfree(temp, M_TEMP);
					return (NULL);
				}
				memcpy(temp + n, buf, l);
				n += l;
				break;
			default:
				log(LOG_ERR,
				    "Unknown format character %c in `%s'\n",
				    format[i], format);
			}
			break;
		default:
			temp[n++] = format[i];
		}
	}
	temp[n] = '\0';
	return (temp);
}
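/*
 * Hypothetical use of expand_name(), mirroring the call in coredump()
 * below.  With, e.g., "sysctl kern.corefile=/cores/%U/%N-%P", a crash
 * of pid 1234 of "sh" owned by uid 1001 expands to
 * "/cores/1001/sh-1234", while the default "%N.core" yields "sh.core".
 */
#if 0
	char *path;

	path = expand_name("sh", 1001, 1234);
	if (path != NULL) {
		/* ... create/open the corefile at 'path' ... */
		kfree(path, M_TEMP);	/* caller frees the M_TEMP buffer */
	}
#endif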
/*
 * Dump a process' core.  The main routine does some
 * policy checking, and creates the name of the coredump;
 * then it passes on a vnode and a size limit to the process-specific
 * coredump routine if there is one; if there _is not_ one, it returns
 * ENOSYS; otherwise it returns the error from the process-specific routine.
 *
 * The parameter `lp' is the lwp which triggered the coredump.
 */
static int
coredump(struct lwp *lp, int sig)
{
	struct proc *p = lp->lwp_proc;
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct flock lf;
	struct nlookupdata nd;
	struct vattr vattr;
	int error, error1;
	char *name;			/* name of corefile */
	off_t limit;

	STOPEVENT(p, S_CORE, 0);

	if ((sugid_coredump == 0 && (p->p_flags & P_SUGID)) ||
	    do_coredump == 0) {
		return (EFAULT);
	}

	/*
	 * Note that the bulk of limit checking is done after
	 * the corefile is created.  The exception is if the limit
	 * for corefiles is 0, in which case we don't bother
	 * creating the corefile at all.  This layout means that
	 * a corefile is truncated instead of not being created,
	 * if it is larger than the limit.
	 */
	limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
	if (limit == 0)
		return (EFBIG);

	name = expand_name(p->p_comm, p->p_ucred->cr_uid, p->p_pid);
	if (name == NULL)
		return (EINVAL);
	error = nlookup_init(&nd, name, UIO_SYSSPACE, NLC_LOCKVP);
	if (error == 0) {
		error = vn_open(&nd, NULL, O_CREAT | FWRITE | O_NOFOLLOW,
				S_IRUSR | S_IWUSR);
	}
	kfree(name, M_TEMP);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}
	vp = nd.nl_open_vp;
	nd.nl_open_vp = NULL;
	nlookup_done(&nd);

	vn_unlock(vp);
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = F_WRLCK;
	error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, 0);
	if (error)
		goto out2;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr) || vattr.va_nlink != 1) {
		error = EFAULT;
		goto out1;
	}

	/* Don't dump to files the current user does not own */
	if (vattr.va_uid != p->p_ucred->cr_uid) {
		error = EFAULT;
		goto out1;
	}

	VATTR_NULL(&vattr);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred);
	p->p_acflag |= ACORE;
	vn_unlock(vp);

	error = p->p_sysent->sv_coredump ?
	    p->p_sysent->sv_coredump(lp, sig, vp, limit) : ENOSYS;

out1:
	lf.l_type = F_UNLCK;
	VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, 0);
out2:
	error1 = vn_close(vp, FWRITE);
	if (error == 0)
		error = error1;
	return (error);
}

/*
 * Nonexistent system call -- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or
 * ignored).
 *
 * MPALMOSTSAFE
 */
/* ARGSUSED */
int
sys_nosys(struct nosys_args *args)
{
	lwpsignal(curproc, curthread->td_lwp, SIGSYS);
	return (EINVAL);
}

/*
 * Send a SIGIO or SIGURG signal to a process or process group using
 * stored credentials rather than those of the current process.
 */
void
pgsigio(struct sigio *sigio, int sig, int checkctty)
{
	if (sigio == NULL)
		return;

	if (sigio->sio_pgid > 0) {
		if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred,
			     sigio->sio_proc))
			ksignal(sigio->sio_proc, sig);
	} else if (sigio->sio_pgid < 0) {
		struct proc *p;
		struct pgrp *pg = sigio->sio_pgrp;

		/*
		 * Must interlock all signals against fork
		 */
		pgref(pg);
		lockmgr(&pg->pg_lock, LK_EXCLUSIVE);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred, p) &&
			    (checkctty == 0 || (p->p_flags & P_CONTROLT)))
				ksignal(p, sig);
		}
		lockmgr(&pg->pg_lock, LK_RELEASE);
		pgrel(pg);
	}
}
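/*
 * Illustrative userland sketch (not compiled into this file; the
 * setup_sigio() and sigio_handler() names are hypothetical): the usual
 * way a process arranges for the SIGIO delivery that pgsigio()
 * performs, i.e. claim ownership of the descriptor and enable
 * async-I/O notification on it.
 */
#if 0
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void
sigio_handler(int sig)
{
	/* Data is ready; the main loop read()s from the descriptor. */
}

static int
setup_sigio(int fd)
{
	int flags;

	signal(SIGIO, sigio_handler);
	if (fcntl(fd, F_SETOWN, getpid()) < 0)	/* route SIGIO here */
		return (-1);
	if ((flags = fcntl(fd, F_GETFL, 0)) < 0)
		return (-1);
	if (fcntl(fd, F_SETFL, flags | O_ASYNC) < 0)
		return (-1);
	return (0);
}
#endif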
This 2449 * could be avoided by using a signal-specific knote list, but probably 2450 * isn't worth the trouble. 2451 */ 2452 static int 2453 filt_signal(struct knote *kn, long hint) 2454 { 2455 if (hint & NOTE_SIGNAL) { 2456 hint &= ~NOTE_SIGNAL; 2457 2458 if (kn->kn_id == hint) 2459 kn->kn_data++; 2460 } 2461 return (kn->kn_data != 0); 2462 } 2463