/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 * $FreeBSD: src/sys/kern/kern_sig.c,v 1.72.2.17 2003/05/16 16:34:34 obrien Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysmsg.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/pioctl.h>
#include <sys/acct.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/syslog.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/unistd.h>
#include <sys/kern_syscall.h>
#include <sys/vkernel.h>

#include <sys/signal2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/smp.h>

static int      coredump(struct lwp *, int);
static char     *expand_name(const char *, uid_t, pid_t);
static int      dokillpg(int sig, int pgid, int all);
static int      sig_ffs(sigset_t *set);
static int      sigprop(int sig);
static void     lwp_signotify(struct lwp *lp);
static void     lwp_signotify_remote(void *arg);
static int      kern_sigtimedwait(sigset_t set, siginfo_t *info,
                    struct timespec *timeout);
static void     proc_stopwait(struct proc *p);

static int      filt_sigattach(struct knote *kn);
static void     filt_sigdetach(struct knote *kn);
static int      filt_signal(struct knote *kn, long hint);

struct filterops sig_filtops =
        { FILTEROP_MPSAFE, filt_sigattach, filt_sigdetach, filt_signal };

static int      kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
    &kern_logsigexit, 0,
    "Log processes quitting on abnormal signals to syslog(3)");

/*
 * Can process p send the signal sig to process q?  Only processes within
 * the current reaper or children of the current reaper can be signaled.
 * Normally the reaper itself cannot be signalled, unless initok is set.
 */
#define CANSIGNAL(q, sig, initok)                               \
        ((!p_trespass(curproc->p_ucred, (q)->p_ucred) &&        \
            reaper_sigtest(curproc, p, initok)) ||              \
         ((sig) == SIGCONT && (q)->p_session == curproc->p_session))

/*
 * Policy -- Can real uid ruid with ucred uc send a signal to process q?
 */
#define CANSIGIO(ruid, uc, q)                                   \
        ((uc)->cr_uid == 0 ||                                   \
         (ruid) == (q)->p_ucred->cr_ruid ||                     \
         (uc)->cr_uid == (q)->p_ucred->cr_ruid ||               \
         (ruid) == (q)->p_ucred->cr_uid ||                      \
         (uc)->cr_uid == (q)->p_ucred->cr_uid)
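
/*
 * Illustrative note on the CANSIGIO policy above (hypothetical values,
 * not additional kernel logic): the sender passes if it is root, or if
 * any of its real/effective uids matches any of the target's.  For
 * example, a process with ruid == euid == 1001 may still signal a
 * setuid target whose cr_ruid is 1001 even though the target's
 * effective cr_uid is 0.
 */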

int sugid_coredump;
SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
    &sugid_coredump, 0, "Enable coredumping set user/group ID processes");

static int      do_coredump = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
    &do_coredump, 0, "Enable/Disable coredumps");

/*
 * Signal properties and actions.
 * The array below categorizes the signals and their default actions
 * according to the following properties:
 */
#define SA_KILL         0x01            /* terminates process by default */
#define SA_CORE         0x02            /* ditto and coredumps */
#define SA_STOP         0x04            /* suspend process */
#define SA_TTYSTOP      0x08            /* ditto, from tty */
#define SA_IGNORE       0x10            /* ignore by default */
#define SA_CONT         0x20            /* continue if suspended */
#define SA_CANTMASK     0x40            /* non-maskable, catchable */
#define SA_CKPT         0x80            /* checkpoint process */


static int sigproptbl[NSIG] = {
        SA_KILL,                /* SIGHUP */
        SA_KILL,                /* SIGINT */
        SA_KILL|SA_CORE,        /* SIGQUIT */
        SA_KILL|SA_CORE,        /* SIGILL */
        SA_KILL|SA_CORE,        /* SIGTRAP */
        SA_KILL|SA_CORE,        /* SIGABRT */
        SA_KILL|SA_CORE,        /* SIGEMT */
        SA_KILL|SA_CORE,        /* SIGFPE */
        SA_KILL,                /* SIGKILL */
        SA_KILL|SA_CORE,        /* SIGBUS */
        SA_KILL|SA_CORE,        /* SIGSEGV */
        SA_KILL|SA_CORE,        /* SIGSYS */
        SA_KILL,                /* SIGPIPE */
        SA_KILL,                /* SIGALRM */
        SA_KILL,                /* SIGTERM */
        SA_IGNORE,              /* SIGURG */
        SA_STOP,                /* SIGSTOP */
        SA_STOP|SA_TTYSTOP,     /* SIGTSTP */
        SA_IGNORE|SA_CONT,      /* SIGCONT */
        SA_IGNORE,              /* SIGCHLD */
        SA_STOP|SA_TTYSTOP,     /* SIGTTIN */
        SA_STOP|SA_TTYSTOP,     /* SIGTTOU */
        SA_IGNORE,              /* SIGIO */
        SA_KILL,                /* SIGXCPU */
        SA_KILL,                /* SIGXFSZ */
        SA_KILL,                /* SIGVTALRM */
        SA_KILL,                /* SIGPROF */
        SA_IGNORE,              /* SIGWINCH */
        SA_IGNORE,              /* SIGINFO */
        SA_KILL,                /* SIGUSR1 */
        SA_KILL,                /* SIGUSR2 */
        SA_IGNORE,              /* SIGTHR */
        SA_CKPT,                /* SIGCKPT */
        SA_KILL|SA_CKPT,        /* SIGCKPTEXIT */
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
        SA_IGNORE,
};

__read_mostly sigset_t sigcantmask_mask;

static __inline int
sigprop(int sig)
{

        if (sig > 0 && sig < NSIG)
                return (sigproptbl[_SIG_IDX(sig)]);

        return (0);
}
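
/*
 * Example (illustrative): sigprop(SIGQUIT) returns SA_KILL|SA_CORE, so
 * the default action both terminates the process and dumps core, while
 * out-of-range values such as sigprop(0) return 0.
 */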

static __inline int
sig_ffs(sigset_t *set)
{
        int i;

        for (i = 0; i < _SIG_WORDS; i++)
                if (set->__bits[i])
                        return (ffs(set->__bits[i]) + (i * 32));
        return (0);
}
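
/*
 * Example (illustrative): signal sig is stored in bit (sig - 1) of a
 * set word, so ffs()'s 1-based result is already a signal number.
 * With SIGHUP (1) and SIGTERM (15) both pending, sig_ffs() returns 1;
 * for signals above 32 the (i * 32) term supplies the word offset.
 */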

/*
 * Allows us to populate siginfo->si_pid and si_uid in the target process
 * (p) from the originating thread (td).  This function must work properly
 * even if a kernel thread is sending the signal.
 *
 * NOTE: Signals are not queued, so if multiple signals are received the
 *	 signal handler will only see the most recent pid and uid for any
 *	 given signal number.
 */
static __inline void
sigsetfrompid(thread_t td, struct proc *p, int sig)
{
        struct sigacts *sap;

        if ((sap = p->p_sigacts) == NULL)
                return;
        if (td->td_proc) {
                sap->ps_frominfo[sig].pid = td->td_proc->p_pid;
                sap->ps_frominfo[sig].uid = td->td_ucred->cr_uid;
        } else {
                sap->ps_frominfo[sig].pid = 0;
                sap->ps_frominfo[sig].uid = 0;
        }
}

/*
 * No requirements.
 */
int
kern_sigaction(int sig, struct sigaction *act, struct sigaction *oact)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct lwp *lp;
        struct sigacts *ps = p->p_sigacts;

        if (sig <= 0 || sig >= _SIG_MAXSIG)
                return (EINVAL);

        lwkt_gettoken(&p->p_token);

        if (oact) {
                oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
                oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
                oact->sa_flags = 0;
                if (SIGISMEMBER(ps->ps_sigonstack, sig))
                        oact->sa_flags |= SA_ONSTACK;
                if (!SIGISMEMBER(ps->ps_sigintr, sig))
                        oact->sa_flags |= SA_RESTART;
                if (SIGISMEMBER(ps->ps_sigreset, sig))
                        oact->sa_flags |= SA_RESETHAND;
                if (SIGISMEMBER(ps->ps_signodefer, sig))
                        oact->sa_flags |= SA_NODEFER;
                if (SIGISMEMBER(ps->ps_siginfo, sig))
                        oact->sa_flags |= SA_SIGINFO;
                if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDSTOP)
                        oact->sa_flags |= SA_NOCLDSTOP;
                if (sig == SIGCHLD && p->p_sigacts->ps_flag & PS_NOCLDWAIT)
                        oact->sa_flags |= SA_NOCLDWAIT;
        }
        if (act) {
                /*
                 * Check for invalid requests.  KILL and STOP cannot be
                 * caught.
                 */
                if (sig == SIGKILL || sig == SIGSTOP) {
                        if (act->sa_handler != SIG_DFL) {
                                lwkt_reltoken(&p->p_token);
                                return (EINVAL);
                        }
                }

                /*
                 * Change setting atomically.
                 */
                ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
                SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
                if (act->sa_flags & SA_SIGINFO) {
                        ps->ps_sigact[_SIG_IDX(sig)] =
                            (__sighandler_t *)act->sa_sigaction;
                        SIGADDSET(ps->ps_siginfo, sig);
                } else {
                        ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
                        SIGDELSET(ps->ps_siginfo, sig);
                }
                if (!(act->sa_flags & SA_RESTART))
                        SIGADDSET(ps->ps_sigintr, sig);
                else
                        SIGDELSET(ps->ps_sigintr, sig);
                if (act->sa_flags & SA_ONSTACK)
                        SIGADDSET(ps->ps_sigonstack, sig);
                else
                        SIGDELSET(ps->ps_sigonstack, sig);
                if (act->sa_flags & SA_RESETHAND)
                        SIGADDSET(ps->ps_sigreset, sig);
                else
                        SIGDELSET(ps->ps_sigreset, sig);
                if (act->sa_flags & SA_NODEFER)
                        SIGADDSET(ps->ps_signodefer, sig);
                else
                        SIGDELSET(ps->ps_signodefer, sig);
                if (sig == SIGCHLD) {
                        if (act->sa_flags & SA_NOCLDSTOP)
                                p->p_sigacts->ps_flag |= PS_NOCLDSTOP;
                        else
                                p->p_sigacts->ps_flag &= ~PS_NOCLDSTOP;
                        if (act->sa_flags & SA_NOCLDWAIT) {
                                /*
                                 * Paranoia: since SA_NOCLDWAIT is implemented
                                 * by reparenting the dying child to PID 1 (and
                                 * trust it to reap the zombie), PID 1 itself
                                 * is forbidden to set SA_NOCLDWAIT.
                                 */
                                if (p->p_pid == 1)
                                        p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT;
                                else
                                        p->p_sigacts->ps_flag |= PS_NOCLDWAIT;
                        } else {
                                p->p_sigacts->ps_flag &= ~PS_NOCLDWAIT;
                        }
                        if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
                                ps->ps_flag |= PS_CLDSIGIGN;
                        else
                                ps->ps_flag &= ~PS_CLDSIGIGN;
                }
                /*
                 * Set bit in p_sigignore for signals that are set to SIG_IGN,
                 * and for signals set to SIG_DFL where the default is to
                 * ignore.  However, don't put SIGCONT in p_sigignore, as we
                 * have to restart the process.
                 *
                 * Also remove the signal from the process and lwp signal
                 * list.
                 */
                if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
                    (sigprop(sig) & SA_IGNORE &&
                     ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
                        SIGDELSET_ATOMIC(p->p_siglist, sig);
                        FOREACH_LWP_IN_PROC(lp, p) {
                                spin_lock(&lp->lwp_spin);
                                SIGDELSET(lp->lwp_siglist, sig);
                                spin_unlock(&lp->lwp_spin);
                        }
                        if (sig != SIGCONT) {
                                /* easier in ksignal */
                                SIGADDSET(p->p_sigignore, sig);
                        }
                        SIGDELSET(p->p_sigcatch, sig);
                } else {
                        SIGDELSET(p->p_sigignore, sig);
                        if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
                                SIGDELSET(p->p_sigcatch, sig);
                        else
                                SIGADDSET(p->p_sigcatch, sig);
                }
        }
        lwkt_reltoken(&p->p_token);
        return (0);
}

int
sys_sigaction(struct sysmsg *sysmsg, const struct sigaction_args *uap)
{
        struct sigaction act, oact;
        struct sigaction *actp, *oactp;
        int error;

        actp = (uap->act != NULL) ? &act : NULL;
        oactp = (uap->oact != NULL) ? &oact : NULL;
        if (actp) {
                error = copyin(uap->act, actp, sizeof(act));
                if (error)
                        return (error);
        }
        error = kern_sigaction(uap->sig, actp, oactp);
        if (oactp && !error) {
                error = copyout(oactp, uap->oact, sizeof(oact));
        }
        return (error);
}
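
/*
 * Illustrative userland sketch of the interface implemented above (not
 * kernel code; the handler name is hypothetical).  The kernel stores
 * the inverse of SA_RESTART in ps_sigintr, so omitting SA_RESTART as
 * done here makes interrupted syscalls fail with EINTR:
 *
 *	struct sigaction sa;
 *	bzero(&sa, sizeof(sa));
 *	sa.sa_sigaction = my_handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */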

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct proc *p)
{
        int i;

        for (i = 1; i <= NSIG; i++) {
                if (sigprop(i) & SA_IGNORE && i != SIGCONT)
                        SIGADDSET(p->p_sigignore, i);
        }

        /*
         * Also initialize signal-related global state.
         */
        SIGSETOR_CANTMASK(sigcantmask_mask);
}

/*
 * Reset signals for an exec of the specified process.
 */
void
execsigs(struct proc *p)
{
        struct sigacts *ps = p->p_sigacts;
        struct lwp *lp;
        int sig;

        lp = ONLY_LWP_IN_PROC(p);

        /*
         * Reset caught signals.  Held signals remain held
         * through p_sigmask (unless they were caught,
         * and are now ignored by default).
         */
        while (SIGNOTEMPTY(p->p_sigcatch)) {
                sig = sig_ffs(&p->p_sigcatch);
                SIGDELSET(p->p_sigcatch, sig);
                if (sigprop(sig) & SA_IGNORE) {
                        if (sig != SIGCONT)
                                SIGADDSET(p->p_sigignore, sig);
                        SIGDELSET_ATOMIC(p->p_siglist, sig);
                        /* don't need spinlock */
                        SIGDELSET(lp->lwp_siglist, sig);
                }
                ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
        }

        /*
         * Reset stack state to the user stack.
         * Clear set of signals caught on the signal stack.
         */
        lp->lwp_sigstk.ss_flags = SS_DISABLE;
        lp->lwp_sigstk.ss_size = 0;
        lp->lwp_sigstk.ss_sp = NULL;
        lp->lwp_flags &= ~LWP_ALTSTACK;
        /*
         * Reset no zombies if child dies flag as Solaris does.
         */
        p->p_sigacts->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
        if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
                ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
}

/*
 * kern_sigprocmask() - MP SAFE ONLY IF p == curproc
 *
 * Manipulate signal mask.  This routine is MP SAFE *ONLY* if
 * p == curproc.
 */
int
kern_sigprocmask(int how, sigset_t *set, sigset_t *oset)
{
        struct thread *td = curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p = td->td_proc;
        int error;

        lwkt_gettoken(&p->p_token);

        if (oset != NULL)
                *oset = lp->lwp_sigmask;

        error = 0;
        if (set != NULL) {
                switch (how) {
                case SIG_BLOCK:
                        SIG_CANTMASK(*set);
                        SIGSETOR(lp->lwp_sigmask, *set);
                        break;
                case SIG_UNBLOCK:
                        SIGSETNAND(lp->lwp_sigmask, *set);
                        break;
                case SIG_SETMASK:
                        SIG_CANTMASK(*set);
                        lp->lwp_sigmask = *set;
                        break;
                default:
                        error = EINVAL;
                        break;
                }
        }

        lwkt_reltoken(&p->p_token);

        return (error);
}
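
/*
 * The three operations above are plain set algebra (summary; SIGKILL
 * and SIGSTOP are always forced clear by SIG_CANTMASK first):
 *
 *	SIG_BLOCK:	lwp_sigmask |= *set
 *	SIG_UNBLOCK:	lwp_sigmask &= ~*set
 *	SIG_SETMASK:	lwp_sigmask = *set
 */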

/*
 * sigprocmask()
 *
 * MPSAFE
 */
int
sys_sigprocmask(struct sysmsg *sysmsg, const struct sigprocmask_args *uap)
{
        sigset_t set, oset;
        sigset_t *setp, *osetp;
        int error;

        setp = (uap->set != NULL) ? &set : NULL;
        osetp = (uap->oset != NULL) ? &oset : NULL;
        if (setp) {
                error = copyin(uap->set, setp, sizeof(set));
                if (error)
                        return (error);
        }
        error = kern_sigprocmask(uap->how, setp, osetp);
        if (osetp && !error) {
                error = copyout(osetp, uap->oset, sizeof(oset));
        }
        return (error);
}

/*
 * MPSAFE
 */
int
kern_sigpending(sigset_t *set)
{
        struct lwp *lp = curthread->td_lwp;

        *set = lwp_sigpend(lp);

        return (0);
}

/*
 * MPSAFE
 */
int
sys_sigpending(struct sysmsg *sysmsg, const struct sigpending_args *uap)
{
        sigset_t set;
        int error;

        error = kern_sigpending(&set);

        if (error == 0)
                error = copyout(&set, uap->set, sizeof(set));
        return (error);
}

/*
 * Suspend process until signal, providing mask to be set
 * in the meantime.
 *
 * MPSAFE
 */
int
kern_sigsuspend(sigset_t *set)
{
        struct thread *td = curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p = td->td_proc;
        struct sigacts *ps = p->p_sigacts;

        /*
         * When returning from sigsuspend, we want
         * the old mask to be restored after the
         * signal handler has finished.  Thus, we
         * save it here and mark the sigacts structure
         * to indicate this.
         */
        lp->lwp_oldsigmask = lp->lwp_sigmask;
        lp->lwp_flags |= LWP_OLDMASK;

        SIG_CANTMASK(*set);
        lp->lwp_sigmask = *set;
        while (tsleep(ps, PCATCH, "pause", 0) == 0)
                /* void */;
        /* always return EINTR rather than ERESTART... */
        return (EINTR);
}

/*
 * Note nonstandard calling convention: libc stub passes mask, not
 * pointer, to save a copyin.
 *
 * MPSAFE
 */
int
sys_sigsuspend(struct sysmsg *sysmsg, const struct sigsuspend_args *uap)
{
        sigset_t mask;
        int error;

        error = copyin(uap->sigmask, &mask, sizeof(mask));
        if (error)
                return (error);

        error = kern_sigsuspend(&mask);

        return (error);
}
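
/*
 * Illustrative userland sketch (not kernel code) of the race-free wait
 * that sigsuspend() enables: block the signal, test the condition,
 * then atomically restore the old mask and sleep.  The child_done flag
 * is hypothetical, set by a SIGCHLD handler:
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!child_done)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */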

/*
 * MPSAFE
 */
int
kern_sigaltstack(stack_t *ss, stack_t *oss)
{
        struct thread *td = curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p = td->td_proc;

        if ((lp->lwp_flags & LWP_ALTSTACK) == 0)
                lp->lwp_sigstk.ss_flags |= SS_DISABLE;

        if (oss)
                *oss = lp->lwp_sigstk;

        if (ss) {
                if (ss->ss_flags & ~SS_DISABLE)
                        return (EINVAL);
                if (ss->ss_flags & SS_DISABLE) {
                        if (lp->lwp_sigstk.ss_flags & SS_ONSTACK)
                                return (EPERM);
                        lp->lwp_flags &= ~LWP_ALTSTACK;
                        lp->lwp_sigstk.ss_flags = ss->ss_flags;
                } else {
                        if (ss->ss_size < p->p_sysent->sv_minsigstksz)
                                return (ENOMEM);
                        lp->lwp_flags |= LWP_ALTSTACK;
                        lp->lwp_sigstk = *ss;
                }
        }

        return (0);
}

/*
 * MPSAFE
 */
int
sys_sigaltstack(struct sysmsg *sysmsg, const struct sigaltstack_args *uap)
{
        stack_t ss, oss;
        int error;

        if (uap->ss) {
                error = copyin(uap->ss, &ss, sizeof(ss));
                if (error)
                        return (error);
        }

        error = kern_sigaltstack(uap->ss ? &ss : NULL, uap->oss ? &oss : NULL);

        if (error == 0 && uap->oss)
                error = copyout(&oss, uap->oss, sizeof(*uap->oss));
        return (error);
}
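
/*
 * Illustrative userland sketch (not kernel code): an alternate stack
 * is what allows a process to catch a SIGSEGV caused by exhausting its
 * normal stack.  ss_size must be at least sv_minsigstksz, which the
 * userland SIGSTKSZ constant satisfies:
 *
 *	stack_t ss;
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *	(then install the SIGSEGV handler with SA_ONSTACK set)
 */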

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
struct killpg_info {
        int nfound;
        int sig;
};

static int killpg_all_callback(struct proc *p, void *data);

static int
dokillpg(int sig, int pgid, int all)
{
        struct killpg_info info;
        struct proc *cp = curproc;
        struct proc *p;
        struct pgrp *pgrp;

        info.nfound = 0;
        info.sig = sig;

        if (all) {
                /*
                 * broadcast
                 */
                allproc_scan(killpg_all_callback, &info, 0);
        } else {
                if (pgid == 0) {
                        /*
                         * zero pgid means send to my process group.
                         */
                        pgrp = cp->p_pgrp;
                        pgref(pgrp);
                } else {
                        pgrp = pgfind(pgid);
                        if (pgrp == NULL)
                                return (ESRCH);
                }

                /*
                 * Must interlock all signals against fork
                 */
                lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
                LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                        if (p->p_pid <= 1 ||
                            p->p_stat == SZOMB ||
                            (p->p_flags & P_SYSTEM) ||
                            !CANSIGNAL(p, sig, 0)) {
                                continue;
                        }
                        ++info.nfound;
                        if (sig)
                                ksignal(p, sig);
                }
                lockmgr(&pgrp->pg_lock, LK_RELEASE);
                pgrel(pgrp);
        }
        return (info.nfound ? 0 : ESRCH);
}

static int
killpg_all_callback(struct proc *p, void *data)
{
        struct killpg_info *info = data;

        if (p->p_pid <= 1 || (p->p_flags & P_SYSTEM) ||
            p == curproc || !CANSIGNAL(p, info->sig, 0)) {
                return (0);
        }
        ++info->nfound;
        if (info->sig)
                ksignal(p, info->sig);
        return (0);
}

/*
 * Send a general signal to a process or LWPs within that process.
 *
 * Note that new signals cannot be sent if a process is exiting or already
 * a zombie, but we return success anyway as userland is likely to not handle
 * the race properly.
 *
 * No requirements.
 */
int
kern_kill(int sig, pid_t pid, lwpid_t tid)
{
        int t;

        if ((u_int)sig >= _SIG_MAXSIG)
                return (EINVAL);

        if (pid > 0) {
                struct proc *p;
                struct lwp *lp = NULL;

                /*
                 * Sending a signal to pid 1 as root requires that we
                 * are not reboot-restricted.
                 */
                if (pid == 1 && caps_priv_check_self(SYSCAP_NOREBOOT))
                        return EPERM;

                /*
                 * Send a signal to a single process.  If the kill() is
                 * racing an exiting process which has not yet been reaped
                 * act as though the signal was delivered successfully but
                 * don't actually try to deliver the signal.
                 */
                if ((p = pfind(pid)) == NULL) {
                        if ((p = zpfind(pid)) == NULL)
                                return (ESRCH);
                        PRELE(p);
                        return (0);
                }
                if (p != curproc) {
                        lwkt_gettoken_shared(&p->p_token);
                        if (!CANSIGNAL(p, sig, 1)) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                return (EPERM);
                        }
                        lwkt_reltoken(&p->p_token);
                }

                /*
                 * NOP if the process is exiting.  Note that lwpsignal() is
                 * called directly with P_WEXIT set to kill individual LWPs
                 * during exit, which is allowed.
                 */
                if (p->p_flags & P_WEXIT) {
                        PRELE(p);
                        return (0);
                }
                if (tid != -1) {
                        lwkt_gettoken_shared(&p->p_token);
                        lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, tid);
                        if (lp == NULL) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                return (ESRCH);
                        }
                        LWPHOLD(lp);
                        lwkt_reltoken(&p->p_token);
                }
                if (sig)
                        lwpsignal(p, lp, sig);
                if (lp)
                        LWPRELE(lp);
                PRELE(p);

                return (0);
        }

        /*
         * If we come here, pid is a special broadcast pid.
         * This doesn't mix with a tid.
         */
        if (tid != -1)
                return (EINVAL);

        switch (pid) {
        case -1:                /* broadcast signal */
                t = (dokillpg(sig, 0, 1));
                break;
        case 0:                 /* signal own process group */
                t = (dokillpg(sig, 0, 0));
                break;
        default:                /* negative explicit process group */
                t = (dokillpg(sig, -pid, 0));
                break;
        }
        return t;
}
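
/*
 * Summary of the pid encodings handled above:
 *
 *	pid > 0		signal that single process
 *	pid == 0	signal the caller's process group
 *	pid == -1	broadcast to all signalable processes
 *	pid < -1	signal process group -pid
 */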

int
sys_kill(struct sysmsg *sysmsg, const struct kill_args *uap)
{
        int error;

        error = kern_kill(uap->signum, uap->pid, -1);
        return (error);
}

int
sys_lwp_kill(struct sysmsg *sysmsg, const struct lwp_kill_args *uap)
{
        int error;
        pid_t pid = uap->pid;

        /*
         * A tid is mandatory for lwp_kill(), otherwise
         * you could simply use kill().
         */
        if (uap->tid == -1)
                return (EINVAL);

        /*
         * To save on a getpid() function call for intra-process
         * signals, pid == -1 means current process.
         */
        if (pid == -1)
                pid = curproc->p_pid;

        error = kern_kill(uap->signum, pid, uap->tid);
        return (error);
}
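
/*
 * Illustrative userland sketch (not kernel code): signaling a single
 * thread of the calling process, where target_tid is a hypothetical
 * lwp id obtained e.g. from lwp_gettid():
 *
 *	lwp_kill(-1, target_tid, SIGUSR1);
 */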

/*
 * Send a signal to a process group.
 */
void
gsignal(int pgid, int sig)
{
        struct pgrp *pgrp;

        if (pgid && (pgrp = pgfind(pgid)))
                pgsignal(pgrp, sig, 0);
}

/*
 * Send a signal to a process group.  If checktty is 1,
 * limit to members which have a controlling terminal.
 *
 * pg_lock interlocks against a fork that might be in progress, to
 * ensure that the new child process picks up the signal.
 */
void
pgsignal(struct pgrp *pgrp, int sig, int checkctty)
{
        struct proc *p;

        /*
         * Must interlock all signals against fork
         */
        if (pgrp) {
                pgref(pgrp);
                lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
                LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                        if (checkctty == 0 || p->p_flags & P_CONTROLT)
                                ksignal(p, sig);
                }
                lockmgr(&pgrp->pg_lock, LK_RELEASE);
                pgrel(pgrp);
        }
}

/*
 * Send a signal caused by a trap to the current lwp.  If it will be caught
 * immediately, deliver it with correct code.  Otherwise, post it normally.
 *
 * These signals may ONLY be delivered to the specified lwp and may never
 * be delivered to the process generically.
 *
 * lpmap->blockallsigs is ignored.
 */
void
trapsignal(struct lwp *lp, int sig, u_long code)
{
        struct proc *p = lp->lwp_proc;
        struct sigacts *ps = p->p_sigacts;

        /*
         * If we are a virtual kernel running an emulated user process
         * context, switch back to the virtual kernel context before
         * trying to post the signal.
         */
        if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
                struct trapframe *tf = lp->lwp_md.md_regs;
                tf->tf_trapno = 0;
                vkernel_trap(lp, tf);
        }

        if ((p->p_flags & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) &&
            !SIGISMEMBER(lp->lwp_sigmask, sig)) {
                lp->lwp_ru.ru_nsignals++;
#ifdef KTRACE
                if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
                        ktrpsig(lp, sig, ps->ps_sigact[_SIG_IDX(sig)],
                                &lp->lwp_sigmask, code);
#endif
                (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
                                           &lp->lwp_sigmask, code);
                SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
                if (!SIGISMEMBER(ps->ps_signodefer, sig))
                        SIGADDSET(lp->lwp_sigmask, sig);
                if (SIGISMEMBER(ps->ps_sigreset, sig)) {
                        /*
                         * See kern_sigaction() for origin of this code.
                         */
                        SIGDELSET(p->p_sigcatch, sig);
                        if (sig != SIGCONT &&
                            sigprop(sig) & SA_IGNORE)
                                SIGADDSET(p->p_sigignore, sig);
                        ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
                }
        } else {
                lp->lwp_code = code;    /* XXX for core dump/debugger */
                lp->lwp_sig = sig;      /* XXX to verify code */
                lwpsignal(p, lp, sig);
        }
}

/*
 * Find a suitable lwp to deliver the signal to.  Returns NULL if all
 * lwps hold the signal blocked.
 *
 * Caller must hold p->p_token.
 *
 * Returns a lp or NULL.  If non-NULL the lp is held and its token is
 * acquired.
 */
static struct lwp *
find_lwp_for_signal(struct proc *p, int sig)
{
        struct lwp *lp;
        struct lwp *run, *sleep, *stop;

        /*
         * If the running/preempted thread belongs to the proc to which
         * the signal is being delivered and this thread does not block
         * the signal, then we can avoid a context switch by delivering
         * the signal to this thread, because it will return to userland
         * soon anyways.
         */
        lp = lwkt_preempted_proc();
        if (lp != NULL && lp->lwp_proc == p) {
                LWPHOLD(lp);
                lwkt_gettoken(&lp->lwp_token);
                if (!SIGISMEMBER(lp->lwp_sigmask, sig)) {
                        /* return w/ token held */
                        return (lp);
                }
                lwkt_reltoken(&lp->lwp_token);
                LWPRELE(lp);
        }

        run = sleep = stop = NULL;
        FOREACH_LWP_IN_PROC(lp, p) {
                /*
                 * If the signal is being blocked by the lwp, then this
                 * lwp is not eligible for receiving the signal.
                 */
                LWPHOLD(lp);
                lwkt_gettoken(&lp->lwp_token);

                if (SIGISMEMBER(lp->lwp_sigmask, sig)) {
                        lwkt_reltoken(&lp->lwp_token);
                        LWPRELE(lp);
                        continue;
                }

                switch (lp->lwp_stat) {
                case LSRUN:
                        if (sleep) {
                                lwkt_token_swap();
                                lwkt_reltoken(&sleep->lwp_token);
                                LWPRELE(sleep);
                                sleep = NULL;
                                run = lp;
                        } else if (stop) {
                                lwkt_token_swap();
                                lwkt_reltoken(&stop->lwp_token);
                                LWPRELE(stop);
                                stop = NULL;
                                run = lp;
                        } else {
                                run = lp;
                        }
                        break;
                case LSSLEEP:
                        if (lp->lwp_flags & LWP_SINTR) {
                                if (sleep) {
                                        lwkt_reltoken(&lp->lwp_token);
                                        LWPRELE(lp);
                                } else if (stop) {
                                        lwkt_token_swap();
                                        lwkt_reltoken(&stop->lwp_token);
                                        LWPRELE(stop);
                                        stop = NULL;
                                        sleep = lp;
                                } else {
                                        sleep = lp;
                                }
                        } else {
                                lwkt_reltoken(&lp->lwp_token);
                                LWPRELE(lp);
                        }
                        break;
                case LSSTOP:
                        if (sleep) {
                                lwkt_reltoken(&lp->lwp_token);
                                LWPRELE(lp);
                        } else if (stop) {
                                lwkt_reltoken(&lp->lwp_token);
                                LWPRELE(lp);
                        } else {
                                stop = lp;
                        }
                        break;
                }
                if (run)
                        break;
        }

        if (run != NULL)
                return (run);
        else if (sleep != NULL)
                return (sleep);
        else
                return (stop);
}
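
/*
 * Note on the selection above (summary): a runnable lwp is preferred
 * over an interruptibly-sleeping one, and either over a stopped one,
 * so the signal is noticed with the least latency.  The
 * lwkt_token_swap() calls reorder the two most recently acquired
 * tokens so the token of the lwp being dropped can be released without
 * disturbing the one being kept.
 */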

/*
 * Send the signal to the process.  If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *   o When a stop signal is sent to a sleeping process that takes the
 *     default action, the process is stopped without awakening it.
 *   o SIGCONT restarts stopped processes (or puts them back to sleep)
 *     regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 *
 * If the caller wishes to call this function from a hard code section the
 * caller must already hold p->p_token (see kern_clock.c).
 *
 * No requirements.
 */
void
ksignal(struct proc *p, int sig)
{
        lwpsignal(p, NULL, sig);
}

/*
 * The core for ksignal.  lp may be NULL, then a suitable thread
 * will be chosen.  If not, lp MUST be a member of p.
 *
 * If the caller wishes to call this function from a hard code section the
 * caller must already hold p->p_token.
 *
 * No requirements.
 */
void
lwpsignal(struct proc *p, struct lwp *lp, int sig)
{
        struct proc *q;
        sig_t action;
        int prop;

        if (sig >= _SIG_MAXSIG || sig <= 0) {
                kprintf("lwpsignal: signal %d\n", sig);
                panic("lwpsignal signal number");
        }

        KKASSERT(lp == NULL || lp->lwp_proc == p);

        /*
         * We don't want to race... well, all sorts of things.  Get appropriate
         * tokens.
         *
         * Don't try to deliver a generic signal to an exiting process,
         * the signal structures could be in flux.  We check the LWP later
         * on.
         */
        PHOLD(p);
        if (lp) {
                LWPHOLD(lp);
                lwkt_gettoken(&lp->lwp_token);
        } else {
                lwkt_gettoken(&p->p_token);
                if (p->p_flags & P_WEXIT)
                        goto out;
        }

        prop = sigprop(sig);

        /*
         * If proc is traced, always give parent a chance;
         * if signal event is tracked by procfs, give *that*
         * a chance, as well.
         */
        if ((p->p_flags & P_TRACED) || (p->p_stops & S_SIG)) {
                action = SIG_DFL;
        } else {
                /*
                 * Do not try to deliver signals to an exiting lwp other
                 * than SIGKILL.  Note that we must still deliver the signal
                 * if P_WEXIT is set in the process flags.
                 */
                if (lp && (lp->lwp_mpflags & LWP_MP_WEXIT) && sig != SIGKILL) {
                        lwkt_reltoken(&lp->lwp_token);
                        LWPRELE(lp);
                        PRELE(p);
                        return;
                }

                /*
                 * If the signal is being ignored, then we forget about
                 * it immediately.  NOTE: We don't set SIGCONT in p_sigignore,
                 * and if it is set to SIG_IGN, action will be SIG_DFL here.
                 */
                if (SIGISMEMBER(p->p_sigignore, sig)) {
                        /*
                         * Even if a signal is set SIG_IGN, it may still be
                         * lurking in a kqueue.
                         */
                        KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
                        if (lp) {
                                lwkt_reltoken(&lp->lwp_token);
                                LWPRELE(lp);
                        } else {
                                lwkt_reltoken(&p->p_token);
                        }
                        PRELE(p);
                        return;
                }
                if (SIGISMEMBER(p->p_sigcatch, sig))
                        action = SIG_CATCH;
                else
                        action = SIG_DFL;
        }

        /*
         * If continuing, clear any pending STOP signals for the whole
         * process.
         */
        if (prop & SA_CONT) {
                lwkt_gettoken(&p->p_token);
                SIG_STOPSIGMASK_ATOMIC(p->p_siglist);
                lwkt_reltoken(&p->p_token);
        }

        if (prop & SA_STOP) {
                /*
                 * If sending a tty stop signal to a member of an orphaned
                 * process group, discard the signal here if the action
                 * is default; don't stop the process below if sleeping,
                 * and don't clear any pending SIGCONT.
                 */
                if ((prop & SA_TTYSTOP) && p->p_pgrp->pg_jobc == 0 &&
                    action == SIG_DFL) {
                        if (lp) {
                                lwkt_reltoken(&lp->lwp_token);
                                LWPRELE(lp);
                        } else {
                                lwkt_reltoken(&p->p_token);
                        }
                        PRELE(p);
                        return;
                }
                lwkt_gettoken(&p->p_token);
                SIG_CONTSIGMASK_ATOMIC(p->p_siglist);
                p->p_flags &= ~P_CONTINUED;
                lwkt_reltoken(&p->p_token);
        }

        if (p->p_stat == SSTOP) {
                /*
                 * Nobody can handle this signal, add it to the lwp or
                 * process pending list
                 */
                lwkt_gettoken(&p->p_token);
                if (p->p_stat != SSTOP) {
                        lwkt_reltoken(&p->p_token);
                        goto not_stopped;
                }
                sigsetfrompid(curthread, p, sig);
                if (lp) {
                        spin_lock(&lp->lwp_spin);
                        SIGADDSET(lp->lwp_siglist, sig);
                        spin_unlock(&lp->lwp_spin);
                } else {
                        SIGADDSET_ATOMIC(p->p_siglist, sig);
                }

                /*
                 * If the process is stopped and is being traced, then no
                 * further action is necessary.
                 */
                if (p->p_flags & P_TRACED) {
                        lwkt_reltoken(&p->p_token);
                        goto out;
                }

                /*
                 * If the process is stopped and receives a KILL signal,
                 * make the process runnable.
                 */
                if (sig == SIGKILL) {
                        proc_unstop(p, SSTOP);
                        lwkt_reltoken(&p->p_token);
                        goto active_process;
                }

                /*
                 * If the process is stopped and receives a CONT signal,
                 * then try to make the process runnable again.
                 */
                if (prop & SA_CONT) {
                        /*
                         * If SIGCONT is default (or ignored), we continue the
                         * process but don't leave the signal in p_siglist, as
                         * it has no further action.  If SIGCONT is held, we
                         * continue the process and leave the signal in
                         * p_siglist.  If the process catches SIGCONT, let it
                         * handle the signal itself.
                         *
                         * XXX what if the signal is being held blocked?
                         *
                         * Token required to interlock kern_wait().
                         * Reparenting can also cause a race so we have to
                         * hold (q).
                         */
                        q = p->p_pptr;
                        PHOLD(q);
                        lwkt_gettoken(&q->p_token);
                        p->p_flags |= P_CONTINUED;
                        wakeup(q);
                        if (action == SIG_DFL)
                                SIGDELSET_ATOMIC(p->p_siglist, sig);
                        proc_unstop(p, SSTOP);
                        lwkt_reltoken(&q->p_token);
                        PRELE(q);
                        lwkt_reltoken(&p->p_token);
                        if (action == SIG_CATCH)
                                goto active_process;
                        goto out;
                }

                /*
                 * If the process is stopped and receives another STOP
                 * signal, we do not need to stop it again.  If we did
                 * the shell could get confused.
                 *
                 * However, if the current/preempted lwp is part of the
                 * process receiving the signal, we need to keep it,
                 * so that this lwp can stop in issignal() later, as
                 * we don't want to wait until it reaches userret!
                 */
                if (prop & SA_STOP) {
                        if (lwkt_preempted_proc() == NULL ||
                            lwkt_preempted_proc()->lwp_proc != p) {
                                SIGDELSET_ATOMIC(p->p_siglist, sig);
                        }
                }

                /*
                 * Otherwise the process is stopped and it received some
                 * signal, which does not change its stopped state.  When
                 * the process is continued a wakeup(p) will be issued which
                 * will wakeup any threads sleeping in tstop().
                 */
                lwkt_reltoken(&p->p_token);
                goto out;
                /* NOTREACHED */
        }
not_stopped:
        ;
        /* else not stopped */
active_process:

        /*
         * Never deliver a lwp-specific signal to a random lwp.
         */
        if (lp == NULL) {
                /* NOTE: returns lp w/ token held */
                lp = find_lwp_for_signal(p, sig);
                if (lp) {
                        if (SIGISMEMBER(lp->lwp_sigmask, sig)) {
                                lwkt_reltoken(&lp->lwp_token);
                                LWPRELE(lp);
                                lp = NULL;
                                /* maintain proc token */
                        } else {
                                lwkt_token_swap();
                                lwkt_reltoken(&p->p_token);
                                /* maintain lp token */
                        }
                }
        }

        /*
         * Deliver to the process generically if (1) the signal is being
         * sent to any thread or (2) we could not find a thread to deliver
         * it to.
         */
        if (lp == NULL) {
                sigsetfrompid(curthread, p, sig);
                KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
                SIGADDSET_ATOMIC(p->p_siglist, sig);
                goto out;
        }

        /*
         * Deliver to a specific LWP whether it masks it or not.  It will
         * not be dispatched if masked but we must still deliver it.
         */
        if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
            (p->p_flags & P_TRACED) == 0) {
                lwkt_gettoken(&p->p_token);
                p->p_nice = NZERO;
                lwkt_reltoken(&p->p_token);
        }

        /*
         * If the process receives a STOP signal which indeed needs to
         * stop the process, do so.  If the process chose to catch the
         * signal, it will be treated like any other signal.
         */
        if ((prop & SA_STOP) && action == SIG_DFL) {
                /*
                 * If a child holding parent blocked, stopping
                 * could cause deadlock.  Take no action at this
                 * time.
                 */
                lwkt_gettoken(&p->p_token);
                if (p->p_flags & P_PPWAIT) {
                        sigsetfrompid(curthread, p, sig);
                        SIGADDSET_ATOMIC(p->p_siglist, sig);
                        lwkt_reltoken(&p->p_token);
                        goto out;
                }

                /*
                 * Do not actually try to manipulate the process, but simply
                 * stop it.  Lwps will stop as soon as they safely can.
                 *
                 * Ignore stop if the process is exiting.
                 */
                if ((p->p_flags & P_WEXIT) == 0) {
                        p->p_xstat = sig;
                        proc_stop(p, SSTOP);
                }
                lwkt_reltoken(&p->p_token);
                goto out;
        }

        /*
         * If it is a CONT signal with default action, just ignore it.
         */
        if ((prop & SA_CONT) && action == SIG_DFL)
                goto out;

        /*
         * Mark signal pending at this specific thread.
         */
        sigsetfrompid(curthread, p, sig);
        spin_lock(&lp->lwp_spin);
        SIGADDSET(lp->lwp_siglist, sig);
        spin_unlock(&lp->lwp_spin);

        lwp_signotify(lp);

out:
        if (lp) {
                lwkt_reltoken(&lp->lwp_token);
                LWPRELE(lp);
        } else {
                lwkt_reltoken(&p->p_token);
        }
        PRELE(p);
}

/*
 * Notify the LWP that a signal has arrived.  The LWP does not have to be
 * sleeping on the current cpu.
 *
 * p->p_token and lp->lwp_token must be held on call.
 *
 * We can only safely schedule the thread on its current cpu and only if
 * one of the SINTR flags is set.  If an SINTR flag is set AND we are on
 * the correct cpu we are properly interlocked, otherwise we could be
 * racing other thread transition states (or the lwp is on the user scheduler
 * runq but not scheduled) and must not do anything.
 *
 * Since we hold the lwp token we know the lwp cannot be ripped out from
 * under us so we can safely hold it to prevent it from being ripped out
 * from under us if we are forced to IPI another cpu to make the local
 * checks there.
 *
 * Adjustment of lp->lwp_stat can only occur when we hold the lwp_token,
 * which we won't in an IPI so any fixups have to be done here, effectively
 * replicating part of what setrunnable() does.
 */
static void
lwp_signotify(struct lwp *lp)
{
        thread_t dtd;

        ASSERT_LWKT_TOKEN_HELD(&lp->lwp_token);
        dtd = lp->lwp_thread;

        crit_enter();
        if (lp == lwkt_preempted_proc()) {
                /*
                 * lwp is on the current cpu AND it is currently running
                 * (we preempted it).
                 */
                signotify();
        } else if (lp->lwp_flags & LWP_SINTR) {
                /*
                 * lwp is sitting in tsleep() with PCATCH set
                 */
                if (dtd->td_gd == mycpu) {
                        setrunnable(lp);
                } else {
                        /*
                         * We can only adjust lwp_stat while we hold the
                         * lwp_token, and we won't in the IPI function.
                         */
                        LWPHOLD(lp);
                        if (lp->lwp_stat == LSSTOP)
                                lp->lwp_stat = LSSLEEP;
                        lwkt_send_ipiq(dtd->td_gd, lwp_signotify_remote, lp);
                }
        } else if (dtd->td_flags & TDF_SINTR) {
                /*
                 * lwp is sitting in lwkt_sleep() with PCATCH set.
                 */
                if (dtd->td_gd == mycpu) {
                        setrunnable(lp);
                } else {
                        /*
                         * We can only adjust lwp_stat while we hold the
                         * lwp_token, and we won't in the IPI function.
                         */
                        LWPHOLD(lp);
                        if (lp->lwp_stat == LSSTOP)
                                lp->lwp_stat = LSSLEEP;
                        lwkt_send_ipiq(dtd->td_gd, lwp_signotify_remote, lp);
                }
        } else {
                /*
                 * Otherwise the lwp is either in some uninterruptible state
                 * or it is on the userland scheduler's runqueue waiting to
                 * be scheduled to a cpu, or it is running in userland.  We
                 * generally want to send an IPI so a running target gets the
                 * signal ASAP, otherwise a scheduler-tick worth of latency
                 * will occur.
                 *
                 * Issue an IPI to the remote cpu to knock it into the kernel,
                 * remote cpu will issue the cpu-local signotify() if the IPI
                 * preempts the desired thread.
                 */
                if (dtd->td_gd != mycpu) {
                        LWPHOLD(lp);
                        lwkt_send_ipiq(dtd->td_gd, lwp_signotify_remote, lp);
                }
        }
        crit_exit();
}

/*
 * This function is called via an IPI so we cannot call setrunnable() here
 * (because while we hold the lp we don't own its token, and can't get it
 * from an IPI).
 *
 * We are interlocked by virtue of being on the same cpu as the target.  If
 * we still are and LWP_SINTR or TDF_SINTR is set we can safely schedule
 * the target thread.
 */
static void
lwp_signotify_remote(void *arg)
{
        struct lwp *lp = arg;
        thread_t td = lp->lwp_thread;

        if (lp == lwkt_preempted_proc()) {
                signotify();
                LWPRELE(lp);
        } else if (td->td_gd == mycpu) {
                if ((lp->lwp_flags & LWP_SINTR) ||
                    (td->td_flags & TDF_SINTR)) {
                        lwkt_schedule(td);
                }
                LWPRELE(lp);
        } else {
                lwkt_send_ipiq(td->td_gd, lwp_signotify_remote, lp);
                /* LWPHOLD() is forwarded to the target cpu */
        }
}

/*
 * Caller must hold p->p_token
 */
void
proc_stop(struct proc *p, int stat)
{
        struct proc *q;
        struct lwp *lp;

        ASSERT_LWKT_TOKEN_HELD(&p->p_token);

        /*
         * If somebody raced us, be happy with it.  SCORE overrides SSTOP.
         */
        if (stat == SCORE) {
                if (p->p_stat == SCORE || p->p_stat == SZOMB)
                        return;
        } else {
                if (p->p_stat == SSTOP || p->p_stat == SCORE ||
                    p->p_stat == SZOMB) {
                        return;
                }
        }
        p->p_stat = stat;

        FOREACH_LWP_IN_PROC(lp, p) {
                LWPHOLD(lp);
                lwkt_gettoken(&lp->lwp_token);

                switch (lp->lwp_stat) {
                case LSSTOP:
                        /*
                         * Do nothing, we are already counted in
                         * p_nstopped.
                         */
                        break;

                case LSSLEEP:
                        /*
                         * We're sleeping, but we will stop before
                         * returning to userspace, so count us
                         * as stopped as well.  We set LWP_MP_WSTOP
                         * to signal the lwp that it should not
                         * increase p_nstopped when reaching tstop().
                         *
                         * LWP_MP_WSTOP is protected by lp->lwp_token.
                         */
                        if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
                                atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
                                ++p->p_nstopped;
                        }
                        break;

                case LSRUN:
                        /*
                         * We might notify ourself, but that's not
                         * a problem.
                         */
                        lwp_signotify(lp);
                        break;
                }
                lwkt_reltoken(&lp->lwp_token);
                LWPRELE(lp);
        }

        if (p->p_nstopped == p->p_nthreads) {
                /*
                 * Token required to interlock kern_wait().  Reparenting can
                 * also cause a race so we have to hold (q).
                 */
                q = p->p_pptr;
                PHOLD(q);
                lwkt_gettoken(&q->p_token);
                p->p_flags &= ~P_WAITED;
                wakeup(q);
                if ((q->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
                        ksignal(p->p_pptr, SIGCHLD);
                lwkt_reltoken(&q->p_token);
                PRELE(q);
        }
}

/*
 * Caller must hold p_token
 */
void
proc_unstop(struct proc *p, int stat)
{
        struct lwp *lp;

        ASSERT_LWKT_TOKEN_HELD(&p->p_token);

        if (p->p_stat != stat)
                return;

        p->p_stat = SACTIVE;

        FOREACH_LWP_IN_PROC(lp, p) {
                LWPHOLD(lp);
                lwkt_gettoken(&lp->lwp_token);

                switch (lp->lwp_stat) {
                case LSRUN:
                        /*
                         * Uh?  Not stopped?  Well, I guess that's okay.
                         */
                        if (bootverbose)
                                kprintf("proc_unstop: lwp %d/%d not sleeping\n",
                                        p->p_pid, lp->lwp_tid);
                        break;

                case LSSLEEP:
                        /*
                         * Still sleeping.  Don't bother waking it up.
                         * However, if this thread was counted as
                         * stopped, undo this.
                         *
                         * Nevertheless we call setrunnable() so that it
                         * will wake up in case a signal or timeout arrived
                         * in the meantime.
                         *
                         * LWP_MP_WSTOP is protected by lp->lwp_token.
                         */
                        if (lp->lwp_mpflags & LWP_MP_WSTOP) {
                                atomic_clear_int(&lp->lwp_mpflags,
                                                 LWP_MP_WSTOP);
                                --p->p_nstopped;
                        } else {
                                if (bootverbose)
                                        kprintf("proc_unstop: lwp %d/%d sleeping, not stopped\n",
                                                p->p_pid, lp->lwp_tid);
                        }
                        /* FALLTHROUGH */

                case LSSTOP:
                        /*
                         * This handles any lwp's waiting in a tsleep with
                         * SIGCATCH.
                         */
                        lwp_signotify(lp);
                        break;

                }
                lwkt_reltoken(&lp->lwp_token);
                LWPRELE(lp);
        }

        /*
         * This handles any lwp's waiting in tstop().  We have interlocked
         * the setting of p_stat by acquiring and releasing each lwp's
         * token.
         */
        wakeup(p);
}

/*
 * Wait for all threads except the current thread to stop.
 */
static void
proc_stopwait(struct proc *p)
{
        while ((p->p_stat == SSTOP || p->p_stat == SCORE) &&
               p->p_nstopped < p->p_nthreads - 1) {
                tsleep_interlock(&p->p_nstopped, 0);
                if (p->p_nstopped < p->p_nthreads - 1) {
                        tsleep(&p->p_nstopped, PINTERLOCKED, "stopwt", hz);
                }
        }
}
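
/*
 * The tsleep_interlock()/tsleep() pair above is the standard pattern
 * for avoiding a lost wakeup: the interlock registers interest in the
 * sleep address first, the condition is re-tested, and only then does
 * tsleep() with PINTERLOCKED commit to sleeping, so a wakeup arriving
 * between the two calls is not lost.
 */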

/*
 * No requirements.
 */
static int
kern_sigtimedwait(sigset_t waitset, siginfo_t *info, struct timespec *timeout)
{
        sigset_t savedmask, set;
        struct proc *p = curproc;
        struct lwp *lp = curthread->td_lwp;
        int error, sig, hz, timevalid = 0;
        struct timespec rts, ets, ts;
        struct timeval tv;

        error = 0;
        sig = 0;
        ets.tv_sec = 0;         /* silence compiler warning */
        ets.tv_nsec = 0;        /* silence compiler warning */
        SIG_CANTMASK(waitset);
        savedmask = lp->lwp_sigmask;

        if (timeout) {
                if (timeout->tv_sec >= 0 && timeout->tv_nsec >= 0 &&
                    timeout->tv_nsec < 1000000000) {
                        timevalid = 1;
                        getnanouptime(&rts);
                        timespecadd(&rts, timeout, &ets);
                }
        }

        for (;;) {
                set = lwp_sigpend(lp);
                SIGSETAND(set, waitset);
                if ((sig = sig_ffs(&set)) != 0) {
                        SIGFILLSET(lp->lwp_sigmask);
                        SIGDELSET(lp->lwp_sigmask, sig);
                        SIG_CANTMASK(lp->lwp_sigmask);
                        sig = issignal(lp, 1, 0);
                        /*
                         * It may be a STOP signal, in which case issignal()
                         * returns 0: we may stop there and a new signal can
                         * come in, so restart the wait if we got nothing.
                         */
                        if (sig == 0)
                                continue;
                        else
                                break;
                }

                /*
                 * A previous sleep ended with an error and the re-check
                 * above found nothing pending, so return that error status.
                 */
                if (error)
                        break;

                /*
                 * POSIX says this must be checked after looking for pending
                 * signals.
                 */
                if (timeout) {
                        if (timevalid == 0) {
                                error = EINVAL;
                                break;
                        }
                        getnanouptime(&rts);
                        if (timespeccmp(&rts, &ets, >=)) {
                                error = EAGAIN;
                                break;
                        }
                        timespecsub(&ets, &rts, &ts);
                        TIMESPEC_TO_TIMEVAL(&tv, &ts);
                        hz = tvtohz_high(&tv);
                } else {
                        hz = 0;
                }

                lp->lwp_sigmask = savedmask;
                SIGSETNAND(lp->lwp_sigmask, waitset);
                /*
                 * We won't ever be woken up.  Instead, our sleep will
                 * be broken in lwpsignal().
                 */
                error = tsleep(&p->p_sigacts, PCATCH, "sigwt", hz);
                if (timeout) {
                        if (error == ERESTART) {
                                /* cannot restart a timeout wait */
                                error = EINTR;
                        } else if (error == EAGAIN) {
                                /* we recompute the timeout ourselves */
                                error = 0;
                        }
                }
                /* Retry ... */
        }

        lp->lwp_sigmask = savedmask;
        if (sig) {
                error = 0;
                bzero(info, sizeof(*info));
                info->si_signo = sig;
                spin_lock(&lp->lwp_spin);
                lwp_delsig(lp, sig, 1); /* take the signal! */
                spin_unlock(&lp->lwp_spin);

                if (sig == SIGKILL) {
                        sigexit(lp, sig);
                        /* NOT REACHED */
                }
        }

        return (error);
}
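
/*
 * Illustrative userland sketch (not kernel code): waiting up to two
 * seconds for a SIGINT that is kept blocked so it can only be picked
 * up synchronously:
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { 2, 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) < 0 && errno == EAGAIN)
 *		(the timeout expired)
 */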

/*
 * MPALMOSTSAFE
 */
int
sys_sigtimedwait(struct sysmsg *sysmsg, const struct sigtimedwait_args *uap)
{
        struct timespec ts;
        struct timespec *timeout;
        sigset_t set;
        siginfo_t info;
        int error;

        if (uap->timeout) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                timeout = &ts;
        } else {
                timeout = NULL;
        }
        error = copyin(uap->set, &set, sizeof(set));
        if (error)
                return (error);
        error = kern_sigtimedwait(set, &info, timeout);
        if (error)
                return (error);
        if (uap->info)
                error = copyout(&info, uap->info, sizeof(info));
        /* Repost if we got an error. */
        /*
         * XXX lwp
         *
         * This could transform a thread-specific signal to another
         * thread / process pending signal.
         */
        if (error) {
                ksignal(curproc, info.si_signo);
        } else {
                sysmsg->sysmsg_result = info.si_signo;
        }
        return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_sigwaitinfo(struct sysmsg *sysmsg, const struct sigwaitinfo_args *uap)
{
        siginfo_t info;
        sigset_t set;
        int error;

        error = copyin(uap->set, &set, sizeof(set));
        if (error)
                return (error);
        error = kern_sigtimedwait(set, &info, NULL);
        if (error)
                return (error);
        if (uap->info)
                error = copyout(&info, uap->info, sizeof(info));
        /* Repost if we got an error. */
        /*
         * XXX lwp
         *
         * This could transform a thread-specific signal to another
         * thread / process pending signal.
         */
        if (error) {
                ksignal(curproc, info.si_signo);
        } else {
                sysmsg->sysmsg_result = info.si_signo;
        }
        return (error);
}
1931
1932 /*
1933 * If the current process has received a signal that would interrupt a
1934 * system call, return EINTR or ERESTART as appropriate.
1935 */
1936 int
iscaught(struct lwp * lp)1937 iscaught(struct lwp *lp)
1938 {
1939 struct proc *p = lp->lwp_proc;
1940 int sig;
1941
1942 if (p) {
1943 if ((sig = CURSIG(lp)) != 0) {
1944 if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
1945 return (EINTR);
1946 return (ERESTART);
1947 }
1948 }
1949 return(EWOULDBLOCK);
1950 }
1951
1952 /*
1953 * If the current lwp/proc has received a signal (should be caught or cause
1954 * termination, should interrupt current syscall), return the signal number.
1955 * Stop signals with default action are processed immediately, then cleared;
1956 * they aren't returned. This is checked after each entry to the system for
1957 * a syscall or trap (though this can usually be done without calling issignal
1958 * by checking the pending signal masks in the CURSIG macro).
1959 *
1960 * This routine is called via CURSIG/__cursig. We will acquire and release
1961 * p->p_token but if the caller needs to interlock the test the caller must
1962 * also hold p->p_token.
1963 *
1964 * while (sig = CURSIG(curproc))
1965 * postsig(sig);
1966 */
1967 int
issignal(struct lwp * lp,int maytrace,int * ptokp)1968 issignal(struct lwp *lp, int maytrace, int *ptokp)
1969 {
1970 struct proc *p = lp->lwp_proc;
1971 sigset_t mask;
1972 int sig, prop;
1973 int haveptok;
1974
1975 for (;;) {
1976 int traced = (p->p_flags & P_TRACED) || (p->p_stops & S_SIG);
1977
1978 haveptok = 0;
1979
1980 /*
1981 * NOTE: Do not tstop here. Issue the proc_stop()
1982 * so other parties see that we know we need
1983 * to stop, but don't block here. Locks might
1984 * be held.
1985 *
1986 * XXX If this process is supposed to stop, stop this thread.
1987 * removed.
1988 */
1989 #if 0
1990 if (STOPLWP(p, lp)) {
1991 lwkt_gettoken(&p->p_token);
1992 tstop();
1993 lwkt_reltoken(&p->p_token);
1994 }
1995 #endif

		/*
		 * Quick check without token
		 */
		mask = lwp_sigpend(lp);
		SIGSETNAND(mask, lp->lwp_sigmask);
		if (p->p_flags & P_PPWAIT)
			SIG_STOPSIGMASK(mask);
		SIG_CONDBLOCKALLSIGS(mask, lp);

		if (SIGISEMPTY(mask))		/* no signal to send */
			return (0);

		/*
		 * If the signal is a member of the process signal set
		 * we need p_token (even if it is also a member of the
		 * lwp signal set).
		 */
		sig = sig_ffs(&mask);
		if (SIGISMEMBER(p->p_siglist, sig)) {
			/*
			 * Recheck with token
			 */
			haveptok = 1;
			lwkt_gettoken(&p->p_token);

			mask = lwp_sigpend(lp);
			SIGSETNAND(mask, lp->lwp_sigmask);
			if (p->p_flags & P_PPWAIT)
				SIG_STOPSIGMASK(mask);
			if (SIGISEMPTY(mask)) {	/* no signal to send */
				/* haveptok is TRUE */
				lwkt_reltoken(&p->p_token);
				return (0);
			}
			sig = sig_ffs(&mask);
		}

		STOPEVENT(p, S_SIG, sig);

		/*
		 * We should see pending but ignored signals
		 * only if P_TRACED was on when they were posted.
		 */
		if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) {
			spin_lock(&lp->lwp_spin);
			lwp_delsig(lp, sig, haveptok);
			spin_unlock(&lp->lwp_spin);
			if (haveptok)
				lwkt_reltoken(&p->p_token);
			continue;
		}
		if (maytrace &&
		    (p->p_flags & P_TRACED) &&
		    (p->p_flags & P_PPWAIT) == 0) {
			/*
			 * If traced, always stop, and stay stopped until
			 * released by the parent.
			 *
			 * NOTE: SSTOP may get cleared during the loop, but
			 *	 we do not re-notify the parent if we have
			 *	 to loop several times waiting for the parent
			 *	 to let us continue.  XXX not sure if this is
			 *	 still true
			 *
			 * NOTE: Do not tstop here.  Issue the proc_stop()
			 *	 so other parties see that we know we need
			 *	 to stop, but don't block here.  Locks might
			 *	 be held.
			 */
			if (haveptok == 0) {
				lwkt_gettoken(&p->p_token);
				haveptok = 1;
			}
			p->p_xstat = sig;
			proc_stop(p, SSTOP);

			/*
			 * Normally we don't stop until we return to
			 * userland, but make an exception when tracing
			 * and 'maytrace' is asserted.
			 */
			if (p->p_flags & P_TRACED)
				tstop();

			/*
			 * If parent wants us to take the signal,
			 * then it will leave it in p->p_xstat;
			 * otherwise we just look for signals again.
			 */
			spin_lock(&lp->lwp_spin);
			lwp_delsig(lp, sig, 1);	/* clear old signal */
			spin_unlock(&lp->lwp_spin);
			sig = p->p_xstat;
			if (sig == 0) {
				/* haveptok is TRUE */
				lwkt_reltoken(&p->p_token);
				continue;
			}

			/*
			 * Put the new signal into p_siglist.  If the
			 * signal is being masked, look for other signals.
			 *
			 * XXX lwp might need a call to ksignal()
			 */
			SIGADDSET_ATOMIC(p->p_siglist, sig);
			if (SIGISMEMBER(lp->lwp_sigmask, sig)) {
				/* haveptok is TRUE */
				lwkt_reltoken(&p->p_token);
				continue;
			}

			/*
			 * If the traced bit got turned off, go back up
			 * to the top to rescan signals.  This ensures
			 * that p_sig* and ps_sigact are consistent.
			 */
			if ((p->p_flags & P_TRACED) == 0) {
				/* haveptok is TRUE */
				lwkt_reltoken(&p->p_token);
				continue;
			}
		}

		/*
		 * p_token may be held here
		 */
		prop = sigprop(sig);

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
		case (intptr_t)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (p->p_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				kprintf("Process (pid %lu) got signal %d\n",
					(u_long)p->p_pid, sig);
#endif
				break;		/* == ignore */
			}

			/*
			 * Handle the in-kernel checkpoint action
			 */
			if (prop & SA_CKPT) {
				if (haveptok == 0) {
					lwkt_gettoken(&p->p_token);
					haveptok = 1;
				}
				checkpoint_signal_handler(lp);
				break;
			}

			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.  However,
			 * if the process is a member of an orphaned
			 * process group, ignore tty stop signals.
			 */
			if (prop & SA_STOP) {
				if (haveptok == 0) {
					lwkt_gettoken(&p->p_token);
					haveptok = 1;
				}
				if (p->p_flags & P_TRACED ||
				    (p->p_pgrp->pg_jobc == 0 &&
				     prop & SA_TTYSTOP))
					break;	/* == ignore */
				if ((p->p_flags & P_WEXIT) == 0) {
					/*
					 * NOTE: We do not block here.  Issue
					 *	 the stop so other parties
					 *	 see that we know we need to
					 *	 stop.  Locks might be held.
					 */
					p->p_xstat = sig;
					proc_stop(p, SSTOP);

#if 0
					tstop();
#endif
				}
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else {
				if (ptokp)
					*ptokp = haveptok;
				else if (haveptok)
					lwkt_reltoken(&p->p_token);
				return (sig);
			}

			/*NOTREACHED*/

		case (intptr_t)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (p->p_flags & P_TRACED) == 0)
				kprintf("issignal\n");
			break;		/* == ignore */

		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			if (ptokp)
				*ptokp = haveptok;
			else if (haveptok)
				lwkt_reltoken(&p->p_token);
			return (sig);
		}
		spin_lock(&lp->lwp_spin);
		lwp_delsig(lp, sig, haveptok);	/* take the signal! */
		spin_unlock(&lp->lwp_spin);

		if (haveptok)
			lwkt_reltoken(&p->p_token);
	}
	/* NOTREACHED */
}

/*
 * Take the action for the specified signal from the current set of
 * pending signals.
 *
 * haveptok indicates whether the caller is holding p->p_token.  If the
 * caller is, we are responsible for releasing it.
 *
 * This routine can only be called from the top-level trap from usermode.
 * It is expecting to be able to modify the top-level stack frame.
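 *
 * Delivery itself is machine-dependent: the sv_sendsig() vector invoked at
 * the end of this routine pushes a signal frame on the user stack, and the
 * handler later re-enters the kernel via sigreturn() to restore the mask
 * computed here (returnmask).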
 */
void
postsig(int sig, int haveptok)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct sigacts *ps = p->p_sigacts;
	sig_t action;
	sigset_t returnmask;
	int code;

	KASSERT(sig != 0, ("postsig"));

	/*
	 * If we are a virtual kernel running an emulated user process
	 * context, switch back to the virtual kernel context before
	 * trying to post the signal.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		struct trapframe *tf = lp->lwp_md.md_regs;
		tf->tf_trapno = 0;
		vkernel_trap(lp, tf);
	}

	KNOTE(&p->p_klist, NOTE_SIGNAL | sig);

	spin_lock(&lp->lwp_spin);
	lwp_delsig(lp, sig, haveptok);
	spin_unlock(&lp->lwp_spin);
	action = ps->ps_sigact[_SIG_IDX(sig)];
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
		ktrpsig(lp, sig, action, lp->lwp_flags & LWP_OLDMASK ?
			&lp->lwp_oldsigmask : &lp->lwp_sigmask, 0);
#endif
	/*
	 * We don't need p_token after this point.
	 */
	if (haveptok)
		lwkt_reltoken(&p->p_token);

	STOPEVENT(p, S_SIG, sig);

	if (action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process.  (Other cases were ignored above.)
		 */
		sigexit(lp, sig);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
		KASSERT(action != SIG_IGN && !SIGISMEMBER(lp->lwp_sigmask, sig),
			("postsig action"));

		/*
		 * Reset the signal handler if asked to
		 */
		if (SIGISMEMBER(ps->ps_sigreset, sig)) {
			/*
			 * See kern_sigaction() for origin of this code.
			 */
			SIGDELSET(p->p_sigcatch, sig);
			if (sig != SIGCONT &&
			    sigprop(sig) & SA_IGNORE)
				SIGADDSET(p->p_sigignore, sig);
			ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
		}

		/*
		 * Set the signal mask and calculate the mask to restore
		 * when the signal function returns.
		 *
		 * Special case: user has done a sigsuspend.  Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigsuspend is what we want
		 * restored after the signal processing is completed.
		 */
		if (lp->lwp_flags & LWP_OLDMASK) {
			returnmask = lp->lwp_oldsigmask;
			lp->lwp_flags &= ~LWP_OLDMASK;
		} else {
			returnmask = lp->lwp_sigmask;
		}

		SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
		if (!SIGISMEMBER(ps->ps_signodefer, sig))
			SIGADDSET(lp->lwp_sigmask, sig);

		lp->lwp_ru.ru_nsignals++;
		if (lp->lwp_sig != sig) {
			code = 0;
		} else {
			code = lp->lwp_code;
			lp->lwp_code = 0;
			lp->lwp_sig = 0;
		}
		(*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code);
	}
}

/*
 * Kill the specified process for the stated reason.
 */
void
killproc(struct proc *p, char *why)
{
	log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n",
	    p->p_pid, p->p_comm,
	    p->p_ucred ? p->p_ucred->cr_uid : -1, why);
	ksignal(p, SIGKILL);
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate.  We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state.  Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger.  This routine
 * calls exit1() and does not return.
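 *
 * Exit-status encoding sketch (standard wait(2) macros, not specific to
 * this file): when the dump succeeds, WCOREFLAG is or'd into sig before
 * the status word is built, so a parent waiting on the child observes:
 *
 *	status = W_EXITCODE(0, SIGSEGV | WCOREFLAG);
 *	WIFSIGNALED(status);		(true)
 *	WTERMSIG(status) == SIGSEGV;	(true)
 *	WCOREDUMP(status);		(true)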
 */
void
sigexit(struct lwp *lp, int sig)
{
	struct proc *p = lp->lwp_proc;

	lwkt_gettoken(&p->p_token);
	p->p_acflag |= AXSIG;
	if (sigprop(sig) & SA_CORE) {
		lp->lwp_sig = sig;

		/*
		 * All threads must be stopped before we can safely coredump.
		 * Stop threads using SCORE, which cannot be overridden.
		 */
		if (p->p_stat != SCORE) {
			proc_stop(p, SCORE);
			proc_stopwait(p);

			if (coredump(lp, sig) == 0)
				sig |= WCOREFLAG;
			p->p_stat = SSTOP;
		}

		/*
		 * Log signals which would cause core dumps.
		 * (Log as LOG_INFO to appease those who don't want
		 * these messages.)
		 * XXX: As well as the euid, write out the ruid too.
		 */
		if (kern_logsigexit) {
			log(LOG_INFO,
			    "pid %d (%s), uid %d: exited on signal %d%s\n",
			    p->p_pid, p->p_comm,
			    p->p_ucred ? p->p_ucred->cr_uid : -1,
			    sig &~ WCOREFLAG,
			    sig & WCOREFLAG ? " (core dumped)" : "");
			if (kern_logsigexit > 1)
				kprintf("DEBUG - waiting on kern.logsigexit\n");
			while (kern_logsigexit > 1) {
				tsleep(&kern_logsigexit, 0, "DEBUG", hz);
			}
		}
	}
	lwkt_reltoken(&p->p_token);
	exit1(W_EXITCODE(0, sig));
	/* NOTREACHED */
}

static char corefilename[MAXPATHLEN+1] = {"%N.core"};
SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
	      sizeof(corefilename), "process corefile name format string");

/*
 * expand_name(name, uid, pid)
 * Expand the name described in corefilename, using name, uid, and pid.
 * corefilename is a kprintf-like string, with three format specifiers:
 *	%N	name of process ("name")
 *	%P	process id (pid)
 *	%U	user id (uid)
 * For example, "%N.core" is the default; core files can be disabled
 * completely by using "/dev/null", or all core files can be stored in
 * "/cores/%U/%N-%P".  This is controlled by the sysctl variable
 * kern.corefile (see above).
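 *
 * Worked example (hypothetical values): with kern.corefile set to
 * "/cores/%U/%N-%P", expand_name("sh", 1001, 1234) yields
 * "/cores/1001/sh-1234".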
 */

static char *
expand_name(const char *name, uid_t uid, pid_t pid)
{
	char *temp;
	char buf[11];		/* for pid/uid -- 32-bit max is 10 digits + NUL */
	int i, n;
	char *format = corefilename;
	size_t namelen;

	temp = kmalloc(MAXPATHLEN + 1, M_TEMP, M_NOWAIT);
	if (temp == NULL)
		return NULL;
	namelen = strlen(name);
	for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
		int l;

		switch (format[i]) {
		case '%':	/* Format character */
			i++;
			switch (format[i]) {
			case '%':
				temp[n++] = '%';
				break;
			case 'N':	/* process name */
				if ((n + namelen) > MAXPATHLEN) {
					log(LOG_ERR,
					    "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					kfree(temp, M_TEMP);
					return NULL;
				}
				memcpy(temp+n, name, namelen);
				n += namelen;
				break;
			case 'P':	/* process id */
				l = ksprintf(buf, "%u", pid);
				if ((n + l) > MAXPATHLEN) {
					log(LOG_ERR,
					    "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					kfree(temp, M_TEMP);
					return NULL;
				}
				memcpy(temp+n, buf, l);
				n += l;
				break;
			case 'U':	/* user id */
				l = ksprintf(buf, "%u", uid);
				if ((n + l) > MAXPATHLEN) {
					log(LOG_ERR,
					    "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
					    pid, name, uid, temp, name);
					kfree(temp, M_TEMP);
					return NULL;
				}
				memcpy(temp+n, buf, l);
				n += l;
				break;
			default:
				log(LOG_ERR,
				    "Unknown format character %c in `%s'\n",
				    format[i], format);
			}
			break;
		default:
			temp[n++] = format[i];
		}
	}
	temp[n] = '\0';
	return temp;
}

/*
 * Dump a process' core.  The main routine does some
 * policy checking, and creates the name of the coredump;
 * then it passes on a vnode and a size limit to the process-specific
 * coredump routine if there is one; if there _is not_ one, it returns
 * ENOSYS; otherwise it returns the error from the process-specific routine.
 *
 * The parameter `lp' is the lwp which triggered the coredump.
 */

static int
coredump(struct lwp *lp, int sig)
{
	struct proc *p = lp->lwp_proc;
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct flock lf;
	struct nlookupdata nd;
	struct vattr vattr;
	int error, error1;
	char *name;			/* name of corefile */
	off_t limit;

	STOPEVENT(p, S_CORE, 0);

	if (((sugid_coredump == 0) && p->p_flags & P_SUGID) || do_coredump == 0)
		return (EFAULT);

	/*
	 * Note that the bulk of limit checking is done after
	 * the corefile is created.  The exception is if the limit
	 * for corefiles is 0, in which case we don't bother
	 * creating the corefile at all.  This layout means that
	 * a corefile is truncated instead of not being created,
	 * if it is larger than the limit.
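	 * (For example, with a 1 MB RLIMIT_CORE, a 5 MB process image
	 * should still produce a core file, just one cut off at the
	 * 1 MB mark by the size limit passed to sv_coredump below.)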
	 */
	limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
	if (limit == 0)
		return EFBIG;

	name = expand_name(p->p_comm, p->p_ucred->cr_uid, p->p_pid);
	if (name == NULL)
		return (EINVAL);
	error = nlookup_init(&nd, name, UIO_SYSSPACE, NLC_LOCKVP);
	if (error == 0)
		error = vn_open(&nd, NULL,
				O_CREAT | FWRITE | O_NOFOLLOW,
				S_IRUSR | S_IWUSR);
	kfree(name, M_TEMP);
	if (error) {
		nlookup_done(&nd);
		return (error);
	}
	vp = nd.nl_open_vp;
	nd.nl_open_vp = NULL;
	nlookup_done(&nd);

	vn_unlock(vp);
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = F_WRLCK;
	error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, 0);
	if (error)
		goto out2;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr) || vattr.va_nlink != 1) {
		error = EFAULT;
		goto out1;
	}

	/* Don't dump to files the current user does not own */
	if (vattr.va_uid != p->p_ucred->cr_uid) {
		error = EFAULT;
		goto out1;
	}

	VATTR_NULL(&vattr);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred);
	p->p_acflag |= ACORE;
	vn_unlock(vp);

	error = p->p_sysent->sv_coredump ?
		p->p_sysent->sv_coredump(lp, sig, vp, limit) : ENOSYS;

out1:
	lf.l_type = F_UNLCK;
	VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, 0);
out2:
	error1 = vn_close(vp, FWRITE, NULL);
	if (error == 0)
		error = error1;
	return (error);
}

/*
 * Nonexistent system call -- signal the process (it may want to handle it).
 * Flag an error in case the process won't see the signal immediately
 * (blocked or ignored).
 *
 * MPALMOSTSAFE
 */
/* ARGSUSED */
int
sys_nosys(struct sysmsg *sysmsg, const struct nosys_args *args)
{
	lwpsignal(curproc, curthread->td_lwp, SIGSYS);
	return (EINVAL);
}

/*
 * Send a SIGIO or SIGURG signal to a process or process group using
 * stored credentials rather than those of the current process.
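 *
 * The sio_pgid encoding follows fcntl(F_SETOWN): a positive value names a
 * single process, while a negative value names a process group (its
 * absolute value is the pgid), as the two branches below reflect.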
 */
void
pgsigio(struct sigio *sigio, int sig, int checkctty)
{
	if (sigio == NULL)
		return;

	if (sigio->sio_pgid > 0) {
		if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred,
			     sigio->sio_proc))
			ksignal(sigio->sio_proc, sig);
	} else if (sigio->sio_pgid < 0) {
		struct proc *p;
		struct pgrp *pg = sigio->sio_pgrp;

		/*
		 * Must interlock all signals against fork
		 */
		pgref(pg);
		lockmgr(&pg->pg_lock, LK_EXCLUSIVE);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred, p) &&
			    (checkctty == 0 || (p->p_flags & P_CONTROLT)))
				ksignal(p, sig);
		}
		lockmgr(&pg->pg_lock, LK_RELEASE);
		pgrel(pg);
	}
}

static int
filt_sigattach(struct knote *kn)
{
	struct proc *p = curproc;

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/* XXX lock the proc here while adding to the list? */
	knote_insert(&p->p_klist, kn);

	return (0);
}

static void
filt_sigdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	knote_remove(&p->p_klist, kn);
}

/*
 * Signal knotes are shared with proc knotes, so we apply a mask to
 * the hint in order to differentiate them from process hints.  This
 * could be avoided by using a signal-specific knote list, but probably
 * isn't worth the trouble.
 */
static int
filt_signal(struct knote *kn, long hint)
{
	if (hint & NOTE_SIGNAL) {
		hint &= ~NOTE_SIGNAL;

		if (kn->kn_id == hint)
			kn->kn_data++;
	}
	return (kn->kn_data != 0);
}

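/*
 * Userland usage sketch for the signal filter (hypothetical program, not
 * part of this file): register an EVFILT_SIGNAL knote and wait for it.
 * kev.data reports how many SIGUSR1 deliveries were seen since the last
 * retrieval; EV_CLEAR (forced on in filt_sigattach) resets the count.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	(register the filter)
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	(wait; kev.data = count)
 */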