/*	$OpenBSD: kern_sig.c,v 1.347 2024/11/05 09:14:19 claudio Exp $	*/
/*	$NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $	*/

/*
 * Copyright (c) 1997 Theo de Raadt. All rights reserved.
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_sig.c	8.7 (Berkeley) 4/18/94
 */

#include <sys/param.h>
#include <sys/signalvar.h>
#include <sys/queue.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/wait.h>
#include <sys/ktrace.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/sched.h>
#include <sys/user.h>
#include <sys/syslog.h>
#include <sys/ttycom.h>
#include <sys/pledge.h>
#include <sys/witness.h>
#include <sys/exec_elf.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

int nosuidcoredump = 1;

/*
 * The array below categorizes the signals and their default actions.
 */
const int sigprop[NSIG] = {
	0,			/* unused */
	SA_KILL,		/* SIGHUP */
	SA_KILL,		/* SIGINT */
	SA_KILL|SA_CORE,	/* SIGQUIT */
	SA_KILL|SA_CORE,	/* SIGILL */
	SA_KILL|SA_CORE,	/* SIGTRAP */
	SA_KILL|SA_CORE,	/* SIGABRT */
	SA_KILL|SA_CORE,	/* SIGEMT */
	SA_KILL|SA_CORE,	/* SIGFPE */
	SA_KILL,		/* SIGKILL */
	SA_KILL|SA_CORE,	/* SIGBUS */
	SA_KILL|SA_CORE,	/* SIGSEGV */
	SA_KILL|SA_CORE,	/* SIGSYS */
	SA_KILL,		/* SIGPIPE */
	SA_KILL,		/* SIGALRM */
	SA_KILL,		/* SIGTERM */
	SA_IGNORE,		/* SIGURG */
	SA_STOP,		/* SIGSTOP */
	SA_STOP|SA_TTYSTOP,	/* SIGTSTP */
	SA_IGNORE|SA_CONT,	/* SIGCONT */
	SA_IGNORE,		/* SIGCHLD */
	SA_STOP|SA_TTYSTOP,	/* SIGTTIN */
	SA_STOP|SA_TTYSTOP,	/* SIGTTOU */
	SA_IGNORE,		/* SIGIO */
	SA_KILL,		/* SIGXCPU */
	SA_KILL,		/* SIGXFSZ */
	SA_KILL,		/* SIGVTALRM */
	SA_KILL,		/* SIGPROF */
	SA_IGNORE,		/* SIGWINCH */
	SA_IGNORE,		/* SIGINFO */
	SA_KILL,		/* SIGUSR1 */
	SA_KILL,		/* SIGUSR2 */
	SA_IGNORE,		/* SIGTHR */
};

#define	CONTSIGMASK	(sigmask(SIGCONT))
#define	STOPSIGMASK	(sigmask(SIGSTOP) | sigmask(SIGTSTP) | \
			    sigmask(SIGTTIN) | sigmask(SIGTTOU))
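
/*
 * For reference, sigmask(signum) expands (per <sys/signal.h>) to
 * roughly (1 << (signum - 1)), so the masks above are single-word
 * bit sets; e.g. STOPSIGMASK holds exactly the four stop-signal bits.
 */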

void setsigvec(struct proc *, int, struct sigaction *);

void proc_stop(struct proc *p, int);
void proc_stop_sweep(void *);
void *proc_stop_si;

void setsigctx(struct proc *, int, struct sigctx *);
void postsig_done(struct proc *, int, sigset_t, int);
void postsig(struct proc *, int, struct sigctx *);
int cansignal(struct proc *, struct process *, int);

void ptsignal_locked(struct proc *, int, enum signal_type);

struct pool sigacts_pool;	/* memory pool for sigacts structures */

void sigio_del(struct sigiolst *);
void sigio_unlink(struct sigio_ref *, struct sigiolst *);
struct mutex sigio_lock = MUTEX_INITIALIZER(IPL_HIGH);

/*
 * Can thread p send the signal signum to process qr?
 */
int
cansignal(struct proc *p, struct process *qr, int signum)
{
	struct process *pr = p->p_p;
	struct ucred *uc = p->p_ucred;
	struct ucred *quc = qr->ps_ucred;

	if (uc->cr_uid == 0)
		return (1);		/* root can always signal */

	if (pr == qr)
		return (1);		/* process can always signal itself */

	/* optimization: if the same creds then the tests below will pass */
	if (uc == quc)
		return (1);

	if (signum == SIGCONT && qr->ps_session == pr->ps_session)
		return (1);		/* SIGCONT in session */

	/*
	 * Using kill(), only certain signals can be sent to setugid
	 * child processes
	 */
	if (qr->ps_flags & PS_SUGID) {
		switch (signum) {
		case 0:
		case SIGKILL:
		case SIGINT:
		case SIGTERM:
		case SIGALRM:
		case SIGSTOP:
		case SIGTTIN:
		case SIGTTOU:
		case SIGTSTP:
		case SIGHUP:
		case SIGUSR1:
		case SIGUSR2:
			if (uc->cr_ruid == quc->cr_ruid ||
			    uc->cr_uid == quc->cr_ruid)
				return (1);
		}
		return (0);
	}

	if (uc->cr_ruid == quc->cr_ruid ||
	    uc->cr_ruid == quc->cr_svuid ||
	    uc->cr_uid == quc->cr_ruid ||
	    uc->cr_uid == quc->cr_svuid)
		return (1);
	return (0);
}
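
/*
 * E.g. an unprivileged caller with a matching real uid may send SIGTERM
 * or SIGUSR1 to a setugid target, but not SIGPROF, since the latter is
 * absent from the PS_SUGID list above.
 */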

/*
 * Initialize signal-related data structures.
 */
void
signal_init(void)
{
	proc_stop_si = softintr_establish(IPL_SOFTCLOCK, proc_stop_sweep,
	    NULL);
	if (proc_stop_si == NULL)
		panic("signal_init failed to register softintr");

	pool_init(&sigacts_pool, sizeof(struct sigacts), 0, IPL_NONE,
	    PR_WAITOK, "sigapl", NULL);
}

/*
 * Initialize a new sigaltstack structure.
 */
void
sigstkinit(struct sigaltstack *ss)
{
	ss->ss_flags = SS_DISABLE;
	ss->ss_size = 0;
	ss->ss_sp = NULL;
}

/*
 * Create an initial sigacts structure, using the same signal state
 * as pr.
 */
struct sigacts *
sigactsinit(struct process *pr)
{
	struct sigacts *ps;

	ps = pool_get(&sigacts_pool, PR_WAITOK);
	memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
	return (ps);
}

/*
 * Release a sigacts structure.
 */
void
sigactsfree(struct sigacts *ps)
{
	pool_put(&sigacts_pool, ps);
}

int
sys_sigaction(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaction_args /* {
		syscallarg(int) signum;
		syscallarg(const struct sigaction *) nsa;
		syscallarg(struct sigaction *) osa;
	} */ *uap = v;
	struct sigaction vec;
#ifdef KTRACE
	struct sigaction ovec;
#endif
	struct sigaction *sa;
	const struct sigaction *nsa;
	struct sigaction *osa;
	struct sigacts *ps = p->p_p->ps_sigacts;
	int signum;
	int bit, error;

	signum = SCARG(uap, signum);
	nsa = SCARG(uap, nsa);
	osa = SCARG(uap, osa);

	if (signum <= 0 || signum >= NSIG ||
	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
		return (EINVAL);
	sa = &vec;
	if (osa) {
		mtx_enter(&p->p_p->ps_mtx);
		sa->sa_handler = ps->ps_sigact[signum];
		sa->sa_mask = ps->ps_catchmask[signum];
		bit = sigmask(signum);
		sa->sa_flags = 0;
		if ((ps->ps_sigonstack & bit) != 0)
			sa->sa_flags |= SA_ONSTACK;
		if ((ps->ps_sigintr & bit) == 0)
			sa->sa_flags |= SA_RESTART;
		if ((ps->ps_sigreset & bit) != 0)
			sa->sa_flags |= SA_RESETHAND;
		if ((ps->ps_siginfo & bit) != 0)
			sa->sa_flags |= SA_SIGINFO;
		if (signum == SIGCHLD) {
			if ((ps->ps_sigflags & SAS_NOCLDSTOP) != 0)
				sa->sa_flags |= SA_NOCLDSTOP;
			if ((ps->ps_sigflags & SAS_NOCLDWAIT) != 0)
				sa->sa_flags |= SA_NOCLDWAIT;
		}
		mtx_leave(&p->p_p->ps_mtx);
		if ((sa->sa_mask & bit) == 0)
			sa->sa_flags |= SA_NODEFER;
		sa->sa_mask &= ~bit;
		error = copyout(sa, osa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ovec = vec;
#endif
	}
	if (nsa) {
		error = copyin(nsa, sa, sizeof (vec));
		if (error)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrsigaction(p, sa);
#endif
		setsigvec(p, signum, sa);
	}
#ifdef KTRACE
	if (osa && KTRPOINT(p, KTR_STRUCT))
		ktrsigaction(p, &ovec);
#endif
	return (0);
}

void
setsigvec(struct proc *p, int signum, struct sigaction *sa)
{
	struct sigacts *ps = p->p_p->ps_sigacts;
	int bit;

	bit = sigmask(signum);

	mtx_enter(&p->p_p->ps_mtx);
	ps->ps_sigact[signum] = sa->sa_handler;
	if ((sa->sa_flags & SA_NODEFER) == 0)
		sa->sa_mask |= sigmask(signum);
	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
	if (signum == SIGCHLD) {
		if (sa->sa_flags & SA_NOCLDSTOP)
			atomic_setbits_int(&ps->ps_sigflags, SAS_NOCLDSTOP);
		else
			atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDSTOP);
		/*
		 * If the SA_NOCLDWAIT flag is set or the handler
		 * is SIG_IGN we reparent the dying child to PID 1
		 * (init) which will reap the zombie. Because we use
		 * init to do our dirty work we never set SAS_NOCLDWAIT
		 * for PID 1.
		 * XXX exit1 rework means this is unnecessary?
		 */
		if (initprocess->ps_sigacts != ps &&
		    ((sa->sa_flags & SA_NOCLDWAIT) ||
		    sa->sa_handler == SIG_IGN))
			atomic_setbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
		else
			atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
	}
	if ((sa->sa_flags & SA_RESETHAND) != 0)
		ps->ps_sigreset |= bit;
	else
		ps->ps_sigreset &= ~bit;
	if ((sa->sa_flags & SA_SIGINFO) != 0)
		ps->ps_siginfo |= bit;
	else
		ps->ps_siginfo &= ~bit;
	if ((sa->sa_flags & SA_RESTART) == 0)
		ps->ps_sigintr |= bit;
	else
		ps->ps_sigintr &= ~bit;
	if ((sa->sa_flags & SA_ONSTACK) != 0)
		ps->ps_sigonstack |= bit;
	else
		ps->ps_sigonstack &= ~bit;
	/*
	 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
	 * and for signals set to SIG_DFL where the default is to ignore.
	 * However, don't put SIGCONT in ps_sigignore,
	 * as we have to restart the process.
	 */
	if (sa->sa_handler == SIG_IGN ||
	    (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) {
		atomic_clearbits_int(&p->p_siglist, bit);
		atomic_clearbits_int(&p->p_p->ps_siglist, bit);
		if (signum != SIGCONT)
			ps->ps_sigignore |= bit;	/* easier in psignal */
		ps->ps_sigcatch &= ~bit;
	} else {
		ps->ps_sigignore &= ~bit;
		if (sa->sa_handler == SIG_DFL)
			ps->ps_sigcatch &= ~bit;
		else
			ps->ps_sigcatch |= bit;
	}
	mtx_leave(&p->p_p->ps_mtx);
}

/*
 * Initialize signal state for process 0;
 * set to ignore signals that are ignored by default.
 */
void
siginit(struct sigacts *ps)
{
	int i;

	for (i = 0; i < NSIG; i++)
		if (sigprop[i] & SA_IGNORE && i != SIGCONT)
			ps->ps_sigignore |= sigmask(i);
	ps->ps_sigflags = SAS_NOCLDWAIT | SAS_NOCLDSTOP;
}

/*
 * Reset signals for an exec by the specified thread.
 */
void
execsigs(struct proc *p)
{
	struct sigacts *ps;
	int nc, mask;

	ps = p->p_p->ps_sigacts;
	mtx_enter(&p->p_p->ps_mtx);

	/*
	 * Reset caught signals. Held signals remain held
	 * through p_sigmask (unless they were caught,
	 * and are now ignored by default).
	 */
	while (ps->ps_sigcatch) {
		nc = ffs((long)ps->ps_sigcatch);
		mask = sigmask(nc);
		ps->ps_sigcatch &= ~mask;
		if (sigprop[nc] & SA_IGNORE) {
			if (nc != SIGCONT)
				ps->ps_sigignore |= mask;
			atomic_clearbits_int(&p->p_siglist, mask);
			atomic_clearbits_int(&p->p_p->ps_siglist, mask);
		}
		ps->ps_sigact[nc] = SIG_DFL;
	}
	/*
	 * Reset stack state to the user stack.
	 * Clear set of signals caught on the signal stack.
	 */
	sigstkinit(&p->p_sigstk);
	atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
		ps->ps_sigact[SIGCHLD] = SIG_DFL;
	mtx_leave(&p->p_p->ps_mtx);
}

/*
 * Manipulate signal mask.
 * Note that we receive new mask, not pointer,
 * and return old mask as return value;
 * the library stub does the rest.
 */
int
sys_sigprocmask(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigprocmask_args /* {
		syscallarg(int) how;
		syscallarg(sigset_t) mask;
	} */ *uap = v;
	int error = 0;
	sigset_t mask;

	KASSERT(p == curproc);

	*retval = p->p_sigmask;
	mask = SCARG(uap, mask) &~ sigcantmask;

	switch (SCARG(uap, how)) {
	case SIG_BLOCK:
		SET(p->p_sigmask, mask);
		break;
	case SIG_UNBLOCK:
		CLR(p->p_sigmask, mask);
		break;
	case SIG_SETMASK:
		p->p_sigmask = mask;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
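
/*
 * A userland stub along these lines matches the convention (a sketch
 * only; the names are illustrative, not the actual libc source):
 *
 *	int
 *	sigprocmask(int how, const sigset_t *set, sigset_t *oset)
 *	{
 *		sigset_t old;
 *
 *		(void)0; // with a NULL set, block nothing, fetch old mask
 *		old = _syscall_sigprocmask(set ? how : SIG_BLOCK,
 *		    set ? *set : 0);
 *		if (oset)
 *			*oset = old;
 *		return (0);
 *	}
 */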

int
sys_sigpending(struct proc *p, void *v, register_t *retval)
{
	*retval = p->p_siglist | p->p_p->ps_siglist;
	return (0);
}

/*
 * Temporarily replace calling proc's signal mask for the duration of a
 * system call. Original signal mask will be restored by userret().
 */
void
dosigsuspend(struct proc *p, sigset_t newmask)
{
	KASSERT(p == curproc);

	p->p_oldmask = p->p_sigmask;
	p->p_sigmask = newmask;
	atomic_setbits_int(&p->p_flag, P_SIGSUSPEND);
}
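
/*
 * Besides sigsuspend(2) below, ppoll(2) and pselect(2) also go through
 * here to install their temporary signal masks.
 */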

/*
 * Suspend thread until signal, providing mask to be set
 * in the meantime. Note nonstandard calling convention:
 * libc stub passes mask, not pointer, to save a copyin.
 */
int
sys_sigsuspend(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigsuspend_args /* {
		syscallarg(int) mask;
	} */ *uap = v;

	dosigsuspend(p, SCARG(uap, mask) &~ sigcantmask);
	while (tsleep_nsec(&nowake, PPAUSE|PCATCH, "sigsusp", INFSLP) == 0)
		continue;
	/* always return EINTR rather than ERESTART... */
	return (EINTR);
}

int
sigonstack(size_t stack)
{
	const struct sigaltstack *ss = &curproc->p_sigstk;

	return (ss->ss_flags & SS_DISABLE ? 0 :
	    (stack - (size_t)ss->ss_sp < ss->ss_size));
}
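
/*
 * The single comparison above also rejects addresses below ss_sp: with
 * stack < ss_sp the unsigned subtraction wraps to a huge value that can
 * never be less than ss_size, so one test covers both bounds.
 */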

int
sys_sigaltstack(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigaltstack_args /* {
		syscallarg(const struct sigaltstack *) nss;
		syscallarg(struct sigaltstack *) oss;
	} */ *uap = v;
	struct sigaltstack ss;
	const struct sigaltstack *nss;
	struct sigaltstack *oss;
	int onstack = sigonstack(PROC_STACK(p));
	int error;

	nss = SCARG(uap, nss);
	oss = SCARG(uap, oss);

	if (oss != NULL) {
		ss = p->p_sigstk;
		if (onstack)
			ss.ss_flags |= SS_ONSTACK;
		if ((error = copyout(&ss, oss, sizeof(ss))))
			return (error);
	}
	if (nss == NULL)
		return (0);
	error = copyin(nss, &ss, sizeof(ss));
	if (error)
		return (error);
	if (onstack)
		return (EPERM);
	if (ss.ss_flags & ~SS_DISABLE)
		return (EINVAL);
	if (ss.ss_flags & SS_DISABLE) {
		p->p_sigstk.ss_flags = ss.ss_flags;
		return (0);
	}
	if (ss.ss_size < MINSIGSTKSZ)
		return (ENOMEM);

	error = uvm_map_remap_as_stack(p, (vaddr_t)ss.ss_sp, ss.ss_size);
	if (error)
		return (error);

	p->p_sigstk = ss;
	return (0);
}

int
sys_kill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_kill_args /* {
		syscallarg(int) pid;
		syscallarg(int) signum;
	} */ *uap = v;
	struct process *pr;
	int pid = SCARG(uap, pid);
	int signum = SCARG(uap, signum);
	int error;
	int zombie = 0;

	if ((error = pledge_kill(cp, pid)) != 0)
		return (error);
	if (((u_int)signum) >= NSIG)
		return (EINVAL);
	if (pid > 0) {
		if ((pr = prfind(pid)) == NULL) {
			if ((pr = zombiefind(pid)) == NULL)
				return (ESRCH);
			else
				zombie = 1;
		}
		if (!cansignal(cp, pr, signum))
			return (EPERM);

		/* kill single process */
		if (signum && !zombie)
			prsignal(pr, signum);
		return (0);
	}
	switch (pid) {
	case -1:		/* broadcast signal */
		return (killpg1(cp, signum, 0, 1));
	case 0:			/* signal own process group */
		return (killpg1(cp, signum, 0, 0));
	default:		/* negative explicit process group */
		return (killpg1(cp, signum, -pid, 0));
	}
}

int
sys_thrkill(struct proc *cp, void *v, register_t *retval)
{
	struct sys_thrkill_args /* {
		syscallarg(pid_t) tid;
		syscallarg(int) signum;
		syscallarg(void *) tcb;
	} */ *uap = v;
	struct proc *p;
	int tid = SCARG(uap, tid);
	int signum = SCARG(uap, signum);
	void *tcb;

	if (((u_int)signum) >= NSIG)
		return (EINVAL);

	p = tid ? tfind_user(tid, cp->p_p) : cp;
	if (p == NULL)
		return (ESRCH);

	/* optionally require the target thread to have the given tcb addr */
	tcb = SCARG(uap, tcb);
	if (tcb != NULL && tcb != TCB_GET(p))
		return (ESRCH);

	if (signum)
		ptsignal(p, signum, STHREAD);
	return (0);
}

/*
 * Common code for kill process group/broadcast kill.
 * cp is calling process.
 */
int
killpg1(struct proc *cp, int signum, int pgid, int all)
{
	struct process *pr;
	struct pgrp *pgrp;
	int nfound = 0;

	if (all) {
		/*
		 * broadcast
		 */
		LIST_FOREACH(pr, &allprocess, ps_list) {
			if (pr->ps_pid <= 1 ||
			    pr->ps_flags & (PS_SYSTEM | PS_NOBROADCASTKILL) ||
			    pr == cp->p_p || !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	} else {
		if (pgid == 0)
			/*
			 * zero pgid means send to my process group.
			 */
			pgrp = cp->p_p->ps_pgrp;
		else {
			pgrp = pgfind(pgid);
			if (pgrp == NULL)
				return (ESRCH);
		}
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist) {
			if (pr->ps_pid <= 1 || pr->ps_flags & PS_SYSTEM ||
			    !cansignal(cp, pr, signum))
				continue;
			nfound++;
			if (signum)
				prsignal(pr, signum);
		}
	}
	return (nfound ? 0 : ESRCH);
}

#define CANDELIVER(uid, euid, pr) \
	(euid == 0 || \
	(uid) == (pr)->ps_ucred->cr_ruid || \
	(uid) == (pr)->ps_ucred->cr_svuid || \
	(uid) == (pr)->ps_ucred->cr_uid || \
	(euid) == (pr)->ps_ucred->cr_ruid || \
	(euid) == (pr)->ps_ucred->cr_svuid || \
	(euid) == (pr)->ps_ucred->cr_uid)

#define CANSIGIO(cr, pr) \
	CANDELIVER((cr)->cr_ruid, (cr)->cr_uid, (pr))
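
/*
 * The credentials passed to CANSIGIO are the ones stored when the
 * F_SETOWN-style owner was established, so the target is checked
 * against the signaller as it was then, not as it is now.
 */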

/*
 * Send a signal to a process group. If checkctty is 1,
 * limit to members which have a controlling terminal.
 */
void
pgsignal(struct pgrp *pgrp, int signum, int checkctty)
{
	struct process *pr;

	if (pgrp)
		LIST_FOREACH(pr, &pgrp->pg_members, ps_pglist)
			if (checkctty == 0 || pr->ps_flags & PS_CONTROLT)
				prsignal(pr, signum);
}

/*
 * Send a SIGIO or SIGURG signal to a process or process group using stored
 * credentials rather than those of the current process.
 */
void
pgsigio(struct sigio_ref *sir, int sig, int checkctty)
{
	struct process *pr;
	struct sigio *sigio;

	if (sir->sir_sigio == NULL)
		return;

	KERNEL_LOCK();
	mtx_enter(&sigio_lock);
	sigio = sir->sir_sigio;
	if (sigio == NULL)
		goto out;
	if (sigio->sio_pgid > 0) {
		if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc))
			prsignal(sigio->sio_proc, sig);
	} else if (sigio->sio_pgid < 0) {
		LIST_FOREACH(pr, &sigio->sio_pgrp->pg_members, ps_pglist) {
			if (CANSIGIO(sigio->sio_ucred, pr) &&
			    (checkctty == 0 || (pr->ps_flags & PS_CONTROLT)))
				prsignal(pr, sig);
		}
	}
out:
	mtx_leave(&sigio_lock);
	KERNEL_UNLOCK();
}

/*
 * Recalculate the signal mask and reset the signal disposition after
 * the usermode frame for delivery is formed.
 */
void
postsig_done(struct proc *p, int signum, sigset_t catchmask, int reset)
{
	p->p_ru.ru_nsignals++;
	SET(p->p_sigmask, catchmask);
	if (reset != 0) {
		sigset_t mask = sigmask(signum);
		struct sigacts *ps = p->p_p->ps_sigacts;

		mtx_enter(&p->p_p->ps_mtx);
		ps->ps_sigcatch &= ~mask;
		if (signum != SIGCONT && sigprop[signum] & SA_IGNORE)
			ps->ps_sigignore |= mask;
		ps->ps_sigact[signum] = SIG_DFL;
		mtx_leave(&p->p_p->ps_mtx);
	}
}

/*
 * Send a signal caused by a trap to the current thread.
 * If it will be caught immediately, deliver it with correct code.
 * Otherwise, post it normally.
 */
void
trapsignal(struct proc *p, int signum, u_long trapno, int code,
    union sigval sigval)
{
	struct process *pr = p->p_p;
	struct sigctx ctx;
	int mask;

	switch (signum) {
	case SIGILL:
		if (code == ILL_BTCFI) {
			pr->ps_acflag |= ABTCFI;
			break;
		}
		/* FALLTHROUGH */
	case SIGBUS:
	case SIGSEGV:
		pr->ps_acflag |= ATRAP;
		break;
	}

	mask = sigmask(signum);
	setsigctx(p, signum, &ctx);
	if ((pr->ps_flags & PS_TRACED) == 0 && ctx.sig_catch != 0 &&
	    (p->p_sigmask & mask) == 0) {
		siginfo_t si;

		initsiginfo(&si, signum, trapno, code, sigval);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_PSIG)) {
			ktrpsig(p, signum, ctx.sig_action,
			    p->p_sigmask, code, &si);
		}
#endif
		if (sendsig(ctx.sig_action, signum, p->p_sigmask, &si,
		    ctx.sig_info, ctx.sig_onstack)) {
			KERNEL_LOCK();
			sigexit(p, SIGILL);
			/* NOTREACHED */
		}
		postsig_done(p, signum, ctx.sig_catchmask, ctx.sig_reset);
	} else {
		p->p_sisig = signum;
		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
		p->p_sicode = code;
		p->p_sigval = sigval;

		/*
		 * If traced, stop if signal is masked, and stay stopped
		 * until released by the debugger. If our parent process
		 * is waiting for us, don't hang as we could deadlock.
		 */
		if (((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) &&
		    signum != SIGKILL && (p->p_sigmask & mask) != 0) {
			single_thread_set(p, SINGLE_SUSPEND | SINGLE_NOWAIT);
			pr->ps_xsig = signum;

			SCHED_LOCK();
			proc_stop(p, 1);
			SCHED_UNLOCK();

			signum = pr->ps_xsig;
			pr->ps_xsig = 0;
			if ((p->p_flag & P_TRACESINGLE) == 0)
				single_thread_clear(p, 0);
			atomic_clearbits_int(&p->p_flag, P_TRACESINGLE);

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, skip sending the signal.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 ||
			    signum == 0)
				return;

			/* update signal info */
			p->p_sisig = signum;
			mask = sigmask(signum);
		}

		/*
		 * Signals like SIGBUS and SIGSEGV should not, when
		 * generated by the kernel, be ignorable or blockable.
		 * If it is and we're not being traced, then just kill
		 * the process.
		 * After vfs_shutdown(9), init(8) cannot receive signals
		 * because new code pages of the signal handler cannot be
		 * mapped from halted storage. init(8) may not die or the
		 * kernel panics. Better loop between signal handler and
		 * page fault trap until the machine is halted.
		 */
		if ((pr->ps_flags & PS_TRACED) == 0 &&
		    (sigprop[signum] & SA_KILL) &&
		    ((p->p_sigmask & mask) || ctx.sig_ignore) &&
		    pr->ps_pid != 1) {
			KERNEL_LOCK();
			sigexit(p, signum);
			/* NOTREACHED */
		}
		ptsignal(p, signum, STHREAD);
	}
}

/*
 * Send the signal to the process. If the signal has an action, the action
 * is usually performed by the target process rather than the caller; we add
 * the signal to the set of pending signals for the process.
 *
 * Exceptions:
 *	o When a stop signal is sent to a sleeping process that takes the
 *	  default action, the process is stopped without awakening it.
 *	o SIGCONT restarts stopped processes (or puts them back to sleep)
 *	  regardless of the signal action (eg, blocked or ignored).
 *
 * Other ignored signals are discarded immediately.
 */
void
psignal(struct proc *p, int signum)
{
	ptsignal(p, signum, SPROCESS);
}

void
prsignal(struct process *pr, int signum)
{
	mtx_enter(&pr->ps_mtx);
	/* Ignore signal if the target process is exiting */
	if (pr->ps_flags & PS_EXITING) {
		mtx_leave(&pr->ps_mtx);
		return;
	}
	ptsignal_locked(TAILQ_FIRST(&pr->ps_threads), signum, SPROCESS);
	mtx_leave(&pr->ps_mtx);
}

/*
 * type = SPROCESS	process signal, can be diverted (sigwait())
 * type = STHREAD	thread signal, but should be propagated if unhandled
 * type = SPROPAGATED	propagated to this thread, so don't propagate again
 */
void
ptsignal(struct proc *p, int signum, enum signal_type type)
{
	struct process *pr = p->p_p;

	mtx_enter(&pr->ps_mtx);
	ptsignal_locked(p, signum, type);
	mtx_leave(&pr->ps_mtx);
}

void
ptsignal_locked(struct proc *p, int signum, enum signal_type type)
{
	int prop;
	sig_t action, altaction = SIG_DFL;
	sigset_t mask, sigmask;
	int *siglist;
	struct process *pr = p->p_p;
	struct proc *q;
	int wakeparent = 0;

	MUTEX_ASSERT_LOCKED(&pr->ps_mtx);

#ifdef DIAGNOSTIC
	if ((u_int)signum >= NSIG || signum == 0)
		panic("psignal signal number");
#endif

	/* Ignore signal if the target process is exiting */
	if (pr->ps_flags & PS_EXITING)
		return;

	mask = sigmask(signum);
	sigmask = READ_ONCE(p->p_sigmask);

	if (type == SPROCESS) {
		sigset_t tmpmask;

		/* Accept SIGKILL to coredumping processes */
		if (pr->ps_flags & PS_COREDUMP && signum == SIGKILL) {
			atomic_setbits_int(&pr->ps_siglist, mask);
			return;
		}

		/*
		 * If the current thread can process the signal
		 * immediately (it's unblocked) then have it take it.
		 */
		q = curproc;
		tmpmask = READ_ONCE(q->p_sigmask);
		if (q->p_p == pr && (q->p_flag & P_WEXIT) == 0 &&
		    (tmpmask & mask) == 0) {
			p = q;
			sigmask = tmpmask;
		} else {
			/*
			 * A process-wide signal can be diverted to a
			 * different thread that's in sigwait() for this
			 * signal. If there isn't such a thread, then
			 * pick a thread that doesn't have it blocked so
			 * that the stop/kill consideration isn't
			 * delayed. Otherwise, mark it pending on the
			 * main thread.
			 */
			TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {

				/* ignore exiting threads */
				if (q->p_flag & P_WEXIT)
					continue;

				/* skip threads that have the signal blocked */
				tmpmask = READ_ONCE(q->p_sigmask);
				if ((tmpmask & mask) != 0)
					continue;

				/* okay, could send to this thread */
				p = q;
				sigmask = tmpmask;

				/*
				 * sigsuspend, sigwait, ppoll/pselect, etc?
				 * Definitely go to this thread, as it's
				 * already blocked in the kernel.
				 */
				if (q->p_flag & P_SIGSUSPEND)
					break;
			}
		}
	}

	if (type != SPROPAGATED)
		knote_locked(&pr->ps_klist, NOTE_SIGNAL | signum);

	prop = sigprop[signum];

	/*
	 * If proc is traced, always give parent a chance.
	 */
	if (pr->ps_flags & PS_TRACED) {
		action = SIG_DFL;
	} else {
		sigset_t sigcatch, sigignore;

		/*
		 * If the signal is being ignored,
		 * then we forget about it immediately.
		 * (Note: we don't set SIGCONT in ps_sigignore,
		 * and if it is set to SIG_IGN,
		 * action will be SIG_DFL here.)
		 */
		sigignore = pr->ps_sigacts->ps_sigignore;
		sigcatch = pr->ps_sigacts->ps_sigcatch;

		if (sigignore & mask)
			return;
		if (sigmask & mask) {
			action = SIG_HOLD;
			if (sigcatch & mask)
				altaction = SIG_CATCH;
		} else if (sigcatch & mask) {
			action = SIG_CATCH;
		} else {
			action = SIG_DFL;

			if (prop & SA_KILL && pr->ps_nice > NZERO)
				pr->ps_nice = NZERO;

			/*
			 * If sending a tty stop signal to a member of an
			 * orphaned process group, discard the signal here if
			 * the action is default; don't stop the process below
			 * if sleeping, and don't clear any pending SIGCONT.
			 */
			if (prop & SA_TTYSTOP && pr->ps_pgrp->pg_jobc == 0)
				return;
		}
	}
	/*
	 * If delivered to process, mark as pending there. Continue and stop
	 * signals will be propagated to all threads. So they are always
	 * marked at thread level.
	 */
	siglist = (type == SPROCESS) ? &pr->ps_siglist : &p->p_siglist;
	if (prop & (SA_CONT | SA_STOP))
		siglist = &p->p_siglist;

	/*
	 * XXX delay processing of SA_STOP signals unless action == SIG_DFL?
	 */
	if (prop & (SA_CONT | SA_STOP) && type != SPROPAGATED)
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link)
			if (q != p)
				ptsignal_locked(q, signum, SPROPAGATED);

	SCHED_LOCK();

	switch (p->p_stat) {

	case SSTOP:
		/*
		 * If traced process is already stopped,
		 * then no further action is necessary.
		 */
		if (pr->ps_flags & PS_TRACED)
			goto out;

		/*
		 * Kill signal always sets processes running.
		 */
		if (signum == SIGKILL) {
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			/* Raise priority to at least PUSER. */
			if (p->p_usrpri > PUSER)
				p->p_usrpri = PUSER;
			unsleep(p);
			setrunnable(p);
			goto out;
		}

		if (prop & SA_CONT) {
			/*
			 * If SIGCONT is default (or ignored), we continue the
			 * process but don't leave the signal in p_siglist, as
			 * it has no further action. If SIGCONT is held, we
			 * continue the process and leave the signal in
			 * p_siglist. If the process catches SIGCONT, let it
			 * handle the signal itself. If it isn't waiting on
			 * an event, then it goes back to run state.
			 * Otherwise, process goes back to sleep state.
			 */
			atomic_setbits_int(&pr->ps_flags, PS_CONTINUED);
			atomic_clearbits_int(&pr->ps_flags,
			    PS_WAITED | PS_STOPPED);
			atomic_clearbits_int(&p->p_flag, P_SUSPSIG);
			wakeparent = 1;
			if (action == SIG_DFL)
				mask = 0;
			if (action == SIG_CATCH) {
				/* Raise priority to at least PUSER. */
				if (p->p_usrpri > PUSER)
					p->p_usrpri = PUSER;
				unsleep(p);
				setrunnable(p);
				goto out;
			}
			if (p->p_wchan == NULL) {
				unsleep(p);
				setrunnable(p);
				goto out;
			}
			atomic_clearbits_int(&p->p_flag, P_WSLEEP);
			p->p_stat = SSLEEP;
			goto out;
		}

		/*
		 * Defer further processing for signals which are held,
		 * except that stopped processes must be continued by SIGCONT.
		 */
		if (action == SIG_HOLD)
			goto out;

		if (prop & SA_STOP) {
			/*
			 * Already stopped, don't need to stop again.
			 * (If we did the shell could get confused.)
			 */
			mask = 0;
			goto out;
		}

		/*
		 * If process is sleeping interruptibly, then simulate a
		 * wakeup so that when it is continued, it will be made
		 * runnable and can look at the signal. But don't make
		 * the process runnable, leave it stopped.
		 */
		if (p->p_flag & P_SINTR)
			unsleep(p);
		goto out;

	case SSLEEP:
		/*
		 * If process is sleeping uninterruptibly
		 * we can't interrupt the sleep... the signal will
		 * be noticed when the process returns through
		 * trap() or syscall().
		 */
		if ((p->p_flag & P_SINTR) == 0)
			goto out;
		/*
		 * Process is sleeping and traced... make it runnable
		 * so it can discover the signal in cursig() and stop
		 * for the parent.
		 */
		if (pr->ps_flags & PS_TRACED) {
			unsleep(p);
			setrunnable(p);
			goto out;
		}

		/*
		 * Recheck sigmask before waking up the process,
		 * there is a chance that while sending the signal
		 * the process changed sigmask and went to sleep.
		 */
		sigmask = READ_ONCE(p->p_sigmask);
		if (sigmask & mask)
			goto out;
		else if (action == SIG_HOLD) {
			/* signal got unmasked, get proper action */
			action = altaction;

			if (action == SIG_DFL) {
				if (prop & SA_KILL && pr->ps_nice > NZERO)
					pr->ps_nice = NZERO;

				/*
				 * Discard tty stop signals sent to an
				 * orphaned process group, see above.
				 */
				if (prop & SA_TTYSTOP &&
				    pr->ps_pgrp->pg_jobc == 0) {
					mask = 0;
					prop = 0;
					goto out;
				}
			}
		}

		/*
		 * If SIGCONT is default (or ignored) and process is
		 * asleep, we are finished; the process should not
		 * be awakened.
		 */
		if ((prop & SA_CONT) && action == SIG_DFL) {
			mask = 0;
			goto out;
		}
		/*
		 * When a sleeping process receives a stop
		 * signal, process immediately if possible.
		 */
		if ((prop & SA_STOP) && action == SIG_DFL) {
			/*
			 * If a child holding parent blocked,
			 * stopping could cause deadlock.
			 */
			if (pr->ps_flags & PS_PPWAIT)
				goto out;
			mask = 0;
			pr->ps_xsig = signum;
			proc_stop(p, 0);
			goto out;
		}
		/*
		 * All other (caught or default) signals
		 * cause the process to run.
		 * Raise priority to at least PUSER.
		 */
		if (p->p_usrpri > PUSER)
			p->p_usrpri = PUSER;
		unsleep(p);
		setrunnable(p);
		goto out;
		/* NOTREACHED */

	case SONPROC:
		if (action == SIG_HOLD)
			goto out;

		/* set siglist before issuing the ast */
		atomic_setbits_int(siglist, mask);
		mask = 0;
		signotify(p);
		/* FALLTHROUGH */
	default:
		/*
		 * SRUN, SIDL, SDEAD do nothing with the signal,
		 * other than kicking ourselves if we are running.
		 * It will either never be noticed, or noticed very soon.
		 */
		goto out;
	}
	/* NOTREACHED */

out:
	/* finally adjust siglist */
	if (mask)
		atomic_setbits_int(siglist, mask);
	if (prop & SA_CONT) {
		atomic_clearbits_int(siglist, STOPSIGMASK);
	}
	if (prop & SA_STOP) {
		atomic_clearbits_int(siglist, CONTSIGMASK);
		atomic_clearbits_int(&pr->ps_flags, PS_CONTINUED);
	}

	SCHED_UNLOCK();
	if (wakeparent)
		wakeup(pr->ps_pptr);
}

/* fill the signal context which should be used by postsig() and issignal() */
void
setsigctx(struct proc *p, int signum, struct sigctx *sctx)
{
	struct process *pr = p->p_p;
	struct sigacts *ps = pr->ps_sigacts;
	sigset_t mask;

	mtx_enter(&pr->ps_mtx);
	mask = sigmask(signum);
	sctx->sig_action = ps->ps_sigact[signum];
	sctx->sig_catchmask = ps->ps_catchmask[signum];
	sctx->sig_reset = (ps->ps_sigreset & mask) != 0;
	sctx->sig_info = (ps->ps_siginfo & mask) != 0;
	sctx->sig_intr = (ps->ps_sigintr & mask) != 0;
	sctx->sig_onstack = (ps->ps_sigonstack & mask) != 0;
	sctx->sig_ignore = (ps->ps_sigignore & mask) != 0;
	sctx->sig_catch = (ps->ps_sigcatch & mask) != 0;
	sctx->sig_stop = sigprop[signum] & SA_STOP &&
	    (long)sctx->sig_action == (long)SIG_DFL;
	if (sctx->sig_stop) {
		/*
		 * If the process is a member of an orphaned
		 * process group, ignore tty stop signals.
		 */
		if (pr->ps_flags & PS_TRACED ||
		    (pr->ps_pgrp->pg_jobc == 0 &&
		    sigprop[signum] & SA_TTYSTOP)) {
			sctx->sig_stop = 0;
			sctx->sig_ignore = 1;
		}
	}
	mtx_leave(&pr->ps_mtx);
}

/*
 * Determine signal that should be delivered to process p, the current
 * process, 0 if none.
 *
 * If the current process has received a signal (should be caught or cause
 * termination, should interrupt current syscall), return the signal number.
 * Stop signals with default action are processed immediately, then cleared;
 * they aren't returned. This is checked after each entry to the system for
 * a syscall or trap. The normal call sequence is
 *
 *	while (signum = cursig(curproc, &ctx, 0))
 *		postsig(signum, &ctx);
 *
 * Assumes that if the P_SINTR flag is set, we're holding both the
 * kernel and scheduler locks.
 */
int
cursig(struct proc *p, struct sigctx *sctx, int deep)
{
	struct process *pr = p->p_p;
	int signum, mask, prop;
	sigset_t ps_siglist;

	KASSERT(p == curproc);

	for (;;) {
		ps_siglist = READ_ONCE(pr->ps_siglist);
		membar_consumer();
		mask = SIGPENDING(p);
		if (pr->ps_flags & PS_PPWAIT)
			mask &= ~STOPSIGMASK;
		if (mask == 0)		/* no signal to send */
			return (0);
		signum = ffs((long)mask);
		mask = sigmask(signum);

		/* take the signal! */
		if (atomic_cas_uint(&pr->ps_siglist, ps_siglist,
		    ps_siglist & ~mask) != ps_siglist) {
			/* lost race taking the process signal, restart */
			continue;
		}
		atomic_clearbits_int(&p->p_siglist, mask);
		setsigctx(p, signum, sctx);

		/*
		 * We should see pending but ignored signals
		 * only if PS_TRACED was on when they were posted.
		 */
		if (sctx->sig_ignore && (pr->ps_flags & PS_TRACED) == 0)
			continue;

		/*
		 * If cursig is called while going to sleep, abort now
		 * and stop the sleep. When the call has unwound to
		 * userret, cursig is called again and the signal can be
		 * handled cleanly there.
		 */
		if (deep)
			goto keep;

		/*
		 * If traced, always stop, and stay stopped until released
		 * by the debugger. If our parent process is waiting for
		 * us, don't hang as we could deadlock.
		 */
		if (((pr->ps_flags & (PS_TRACED | PS_PPWAIT)) == PS_TRACED) &&
		    signum != SIGKILL) {
			single_thread_set(p, SINGLE_SUSPEND | SINGLE_NOWAIT);
			pr->ps_xsig = signum;

			SCHED_LOCK();
			proc_stop(p, 1);
			SCHED_UNLOCK();

			/*
			 * re-take the signal before releasing
			 * the other threads. Must check the continue
			 * conditions below and only take the signal if
			 * those are not true.
			 */
			signum = pr->ps_xsig;
			pr->ps_xsig = 0;
			mask = sigmask(signum);
			setsigctx(p, signum, sctx);
			if (!((pr->ps_flags & PS_TRACED) == 0 ||
			    signum == 0 ||
			    (p->p_sigmask & mask) != 0)) {
				atomic_clearbits_int(&p->p_siglist, mask);
				atomic_clearbits_int(&pr->ps_siglist, mask);
			}

			if ((p->p_flag & P_TRACESINGLE) == 0)
				single_thread_clear(p, 0);
			atomic_clearbits_int(&p->p_flag, P_TRACESINGLE);

			/*
			 * If we are no longer being traced, or the parent
			 * didn't give us a signal, look for more signals.
			 */
			if ((pr->ps_flags & PS_TRACED) == 0 ||
			    signum == 0)
				continue;

			/*
			 * If the new signal is being masked, look for other
			 * signals.
			 */
			if ((p->p_sigmask & mask) != 0)
				continue;

		}

		prop = sigprop[signum];

		/*
		 * Decide whether the signal should be returned.
		 * Return the signal's number, or fall through
		 * to clear it from the pending mask.
		 */
		switch ((long)sctx->sig_action) {
		case (long)SIG_DFL:
			/*
			 * Don't take default actions on system processes.
			 */
			if (pr->ps_pid <= 1) {
#ifdef DIAGNOSTIC
				/*
				 * Are you sure you want to ignore SIGSEGV
				 * in init? XXX
				 */
				printf("Process (pid %d) got signal"
				    " %d\n", pr->ps_pid, signum);
#endif
				break;		/* == ignore */
			}
			/*
			 * If there is a pending stop signal to process
			 * with default action, stop here,
			 * then clear the signal.
			 */
			if (sctx->sig_stop) {
				mtx_enter(&pr->ps_mtx);
				if (pr->ps_flags & PS_TRACED ||
				    (pr->ps_pgrp->pg_jobc == 0 &&
				    prop & SA_TTYSTOP)) {
					mtx_leave(&pr->ps_mtx);
					break;	/* == ignore */
				}
				mtx_leave(&pr->ps_mtx);
				pr->ps_xsig = signum;
				SCHED_LOCK();
				proc_stop(p, 1);
				SCHED_UNLOCK();
				break;
			} else if (prop & SA_IGNORE) {
				/*
				 * Except for SIGCONT, shouldn't get here.
				 * Default action is to ignore; drop it.
				 */
				break;		/* == ignore */
			} else
				goto keep;
			/* NOTREACHED */
		case (long)SIG_IGN:
			/*
			 * Masking above should prevent us ever trying
			 * to take action on an ignored signal other
			 * than SIGCONT, unless process is traced.
			 */
			if ((prop & SA_CONT) == 0 &&
			    (pr->ps_flags & PS_TRACED) == 0)
				printf("%s\n", __func__);
			break;		/* == ignore */
		default:
			/*
			 * This signal has an action, let
			 * postsig() process it.
			 */
			goto keep;
		}
	}
	/* NOTREACHED */

keep:
	atomic_setbits_int(&p->p_siglist, mask);	/* leave the signal for later */
	return (signum);
}

/*
 * Put the argument process into the stopped state and notify the parent
 * via wakeup. Signals are handled elsewhere. The process must not be
 * on the run queue.
 */
void
proc_stop(struct proc *p, int sw)
{
	struct process *pr = p->p_p;

#ifdef MULTIPROCESSOR
	SCHED_ASSERT_LOCKED();
#endif
	/* do not stop exiting procs */
	if (ISSET(p->p_flag, P_WEXIT))
		return;

	p->p_stat = SSTOP;
	atomic_clearbits_int(&pr->ps_flags, PS_WAITED);
	atomic_setbits_int(&pr->ps_flags, PS_STOPPING);
	atomic_setbits_int(&p->p_flag, P_SUSPSIG);
	/*
	 * We need this soft interrupt to be handled fast.
	 * Extra calls to softclock don't hurt.
	 */
	softintr_schedule(proc_stop_si);
	if (sw)
		mi_switch();
}

/*
 * Called from a soft interrupt to send signals to the parents of stopped
 * processes.
 * We can't do this in proc_stop because it's called with nasty locks held
 * and we would need recursive scheduler lock to deal with that.
 */
void
proc_stop_sweep(void *v)
{
	struct process *pr;

	LIST_FOREACH(pr, &allprocess, ps_list) {
		if ((pr->ps_flags & PS_STOPPING) == 0)
			continue;
		atomic_setbits_int(&pr->ps_flags, PS_STOPPED);
		atomic_clearbits_int(&pr->ps_flags, PS_STOPPING);

		if ((pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDSTOP) == 0)
			prsignal(pr->ps_pptr, SIGCHLD);
		wakeup(pr->ps_pptr);
	}
}

/*
 * Take the action for the specified signal
 * from the current set of pending signals.
 */
void
postsig(struct proc *p, int signum, struct sigctx *sctx)
{
	u_long trapno;
	int mask, returnmask;
	siginfo_t si;
	union sigval sigval;
	int code;

	KASSERT(signum != 0);

	mask = sigmask(signum);
	atomic_clearbits_int(&p->p_siglist, mask);
	sigval.sival_ptr = NULL;

	if (p->p_sisig != signum) {
		trapno = 0;
		code = SI_USER;
		sigval.sival_ptr = NULL;
	} else {
		trapno = p->p_sitrapno;
		code = p->p_sicode;
		sigval = p->p_sigval;
	}
	initsiginfo(&si, signum, trapno, code, sigval);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_PSIG)) {
		ktrpsig(p, signum, sctx->sig_action, p->p_flag & P_SIGSUSPEND ?
		    p->p_oldmask : p->p_sigmask, code, &si);
	}
#endif
	if (sctx->sig_action == SIG_DFL) {
		/*
		 * Default action, where the default is to kill
		 * the process. (Other cases were ignored above.)
		 */
		KERNEL_LOCK();
		sigexit(p, signum);
		/* NOTREACHED */
	} else {
		/*
		 * If we get here, the signal must be caught.
		 */
#ifdef DIAGNOSTIC
		if (sctx->sig_action == SIG_IGN || (p->p_sigmask & mask))
			panic("postsig action");
#endif
		/*
		 * Set the new mask value and also defer further
		 * occurrences of this signal.
		 *
		 * Special case: user has done a sigpause. Here the
		 * current mask is not of interest, but rather the
		 * mask from before the sigpause is what we want
		 * restored after the signal processing is completed.
		 */
		if (p->p_flag & P_SIGSUSPEND) {
			atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);
			returnmask = p->p_oldmask;
		} else {
			returnmask = p->p_sigmask;
		}
		if (p->p_sisig == signum) {
			p->p_sisig = 0;
			p->p_sitrapno = 0;
			p->p_sicode = SI_USER;
			p->p_sigval.sival_ptr = NULL;
		}

		if (sendsig(sctx->sig_action, signum, returnmask, &si,
		    sctx->sig_info, sctx->sig_onstack)) {
			KERNEL_LOCK();
			sigexit(p, SIGILL);
			/* NOTREACHED */
		}
		postsig_done(p, signum, sctx->sig_catchmask, sctx->sig_reset);
	}
}

/*
 * Force the current process to exit with the specified signal, dumping core
 * if appropriate. We bypass the normal tests for masked and caught signals,
 * allowing unrecoverable failures to terminate the process without changing
 * signal state. Mark the accounting record with the signal termination.
 * If dumping core, save the signal number for the debugger. Calls exit and
 * does not return.
 */
void
sigexit(struct proc *p, int signum)
{
	/* Mark process as going away */
	atomic_setbits_int(&p->p_flag, P_WEXIT);

	p->p_p->ps_acflag |= AXSIG;
	if (sigprop[signum] & SA_CORE) {
		p->p_sisig = signum;

		/* if there are other threads, pause them */
		if (P_HASSIBLING(p))
			single_thread_set(p, SINGLE_UNWIND);

		if (coredump(p) == 0)
			signum |= WCOREFLAG;
	}
	exit1(p, 0, signum, EXIT_NORMAL);
	/* NOTREACHED */
}
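
/*
 * With WCOREFLAG or'ed in, the parent's wait(2) status reports both the
 * terminating signal and the fact that a core file was written, e.g.
 *
 *	if (WIFSIGNALED(status) && WCOREDUMP(status))
 *		warnx("killed by signal %d (core dumped)", WTERMSIG(status));
 */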

/*
 * Send uncatchable SIGABRT for coredump.
 */
void
sigabort(struct proc *p)
{
	struct sigaction sa;

	KASSERT(p == curproc || panicstr || db_active);

	memset(&sa, 0, sizeof sa);
	sa.sa_handler = SIG_DFL;
	setsigvec(p, SIGABRT, &sa);
	CLR(p->p_sigmask, sigmask(SIGABRT));
	psignal(p, SIGABRT);
}

/*
 * Return 1 if `sig', a given signal, is ignored or masked for `p', a given
 * thread, and 0 otherwise.
 */
int
sigismasked(struct proc *p, int sig)
{
	struct process *pr = p->p_p;
	int rv;

	KASSERT(p == curproc);

	mtx_enter(&pr->ps_mtx);
	rv = (pr->ps_sigacts->ps_sigignore & sigmask(sig)) ||
	    (p->p_sigmask & sigmask(sig));
	mtx_leave(&pr->ps_mtx);

	return !!rv;
}

struct coredump_iostate {
	struct proc *io_proc;
	struct vnode *io_vp;
	struct ucred *io_cred;
	off_t io_offset;
};

/*
 * Dump core, into a file named "progname.core", unless the process was
 * setuid/setgid.
 */
int
coredump(struct proc *p)
{
#ifdef SMALL_KERNEL
	return EPERM;
#else
	struct process *pr = p->p_p;
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vmspace *vm = p->p_vmspace;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	int error, len, incrash = 0;
	char *name;
	const char *dir = "/var/crash";

	atomic_setbits_int(&pr->ps_flags, PS_COREDUMP);

#ifdef PMAP_CHECK_COPYIN
	/* disable copyin checks, so we can write out text sections if needed */
	p->p_vmspace->vm_map.check_copyin_count = 0;
#endif

	/* Don't dump if will exceed file size limit. */
	if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >= lim_cur(RLIMIT_CORE))
		return (EFBIG);

	name = pool_get(&namei_pool, PR_WAITOK);

	/*
	 * If the process has inconsistent uids, nosuidcoredump
	 * determines coredump placement policy.
	 */
	if (((pr->ps_flags & PS_SUGID) && (error = suser(p))) ||
	    ((pr->ps_flags & PS_SUGID) && nosuidcoredump)) {
		if (nosuidcoredump == 3) {
			/*
			 * If the program directory does not exist, dumps of
			 * that core will silently fail.
			 */
			len = snprintf(name, MAXPATHLEN, "%s/%s/%u.core",
			    dir, pr->ps_comm, pr->ps_pid);
			incrash = KERNELPATH;
		} else if (nosuidcoredump == 2) {
			len = snprintf(name, MAXPATHLEN, "%s/%s.core",
			    dir, pr->ps_comm);
			incrash = KERNELPATH;
		} else {
			pool_put(&namei_pool, name);
			return (EPERM);
		}
	} else
		len = snprintf(name, MAXPATHLEN, "%s.core", pr->ps_comm);

	if (len >= MAXPATHLEN) {
		pool_put(&namei_pool, name);
		return (EACCES);
	}

	/*
	 * Control the UID used to write out. The normal case uses
	 * the real UID. If the sugid case is going to write into the
	 * controlled directory, we do so as root.
	 */
	if (incrash == 0) {
		cred = crdup(cred);
		cred->cr_uid = cred->cr_ruid;
		cred->cr_gid = cred->cr_rgid;
	} else {
		if (p->p_fd->fd_rdir) {
			vrele(p->p_fd->fd_rdir);
			p->p_fd->fd_rdir = NULL;
		}
		p->p_ucred = crdup(p->p_ucred);
		crfree(cred);
		cred = p->p_ucred;
		crhold(cred);
		cred->cr_uid = 0;
		cred->cr_gid = 0;
	}

	/* incrash should be 0 or KERNELPATH only */
	NDINIT(&nd, 0, BYPASSUNVEIL | incrash, UIO_SYSSPACE, name, p);

	error = vn_open(&nd, O_CREAT | FWRITE | O_NOFOLLOW | O_NONBLOCK,
	    S_IRUSR | S_IWUSR);

	if (error)
		goto out;

	/*
	 * Don't dump to non-regular files, files with links, or files
	 * owned by someone else.
	 */
	vp = nd.ni_vp;
	if ((error = VOP_GETATTR(vp, &vattr, cred, p)) != 0) {
		VOP_UNLOCK(vp);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	if (vp->v_type != VREG || vattr.va_nlink != 1 ||
	    vattr.va_mode & ((VREAD | VWRITE) >> 3 | (VREAD | VWRITE) >> 6) ||
	    vattr.va_uid != cred->cr_uid) {
		error = EACCES;
		VOP_UNLOCK(vp);
		vn_close(vp, FWRITE, cred, p);
		goto out;
	}
	vattr_null(&vattr);
	vattr.va_size = 0;
	VOP_SETATTR(vp, &vattr, cred, p);
	pr->ps_acflag |= ACORE;

	io.io_proc = p;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;
	VOP_UNLOCK(vp);
	vref(vp);
	error = vn_close(vp, FWRITE, cred, p);
	if (error == 0)
		error = coredump_elf(p, &io);
	vrele(vp);
out:
	crfree(cred);
	pool_put(&namei_pool, name);
	return (error);
#endif
}

#ifndef SMALL_KERNEL
int
coredump_write(void *cookie, enum uio_seg segflg, const void *data, size_t len,
    int isvnode)
{
	struct coredump_iostate *io = cookie;
	off_t coffset = 0;
	size_t csize;
	int chunk, error;

	csize = len;
	do {
		if (sigmask(SIGKILL) &
		    (io->io_proc->p_siglist | io->io_proc->p_p->ps_siglist))
			return (EINTR);

		/* Rest of the loop sleeps with lock held, so... */
		yield();

		chunk = MIN(csize, MAXPHYS);
		error = vn_rdwr(UIO_WRITE, io->io_vp,
		    (caddr_t)data + coffset, chunk,
		    io->io_offset + coffset, segflg,
		    IO_UNIT, io->io_cred, NULL, io->io_proc);
		if (error && (error != EFAULT || !isvnode)) {
			struct process *pr = io->io_proc->p_p;

			if (error == ENOSPC)
				log(LOG_ERR,
				    "coredump of %s(%d) failed, filesystem full\n",
				    pr->ps_comm, pr->ps_pid);
			else
				log(LOG_ERR,
				    "coredump of %s(%d), write failed: errno %d\n",
				    pr->ps_comm, pr->ps_pid, error);
			return (error);
		}

		coffset += chunk;
		csize -= chunk;
	} while (csize > 0);

	io->io_offset += len;
	return (0);
}
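
/*
 * Writes go out in MAXPHYS-sized chunks with a yield() in between, so a
 * large dump cannot hog the CPU, and a pending SIGKILL aborts it. An
 * EFAULT from an unmapped source page is tolerated for vnode output:
 * the offset still advances, leaving a hole rather than failing the dump.
 */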

void
coredump_unmap(void *cookie, vaddr_t start, vaddr_t end)
{
	struct coredump_iostate *io = cookie;

	uvm_unmap(&io->io_proc->p_vmspace->vm_map, start, end);
}

#endif /* !SMALL_KERNEL */

/*
 * Nonexistent system call-- signal process (may want to handle it).
 * Flag error in case process won't see signal immediately (blocked or ignored).
 */
int
sys_nosys(struct proc *p, void *v, register_t *retval)
{
	ptsignal(p, SIGSYS, STHREAD);
	return (ENOSYS);
}

int
sys___thrsigdivert(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsigdivert_args /* {
		syscallarg(sigset_t) sigmask;
		syscallarg(siginfo_t *) info;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct sigctx ctx;
	sigset_t mask = SCARG(uap, sigmask) &~ sigcantmask;
	siginfo_t si;
	uint64_t nsecs = INFSLP;
	int timeinvalid = 0;
	int error = 0;

	memset(&si, 0, sizeof(si));

	if (SCARG(uap, timeout) != NULL) {
		struct timespec ts;
		if ((error = copyin(SCARG(uap, timeout), &ts, sizeof(ts))) != 0)
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrreltimespec(p, &ts);
#endif
		if (!timespecisvalid(&ts))
			timeinvalid = 1;
		else
			nsecs = TIMESPEC_TO_NSEC(&ts);
	}

	dosigsuspend(p, p->p_sigmask &~ mask);
	for (;;) {
		si.si_signo = cursig(p, &ctx, 0);
		if (si.si_signo != 0) {
			sigset_t smask = sigmask(si.si_signo);
			if (smask & mask) {
				atomic_clearbits_int(&p->p_siglist, smask);
				error = 0;
				break;
			}
		}

		/* per-POSIX, delay this error until after the above */
		if (timeinvalid)
			error = EINVAL;
		/* per-POSIX, return immediately if timeout is zero-valued */
		if (nsecs == 0)
			error = EAGAIN;

		if (error != 0)
			break;

		error = tsleep_nsec(&nowake, PPAUSE|PCATCH, "sigwait", nsecs);
	}

	if (error == 0) {
		*retval = si.si_signo;
		if (SCARG(uap, info) != NULL) {
			error = copyout(&si, SCARG(uap, info), sizeof(si));
#ifdef KTRACE
			if (error == 0 && KTRPOINT(p, KTR_STRUCT))
				ktrsiginfo(p, &si);
#endif
		}
	} else if (error == ERESTART && SCARG(uap, timeout) != NULL) {
		/*
		 * Restarting is wrong if there's a timeout, as it'll be
		 * for the same interval again
		 */
		error = EINTR;
	}

	return (error);
}

void
initsiginfo(siginfo_t *si, int sig, u_long trapno, int code, union sigval val)
{
	memset(si, 0, sizeof(*si));

	si->si_signo = sig;
	si->si_code = code;
	if (code == SI_USER) {
		si->si_value = val;
	} else {
		switch (sig) {
		case SIGSEGV:
		case SIGILL:
		case SIGBUS:
		case SIGFPE:
			si->si_addr = val.sival_ptr;
			si->si_trapno = trapno;
			break;
		case SIGXFSZ:
			break;
		}
	}
}
2021
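/*
 * Deferred work on the way back out to userspace: honour
 * single-thread requests, post SIGPROF/SIGVTALRM if their timers
 * fired, and deliver any pending signals.
 */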
void
userret(struct proc *p)
{
	struct sigctx ctx;
	int signum;

	if (p->p_flag & P_SUSPSINGLE)
		single_thread_check(p, 0);

	/* send SIGPROF or SIGVTALRM if their timers interrupted this thread */
	if (p->p_flag & P_PROFPEND) {
		atomic_clearbits_int(&p->p_flag, P_PROFPEND);
		psignal(p, SIGPROF);
	}
	if (p->p_flag & P_ALRMPEND) {
		atomic_clearbits_int(&p->p_flag, P_ALRMPEND);
		psignal(p, SIGVTALRM);
	}

	if (SIGPENDING(p) != 0) {
		while ((signum = cursig(p, &ctx, 0)) != 0)
			postsig(p, signum, &ctx);
	}

	/*
	 * If P_SIGSUSPEND is still set here, then we still need to restore
	 * the original sigmask before returning to userspace.  Also, this
	 * might unmask some pending signals, so we need to check a second
	 * time for signals to post.
	 */
	if (p->p_flag & P_SIGSUSPEND) {
		p->p_sigmask = p->p_oldmask;
		atomic_clearbits_int(&p->p_flag, P_SIGSUSPEND);

		while ((signum = cursig(p, &ctx, 0)) != 0)
			postsig(p, signum, &ctx);
	}

	WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
}

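/*
 * Check whether this thread should suspend, unwind, or exit because
 * another thread has put the process into single-threaded mode.
 * Called with ps_mtx held; "deep" means we are too deep in the
 * kernel to stop in place and may only unwind to the boundary.
 */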
int
single_thread_check_locked(struct proc *p, int deep)
{
	struct process *pr = p->p_p;

	MUTEX_ASSERT_LOCKED(&pr->ps_mtx);

	if (pr->ps_single == NULL || pr->ps_single == p)
		return (0);

	do {
		/* if we're deep in the kernel, we must unwind to the edge */
		if (deep) {
			if (pr->ps_flags & PS_SINGLEUNWIND)
				return (ERESTART);
			if (pr->ps_flags & PS_SINGLEEXIT)
				return (EINTR);
		}

		if (pr->ps_flags & PS_SINGLEEXIT) {
			mtx_leave(&pr->ps_mtx);
			KERNEL_LOCK();
			exit1(p, 0, 0, EXIT_THREAD_NOCHECK);
			/* NOTREACHED */
		}

		if (--pr->ps_singlecnt == 0)
			wakeup(&pr->ps_singlecnt);

		/* not exiting and don't need to unwind, so suspend */
		mtx_leave(&pr->ps_mtx);

		SCHED_LOCK();
		p->p_stat = SSTOP;
		mi_switch();
		SCHED_UNLOCK();
		mtx_enter(&pr->ps_mtx);
	} while (pr->ps_single != NULL);

	return (0);
}

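/*
 * Wrapper that takes ps_mtx around single_thread_check_locked() for
 * callers that do not already hold it.
 */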
int
single_thread_check(struct proc *p, int deep)
{
	int error;

	mtx_enter(&p->p_p->ps_mtx);
	error = single_thread_check_locked(p, deep);
	mtx_leave(&p->p_p->ps_mtx);

	return error;
}

/*
 * Stop other threads in the process.  The mode controls how and
 * where the other threads should stop:
 *  - SINGLE_SUSPEND: stop wherever they are, will later be released (via
 *    single_thread_clear())
 *  - SINGLE_UNWIND: just unwind to kernel boundary, will be told to exit
 *    (by setting to SINGLE_EXIT) or released as with SINGLE_SUSPEND
 *  - SINGLE_EXIT: unwind to kernel boundary and exit
 */
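/*
 * E.g. (sketch of a typical caller, such as the start of execve,
 * which must be the only thread before replacing the image):
 *
 *	if ((error = single_thread_set(p, SINGLE_UNWIND | SINGLE_DEEP)))
 *		return (error);
 */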
int
single_thread_set(struct proc *p, int flags)
{
	struct process *pr = p->p_p;
	struct proc *q;
	int error, mode = flags & SINGLE_MASK;

	KASSERT(curproc == p);

	mtx_enter(&pr->ps_mtx);
	error = single_thread_check_locked(p, flags & SINGLE_DEEP);
	if (error) {
		mtx_leave(&pr->ps_mtx);
		return error;
	}

	switch (mode) {
	case SINGLE_SUSPEND:
		break;
	case SINGLE_UNWIND:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
	case SINGLE_EXIT:
		atomic_setbits_int(&pr->ps_flags, PS_SINGLEEXIT);
		atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("single_thread_mode = %d", mode);
#endif
	}
	KASSERT((p->p_flag & P_SUSPSINGLE) == 0);
	pr->ps_single = p;
	pr->ps_singlecnt = pr->ps_threadcnt;

	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		if (q == p)
			continue;
		SCHED_LOCK();
		atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
		switch (q->p_stat) {
		case SSTOP:
			if (mode == SINGLE_EXIT) {
				unsleep(q);
				setrunnable(q);
			} else
				--pr->ps_singlecnt;
			break;
		case SSLEEP:
			/* if it's not interruptible, then we just have to wait */
			if (q->p_flag & P_SINTR) {
				/* merely need to suspend?  just stop it */
				if (mode == SINGLE_SUSPEND) {
					q->p_stat = SSTOP;
					--pr->ps_singlecnt;
					break;
				}
				/* need to unwind or exit, so wake it */
				unsleep(q);
				setrunnable(q);
			}
			break;
		case SONPROC:
			signotify(q);
			break;
		case SRUN:
		case SIDL:
		case SDEAD:
			break;
		}
		SCHED_UNLOCK();
	}

	/* count ourself out */
	--pr->ps_singlecnt;
	mtx_leave(&pr->ps_mtx);

	if ((flags & SINGLE_NOWAIT) == 0)
		single_thread_wait(pr, 1);

	return 0;
}

/*
 * Wait for the other threads to stop.  If recheck is false, return
 * non-zero when the caller needs to restart the check, and 0
 * otherwise.  If recheck is true, keep waiting until all threads
 * have stopped and always return 0.
 */
int
single_thread_wait(struct process *pr, int recheck)
{
	int wait;

	/* wait until they're all suspended */
	mtx_enter(&pr->ps_mtx);
	while ((wait = pr->ps_singlecnt > 0)) {
		msleep_nsec(&pr->ps_singlecnt, &pr->ps_mtx, PWAIT, "suspend",
		    INFSLP);
		if (!recheck)
			break;
	}
	KASSERT((pr->ps_single->p_flag & P_SUSPSINGLE) == 0);
	mtx_leave(&pr->ps_mtx);

	return wait;
}

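/*
 * Leave single-threaded mode: drop the request and resume the other
 * threads, returning each one to the run queue or to its sleep
 * queue depending on why it had stopped.
 */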
void
single_thread_clear(struct proc *p, int flag)
{
	struct process *pr = p->p_p;
	struct proc *q;

	KASSERT(pr->ps_single == p);
	KASSERT(curproc == p);

	mtx_enter(&pr->ps_mtx);
	pr->ps_single = NULL;
	atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);

	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
			continue;
		atomic_clearbits_int(&q->p_flag, P_SUSPSINGLE);

		/*
		 * if the thread was only stopped for single threading
		 * then clearing that either makes it runnable or puts
		 * it back into some sleep queue
		 */
		SCHED_LOCK();
		if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
			if (q->p_wchan == NULL)
				setrunnable(q);
			else {
				atomic_clearbits_int(&q->p_flag, P_WSLEEP);
				q->p_stat = SSLEEP;
			}
		}
		SCHED_UNLOCK();
	}
	mtx_leave(&pr->ps_mtx);
}

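/*
 * Free sigio structures that have already been unlinked from their
 * process or process group.
 */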
void
sigio_del(struct sigiolst *rmlist)
{
	struct sigio *sigio;

	while ((sigio = LIST_FIRST(rmlist)) != NULL) {
		LIST_REMOVE(sigio, sio_pgsigio);
		crfree(sigio->sio_ucred);
		free(sigio, M_SIGIO, sizeof(*sigio));
	}
}

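/*
 * Detach the sigio, if any, from the given reference and queue it on
 * rmlist so the caller can free it with sigio_del() after dropping
 * sigio_lock.
 */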
void
sigio_unlink(struct sigio_ref *sir, struct sigiolst *rmlist)
{
	struct sigio *sigio;

	MUTEX_ASSERT_LOCKED(&sigio_lock);

	sigio = sir->sir_sigio;
	if (sigio != NULL) {
		KASSERT(sigio->sio_myref == sir);
		sir->sir_sigio = NULL;

		if (sigio->sio_pgid > 0)
			sigio->sio_proc = NULL;
		else
			sigio->sio_pgrp = NULL;
		LIST_REMOVE(sigio, sio_pgsigio);

		LIST_INSERT_HEAD(rmlist, sigio, sio_pgsigio);
	}
}

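/* Drop the sigio registration, if any, held by this reference. */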
void
sigio_free(struct sigio_ref *sir)
{
	struct sigiolst rmlist;

	if (sir->sir_sigio == NULL)
		return;

	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);
	sigio_unlink(sir, &rmlist);
	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);
}

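/*
 * Drop every sigio registered on the given per-process or per-group
 * list, e.g. when the process or process group is torn down.
 */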
void
sigio_freelist(struct sigiolst *sigiolst)
{
	struct sigiolst rmlist;
	struct sigio *sigio;

	if (LIST_EMPTY(sigiolst))
		return;

	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);
	while ((sigio = LIST_FIRST(sigiolst)) != NULL)
		sigio_unlink(sigio->sio_myref, &rmlist);
	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);
}

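/*
 * Register a process (pgid > 0) or process group (pgid < 0) to
 * receive SIGIO for this object, using the historical F_SETOWN
 * encoding; for TIOCSPGRP-style requests the argument is a process
 * group ID and is stored negated.  E.g. (userland sketch arranging
 * for SIGIO on a socket sfd):
 *
 *	fcntl(sfd, F_SETOWN, getpid());
 *	fcntl(sfd, F_SETFL, O_ASYNC);
 */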
int
sigio_setown(struct sigio_ref *sir, u_long cmd, caddr_t data)
{
	struct sigiolst rmlist;
	struct proc *p = curproc;
	struct pgrp *pgrp = NULL;
	struct process *pr = NULL;
	struct sigio *sigio;
	int error;
	pid_t pgid = *(int *)data;

	if (pgid == 0) {
		sigio_free(sir);
		return (0);
	}

	if (cmd == TIOCSPGRP) {
		if (pgid < 0)
			return (EINVAL);
		pgid = -pgid;
	}

	sigio = malloc(sizeof(*sigio), M_SIGIO, M_WAITOK);
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(p->p_ucred);
	sigio->sio_myref = sir;

	LIST_INIT(&rmlist);

	/*
	 * Holding the kernel lock, and not sleeping between the
	 * prfind()/pgfind() lookup and the linking of the sigio, ensures
	 * that the process or process group does not disappear
	 * unexpectedly.
	 */
	KERNEL_LOCK();
	mtx_enter(&sigio_lock);

	if (pgid > 0) {
		pr = prfind(pgid);
		if (pr == NULL) {
			error = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pr->ps_session != p->p_p->ps_session) {
			error = EPERM;
			goto fail;
		}

		if ((pr->ps_flags & PS_EXITING) != 0) {
			error = ESRCH;
			goto fail;
		}
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			error = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != p->p_p->ps_session) {
			error = EPERM;
			goto fail;
		}
	}

	if (pgid > 0) {
		sigio->sio_proc = pr;
		LIST_INSERT_HEAD(&pr->ps_sigiolst, sigio, sio_pgsigio);
	} else {
		sigio->sio_pgrp = pgrp;
		LIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
	}

	sigio_unlink(sir, &rmlist);
	sir->sir_sigio = sigio;

	mtx_leave(&sigio_lock);
	KERNEL_UNLOCK();

	sigio_del(&rmlist);

	return (0);

fail:
	mtx_leave(&sigio_lock);
	KERNEL_UNLOCK();

	crfree(sigio->sio_ucred);
	free(sigio, M_SIGIO, sizeof(*sigio));

	return (error);
}

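/*
 * Report the registered owner.  For TIOCGPGRP-style requests the
 * sign is flipped back so a process group comes out positive.
 */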
void
sigio_getown(struct sigio_ref *sir, u_long cmd, caddr_t data)
{
	struct sigio *sigio;
	pid_t pgid = 0;

	mtx_enter(&sigio_lock);
	sigio = sir->sir_sigio;
	if (sigio != NULL)
		pgid = sigio->sio_pgid;
	mtx_leave(&sigio_lock);

	if (cmd == TIOCGPGRP)
		pgid = -pgid;

	*(int *)data = pgid;
}

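/*
 * Duplicate src's sigio registration onto dst, e.g. when a
 * descriptor is duplicated.  The new sigio is allocated before
 * taking sigio_lock so that no sleeping is needed while it is held.
 */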
void
sigio_copy(struct sigio_ref *dst, struct sigio_ref *src)
{
	struct sigiolst rmlist;
	struct sigio *newsigio, *sigio;

	sigio_free(dst);

	if (src->sir_sigio == NULL)
		return;

	newsigio = malloc(sizeof(*newsigio), M_SIGIO, M_WAITOK);
	LIST_INIT(&rmlist);

	mtx_enter(&sigio_lock);

	sigio = src->sir_sigio;
	if (sigio == NULL) {
		mtx_leave(&sigio_lock);
		free(newsigio, M_SIGIO, sizeof(*newsigio));
		return;
	}

	newsigio->sio_pgid = sigio->sio_pgid;
	newsigio->sio_ucred = crhold(sigio->sio_ucred);
	newsigio->sio_myref = dst;
	if (newsigio->sio_pgid > 0) {
		newsigio->sio_proc = sigio->sio_proc;
		LIST_INSERT_HEAD(&newsigio->sio_proc->ps_sigiolst, newsigio,
		    sio_pgsigio);
	} else {
		newsigio->sio_pgrp = sigio->sio_pgrp;
		LIST_INSERT_HEAD(&newsigio->sio_pgrp->pg_sigiolst, newsigio,
		    sio_pgsigio);
	}

	sigio_unlink(dst, &rmlist);
	dst->sir_sigio = newsigio;

	mtx_leave(&sigio_lock);

	sigio_del(&rmlist);
}
