1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 */
36
37 #include "opt_capsicum.h"
38 #include "opt_ktrace.h"
39
40 #include <sys/param.h>
41 #include <sys/capsicum.h>
42 #include <sys/ctype.h>
43 #include <sys/systm.h>
44 #include <sys/signalvar.h>
45 #include <sys/vnode.h>
46 #include <sys/acct.h>
48 #include <sys/compressor.h>
49 #include <sys/condvar.h>
50 #include <sys/devctl.h>
51 #include <sys/event.h>
52 #include <sys/fcntl.h>
53 #include <sys/imgact.h>
54 #include <sys/jail.h>
55 #include <sys/kernel.h>
56 #include <sys/ktr.h>
57 #include <sys/ktrace.h>
58 #include <sys/limits.h>
59 #include <sys/lock.h>
60 #include <sys/malloc.h>
61 #include <sys/mutex.h>
62 #include <sys/refcount.h>
63 #include <sys/namei.h>
64 #include <sys/proc.h>
65 #include <sys/procdesc.h>
66 #include <sys/ptrace.h>
67 #include <sys/posix4.h>
68 #include <sys/racct.h>
69 #include <sys/resourcevar.h>
70 #include <sys/sdt.h>
71 #include <sys/sbuf.h>
72 #include <sys/sleepqueue.h>
73 #include <sys/smp.h>
74 #include <sys/stat.h>
75 #include <sys/sx.h>
76 #include <sys/syscall.h>
77 #include <sys/syscallsubr.h>
78 #include <sys/sysctl.h>
79 #include <sys/sysent.h>
80 #include <sys/syslog.h>
81 #include <sys/sysproto.h>
82 #include <sys/timers.h>
83 #include <sys/unistd.h>
84 #include <sys/vmmeter.h>
85 #include <sys/wait.h>
86 #include <vm/vm.h>
87 #include <vm/vm_extern.h>
88 #include <vm/uma.h>
89
90 #include <machine/cpu.h>
91
92 #include <security/audit/audit.h>
93
94 #define ONSIG 32 /* NSIG for osig* syscalls. XXX. */
95
96 SDT_PROVIDER_DECLARE(proc);
97 SDT_PROBE_DEFINE3(proc, , , signal__send,
98 "struct thread *", "struct proc *", "int");
99 SDT_PROBE_DEFINE2(proc, , , signal__clear,
100 "int", "ksiginfo_t *");
101 SDT_PROBE_DEFINE3(proc, , , signal__discard,
102 "struct thread *", "struct proc *", "int");
103
104 static int coredump(struct thread *);
105 static int killpg1(struct thread *td, int sig, int pgid, int all,
106 ksiginfo_t *ksi);
107 static int issignal(struct thread *td);
108 static void reschedule_signals(struct proc *p, sigset_t block, int flags);
109 static int sigprop(int sig);
110 static void tdsigwakeup(struct thread *, int, sig_t, int);
111 static void sig_suspend_threads(struct thread *, struct proc *);
112 static int filt_sigattach(struct knote *kn);
113 static void filt_sigdetach(struct knote *kn);
114 static int filt_signal(struct knote *kn, long hint);
115 static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
116 static void sigqueue_start(void);
117 static void sigfastblock_setpend(struct thread *td, bool resched);
118
119 static uma_zone_t ksiginfo_zone = NULL;
120 struct filterops sig_filtops = {
121 .f_isfd = 0,
122 .f_attach = filt_sigattach,
123 .f_detach = filt_sigdetach,
124 .f_event = filt_signal,
125 };
126
127 static int kern_logsigexit = 1;
128 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
129 &kern_logsigexit, 0,
130 "Log processes quitting on abnormal signals to syslog(3)");
131
132 static int kern_forcesigexit = 1;
133 SYSCTL_INT(_kern, OID_AUTO, forcesigexit, CTLFLAG_RW,
134 &kern_forcesigexit, 0, "Force trap signal to be handled");
135
136 static SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
137 "POSIX real time signal");
138
139 static int max_pending_per_proc = 128;
140 SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW,
141 &max_pending_per_proc, 0, "Max pending signals per proc");
142
143 static int preallocate_siginfo = 1024;
144 SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RDTUN,
145 &preallocate_siginfo, 0, "Preallocated signal memory size");
146
147 static int signal_overflow = 0;
148 SYSCTL_INT(_kern_sigqueue, OID_AUTO, overflow, CTLFLAG_RD,
149 &signal_overflow, 0, "Number of signals overflew");
150
151 static int signal_alloc_fail = 0;
152 SYSCTL_INT(_kern_sigqueue, OID_AUTO, alloc_fail, CTLFLAG_RD,
153 &signal_alloc_fail, 0, "signals failed to be allocated");
154
155 static int kern_lognosys = 0;
156 SYSCTL_INT(_kern, OID_AUTO, lognosys, CTLFLAG_RWTUN, &kern_lognosys, 0,
157 "Log invalid syscalls");
158
159 static int kern_signosys = 1;
160 SYSCTL_INT(_kern, OID_AUTO, signosys, CTLFLAG_RWTUN, &kern_signosys, 0,
161 "Send SIGSYS on return from invalid syscall");
162
163 __read_frequently bool sigfastblock_fetch_always = false;
164 SYSCTL_BOOL(_kern, OID_AUTO, sigfastblock_fetch_always, CTLFLAG_RWTUN,
165 &sigfastblock_fetch_always, 0,
166 "Fetch sigfastblock word on each syscall entry for proper "
167 "blocking semantic");
168
169 static bool kern_sig_discard_ign = true;
170 SYSCTL_BOOL(_kern, OID_AUTO, sig_discard_ign, CTLFLAG_RWTUN,
171 &kern_sig_discard_ign, 0,
172 "Discard ignored signals on delivery, otherwise queue them to "
173 "the target queue");
174
175 SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL);
176
177 /*
178 * Policy -- Can ucred cr1 send SIGIO to the process owning ucred cr2?
179 * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG
180 * in the right situations.
181 */
182 #define CANSIGIO(cr1, cr2) \
183 ((cr1)->cr_uid == 0 || \
184 (cr1)->cr_ruid == (cr2)->cr_ruid || \
185 (cr1)->cr_uid == (cr2)->cr_ruid || \
186 (cr1)->cr_ruid == (cr2)->cr_uid || \
187 (cr1)->cr_uid == (cr2)->cr_uid)
188
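/*
 * Illustrative example (informational): with cr1->cr_ruid == 1001,
 * CANSIGIO() accepts any target credential whose real or effective uid
 * is 1001; a sender credential with cr_uid == 0 (root) is always
 * accepted.
 */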
189 static int sugid_coredump;
190 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RWTUN,
191 &sugid_coredump, 0, "Allow setuid and setgid processes to dump core");
192
193 static int capmode_coredump;
194 SYSCTL_INT(_kern, OID_AUTO, capmode_coredump, CTLFLAG_RWTUN,
195 &capmode_coredump, 0, "Allow processes in capability mode to dump core");
196
197 static int do_coredump = 1;
198 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
199 &do_coredump, 0, "Enable/Disable coredumps");
200
201 static int set_core_nodump_flag = 0;
202 SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag,
203 0, "Enable setting the NODUMP flag on coredump files");
204
205 static int coredump_devctl = 0;
206 SYSCTL_INT(_kern, OID_AUTO, coredump_devctl, CTLFLAG_RW, &coredump_devctl,
207 0, "Generate a devctl notification when processes coredump");
208
209 /*
210 * Signal properties and actions.
211 * The array below categorizes the signals and their default actions
212 * according to the following properties:
213 */
214 #define SIGPROP_KILL 0x01 /* terminates process by default */
215 #define SIGPROP_CORE 0x02 /* ditto and coredumps */
216 #define SIGPROP_STOP 0x04 /* suspend process */
217 #define SIGPROP_TTYSTOP 0x08 /* ditto, from tty */
218 #define SIGPROP_IGNORE 0x10 /* ignore by default */
219 #define SIGPROP_CONT 0x20 /* continue if suspended */
220
221 static const int sigproptbl[NSIG] = {
222 [SIGHUP] = SIGPROP_KILL,
223 [SIGINT] = SIGPROP_KILL,
224 [SIGQUIT] = SIGPROP_KILL | SIGPROP_CORE,
225 [SIGILL] = SIGPROP_KILL | SIGPROP_CORE,
226 [SIGTRAP] = SIGPROP_KILL | SIGPROP_CORE,
227 [SIGABRT] = SIGPROP_KILL | SIGPROP_CORE,
228 [SIGEMT] = SIGPROP_KILL | SIGPROP_CORE,
229 [SIGFPE] = SIGPROP_KILL | SIGPROP_CORE,
230 [SIGKILL] = SIGPROP_KILL,
231 [SIGBUS] = SIGPROP_KILL | SIGPROP_CORE,
232 [SIGSEGV] = SIGPROP_KILL | SIGPROP_CORE,
233 [SIGSYS] = SIGPROP_KILL | SIGPROP_CORE,
234 [SIGPIPE] = SIGPROP_KILL,
235 [SIGALRM] = SIGPROP_KILL,
236 [SIGTERM] = SIGPROP_KILL,
237 [SIGURG] = SIGPROP_IGNORE,
238 [SIGSTOP] = SIGPROP_STOP,
239 [SIGTSTP] = SIGPROP_STOP | SIGPROP_TTYSTOP,
240 [SIGCONT] = SIGPROP_IGNORE | SIGPROP_CONT,
241 [SIGCHLD] = SIGPROP_IGNORE,
242 [SIGTTIN] = SIGPROP_STOP | SIGPROP_TTYSTOP,
243 [SIGTTOU] = SIGPROP_STOP | SIGPROP_TTYSTOP,
244 [SIGIO] = SIGPROP_IGNORE,
245 [SIGXCPU] = SIGPROP_KILL,
246 [SIGXFSZ] = SIGPROP_KILL,
247 [SIGVTALRM] = SIGPROP_KILL,
248 [SIGPROF] = SIGPROP_KILL,
249 [SIGWINCH] = SIGPROP_IGNORE,
250 [SIGINFO] = SIGPROP_IGNORE,
251 [SIGUSR1] = SIGPROP_KILL,
252 [SIGUSR2] = SIGPROP_KILL,
253 };
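/*
 * Example (informational): sigprop(SIGTSTP) returns
 * (SIGPROP_STOP | SIGPROP_TTYSTOP), while a signal without a table
 * entry, including the realtime range, yields 0.
 */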
254
255 #define _SIG_FOREACH_ADVANCE(i, set) ({ \
256 int __found; \
257 for (;;) { \
258 if (__bits != 0) { \
259 int __sig = ffs(__bits); \
260 __bits &= ~(1u << (__sig - 1)); \
261 sig = __i * sizeof((set)->__bits[0]) * NBBY + __sig; \
262 __found = 1; \
263 break; \
264 } \
265 if (++__i == _SIG_WORDS) { \
266 __found = 0; \
267 break; \
268 } \
269 __bits = (set)->__bits[__i]; \
270 } \
271 __found != 0; \
272 })
273
274 #define SIG_FOREACH(i, set) \
275 for (int32_t __i = -1, __bits = 0; \
276 _SIG_FOREACH_ADVANCE(i, set); ) \
277
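/*
 * Usage sketch (informational): SIG_FOREACH() visits every signal number
 * currently set in a sigset_t, assuming the caller holds whatever locks
 * are needed to examine the set, e.g.:
 *
 *	int sig;
 *
 *	SIG_FOREACH(sig, &p->p_sigqueue.sq_signals) {
 *		... handle pending signal number sig ...
 *	}
 */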
278 static sigset_t fastblock_mask;
279
280 static void
281 ast_sig(struct thread *td, int tda)
282 {
283 struct proc *p;
284 int old_boundary, sig;
285 bool resched_sigs;
286
287 p = td->td_proc;
288
289 #ifdef DIAGNOSTIC
290 if (p->p_numthreads == 1 && (tda & (TDAI(TDA_SIG) |
291 TDAI(TDA_AST))) == 0) {
292 PROC_LOCK(p);
293 thread_lock(td);
294 /*
295 * Note that TDA_SIG should be re-read from
296 * td_ast, since a signal might have been delivered
297 * after we cleared td_flags above. This is one of
298 * the reasons for the looping check of the AST condition.
299 * See comment in userret() about P_PPWAIT.
300 */
301 if ((p->p_flag & P_PPWAIT) == 0 &&
302 (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
303 if (SIGPENDING(td) && ((tda | td->td_ast) &
304 (TDAI(TDA_SIG) | TDAI(TDA_AST))) == 0) {
305 thread_unlock(td); /* fix dumps */
306 panic(
307 "failed2 to set signal flags for ast p %p "
308 "td %p tda %#x td_ast %#x fl %#x",
309 p, td, tda, td->td_ast, td->td_flags);
310 }
311 }
312 thread_unlock(td);
313 PROC_UNLOCK(p);
314 }
315 #endif
316
317 /*
318 * Check for signals. Unlocked reads of p_pendingcnt or
319 * p_siglist might cause a process-directed signal to be handled
320 * later.
321 */
322 if ((tda & TDAI(TDA_SIG)) != 0 || p->p_pendingcnt > 0 ||
323 !SIGISEMPTY(p->p_siglist)) {
324 sigfastblock_fetch(td);
325 PROC_LOCK(p);
326 old_boundary = ~TDB_BOUNDARY | (td->td_dbgflags & TDB_BOUNDARY);
327 td->td_dbgflags |= TDB_BOUNDARY;
328 mtx_lock(&p->p_sigacts->ps_mtx);
329 while ((sig = cursig(td)) != 0) {
330 KASSERT(sig >= 0, ("sig %d", sig));
331 postsig(sig);
332 }
333 mtx_unlock(&p->p_sigacts->ps_mtx);
334 td->td_dbgflags &= old_boundary;
335 PROC_UNLOCK(p);
336 resched_sigs = true;
337 } else {
338 resched_sigs = false;
339 }
340
341 /*
342 * Handle deferred update of the fast sigblock value, after
343 * the postsig() loop was performed.
344 */
345 sigfastblock_setpend(td, resched_sigs);
346 }
347
348 static void
349 ast_sigsuspend(struct thread *td, int tda __unused)
350 {
351 MPASS((td->td_pflags & TDP_OLDMASK) != 0);
352 td->td_pflags &= ~TDP_OLDMASK;
353 kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
354 }
355
356 static void
357 sigqueue_start(void)
358 {
359 ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
360 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
361 uma_prealloc(ksiginfo_zone, preallocate_siginfo);
362 p31b_setcfg(CTL_P1003_1B_REALTIME_SIGNALS, _POSIX_REALTIME_SIGNALS);
363 p31b_setcfg(CTL_P1003_1B_RTSIG_MAX, SIGRTMAX - SIGRTMIN + 1);
364 p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
365 SIGFILLSET(fastblock_mask);
366 SIG_CANTMASK(fastblock_mask);
367 ast_register(TDA_SIG, ASTR_UNCOND, 0, ast_sig);
368 ast_register(TDA_SIGSUSPEND, ASTR_ASTF_REQUIRED | ASTR_TDP,
369 TDP_OLDMASK, ast_sigsuspend);
370 }
371
372 ksiginfo_t *
373 ksiginfo_alloc(int mwait)
374 {
375 MPASS(mwait == M_WAITOK || mwait == M_NOWAIT);
376
377 if (ksiginfo_zone == NULL)
378 return (NULL);
379 return (uma_zalloc(ksiginfo_zone, mwait | M_ZERO));
380 }
381
382 void
383 ksiginfo_free(ksiginfo_t *ksi)
384 {
385 uma_zfree(ksiginfo_zone, ksi);
386 }
387
388 static __inline bool
389 ksiginfo_tryfree(ksiginfo_t *ksi)
390 {
391 if ((ksi->ksi_flags & KSI_EXT) == 0) {
392 uma_zfree(ksiginfo_zone, ksi);
393 return (true);
394 }
395 return (false);
396 }
397
398 void
399 sigqueue_init(sigqueue_t *list, struct proc *p)
400 {
401 SIGEMPTYSET(list->sq_signals);
402 SIGEMPTYSET(list->sq_kill);
403 SIGEMPTYSET(list->sq_ptrace);
404 TAILQ_INIT(&list->sq_list);
405 list->sq_proc = p;
406 list->sq_flags = SQ_INIT;
407 }
408
409 /*
410 * Get a signal's ksiginfo.
411 * Return:
412 * 0 - signal not found
413 * otherwise - the signal number
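 *
 * Caller sketch (informational; mirrors kern_sigtimedwait()): drain the
 * thread queue first, then fall back to the process queue:
 *	if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
 *	    sigqueue_get(&p->p_sigqueue, sig, ksi) != 0)
 *		... *ksi now holds the dequeued siginfo ...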
414 */
415 static int
416 sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si)
417 {
418 struct proc *p = sq->sq_proc;
419 struct ksiginfo *ksi, *next;
420 int count = 0;
421
422 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
423
424 if (!SIGISMEMBER(sq->sq_signals, signo))
425 return (0);
426
427 if (SIGISMEMBER(sq->sq_ptrace, signo)) {
428 count++;
429 SIGDELSET(sq->sq_ptrace, signo);
430 si->ksi_flags |= KSI_PTRACE;
431 }
432 if (SIGISMEMBER(sq->sq_kill, signo)) {
433 count++;
434 if (count == 1)
435 SIGDELSET(sq->sq_kill, signo);
436 }
437
438 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
439 if (ksi->ksi_signo == signo) {
440 if (count == 0) {
441 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
442 ksi->ksi_sigq = NULL;
443 ksiginfo_copy(ksi, si);
444 if (ksiginfo_tryfree(ksi) && p != NULL)
445 p->p_pendingcnt--;
446 }
447 if (++count > 1)
448 break;
449 }
450 }
451
452 if (count <= 1)
453 SIGDELSET(sq->sq_signals, signo);
454 si->ksi_signo = signo;
455 return (signo);
456 }
457
458 void
459 sigqueue_take(ksiginfo_t *ksi)
460 {
461 struct ksiginfo *kp;
462 struct proc *p;
463 sigqueue_t *sq;
464
465 if (ksi == NULL || (sq = ksi->ksi_sigq) == NULL)
466 return;
467
468 p = sq->sq_proc;
469 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
470 ksi->ksi_sigq = NULL;
471 if (!(ksi->ksi_flags & KSI_EXT) && p != NULL)
472 p->p_pendingcnt--;
473
474 for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL;
475 kp = TAILQ_NEXT(kp, ksi_link)) {
476 if (kp->ksi_signo == ksi->ksi_signo)
477 break;
478 }
479 if (kp == NULL && !SIGISMEMBER(sq->sq_kill, ksi->ksi_signo) &&
480 !SIGISMEMBER(sq->sq_ptrace, ksi->ksi_signo))
481 SIGDELSET(sq->sq_signals, ksi->ksi_signo);
482 }
483
484 static int
485 sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si)
486 {
487 struct proc *p = sq->sq_proc;
488 struct ksiginfo *ksi;
489 int ret = 0;
490
491 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
492
493 /*
494 * SIGKILL/SIGSTOP cannot be caught or masked, so take the fast path
495 * for these signals.
496 */
497 if (signo == SIGKILL || signo == SIGSTOP || si == NULL) {
498 SIGADDSET(sq->sq_kill, signo);
499 goto out_set_bit;
500 }
501
502 /* directly insert the ksi, don't copy it */
503 if (si->ksi_flags & KSI_INS) {
504 if (si->ksi_flags & KSI_HEAD)
505 TAILQ_INSERT_HEAD(&sq->sq_list, si, ksi_link);
506 else
507 TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link);
508 si->ksi_sigq = sq;
509 goto out_set_bit;
510 }
511
512 if (__predict_false(ksiginfo_zone == NULL)) {
513 SIGADDSET(sq->sq_kill, signo);
514 goto out_set_bit;
515 }
516
517 if (p != NULL && p->p_pendingcnt >= max_pending_per_proc) {
518 signal_overflow++;
519 ret = EAGAIN;
520 } else if ((ksi = ksiginfo_alloc(M_NOWAIT)) == NULL) {
521 signal_alloc_fail++;
522 ret = EAGAIN;
523 } else {
524 if (p != NULL)
525 p->p_pendingcnt++;
526 ksiginfo_copy(si, ksi);
527 ksi->ksi_signo = signo;
528 if (si->ksi_flags & KSI_HEAD)
529 TAILQ_INSERT_HEAD(&sq->sq_list, ksi, ksi_link);
530 else
531 TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link);
532 ksi->ksi_sigq = sq;
533 }
534
535 if (ret != 0) {
536 if ((si->ksi_flags & KSI_PTRACE) != 0) {
537 SIGADDSET(sq->sq_ptrace, signo);
538 ret = 0;
539 goto out_set_bit;
540 } else if ((si->ksi_flags & KSI_TRAP) != 0 ||
541 (si->ksi_flags & KSI_SIGQ) == 0) {
542 SIGADDSET(sq->sq_kill, signo);
543 ret = 0;
544 goto out_set_bit;
545 }
546 return (ret);
547 }
548
549 out_set_bit:
550 SIGADDSET(sq->sq_signals, signo);
551 return (ret);
552 }
553
554 void
555 sigqueue_flush(sigqueue_t *sq)
556 {
557 struct proc *p = sq->sq_proc;
558 ksiginfo_t *ksi;
559
560 KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited"));
561
562 if (p != NULL)
563 PROC_LOCK_ASSERT(p, MA_OWNED);
564
565 while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) {
566 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
567 ksi->ksi_sigq = NULL;
568 if (ksiginfo_tryfree(ksi) && p != NULL)
569 p->p_pendingcnt--;
570 }
571
572 SIGEMPTYSET(sq->sq_signals);
573 SIGEMPTYSET(sq->sq_kill);
574 SIGEMPTYSET(sq->sq_ptrace);
575 }
576
577 static void
578 sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, const sigset_t *set)
579 {
580 sigset_t tmp;
581 struct proc *p1, *p2;
582 ksiginfo_t *ksi, *next;
583
584 KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited"));
585 KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited"));
586 p1 = src->sq_proc;
587 p2 = dst->sq_proc;
588 /* Move siginfo to target list */
589 TAILQ_FOREACH_SAFE(ksi, &src->sq_list, ksi_link, next) {
590 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
591 TAILQ_REMOVE(&src->sq_list, ksi, ksi_link);
592 if (p1 != NULL)
593 p1->p_pendingcnt--;
594 TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link);
595 ksi->ksi_sigq = dst;
596 if (p2 != NULL)
597 p2->p_pendingcnt++;
598 }
599 }
600
601 /* Move pending bits to target list */
602 tmp = src->sq_kill;
603 SIGSETAND(tmp, *set);
604 SIGSETOR(dst->sq_kill, tmp);
605 SIGSETNAND(src->sq_kill, tmp);
606
607 tmp = src->sq_ptrace;
608 SIGSETAND(tmp, *set);
609 SIGSETOR(dst->sq_ptrace, tmp);
610 SIGSETNAND(src->sq_ptrace, tmp);
611
612 tmp = src->sq_signals;
613 SIGSETAND(tmp, *set);
614 SIGSETOR(dst->sq_signals, tmp);
615 SIGSETNAND(src->sq_signals, tmp);
616 }
617
618 #if 0
619 static void
620 sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo)
621 {
622 sigset_t set;
623
624 SIGEMPTYSET(set);
625 SIGADDSET(set, signo);
626 sigqueue_move_set(src, dst, &set);
627 }
628 #endif
629
630 static void
631 sigqueue_delete_set(sigqueue_t *sq, const sigset_t *set)
632 {
633 struct proc *p = sq->sq_proc;
634 ksiginfo_t *ksi, *next;
635
636 KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited"));
637
638 /* Remove matching siginfo entries from the queue */
639 TAILQ_FOREACH_SAFE(ksi, &sq->sq_list, ksi_link, next) {
640 if (SIGISMEMBER(*set, ksi->ksi_signo)) {
641 TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link);
642 ksi->ksi_sigq = NULL;
643 if (ksiginfo_tryfree(ksi) && p != NULL)
644 p->p_pendingcnt--;
645 }
646 }
647 SIGSETNAND(sq->sq_kill, *set);
648 SIGSETNAND(sq->sq_ptrace, *set);
649 SIGSETNAND(sq->sq_signals, *set);
650 }
651
652 void
653 sigqueue_delete(sigqueue_t *sq, int signo)
654 {
655 sigset_t set;
656
657 SIGEMPTYSET(set);
658 SIGADDSET(set, signo);
659 sigqueue_delete_set(sq, &set);
660 }
661
662 /* Remove a set of signals for a process */
663 static void
664 sigqueue_delete_set_proc(struct proc *p, const sigset_t *set)
665 {
666 sigqueue_t worklist;
667 struct thread *td0;
668
669 PROC_LOCK_ASSERT(p, MA_OWNED);
670
671 sigqueue_init(&worklist, NULL);
672 sigqueue_move_set(&p->p_sigqueue, &worklist, set);
673
674 FOREACH_THREAD_IN_PROC(p, td0)
675 sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
676
677 sigqueue_flush(&worklist);
678 }
679
680 void
681 sigqueue_delete_proc(struct proc *p, int signo)
682 {
683 sigset_t set;
684
685 SIGEMPTYSET(set);
686 SIGADDSET(set, signo);
687 sigqueue_delete_set_proc(p, &set);
688 }
689
690 static void
691 sigqueue_delete_stopmask_proc(struct proc *p)
692 {
693 sigset_t set;
694
695 SIGEMPTYSET(set);
696 SIGADDSET(set, SIGSTOP);
697 SIGADDSET(set, SIGTSTP);
698 SIGADDSET(set, SIGTTIN);
699 SIGADDSET(set, SIGTTOU);
700 sigqueue_delete_set_proc(p, &set);
701 }
702
703 /*
704 * Determine the signal that should be delivered to thread td, the current
705 * thread; return 0 if none. If there is a pending stop signal with default
706 * action, the process stops in issignal().
707 */
708 int
709 cursig(struct thread *td)
710 {
711 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
712 mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED);
713 THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
714 return (SIGPENDING(td) ? issignal(td) : 0);
715 }
716
717 /*
718 * Arrange for ast() to handle unmasked pending signals on return to user
719 * mode. This must be called whenever a signal is added to td_sigqueue or
720 * unmasked in td_sigmask.
721 */
722 void
723 signotify(struct thread *td)
724 {
725
726 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
727
728 if (SIGPENDING(td))
729 ast_sched(td, TDA_SIG);
730 }
731
732 /*
733 * Returns 1 (true) if altstack is configured for the thread, and the
734 * passed stack bottom address falls into the altstack range. Handles
735 * the COMPAT_43 special case where the alternate stack size is zero.
736 */
737 int
738 sigonstack(size_t sp)
739 {
740 struct thread *td;
741
742 td = curthread;
743 if ((td->td_pflags & TDP_ALTSTACK) == 0)
744 return (0);
745 #if defined(COMPAT_43)
746 if (SV_PROC_FLAG(td->td_proc, SV_AOUT) && td->td_sigstk.ss_size == 0)
747 return ((td->td_sigstk.ss_flags & SS_ONSTACK) != 0);
748 #endif
749 return (sp >= (size_t)td->td_sigstk.ss_sp &&
750 sp < td->td_sigstk.ss_size + (size_t)td->td_sigstk.ss_sp);
751 }
752
753 static __inline int
754 sigprop(int sig)
755 {
756
757 if (sig > 0 && sig < nitems(sigproptbl))
758 return (sigproptbl[sig]);
759 return (0);
760 }
761
762 static bool
763 sigact_flag_test(const struct sigaction *act, int flag)
764 {
765
766 /*
767 * SA_SIGINFO is reset when signal disposition is set to
768 * ignore or default. Other flags are kept according to user
769 * settings.
770 */
771 return ((act->sa_flags & flag) != 0 && (flag != SA_SIGINFO ||
772 ((__sighandler_t *)act->sa_sigaction != SIG_IGN &&
773 (__sighandler_t *)act->sa_sigaction != SIG_DFL)));
774 }
775
776 /*
777 * kern_sigaction
778 * sigaction
779 * freebsd4_sigaction
780 * osigaction
781 */
782 int
783 kern_sigaction(struct thread *td, int sig, const struct sigaction *act,
784 struct sigaction *oact, int flags)
785 {
786 struct sigacts *ps;
787 struct proc *p = td->td_proc;
788
789 if (!_SIG_VALID(sig))
790 return (EINVAL);
791 if (act != NULL && act->sa_handler != SIG_DFL &&
792 act->sa_handler != SIG_IGN && (act->sa_flags & ~(SA_ONSTACK |
793 SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER |
794 SA_NOCLDWAIT | SA_SIGINFO)) != 0)
795 return (EINVAL);
796
797 PROC_LOCK(p);
798 ps = p->p_sigacts;
799 mtx_lock(&ps->ps_mtx);
800 if (oact) {
801 memset(oact, 0, sizeof(*oact));
802 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
803 if (SIGISMEMBER(ps->ps_sigonstack, sig))
804 oact->sa_flags |= SA_ONSTACK;
805 if (!SIGISMEMBER(ps->ps_sigintr, sig))
806 oact->sa_flags |= SA_RESTART;
807 if (SIGISMEMBER(ps->ps_sigreset, sig))
808 oact->sa_flags |= SA_RESETHAND;
809 if (SIGISMEMBER(ps->ps_signodefer, sig))
810 oact->sa_flags |= SA_NODEFER;
811 if (SIGISMEMBER(ps->ps_siginfo, sig)) {
812 oact->sa_flags |= SA_SIGINFO;
813 oact->sa_sigaction =
814 (__siginfohandler_t *)ps->ps_sigact[_SIG_IDX(sig)];
815 } else
816 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
817 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP)
818 oact->sa_flags |= SA_NOCLDSTOP;
819 if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT)
820 oact->sa_flags |= SA_NOCLDWAIT;
821 }
822 if (act) {
823 if ((sig == SIGKILL || sig == SIGSTOP) &&
824 act->sa_handler != SIG_DFL) {
825 mtx_unlock(&ps->ps_mtx);
826 PROC_UNLOCK(p);
827 return (EINVAL);
828 }
829
830 /*
831 * Change setting atomically.
832 */
833
834 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
835 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
836 if (sigact_flag_test(act, SA_SIGINFO)) {
837 ps->ps_sigact[_SIG_IDX(sig)] =
838 (__sighandler_t *)act->sa_sigaction;
839 SIGADDSET(ps->ps_siginfo, sig);
840 } else {
841 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
842 SIGDELSET(ps->ps_siginfo, sig);
843 }
844 if (!sigact_flag_test(act, SA_RESTART))
845 SIGADDSET(ps->ps_sigintr, sig);
846 else
847 SIGDELSET(ps->ps_sigintr, sig);
848 if (sigact_flag_test(act, SA_ONSTACK))
849 SIGADDSET(ps->ps_sigonstack, sig);
850 else
851 SIGDELSET(ps->ps_sigonstack, sig);
852 if (sigact_flag_test(act, SA_RESETHAND))
853 SIGADDSET(ps->ps_sigreset, sig);
854 else
855 SIGDELSET(ps->ps_sigreset, sig);
856 if (sigact_flag_test(act, SA_NODEFER))
857 SIGADDSET(ps->ps_signodefer, sig);
858 else
859 SIGDELSET(ps->ps_signodefer, sig);
860 if (sig == SIGCHLD) {
861 if (act->sa_flags & SA_NOCLDSTOP)
862 ps->ps_flag |= PS_NOCLDSTOP;
863 else
864 ps->ps_flag &= ~PS_NOCLDSTOP;
865 if (act->sa_flags & SA_NOCLDWAIT) {
866 /*
867 * Paranoia: since SA_NOCLDWAIT is implemented
868 * by reparenting the dying child to PID 1 (and
869 * trusting it to reap the zombie), PID 1 itself
870 * is forbidden to set SA_NOCLDWAIT.
871 */
872 if (p->p_pid == 1)
873 ps->ps_flag &= ~PS_NOCLDWAIT;
874 else
875 ps->ps_flag |= PS_NOCLDWAIT;
876 } else
877 ps->ps_flag &= ~PS_NOCLDWAIT;
878 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
879 ps->ps_flag |= PS_CLDSIGIGN;
880 else
881 ps->ps_flag &= ~PS_CLDSIGIGN;
882 }
883 /*
884 * Set bit in ps_sigignore for signals that are set to SIG_IGN,
885 * and for signals set to SIG_DFL where the default is to
886 * ignore. However, don't put SIGCONT in ps_sigignore, as we
887 * have to restart the process.
888 */
889 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
890 (sigprop(sig) & SIGPROP_IGNORE &&
891 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
892 /* never to be seen again */
893 sigqueue_delete_proc(p, sig);
894 if (sig != SIGCONT)
895 /* easier in psignal */
896 SIGADDSET(ps->ps_sigignore, sig);
897 SIGDELSET(ps->ps_sigcatch, sig);
898 } else {
899 SIGDELSET(ps->ps_sigignore, sig);
900 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
901 SIGDELSET(ps->ps_sigcatch, sig);
902 else
903 SIGADDSET(ps->ps_sigcatch, sig);
904 }
905 #ifdef COMPAT_FREEBSD4
906 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
907 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
908 (flags & KSA_FREEBSD4) == 0)
909 SIGDELSET(ps->ps_freebsd4, sig);
910 else
911 SIGADDSET(ps->ps_freebsd4, sig);
912 #endif
913 #ifdef COMPAT_43
914 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
915 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL ||
916 (flags & KSA_OSIGSET) == 0)
917 SIGDELSET(ps->ps_osigset, sig);
918 else
919 SIGADDSET(ps->ps_osigset, sig);
920 #endif
921 }
922 mtx_unlock(&ps->ps_mtx);
923 PROC_UNLOCK(p);
924 return (0);
925 }
926
927 #ifndef _SYS_SYSPROTO_H_
928 struct sigaction_args {
929 int sig;
930 struct sigaction *act;
931 struct sigaction *oact;
932 };
933 #endif
934 int
935 sys_sigaction(struct thread *td, struct sigaction_args *uap)
936 {
937 struct sigaction act, oact;
938 struct sigaction *actp, *oactp;
939 int error;
940
941 actp = (uap->act != NULL) ? &act : NULL;
942 oactp = (uap->oact != NULL) ? &oact : NULL;
943 if (actp) {
944 error = copyin(uap->act, actp, sizeof(act));
945 if (error)
946 return (error);
947 }
948 error = kern_sigaction(td, uap->sig, actp, oactp, 0);
949 if (oactp && !error)
950 error = copyout(oactp, uap->oact, sizeof(oact));
951 return (error);
952 }
953
954 #ifdef COMPAT_FREEBSD4
955 #ifndef _SYS_SYSPROTO_H_
956 struct freebsd4_sigaction_args {
957 int sig;
958 struct sigaction *act;
959 struct sigaction *oact;
960 };
961 #endif
962 int
963 freebsd4_sigaction(struct thread *td, struct freebsd4_sigaction_args *uap)
964 {
965 struct sigaction act, oact;
966 struct sigaction *actp, *oactp;
967 int error;
968
969 actp = (uap->act != NULL) ? &act : NULL;
970 oactp = (uap->oact != NULL) ? &oact : NULL;
971 if (actp) {
972 error = copyin(uap->act, actp, sizeof(act));
973 if (error)
974 return (error);
975 }
976 error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4);
977 if (oactp && !error)
978 error = copyout(oactp, uap->oact, sizeof(oact));
979 return (error);
980 }
981 #endif /* COMPAT_FREEBSD4 */
982
983 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
984 #ifndef _SYS_SYSPROTO_H_
985 struct osigaction_args {
986 int signum;
987 struct osigaction *nsa;
988 struct osigaction *osa;
989 };
990 #endif
991 int
992 osigaction(struct thread *td, struct osigaction_args *uap)
993 {
994 struct osigaction sa;
995 struct sigaction nsa, osa;
996 struct sigaction *nsap, *osap;
997 int error;
998
999 if (uap->signum <= 0 || uap->signum >= ONSIG)
1000 return (EINVAL);
1001
1002 nsap = (uap->nsa != NULL) ? &nsa : NULL;
1003 osap = (uap->osa != NULL) ? &osa : NULL;
1004
1005 if (nsap) {
1006 error = copyin(uap->nsa, &sa, sizeof(sa));
1007 if (error)
1008 return (error);
1009 nsap->sa_handler = sa.sa_handler;
1010 nsap->sa_flags = sa.sa_flags;
1011 OSIG2SIG(sa.sa_mask, nsap->sa_mask);
1012 }
1013 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1014 if (osap && !error) {
1015 sa.sa_handler = osap->sa_handler;
1016 sa.sa_flags = osap->sa_flags;
1017 SIG2OSIG(osap->sa_mask, sa.sa_mask);
1018 error = copyout(&sa, uap->osa, sizeof(sa));
1019 }
1020 return (error);
1021 }
1022
1023 #if !defined(__i386__)
1024 /* Avoid replicating the same stub everywhere */
1025 int
1026 osigreturn(struct thread *td, struct osigreturn_args *uap)
1027 {
1028
1029 return (nosys(td, (struct nosys_args *)uap));
1030 }
1031 #endif
1032 #endif /* COMPAT_43 */
1033
1034 /*
1035 * Initialize signal state for process 0;
1036 * set to ignore signals that are ignored by default.
1037 */
1038 void
1039 siginit(struct proc *p)
1040 {
1041 int i;
1042 struct sigacts *ps;
1043
1044 PROC_LOCK(p);
1045 ps = p->p_sigacts;
1046 mtx_lock(&ps->ps_mtx);
1047 for (i = 1; i <= NSIG; i++) {
1048 if (sigprop(i) & SIGPROP_IGNORE && i != SIGCONT) {
1049 SIGADDSET(ps->ps_sigignore, i);
1050 }
1051 }
1052 mtx_unlock(&ps->ps_mtx);
1053 PROC_UNLOCK(p);
1054 }
1055
1056 /*
1057 * Reset specified signal to the default disposition.
1058 */
1059 static void
1060 sigdflt(struct sigacts *ps, int sig)
1061 {
1062
1063 mtx_assert(&ps->ps_mtx, MA_OWNED);
1064 SIGDELSET(ps->ps_sigcatch, sig);
1065 if ((sigprop(sig) & SIGPROP_IGNORE) != 0 && sig != SIGCONT)
1066 SIGADDSET(ps->ps_sigignore, sig);
1067 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1068 SIGDELSET(ps->ps_siginfo, sig);
1069 }
1070
1071 /*
1072 * Reset signals for an exec of the specified process.
1073 */
1074 void
1075 execsigs(struct proc *p)
1076 {
1077 struct sigacts *ps;
1078 struct thread *td;
1079
1080 /*
1081 * Reset caught signals. Held signals remain held
1082 * through td_sigmask (unless they were caught,
1083 * and are now ignored by default).
1084 */
1085 PROC_LOCK_ASSERT(p, MA_OWNED);
1086 ps = p->p_sigacts;
1087 mtx_lock(&ps->ps_mtx);
1088 sig_drop_caught(p);
1089
1090 /*
1091 * Reset stack state to the user stack.
1092 * Clear set of signals caught on the signal stack.
1093 */
1094 td = curthread;
1095 MPASS(td->td_proc == p);
1096 td->td_sigstk.ss_flags = SS_DISABLE;
1097 td->td_sigstk.ss_size = 0;
1098 td->td_sigstk.ss_sp = 0;
1099 td->td_pflags &= ~TDP_ALTSTACK;
1100 /*
1101 * Reset the "no zombies if child dies" flag, as Solaris does.
1102 */
1103 ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN);
1104 if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN)
1105 ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL;
1106 mtx_unlock(&ps->ps_mtx);
1107 }
1108
1109 /*
1110 * kern_sigprocmask()
1111 *
1112 * Manipulate signal mask.
1113 */
1114 int
1115 kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset,
1116 int flags)
1117 {
1118 sigset_t new_block, oset1;
1119 struct proc *p;
1120 int error;
1121
1122 p = td->td_proc;
1123 if ((flags & SIGPROCMASK_PROC_LOCKED) != 0)
1124 PROC_LOCK_ASSERT(p, MA_OWNED);
1125 else
1126 PROC_LOCK(p);
1127 mtx_assert(&p->p_sigacts->ps_mtx, (flags & SIGPROCMASK_PS_LOCKED) != 0
1128 ? MA_OWNED : MA_NOTOWNED);
1129 if (oset != NULL)
1130 *oset = td->td_sigmask;
1131
1132 error = 0;
1133 if (set != NULL) {
1134 switch (how) {
1135 case SIG_BLOCK:
1136 SIG_CANTMASK(*set);
1137 oset1 = td->td_sigmask;
1138 SIGSETOR(td->td_sigmask, *set);
1139 new_block = td->td_sigmask;
1140 SIGSETNAND(new_block, oset1);
1141 break;
1142 case SIG_UNBLOCK:
1143 SIGSETNAND(td->td_sigmask, *set);
1144 signotify(td);
1145 goto out;
1146 case SIG_SETMASK:
1147 SIG_CANTMASK(*set);
1148 oset1 = td->td_sigmask;
1149 if (flags & SIGPROCMASK_OLD)
1150 SIGSETLO(td->td_sigmask, *set);
1151 else
1152 td->td_sigmask = *set;
1153 new_block = td->td_sigmask;
1154 SIGSETNAND(new_block, oset1);
1155 signotify(td);
1156 break;
1157 default:
1158 error = EINVAL;
1159 goto out;
1160 }
1161
1162 /*
1163 * The new_block set contains signals that were not previously
1164 * blocked, but are blocked now.
1165 *
1166 * If we block any signal that was not previously blocked
1167 * for td, and the process has the signal pending, try to schedule
1168 * signal delivery to some thread that does not block the
1169 * signal, possibly waking it up.
1170 */
1171 if (p->p_numthreads != 1)
1172 reschedule_signals(p, new_block, flags);
1173 }
1174
1175 out:
1176 if (!(flags & SIGPROCMASK_PROC_LOCKED))
1177 PROC_UNLOCK(p);
1178 return (error);
1179 }
1180
1181 #ifndef _SYS_SYSPROTO_H_
1182 struct sigprocmask_args {
1183 int how;
1184 const sigset_t *set;
1185 sigset_t *oset;
1186 };
1187 #endif
1188 int
1189 sys_sigprocmask(struct thread *td, struct sigprocmask_args *uap)
1190 {
1191 sigset_t set, oset;
1192 sigset_t *setp, *osetp;
1193 int error;
1194
1195 setp = (uap->set != NULL) ? &set : NULL;
1196 osetp = (uap->oset != NULL) ? &oset : NULL;
1197 if (setp) {
1198 error = copyin(uap->set, setp, sizeof(set));
1199 if (error)
1200 return (error);
1201 }
1202 error = kern_sigprocmask(td, uap->how, setp, osetp, 0);
1203 if (osetp && !error) {
1204 error = copyout(osetp, uap->oset, sizeof(oset));
1205 }
1206 return (error);
1207 }
1208
1209 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1210 #ifndef _SYS_SYSPROTO_H_
1211 struct osigprocmask_args {
1212 int how;
1213 osigset_t mask;
1214 };
1215 #endif
1216 int
1217 osigprocmask(struct thread *td, struct osigprocmask_args *uap)
1218 {
1219 sigset_t set, oset;
1220 int error;
1221
1222 OSIG2SIG(uap->mask, set);
1223 error = kern_sigprocmask(td, uap->how, &set, &oset, 1);
1224 SIG2OSIG(oset, td->td_retval[0]);
1225 return (error);
1226 }
1227 #endif /* COMPAT_43 */
1228
1229 int
1230 sys_sigwait(struct thread *td, struct sigwait_args *uap)
1231 {
1232 ksiginfo_t ksi;
1233 sigset_t set;
1234 int error;
1235
1236 error = copyin(uap->set, &set, sizeof(set));
1237 if (error) {
1238 td->td_retval[0] = error;
1239 return (0);
1240 }
1241
1242 error = kern_sigtimedwait(td, set, &ksi, NULL);
1243 if (error) {
1244 /*
1245 * The sigwait() function shall not return EINTR, but
1246 * the syscall does. A non-ancient libc provides a
1247 * wrapper that hides EINTR. Otherwise, the EINTR return
1248 * is used by libthr to implement the required cancellation
1249 * point in sigwait().
1250 */
1251 if (error == EINTR && td->td_proc->p_osrel < P_OSREL_SIGWAIT)
1252 return (ERESTART);
1253 td->td_retval[0] = error;
1254 return (0);
1255 }
1256
1257 error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo));
1258 td->td_retval[0] = error;
1259 return (0);
1260 }
1261
1262 int
1263 sys_sigtimedwait(struct thread *td, struct sigtimedwait_args *uap)
1264 {
1265 struct timespec ts;
1266 struct timespec *timeout;
1267 sigset_t set;
1268 ksiginfo_t ksi;
1269 int error;
1270
1271 if (uap->timeout) {
1272 error = copyin(uap->timeout, &ts, sizeof(ts));
1273 if (error)
1274 return (error);
1275
1276 timeout = &ts;
1277 } else
1278 timeout = NULL;
1279
1280 error = copyin(uap->set, &set, sizeof(set));
1281 if (error)
1282 return (error);
1283
1284 error = kern_sigtimedwait(td, set, &ksi, timeout);
1285 if (error)
1286 return (error);
1287
1288 if (uap->info)
1289 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1290
1291 if (error == 0)
1292 td->td_retval[0] = ksi.ksi_signo;
1293 return (error);
1294 }
1295
1296 int
1297 sys_sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap)
1298 {
1299 ksiginfo_t ksi;
1300 sigset_t set;
1301 int error;
1302
1303 error = copyin(uap->set, &set, sizeof(set));
1304 if (error)
1305 return (error);
1306
1307 error = kern_sigtimedwait(td, set, &ksi, NULL);
1308 if (error)
1309 return (error);
1310
1311 if (uap->info)
1312 error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t));
1313
1314 if (error == 0)
1315 td->td_retval[0] = ksi.ksi_signo;
1316 return (error);
1317 }
1318
1319 static void
1320 proc_td_siginfo_capture(struct thread *td, siginfo_t *si)
1321 {
1322 struct thread *thr;
1323
1324 FOREACH_THREAD_IN_PROC(td->td_proc, thr) {
1325 if (thr == td)
1326 thr->td_si = *si;
1327 else
1328 thr->td_si.si_signo = 0;
1329 }
1330 }
1331
1332 int
1333 kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
1334 struct timespec *timeout)
1335 {
1336 struct sigacts *ps;
1337 sigset_t saved_mask, new_block;
1338 struct proc *p;
1339 int error, sig, timevalid = 0;
1340 sbintime_t sbt, precision, tsbt;
1341 struct timespec ts;
1342 bool traced;
1343
1344 p = td->td_proc;
1345 error = 0;
1346 traced = false;
1347
1348 /* Ensure the sigfastblock value is up to date. */
1349 sigfastblock_fetch(td);
1350
1351 if (timeout != NULL) {
1352 if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) {
1353 timevalid = 1;
1354 ts = *timeout;
1355 if (ts.tv_sec < INT32_MAX / 2) {
1356 tsbt = tstosbt(ts);
1357 precision = tsbt;
1358 precision >>= tc_precexp;
1359 if (TIMESEL(&sbt, tsbt))
1360 sbt += tc_tick_sbt;
1361 sbt += tsbt;
1362 } else
1363 precision = sbt = 0;
1364 }
1365 } else
1366 precision = sbt = 0;
1367 ksiginfo_init(ksi);
1368 /* Some signals can not be waited for. */
1369 SIG_CANTMASK(waitset);
1370 ps = p->p_sigacts;
1371 PROC_LOCK(p);
1372 saved_mask = td->td_sigmask;
1373 SIGSETNAND(td->td_sigmask, waitset);
1374 if ((p->p_sysent->sv_flags & SV_SIG_DISCIGN) != 0 ||
1375 !kern_sig_discard_ign) {
1376 thread_lock(td);
1377 td->td_flags |= TDF_SIGWAIT;
1378 thread_unlock(td);
1379 }
1380 for (;;) {
1381 mtx_lock(&ps->ps_mtx);
1382 sig = cursig(td);
1383 mtx_unlock(&ps->ps_mtx);
1384 KASSERT(sig >= 0, ("sig %d", sig));
1385 if (sig != 0 && SIGISMEMBER(waitset, sig)) {
1386 if (sigqueue_get(&td->td_sigqueue, sig, ksi) != 0 ||
1387 sigqueue_get(&p->p_sigqueue, sig, ksi) != 0) {
1388 error = 0;
1389 break;
1390 }
1391 }
1392
1393 if (error != 0)
1394 break;
1395
1396 /*
1397 * POSIX says this must be checked after looking for pending
1398 * signals.
1399 */
1400 if (timeout != NULL && !timevalid) {
1401 error = EINVAL;
1402 break;
1403 }
1404
1405 if (traced) {
1406 error = EINTR;
1407 break;
1408 }
1409
1410 error = msleep_sbt(&p->p_sigacts, &p->p_mtx, PPAUSE | PCATCH,
1411 "sigwait", sbt, precision, C_ABSOLUTE);
1412
1413 /* The syscalls can not be restarted. */
1414 if (error == ERESTART)
1415 error = EINTR;
1416
1417 /*
1418 * If PTRACE_SCE or PTRACE_SCX were set after
1419 * userspace entered the syscall, return spurious
1420 * EINTR after the wait is done. Only do this as a last
1421 * resort after rechecking for possible queued signals
1422 * and expired timeouts.
1423 */
1424 if (error == 0 && (p->p_ptevents & PTRACE_SYSCALL) != 0)
1425 traced = true;
1426 }
1427 thread_lock(td);
1428 td->td_flags &= ~TDF_SIGWAIT;
1429 thread_unlock(td);
1430
1431 new_block = saved_mask;
1432 SIGSETNAND(new_block, td->td_sigmask);
1433 td->td_sigmask = saved_mask;
1434 /*
1435 * Fewer signals can be delivered to us, reschedule signal
1436 * notification.
1437 */
1438 if (p->p_numthreads != 1)
1439 reschedule_signals(p, new_block, 0);
1440
1441 if (error == 0) {
1442 SDT_PROBE2(proc, , , signal__clear, sig, ksi);
1443
1444 if (ksi->ksi_code == SI_TIMER)
1445 itimer_accept(p, ksi->ksi_timerid, ksi);
1446
1447 #ifdef KTRACE
1448 if (KTRPOINT(td, KTR_PSIG)) {
1449 sig_t action;
1450
1451 mtx_lock(&ps->ps_mtx);
1452 action = ps->ps_sigact[_SIG_IDX(sig)];
1453 mtx_unlock(&ps->ps_mtx);
1454 ktrpsig(sig, action, &td->td_sigmask, ksi->ksi_code);
1455 }
1456 #endif
1457 if (sig == SIGKILL) {
1458 proc_td_siginfo_capture(td, &ksi->ksi_info);
1459 sigexit(td, sig);
1460 }
1461 }
1462 PROC_UNLOCK(p);
1463 return (error);
1464 }
1465
1466 #ifndef _SYS_SYSPROTO_H_
1467 struct sigpending_args {
1468 sigset_t *set;
1469 };
1470 #endif
1471 int
1472 sys_sigpending(struct thread *td, struct sigpending_args *uap)
1473 {
1474 struct proc *p = td->td_proc;
1475 sigset_t pending;
1476
1477 PROC_LOCK(p);
1478 pending = p->p_sigqueue.sq_signals;
1479 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1480 PROC_UNLOCK(p);
1481 return (copyout(&pending, uap->set, sizeof(sigset_t)));
1482 }
1483
1484 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1485 #ifndef _SYS_SYSPROTO_H_
1486 struct osigpending_args {
1487 int dummy;
1488 };
1489 #endif
1490 int
1491 osigpending(struct thread *td, struct osigpending_args *uap)
1492 {
1493 struct proc *p = td->td_proc;
1494 sigset_t pending;
1495
1496 PROC_LOCK(p);
1497 pending = p->p_sigqueue.sq_signals;
1498 SIGSETOR(pending, td->td_sigqueue.sq_signals);
1499 PROC_UNLOCK(p);
1500 SIG2OSIG(pending, td->td_retval[0]);
1501 return (0);
1502 }
1503 #endif /* COMPAT_43 */
1504
1505 #if defined(COMPAT_43)
1506 /*
1507 * Generalized interface signal handler, 4.3-compatible.
1508 */
1509 #ifndef _SYS_SYSPROTO_H_
1510 struct osigvec_args {
1511 int signum;
1512 struct sigvec *nsv;
1513 struct sigvec *osv;
1514 };
1515 #endif
1516 /* ARGSUSED */
1517 int
1518 osigvec(struct thread *td, struct osigvec_args *uap)
1519 {
1520 struct sigvec vec;
1521 struct sigaction nsa, osa;
1522 struct sigaction *nsap, *osap;
1523 int error;
1524
1525 if (uap->signum <= 0 || uap->signum >= ONSIG)
1526 return (EINVAL);
1527 nsap = (uap->nsv != NULL) ? &nsa : NULL;
1528 osap = (uap->osv != NULL) ? &osa : NULL;
1529 if (nsap) {
1530 error = copyin(uap->nsv, &vec, sizeof(vec));
1531 if (error)
1532 return (error);
1533 nsap->sa_handler = vec.sv_handler;
1534 OSIG2SIG(vec.sv_mask, nsap->sa_mask);
1535 nsap->sa_flags = vec.sv_flags;
1536 nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */
1537 }
1538 error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET);
1539 if (osap && !error) {
1540 vec.sv_handler = osap->sa_handler;
1541 SIG2OSIG(osap->sa_mask, vec.sv_mask);
1542 vec.sv_flags = osap->sa_flags;
1543 vec.sv_flags &= ~SA_NOCLDWAIT;
1544 vec.sv_flags ^= SA_RESTART;
1545 error = copyout(&vec, uap->osv, sizeof(vec));
1546 }
1547 return (error);
1548 }
1549
1550 #ifndef _SYS_SYSPROTO_H_
1551 struct osigblock_args {
1552 int mask;
1553 };
1554 #endif
1555 int
1556 osigblock(struct thread *td, struct osigblock_args *uap)
1557 {
1558 sigset_t set, oset;
1559
1560 OSIG2SIG(uap->mask, set);
1561 kern_sigprocmask(td, SIG_BLOCK, &set, &oset, 0);
1562 SIG2OSIG(oset, td->td_retval[0]);
1563 return (0);
1564 }
1565
1566 #ifndef _SYS_SYSPROTO_H_
1567 struct osigsetmask_args {
1568 int mask;
1569 };
1570 #endif
1571 int
1572 osigsetmask(struct thread *td, struct osigsetmask_args *uap)
1573 {
1574 sigset_t set, oset;
1575
1576 OSIG2SIG(uap->mask, set);
1577 kern_sigprocmask(td, SIG_SETMASK, &set, &oset, 0);
1578 SIG2OSIG(oset, td->td_retval[0]);
1579 return (0);
1580 }
1581 #endif /* COMPAT_43 */
1582
1583 /*
1584 * Suspend the calling thread until a signal arrives, providing the mask
1585 * to be set in the meantime.
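 *
 * Userlevel usage sketch (informational, not kernel code): the classic
 * race-free wait blocks the signal, tests the condition, then atomically
 * restores the old mask while sleeping:
 *
 *	sigprocmask(SIG_BLOCK, &mask, &omask);
 *	while (!condition)
 *		sigsuspend(&omask);
 *	sigprocmask(SIG_SETMASK, &omask, NULL);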
1586 */
1587 #ifndef _SYS_SYSPROTO_H_
1588 struct sigsuspend_args {
1589 const sigset_t *sigmask;
1590 };
1591 #endif
1592 /* ARGSUSED */
1593 int
1594 sys_sigsuspend(struct thread *td, struct sigsuspend_args *uap)
1595 {
1596 sigset_t mask;
1597 int error;
1598
1599 error = copyin(uap->sigmask, &mask, sizeof(mask));
1600 if (error)
1601 return (error);
1602 return (kern_sigsuspend(td, mask));
1603 }
1604
1605 int
1606 kern_sigsuspend(struct thread *td, sigset_t mask)
1607 {
1608 struct proc *p = td->td_proc;
1609 int has_sig, sig;
1610
1611 /* Ensure the sigfastblock value is up to date. */
1612 sigfastblock_fetch(td);
1613
1614 /*
1615 * When returning from sigsuspend, we want
1616 * the old mask to be restored after the
1617 * signal handler has finished. Thus, we
1618 * save it here and mark the thread (TDP_OLDMASK)
1619 * to indicate this.
1620 */
1621 PROC_LOCK(p);
1622 kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
1623 SIGPROCMASK_PROC_LOCKED);
1624 td->td_pflags |= TDP_OLDMASK;
1625 ast_sched(td, TDA_SIGSUSPEND);
1626
1627 /*
1628 * Process signals now. Otherwise, we can get a spurious wakeup
1629 * due to a signal entering the process queue but being delivered
1630 * to another thread. sigsuspend should return only on signal
1631 * delivery.
1632 */
1633 (p->p_sysent->sv_set_syscall_retval)(td, EINTR);
1634 for (has_sig = 0; !has_sig;) {
1635 while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause",
1636 0) == 0)
1637 /* void */;
1638 thread_suspend_check(0);
1639 mtx_lock(&p->p_sigacts->ps_mtx);
1640 while ((sig = cursig(td)) != 0) {
1641 KASSERT(sig >= 0, ("sig %d", sig));
1642 has_sig += postsig(sig);
1643 }
1644 mtx_unlock(&p->p_sigacts->ps_mtx);
1645
1646 /*
1647 * If PTRACE_SCE or PTRACE_SCX were set after
1648 * userspace entered the syscall, return spurious
1649 * EINTR.
1650 */
1651 if ((p->p_ptevents & PTRACE_SYSCALL) != 0)
1652 has_sig += 1;
1653 }
1654 PROC_UNLOCK(p);
1655 td->td_errno = EINTR;
1656 td->td_pflags |= TDP_NERRNO;
1657 return (EJUSTRETURN);
1658 }
1659
1660 #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */
1661 /*
1662 * Compatibility sigsuspend call for old binaries. Note nonstandard calling
1663 * convention: libc stub passes mask, not pointer, to save a copyin.
1664 */
1665 #ifndef _SYS_SYSPROTO_H_
1666 struct osigsuspend_args {
1667 osigset_t mask;
1668 };
1669 #endif
1670 /* ARGSUSED */
1671 int
1672 osigsuspend(struct thread *td, struct osigsuspend_args *uap)
1673 {
1674 sigset_t mask;
1675
1676 OSIG2SIG(uap->mask, mask);
1677 return (kern_sigsuspend(td, mask));
1678 }
1679 #endif /* COMPAT_43 */
1680
1681 #if defined(COMPAT_43)
1682 #ifndef _SYS_SYSPROTO_H_
1683 struct osigstack_args {
1684 struct sigstack *nss;
1685 struct sigstack *oss;
1686 };
1687 #endif
1688 /* ARGSUSED */
1689 int
1690 osigstack(struct thread *td, struct osigstack_args *uap)
1691 {
1692 struct sigstack nss, oss;
1693 int error = 0;
1694
1695 if (uap->nss != NULL) {
1696 error = copyin(uap->nss, &nss, sizeof(nss));
1697 if (error)
1698 return (error);
1699 }
1700 oss.ss_sp = td->td_sigstk.ss_sp;
1701 oss.ss_onstack = sigonstack(cpu_getstack(td));
1702 if (uap->nss != NULL) {
1703 td->td_sigstk.ss_sp = nss.ss_sp;
1704 td->td_sigstk.ss_size = 0;
1705 td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK;
1706 td->td_pflags |= TDP_ALTSTACK;
1707 }
1708 if (uap->oss != NULL)
1709 error = copyout(&oss, uap->oss, sizeof(oss));
1710
1711 return (error);
1712 }
1713 #endif /* COMPAT_43 */
1714
1715 #ifndef _SYS_SYSPROTO_H_
1716 struct sigaltstack_args {
1717 stack_t *ss;
1718 stack_t *oss;
1719 };
1720 #endif
1721 /* ARGSUSED */
1722 int
1723 sys_sigaltstack(struct thread *td, struct sigaltstack_args *uap)
1724 {
1725 stack_t ss, oss;
1726 int error;
1727
1728 if (uap->ss != NULL) {
1729 error = copyin(uap->ss, &ss, sizeof(ss));
1730 if (error)
1731 return (error);
1732 }
1733 error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL,
1734 (uap->oss != NULL) ? &oss : NULL);
1735 if (error)
1736 return (error);
1737 if (uap->oss != NULL)
1738 error = copyout(&oss, uap->oss, sizeof(stack_t));
1739 return (error);
1740 }
1741
1742 int
1743 kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss)
1744 {
1745 struct proc *p = td->td_proc;
1746 int oonstack;
1747
1748 oonstack = sigonstack(cpu_getstack(td));
1749
1750 if (oss != NULL) {
1751 *oss = td->td_sigstk;
1752 oss->ss_flags = (td->td_pflags & TDP_ALTSTACK)
1753 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
1754 }
1755
1756 if (ss != NULL) {
1757 if (oonstack)
1758 return (EPERM);
1759 if ((ss->ss_flags & ~SS_DISABLE) != 0)
1760 return (EINVAL);
1761 if (!(ss->ss_flags & SS_DISABLE)) {
1762 if (ss->ss_size < p->p_sysent->sv_minsigstksz)
1763 return (ENOMEM);
1764
1765 td->td_sigstk = *ss;
1766 td->td_pflags |= TDP_ALTSTACK;
1767 } else {
1768 td->td_pflags &= ~TDP_ALTSTACK;
1769 }
1770 }
1771 return (0);
1772 }
1773
1774 struct killpg1_ctx {
1775 struct thread *td;
1776 ksiginfo_t *ksi;
1777 int sig;
1778 bool sent;
1779 bool found;
1780 int ret;
1781 };
1782
1783 static void
1784 killpg1_sendsig_locked(struct proc *p, struct killpg1_ctx *arg)
1785 {
1786 int err;
1787
1788 err = p_cansignal(arg->td, p, arg->sig);
1789 if (err == 0 && arg->sig != 0)
1790 pksignal(p, arg->sig, arg->ksi);
1791 if (err != ESRCH)
1792 arg->found = true;
1793 if (err == 0)
1794 arg->sent = true;
1795 else if (arg->ret == 0 && err != ESRCH && err != EPERM)
1796 arg->ret = err;
1797 }
1798
1799 static void
1800 killpg1_sendsig(struct proc *p, bool notself, struct killpg1_ctx *arg)
1801 {
1802
1803 if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
1804 (notself && p == arg->td->td_proc) || p->p_state == PRS_NEW)
1805 return;
1806
1807 PROC_LOCK(p);
1808 killpg1_sendsig_locked(p, arg);
1809 PROC_UNLOCK(p);
1810 }
1811
1812 static void
1813 kill_processes_prison_cb(struct proc *p, void *arg)
1814 {
1815 struct killpg1_ctx *ctx = arg;
1816
1817 if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) != 0 ||
1818 (p == ctx->td->td_proc) || p->p_state == PRS_NEW)
1819 return;
1820
1821 killpg1_sendsig_locked(p, ctx);
1822 }
1823
1824 /*
1825 * Common code for kill process group/broadcast kill.
1826 * td is the calling thread, as usual.
1827 */
1828 static int
1829 killpg1(struct thread *td, int sig, int pgid, int all, ksiginfo_t *ksi)
1830 {
1831 struct proc *p;
1832 struct pgrp *pgrp;
1833 struct killpg1_ctx arg;
1834
1835 arg.td = td;
1836 arg.ksi = ksi;
1837 arg.sig = sig;
1838 arg.sent = false;
1839 arg.found = false;
1840 arg.ret = 0;
1841 if (all) {
1842 /*
1843 * broadcast
1844 */
1845 prison_proc_iterate(td->td_ucred->cr_prison,
1846 kill_processes_prison_cb, &arg);
1847 } else {
1848 again:
1849 sx_slock(&proctree_lock);
1850 if (pgid == 0) {
1851 /*
1852 * zero pgid means send to my process group.
1853 */
1854 pgrp = td->td_proc->p_pgrp;
1855 PGRP_LOCK(pgrp);
1856 } else {
1857 pgrp = pgfind(pgid);
1858 if (pgrp == NULL) {
1859 sx_sunlock(&proctree_lock);
1860 return (ESRCH);
1861 }
1862 }
1863 sx_sunlock(&proctree_lock);
1864 if (!sx_try_xlock(&pgrp->pg_killsx)) {
1865 PGRP_UNLOCK(pgrp);
1866 sx_xlock(&pgrp->pg_killsx);
1867 sx_xunlock(&pgrp->pg_killsx);
1868 goto again;
1869 }
1870 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1871 killpg1_sendsig(p, false, &arg);
1872 }
1873 PGRP_UNLOCK(pgrp);
1874 sx_xunlock(&pgrp->pg_killsx);
1875 }
1876 MPASS(arg.ret != 0 || arg.found || !arg.sent);
1877 if (arg.ret == 0 && !arg.sent)
1878 arg.ret = arg.found ? EPERM : ESRCH;
1879 return (arg.ret);
1880 }
1881
1882 #ifndef _SYS_SYSPROTO_H_
1883 struct kill_args {
1884 int pid;
1885 int signum;
1886 };
1887 #endif
1888 /* ARGSUSED */
1889 int
1890 sys_kill(struct thread *td, struct kill_args *uap)
1891 {
1892
1893 return (kern_kill(td, uap->pid, uap->signum));
1894 }
1895
1896 int
1897 kern_kill(struct thread *td, pid_t pid, int signum)
1898 {
1899 ksiginfo_t ksi;
1900 struct proc *p;
1901 int error;
1902
1903 /*
1904 * A process in capability mode can send signals only to itself.
1905 * The main rationale behind this is that abort(3) is implemented as
1906 * kill(getpid(), SIGABRT).
1907 */
1908 if (pid != td->td_proc->p_pid) {
1909 if (CAP_TRACING(td))
1910 ktrcapfail(CAPFAIL_SIGNAL, &signum);
1911 if (IN_CAPABILITY_MODE(td))
1912 return (ECAPMODE);
1913 }
1914
1915 AUDIT_ARG_SIGNUM(signum);
1916 AUDIT_ARG_PID(pid);
1917 if ((u_int)signum > _SIG_MAXSIG)
1918 return (EINVAL);
1919
1920 ksiginfo_init(&ksi);
1921 ksi.ksi_signo = signum;
1922 ksi.ksi_code = SI_USER;
1923 ksi.ksi_pid = td->td_proc->p_pid;
1924 ksi.ksi_uid = td->td_ucred->cr_ruid;
1925
1926 if (pid > 0) {
1927 /* kill single process */
1928 if ((p = pfind_any(pid)) == NULL)
1929 return (ESRCH);
1930 AUDIT_ARG_PROCESS(p);
1931 error = p_cansignal(td, p, signum);
1932 if (error == 0 && signum)
1933 pksignal(p, signum, &ksi);
1934 PROC_UNLOCK(p);
1935 return (error);
1936 }
1937 switch (pid) {
1938 case -1: /* broadcast signal */
1939 return (killpg1(td, signum, 0, 1, &ksi));
1940 case 0: /* signal own process group */
1941 return (killpg1(td, signum, 0, 0, &ksi));
1942 default: /* negative explicit process group */
1943 return (killpg1(td, signum, -pid, 0, &ksi));
1944 }
1945 /* NOTREACHED */
1946 }
1947
1948 int
1949 sys_pdkill(struct thread *td, struct pdkill_args *uap)
1950 {
1951 struct proc *p;
1952 int error;
1953
1954 AUDIT_ARG_SIGNUM(uap->signum);
1955 AUDIT_ARG_FD(uap->fd);
1956 if ((u_int)uap->signum > _SIG_MAXSIG)
1957 return (EINVAL);
1958
1959 error = procdesc_find(td, uap->fd, &cap_pdkill_rights, &p);
1960 if (error)
1961 return (error);
1962 AUDIT_ARG_PROCESS(p);
1963 error = p_cansignal(td, p, uap->signum);
1964 if (error == 0 && uap->signum)
1965 kern_psignal(p, uap->signum);
1966 PROC_UNLOCK(p);
1967 return (error);
1968 }
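/*
 * Illustrative userland sketch (not part of the kernel): pdkill(2) is the
 * process-descriptor analogue of kill(2) serviced by sys_pdkill() above.
 * Paired with pdfork(2) it might be used as follows; child_work() is a
 * hypothetical placeholder.
 *
 *	int pd;
 *	pid_t pid = pdfork(&pd, 0);
 *	if (pid == 0)
 *		child_work();			// child
 *	else if (pid > 0)
 *		pdkill(pd, SIGTERM);		// parent signals via descriptor
 */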
1969
1970 #if defined(COMPAT_43)
1971 #ifndef _SYS_SYSPROTO_H_
1972 struct okillpg_args {
1973 int pgid;
1974 int signum;
1975 };
1976 #endif
1977 /* ARGSUSED */
1978 int
1979 okillpg(struct thread *td, struct okillpg_args *uap)
1980 {
1981 ksiginfo_t ksi;
1982
1983 AUDIT_ARG_SIGNUM(uap->signum);
1984 AUDIT_ARG_PID(uap->pgid);
1985 if ((u_int)uap->signum > _SIG_MAXSIG)
1986 return (EINVAL);
1987
1988 ksiginfo_init(&ksi);
1989 ksi.ksi_signo = uap->signum;
1990 ksi.ksi_code = SI_USER;
1991 ksi.ksi_pid = td->td_proc->p_pid;
1992 ksi.ksi_uid = td->td_ucred->cr_ruid;
1993 return (killpg1(td, uap->signum, uap->pgid, 0, &ksi));
1994 }
1995 #endif /* COMPAT_43 */
1996
1997 #ifndef _SYS_SYSPROTO_H_
1998 struct sigqueue_args {
1999 pid_t pid;
2000 int signum;
2001 /* union sigval */ void *value;
2002 };
2003 #endif
2004 int
2005 sys_sigqueue(struct thread *td, struct sigqueue_args *uap)
2006 {
2007 union sigval sv;
2008
2009 sv.sival_ptr = uap->value;
2010
2011 return (kern_sigqueue(td, uap->pid, uap->signum, &sv));
2012 }
2013
2014 int
2015 kern_sigqueue(struct thread *td, pid_t pid, int signumf, union sigval *value)
2016 {
2017 ksiginfo_t ksi;
2018 struct proc *p;
2019 struct thread *td2;
2020 u_int signum;
2021 int error;
2022
2023 signum = signumf & ~__SIGQUEUE_TID;
2024 if (signum > _SIG_MAXSIG)
2025 return (EINVAL);
2026
2027 /*
2028 * Specification says sigqueue can only send signal to
2029 * single process.
2030 */
2031 if (pid <= 0)
2032 return (EINVAL);
2033
2034 if ((signumf & __SIGQUEUE_TID) == 0) {
2035 if ((p = pfind_any(pid)) == NULL)
2036 return (ESRCH);
2037 td2 = NULL;
2038 } else {
2039 p = td->td_proc;
2040 td2 = tdfind((lwpid_t)pid, p->p_pid);
2041 if (td2 == NULL)
2042 return (ESRCH);
2043 }
2044
2045 error = p_cansignal(td, p, signum);
2046 if (error == 0 && signum != 0) {
2047 ksiginfo_init(&ksi);
2048 ksi.ksi_flags = KSI_SIGQ;
2049 ksi.ksi_signo = signum;
2050 ksi.ksi_code = SI_QUEUE;
2051 ksi.ksi_pid = td->td_proc->p_pid;
2052 ksi.ksi_uid = td->td_ucred->cr_ruid;
2053 ksi.ksi_value = *value;
2054 error = tdsendsignal(p, td2, ksi.ksi_signo, &ksi);
2055 }
2056 PROC_UNLOCK(p);
2057 return (error);
2058 }
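/*
 * Illustrative userland sketch (not part of the kernel): kern_sigqueue()
 * backs the POSIX sigqueue(2) interface, which attaches a value to the
 * queued signal.  A minimal use, assuming only the standard API, would be:
 *
 *	union sigval sv;
 *
 *	sv.sival_int = 42;
 *	if (sigqueue(target_pid, SIGUSR1, sv) == -1)
 *		err(1, "sigqueue");
 *	// the receiver sees the value in si_value of its SA_SIGINFO handler
 */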
2059
2060 /*
2061 * Send a signal to a process group. If checkctty is 1,
2062 * limit to members which have a controlling terminal.
2063 */
2064 void
2065 pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi)
2066 {
2067 struct proc *p;
2068
2069 if (pgrp) {
2070 PGRP_LOCK_ASSERT(pgrp, MA_OWNED);
2071 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
2072 PROC_LOCK(p);
2073 if (p->p_state == PRS_NORMAL &&
2074 (checkctty == 0 || p->p_flag & P_CONTROLT))
2075 pksignal(p, sig, ksi);
2076 PROC_UNLOCK(p);
2077 }
2078 }
2079 }
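/*
 * Usage sketch (for illustration only): pgsignal() expects the process
 * group lock to be held by the caller, as asserted above.  A tty-like
 * consumer delivering SIGINT to the foreground process group might do:
 *
 *	PGRP_LOCK(pgrp);
 *	pgsignal(pgrp, SIGINT, 1, NULL);	// 1: only members with a ctty
 *	PGRP_UNLOCK(pgrp);
 */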
2080
2081 /*
2082 * Recalculate the signal mask and reset the signal disposition after
2083 * usermode frame for delivery is formed. Should be called after
2084 * mach-specific routine, because sysent->sv_sendsig() needs correct
2085 * ps_siginfo and signal mask.
2086 */
2087 static void
2088 postsig_done(int sig, struct thread *td, struct sigacts *ps)
2089 {
2090 sigset_t mask;
2091
2092 mtx_assert(&ps->ps_mtx, MA_OWNED);
2093 td->td_ru.ru_nsignals++;
2094 mask = ps->ps_catchmask[_SIG_IDX(sig)];
2095 if (!SIGISMEMBER(ps->ps_signodefer, sig))
2096 SIGADDSET(mask, sig);
2097 kern_sigprocmask(td, SIG_BLOCK, &mask, NULL,
2098 SIGPROCMASK_PROC_LOCKED | SIGPROCMASK_PS_LOCKED);
2099 if (SIGISMEMBER(ps->ps_sigreset, sig))
2100 sigdflt(ps, sig);
2101 }
2102
2103 /*
2104 * Send a signal caused by a trap to the current thread. If it will be
2105 * caught immediately, deliver it with correct code. Otherwise, post it
2106 * normally.
2107 */
2108 void
2109 trapsignal(struct thread *td, ksiginfo_t *ksi)
2110 {
2111 struct sigacts *ps;
2112 struct proc *p;
2113 sigset_t sigmask;
2114 int sig;
2115
2116 p = td->td_proc;
2117 sig = ksi->ksi_signo;
2118 KASSERT(_SIG_VALID(sig), ("invalid signal"));
2119
2120 sigfastblock_fetch(td);
2121 PROC_LOCK(p);
2122 ps = p->p_sigacts;
2123 mtx_lock(&ps->ps_mtx);
2124 sigmask = td->td_sigmask;
2125 if (td->td_sigblock_val != 0)
2126 SIGSETOR(sigmask, fastblock_mask);
2127 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
2128 !SIGISMEMBER(sigmask, sig)) {
2129 #ifdef KTRACE
2130 if (KTRPOINT(curthread, KTR_PSIG))
2131 ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
2132 &td->td_sigmask, ksi->ksi_code);
2133 #endif
2134 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
2135 ksi, &td->td_sigmask);
2136 postsig_done(sig, td, ps);
2137 mtx_unlock(&ps->ps_mtx);
2138 } else {
2139 /*
2140 * Avoid a possible infinite loop if the thread
2141 * masking the signal or process is ignoring the
2142 * signal.
2143 */
2144 if (kern_forcesigexit && (SIGISMEMBER(sigmask, sig) ||
2145 ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN)) {
2146 SIGDELSET(td->td_sigmask, sig);
2147 SIGDELSET(ps->ps_sigcatch, sig);
2148 SIGDELSET(ps->ps_sigignore, sig);
2149 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
2150 td->td_pflags &= ~TDP_SIGFASTBLOCK;
2151 td->td_sigblock_val = 0;
2152 }
2153 mtx_unlock(&ps->ps_mtx);
2154 p->p_sig = sig; /* XXX to verify code */
2155 tdsendsignal(p, td, sig, ksi);
2156 }
2157 PROC_UNLOCK(p);
2158 }
2159
2160 static struct thread *
2161 sigtd(struct proc *p, int sig, bool fast_sigblock)
2162 {
2163 struct thread *td, *signal_td;
2164
2165 PROC_LOCK_ASSERT(p, MA_OWNED);
2166 MPASS(!fast_sigblock || p == curproc);
2167
2168 /*
2169 * Check if current thread can handle the signal without
2170 * switching context to another thread.
2171 */
2172 if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig) &&
2173 (!fast_sigblock || curthread->td_sigblock_val == 0))
2174 return (curthread);
2175
2176 /* Find a non-stopped thread that does not mask the signal. */
2177 signal_td = NULL;
2178 FOREACH_THREAD_IN_PROC(p, td) {
2179 if (!SIGISMEMBER(td->td_sigmask, sig) && (!fast_sigblock ||
2180 td != curthread || td->td_sigblock_val == 0) &&
2181 (td->td_flags & TDF_BOUNDARY) == 0) {
2182 signal_td = td;
2183 break;
2184 }
2185 }
2186 /* Select random (first) thread if no better match was found. */
2187 if (signal_td == NULL)
2188 signal_td = FIRST_THREAD_IN_PROC(p);
2189 return (signal_td);
2190 }
2191
2192 /*
2193 * Send the signal to the process. If the signal has an action, the action
2194 * is usually performed by the target process rather than the caller; we add
2195 * the signal to the set of pending signals for the process.
2196 *
2197 * Exceptions:
2198 * o When a stop signal is sent to a sleeping process that takes the
2199 * default action, the process is stopped without awakening it.
2200 * o SIGCONT restarts stopped processes (or puts them back to sleep)
2201 * regardless of the signal action (eg, blocked or ignored).
2202 *
2203 * Other ignored signals are discarded immediately.
2204 *
2205 * NB: This function may be entered from the debugger via the "kill" DDB
2206 * command. There is little that can be done to mitigate the possibly messy
2207 * side effects of this unwise possibility.
2208 */
2209 void
2210 kern_psignal(struct proc *p, int sig)
2211 {
2212 ksiginfo_t ksi;
2213
2214 ksiginfo_init(&ksi);
2215 ksi.ksi_signo = sig;
2216 ksi.ksi_code = SI_KERNEL;
2217 (void) tdsendsignal(p, NULL, sig, &ksi);
2218 }
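/*
 * Usage sketch (for illustration only): kern_psignal() funnels into
 * tdsendsignal(), which asserts that the process lock is held, so
 * in-kernel callers are expected to hold it across the call:
 *
 *	PROC_LOCK(p);
 *	kern_psignal(p, SIGHUP);
 *	PROC_UNLOCK(p);
 */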
2219
2220 int
2221 pksignal(struct proc *p, int sig, ksiginfo_t *ksi)
2222 {
2223
2224 return (tdsendsignal(p, NULL, sig, ksi));
2225 }
2226
2227 /* Utility function for finding a thread to send signal event to. */
2228 int
2229 sigev_findtd(struct proc *p, struct sigevent *sigev, struct thread **ttd)
2230 {
2231 struct thread *td;
2232
2233 if (sigev->sigev_notify == SIGEV_THREAD_ID) {
2234 td = tdfind(sigev->sigev_notify_thread_id, p->p_pid);
2235 if (td == NULL)
2236 return (ESRCH);
2237 *ttd = td;
2238 } else {
2239 *ttd = NULL;
2240 PROC_LOCK(p);
2241 }
2242 return (0);
2243 }
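/*
 * Usage sketch (for illustration only): on success sigev_findtd() returns
 * with the process locked in both branches (tdfind() locks the containing
 * process), so a notification source such as a timer would typically do
 * something like the following; ksi and td2 are hypothetical locals.
 *
 *	if (sigev_findtd(p, sigev, &td2) == 0) {
 *		tdsendsignal(p, td2, ksi->ksi_signo, ksi);
 *		PROC_UNLOCK(p);
 *	}
 */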
2244
2245 void
2246 tdsignal(struct thread *td, int sig)
2247 {
2248 ksiginfo_t ksi;
2249
2250 ksiginfo_init(&ksi);
2251 ksi.ksi_signo = sig;
2252 ksi.ksi_code = SI_KERNEL;
2253 (void) tdsendsignal(td->td_proc, td, sig, &ksi);
2254 }
2255
2256 void
2257 tdksignal(struct thread *td, int sig, ksiginfo_t *ksi)
2258 {
2259
2260 (void) tdsendsignal(td->td_proc, td, sig, ksi);
2261 }
2262
2263 static void
2264 sig_sleepq_abort(struct thread *td, int intrval)
2265 {
2266 THREAD_LOCK_ASSERT(td, MA_OWNED);
2267
2268 if (intrval == 0 && (td->td_flags & TDF_SIGWAIT) == 0)
2269 thread_unlock(td);
2270 else
2271 sleepq_abort(td, intrval);
2272 }
2273
2274 int
2275 tdsendsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
2276 {
2277 sig_t action;
2278 sigqueue_t *sigqueue;
2279 struct sigacts *ps;
2280 int intrval, prop, ret;
2281
2282 MPASS(td == NULL || p == td->td_proc);
2283 PROC_LOCK_ASSERT(p, MA_OWNED);
2284
2285 if (!_SIG_VALID(sig))
2286 panic("%s(): invalid signal %d", __func__, sig);
2287
2288 KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("%s: ksi on queue", __func__));
2289
2290 /*
2291 * IEEE Std 1003.1-2001: return success when killing a zombie.
2292 */
2293 if (p->p_state == PRS_ZOMBIE) {
2294 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2295 ksiginfo_tryfree(ksi);
2296 return (0);
2297 }
2298
2299 ps = p->p_sigacts;
2300 KNOTE_LOCKED(p->p_klist, NOTE_SIGNAL | sig);
2301 prop = sigprop(sig);
2302
2303 if (td == NULL) {
2304 td = sigtd(p, sig, false);
2305 sigqueue = &p->p_sigqueue;
2306 } else
2307 sigqueue = &td->td_sigqueue;
2308
2309 SDT_PROBE3(proc, , , signal__send, td, p, sig);
2310
2311 /*
2312 * If the signal is being ignored, then we forget about it
2313 * immediately, except when the target process executes
2314 * sigwait(). (Note: we don't set SIGCONT in ps_sigignore,
2315 * and if it is set to SIG_IGN, action will be SIG_DFL here.)
2316 */
2317 mtx_lock(&ps->ps_mtx);
2318 if (SIGISMEMBER(ps->ps_sigignore, sig)) {
2319 if (kern_sig_discard_ign &&
2320 (p->p_sysent->sv_flags & SV_SIG_DISCIGN) == 0) {
2321 SDT_PROBE3(proc, , , signal__discard, td, p, sig);
2322
2323 mtx_unlock(&ps->ps_mtx);
2324 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2325 ksiginfo_tryfree(ksi);
2326 return (0);
2327 } else {
2328 action = SIG_CATCH;
2329 intrval = 0;
2330 }
2331 } else {
2332 if (SIGISMEMBER(td->td_sigmask, sig))
2333 action = SIG_HOLD;
2334 else if (SIGISMEMBER(ps->ps_sigcatch, sig))
2335 action = SIG_CATCH;
2336 else
2337 action = SIG_DFL;
2338 if (SIGISMEMBER(ps->ps_sigintr, sig))
2339 intrval = EINTR;
2340 else
2341 intrval = ERESTART;
2342 }
2343 mtx_unlock(&ps->ps_mtx);
2344
2345 if (prop & SIGPROP_CONT)
2346 sigqueue_delete_stopmask_proc(p);
2347 else if (prop & SIGPROP_STOP) {
2348 /*
2349 * If sending a tty stop signal to a member of an orphaned
2350 * process group, discard the signal here if the action
2351 * is default; don't stop the process below if sleeping,
2352 * and don't clear any pending SIGCONT.
2353 */
2354 if ((prop & SIGPROP_TTYSTOP) != 0 &&
2355 (p->p_pgrp->pg_flags & PGRP_ORPHANED) != 0 &&
2356 action == SIG_DFL) {
2357 if (ksi != NULL && (ksi->ksi_flags & KSI_INS) != 0)
2358 ksiginfo_tryfree(ksi);
2359 return (0);
2360 }
2361 sigqueue_delete_proc(p, SIGCONT);
2362 if (p->p_flag & P_CONTINUED) {
2363 p->p_flag &= ~P_CONTINUED;
2364 PROC_LOCK(p->p_pptr);
2365 sigqueue_take(p->p_ksi);
2366 PROC_UNLOCK(p->p_pptr);
2367 }
2368 }
2369
2370 ret = sigqueue_add(sigqueue, sig, ksi);
2371 if (ret != 0)
2372 return (ret);
2373 signotify(td);
2374 /*
2375 * Defer further processing for signals which are held,
2376 * except that stopped processes must be continued by SIGCONT.
2377 */
2378 if (action == SIG_HOLD &&
2379 !((prop & SIGPROP_CONT) && (p->p_flag & P_STOPPED_SIG)))
2380 return (0);
2381
2382 /*
2383 * Some signals have a process-wide effect and a per-thread
2384 * component. Most processing occurs when the process next
2385 * tries to cross the user boundary, however there are some
2386 * times when processing needs to be done immediately, such as
2387 * waking up threads so that they can cross the user boundary.
2388 * We try to do the per-process part here.
2389 */
2390 if (P_SHOULDSTOP(p)) {
2391 KASSERT(!(p->p_flag & P_WEXIT),
2392 ("signal to stopped but exiting process"));
2393 if (sig == SIGKILL) {
2394 /*
2395 * If traced process is already stopped,
2396 * then no further action is necessary.
2397 */
2398 if (p->p_flag & P_TRACED)
2399 return (0);
2400 /*
2401 * SIGKILL sets process running.
2402 * It will die elsewhere.
2403 * All threads must be restarted.
2404 */
2405 p->p_flag &= ~P_STOPPED_SIG;
2406 goto runfast;
2407 }
2408
2409 if (prop & SIGPROP_CONT) {
2410 /*
2411 * If traced process is already stopped,
2412 * then no further action is necessary.
2413 */
2414 if (p->p_flag & P_TRACED)
2415 return (0);
2416 /*
2417 * If SIGCONT is default (or ignored), we continue the
2418 * process but don't leave the signal in sigqueue as
2419 * it has no further action. If SIGCONT is held, we
2420 * continue the process and leave the signal in
2421 * sigqueue. If the process catches SIGCONT, let it
2422 * handle the signal itself. If it isn't waiting on
2423 * an event, it goes back to run state.
2424 * Otherwise, process goes back to sleep state.
2425 */
2426 p->p_flag &= ~P_STOPPED_SIG;
2427 PROC_SLOCK(p);
2428 if (p->p_numthreads == p->p_suspcount) {
2429 PROC_SUNLOCK(p);
2430 p->p_flag |= P_CONTINUED;
2431 p->p_xsig = SIGCONT;
2432 PROC_LOCK(p->p_pptr);
2433 childproc_continued(p);
2434 PROC_UNLOCK(p->p_pptr);
2435 PROC_SLOCK(p);
2436 }
2437 if (action == SIG_DFL) {
2438 thread_unsuspend(p);
2439 PROC_SUNLOCK(p);
2440 sigqueue_delete(sigqueue, sig);
2441 goto out_cont;
2442 }
2443 if (action == SIG_CATCH) {
2444 /*
2445 * The process wants to catch it so it needs
2446 * to run at least one thread, but which one?
2447 */
2448 PROC_SUNLOCK(p);
2449 goto runfast;
2450 }
2451 /*
2452 * The signal is not ignored or caught.
2453 */
2454 thread_unsuspend(p);
2455 PROC_SUNLOCK(p);
2456 goto out_cont;
2457 }
2458
2459 if (prop & SIGPROP_STOP) {
2460 /*
2461 * If traced process is already stopped,
2462 * then no further action is necessary.
2463 */
2464 if (p->p_flag & P_TRACED)
2465 return (0);
2466 /*
2467 * Already stopped, don't need to stop again
2468 * (If we did, the shell could get confused.)
2469 * Just make sure the signal STOP bit is set.
2470 */
2471 p->p_flag |= P_STOPPED_SIG;
2472 sigqueue_delete(sigqueue, sig);
2473 return (0);
2474 }
2475
2476 /*
2477 * All other kinds of signals:
2478 * If a thread is sleeping interruptibly, simulate a
2479 * wakeup so that when it is continued it will be made
2480 * runnable and can look at the signal. However, don't make
2481 * the PROCESS runnable, leave it stopped.
2482 * It may run a bit until it hits a thread_suspend_check().
2483 */
2484 PROC_SLOCK(p);
2485 thread_lock(td);
2486 if (TD_CAN_ABORT(td))
2487 sig_sleepq_abort(td, intrval);
2488 else
2489 thread_unlock(td);
2490 PROC_SUNLOCK(p);
2491 return (0);
2492 /*
2493 * Mutexes are short lived. Threads waiting on them will
2494 * hit thread_suspend_check() soon.
2495 */
2496 } else if (p->p_state == PRS_NORMAL) {
2497 if (p->p_flag & P_TRACED || action == SIG_CATCH) {
2498 tdsigwakeup(td, sig, action, intrval);
2499 return (0);
2500 }
2501
2502 MPASS(action == SIG_DFL);
2503
2504 if (prop & SIGPROP_STOP) {
2505 if (p->p_flag & (P_PPWAIT|P_WEXIT))
2506 return (0);
2507 p->p_flag |= P_STOPPED_SIG;
2508 p->p_xsig = sig;
2509 PROC_SLOCK(p);
2510 sig_suspend_threads(td, p);
2511 if (p->p_numthreads == p->p_suspcount) {
2512 /*
2513 * Only a thread sending a signal to another
2514 * process can reach here; a thread signalling
2515 * its own process does not suspend itself
2516 * here, so p_numthreads should never be
2517 * equal to p_suspcount.
2518 */
2519 thread_stopped(p);
2520 PROC_SUNLOCK(p);
2521 sigqueue_delete_proc(p, p->p_xsig);
2522 } else
2523 PROC_SUNLOCK(p);
2524 return (0);
2525 }
2526 } else {
2527 /* Not in "NORMAL" state.  Discard the signal. */
2528 sigqueue_delete(sigqueue, sig);
2529 return (0);
2530 }
2531
2532 /*
2533 * The process is not stopped so we need to apply the signal to all the
2534 * running threads.
2535 */
2536 runfast:
2537 tdsigwakeup(td, sig, action, intrval);
2538 PROC_SLOCK(p);
2539 thread_unsuspend(p);
2540 PROC_SUNLOCK(p);
2541 out_cont:
2542 itimer_proc_continue(p);
2543 kqtimer_proc_continue(p);
2544
2545 return (0);
2546 }
2547
2548 /*
2549 * The force of a signal has been directed against a single
2550 * thread. We need to see what we can do about knocking it
2551 * out of any sleep it may be in etc.
2552 */
2553 static void
2554 tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
2555 {
2556 struct proc *p = td->td_proc;
2557 int prop;
2558
2559 PROC_LOCK_ASSERT(p, MA_OWNED);
2560 prop = sigprop(sig);
2561
2562 PROC_SLOCK(p);
2563 thread_lock(td);
2564 /*
2565 * Bring the priority of a thread up if we want it to get
2566 * killed in this lifetime. Be careful to avoid bumping the
2567 * priority of the idle thread, since we still allow signals to
2568 * be sent to kernel processes.
2569 */
2570 if (action == SIG_DFL && (prop & SIGPROP_KILL) != 0 &&
2571 td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2572 sched_prio(td, PUSER);
2573 if (TD_ON_SLEEPQ(td)) {
2574 /*
2575 * If thread is sleeping uninterruptibly
2576 * we can't interrupt the sleep... the signal will
2577 * be noticed when the process returns through
2578 * trap() or syscall().
2579 */
2580 if ((td->td_flags & TDF_SINTR) == 0)
2581 goto out;
2582 /*
2583 * If SIGCONT is default (or ignored) and process is
2584 * asleep, we are finished; the process should not
2585 * be awakened.
2586 */
2587 if ((prop & SIGPROP_CONT) && action == SIG_DFL) {
2588 thread_unlock(td);
2589 PROC_SUNLOCK(p);
2590 sigqueue_delete(&p->p_sigqueue, sig);
2591 /*
2592 * It may be on either list in this state.
2593 * Remove from both for now.
2594 */
2595 sigqueue_delete(&td->td_sigqueue, sig);
2596 return;
2597 }
2598
2599 /*
2600 * Don't awaken a sleeping thread for SIGSTOP if the
2601 * STOP signal is deferred.
2602 */
2603 if ((prop & SIGPROP_STOP) != 0 && (td->td_flags & (TDF_SBDRY |
2604 TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
2605 goto out;
2606
2607 /*
2608 * Give low priority threads a better chance to run.
2609 */
2610 if (td->td_priority > PUSER && !TD_IS_IDLETHREAD(td))
2611 sched_prio(td, PUSER);
2612
2613 sig_sleepq_abort(td, intrval);
2614 PROC_SUNLOCK(p);
2615 return;
2616 }
2617
2618 /*
2619 * Other states do nothing with the signal immediately,
2620 * other than kicking ourselves if we are running.
2621 * It will either never be noticed, or noticed very soon.
2622 */
2623 #ifdef SMP
2624 if (TD_IS_RUNNING(td) && td != curthread)
2625 forward_signal(td);
2626 #endif
2627
2628 out:
2629 PROC_SUNLOCK(p);
2630 thread_unlock(td);
2631 }
2632
2633 static void
2634 ptrace_coredumpreq(struct thread *td, struct proc *p,
2635 struct thr_coredump_req *tcq)
2636 {
2637 void *rl_cookie;
2638
2639 if (p->p_sysent->sv_coredump == NULL) {
2640 tcq->tc_error = ENOSYS;
2641 return;
2642 }
2643
2644 rl_cookie = vn_rangelock_wlock(tcq->tc_vp, 0, OFF_MAX);
2645 tcq->tc_error = p->p_sysent->sv_coredump(td, tcq->tc_vp,
2646 tcq->tc_limit, tcq->tc_flags);
2647 vn_rangelock_unlock(tcq->tc_vp, rl_cookie);
2648 }
2649
2650 static void
2651 ptrace_syscallreq(struct thread *td, struct proc *p,
2652 struct thr_syscall_req *tsr)
2653 {
2654 struct sysentvec *sv;
2655 struct sysent *se;
2656 register_t rv_saved[2];
2657 int error, nerror;
2658 int sc;
2659 bool audited, sy_thr_static;
2660
2661 sv = p->p_sysent;
2662 if (sv->sv_table == NULL || sv->sv_size < tsr->ts_sa.code) {
2663 tsr->ts_ret.sr_error = ENOSYS;
2664 return;
2665 }
2666
2667 sc = tsr->ts_sa.code;
2668 if (sc == SYS_syscall || sc == SYS___syscall) {
2669 sc = tsr->ts_sa.args[0];
2670 memmove(&tsr->ts_sa.args[0], &tsr->ts_sa.args[1],
2671 sizeof(register_t) * (tsr->ts_nargs - 1));
2672 }
2673
2674 tsr->ts_sa.callp = se = &sv->sv_table[sc];
2675
2676 VM_CNT_INC(v_syscall);
2677 td->td_pticks = 0;
2678 if (__predict_false(td->td_cowgen != atomic_load_int(
2679 &td->td_proc->p_cowgen)))
2680 thread_cow_update(td);
2681
2682 td->td_sa = tsr->ts_sa;
2683
2684 #ifdef CAPABILITY_MODE
2685 if ((se->sy_flags & SYF_CAPENABLED) == 0) {
2686 if (CAP_TRACING(td))
2687 ktrcapfail(CAPFAIL_SYSCALL, NULL);
2688 if (IN_CAPABILITY_MODE(td)) {
2689 tsr->ts_ret.sr_error = ECAPMODE;
2690 return;
2691 }
2692 }
2693 #endif
2694
2695 sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0;
2696 audited = AUDIT_SYSCALL_ENTER(sc, td) != 0;
2697
2698 if (!sy_thr_static) {
2699 error = syscall_thread_enter(td, &se);
2700 sy_thr_static = (se->sy_thrcnt & SY_THR_STATIC) != 0;
2701 if (error != 0) {
2702 tsr->ts_ret.sr_error = error;
2703 return;
2704 }
2705 }
2706
2707 rv_saved[0] = td->td_retval[0];
2708 rv_saved[1] = td->td_retval[1];
2709 nerror = td->td_errno;
2710 td->td_retval[0] = 0;
2711 td->td_retval[1] = 0;
2712
2713 #ifdef KDTRACE_HOOKS
2714 if (se->sy_entry != 0)
2715 (*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_ENTRY, 0);
2716 #endif
2717 tsr->ts_ret.sr_error = se->sy_call(td, tsr->ts_sa.args);
2718 #ifdef KDTRACE_HOOKS
2719 if (se->sy_return != 0)
2720 (*systrace_probe_func)(&tsr->ts_sa, SYSTRACE_RETURN,
2721 tsr->ts_ret.sr_error != 0 ? -1 : td->td_retval[0]);
2722 #endif
2723
2724 tsr->ts_ret.sr_retval[0] = td->td_retval[0];
2725 tsr->ts_ret.sr_retval[1] = td->td_retval[1];
2726 td->td_retval[0] = rv_saved[0];
2727 td->td_retval[1] = rv_saved[1];
2728 td->td_errno = nerror;
2729
2730 if (audited)
2731 AUDIT_SYSCALL_EXIT(error, td);
2732 if (!sy_thr_static)
2733 syscall_thread_exit(td, se);
2734 }
2735
2736 static void
2737 ptrace_remotereq(struct thread *td, int flag)
2738 {
2739 struct proc *p;
2740
2741 MPASS(td == curthread);
2742 p = td->td_proc;
2743 PROC_LOCK_ASSERT(p, MA_OWNED);
2744 if ((td->td_dbgflags & flag) == 0)
2745 return;
2746 KASSERT((p->p_flag & P_STOPPED_TRACE) != 0, ("not stopped"));
2747 KASSERT(td->td_remotereq != NULL, ("td_remotereq is NULL"));
2748
2749 PROC_UNLOCK(p);
2750 switch (flag) {
2751 case TDB_COREDUMPREQ:
2752 ptrace_coredumpreq(td, p, td->td_remotereq);
2753 break;
2754 case TDB_SCREMOTEREQ:
2755 ptrace_syscallreq(td, p, td->td_remotereq);
2756 break;
2757 default:
2758 __unreachable();
2759 }
2760 PROC_LOCK(p);
2761
2762 MPASS((td->td_dbgflags & flag) != 0);
2763 td->td_dbgflags &= ~flag;
2764 td->td_remotereq = NULL;
2765 wakeup(p);
2766 }
2767
2768 static void
2769 sig_suspend_threads(struct thread *td, struct proc *p)
2770 {
2771 struct thread *td2;
2772
2773 PROC_LOCK_ASSERT(p, MA_OWNED);
2774 PROC_SLOCK_ASSERT(p, MA_OWNED);
2775
2776 FOREACH_THREAD_IN_PROC(p, td2) {
2777 thread_lock(td2);
2778 ast_sched_locked(td2, TDA_SUSPEND);
2779 if (TD_IS_SLEEPING(td2) && (td2->td_flags & TDF_SINTR) != 0) {
2780 if (td2->td_flags & TDF_SBDRY) {
2781 /*
2782 * Once a thread is asleep with
2783 * TDF_SBDRY and without TDF_SERESTART
2784 * or TDF_SEINTR set, it should never
2785 * become suspended due to this check.
2786 */
2787 KASSERT(!TD_IS_SUSPENDED(td2),
2788 ("thread with deferred stops suspended"));
2789 if (TD_SBDRY_INTR(td2)) {
2790 sleepq_abort(td2, TD_SBDRY_ERRNO(td2));
2791 continue;
2792 }
2793 } else if (!TD_IS_SUSPENDED(td2))
2794 thread_suspend_one(td2);
2795 } else if (!TD_IS_SUSPENDED(td2)) {
2796 #ifdef SMP
2797 if (TD_IS_RUNNING(td2) && td2 != td)
2798 forward_signal(td2);
2799 #endif
2800 }
2801 thread_unlock(td2);
2802 }
2803 }
2804
2805 /*
2806 * Stop the process for an event deemed interesting to the debugger. If si is
2807 * non-NULL, this is a signal exchange; the new signal requested by the
2808 * debugger will be returned for handling. If si is NULL, this is some other
2809 * type of interesting event. The debugger may request a signal be delivered in
2810 * that case as well, however it will be deferred until it can be handled.
2811 */
2812 int
2813 ptracestop(struct thread *td, int sig, ksiginfo_t *si)
2814 {
2815 struct proc *p = td->td_proc;
2816 struct thread *td2;
2817 ksiginfo_t ksi;
2818
2819 PROC_LOCK_ASSERT(p, MA_OWNED);
2820 KASSERT(!(p->p_flag & P_WEXIT), ("Stopping exiting process"));
2821 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
2822 &p->p_mtx.lock_object, "Stopping for traced signal");
2823
2824 td->td_xsig = sig;
2825
2826 if (si == NULL || (si->ksi_flags & KSI_PTRACE) == 0) {
2827 td->td_dbgflags |= TDB_XSIG;
2828 CTR4(KTR_PTRACE, "ptracestop: tid %d (pid %d) flags %#x sig %d",
2829 td->td_tid, p->p_pid, td->td_dbgflags, sig);
2830 PROC_SLOCK(p);
2831 while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
2832 if (P_KILLED(p)) {
2833 /*
2834 * Ensure that, if we've been PT_KILLed, the
2835 * exit status reflects that. Another thread
2836 * may also be in ptracestop(), having just
2837 * received the SIGKILL, but this thread was
2838 * unsuspended first.
2839 */
2840 td->td_dbgflags &= ~TDB_XSIG;
2841 td->td_xsig = SIGKILL;
2842 p->p_ptevents = 0;
2843 break;
2844 }
2845 if (p->p_flag & P_SINGLE_EXIT &&
2846 !(td->td_dbgflags & TDB_EXIT)) {
2847 /*
2848 * Ignore ptrace stops except for thread exit
2849 * events when the process exits.
2850 */
2851 td->td_dbgflags &= ~TDB_XSIG;
2852 PROC_SUNLOCK(p);
2853 return (0);
2854 }
2855
2856 /*
2857 * Make wait(2) work. Ensure that right after the
2858 * attach, the thread chosen to become the leader of
2859 * the attach gets reported to the waiter.
2860 * Otherwise, just avoid overwriting another thread's
2861 * assignment to p_xthread. If another thread has
2862 * already set p_xthread, the current thread will get
2863 * a chance to report itself upon the next iteration.
2864 */
2865 if ((td->td_dbgflags & TDB_FSTP) != 0 ||
2866 ((p->p_flag2 & P2_PTRACE_FSTP) == 0 &&
2867 p->p_xthread == NULL)) {
2868 p->p_xsig = sig;
2869 p->p_xthread = td;
2870
2871 /*
2872 * If we are on sleepqueue already,
2873 * let sleepqueue code decide if it
2874 * needs to go sleep after attach.
2875 */
2876 if (td->td_wchan == NULL)
2877 td->td_dbgflags &= ~TDB_FSTP;
2878
2879 p->p_flag2 &= ~P2_PTRACE_FSTP;
2880 p->p_flag |= P_STOPPED_SIG | P_STOPPED_TRACE;
2881 sig_suspend_threads(td, p);
2882 }
2883 if ((td->td_dbgflags & TDB_STOPATFORK) != 0) {
2884 td->td_dbgflags &= ~TDB_STOPATFORK;
2885 }
2886 stopme:
2887 td->td_dbgflags |= TDB_SSWITCH;
2888 thread_suspend_switch(td, p);
2889 td->td_dbgflags &= ~TDB_SSWITCH;
2890 if ((td->td_dbgflags & (TDB_COREDUMPREQ |
2891 TDB_SCREMOTEREQ)) != 0) {
2892 MPASS((td->td_dbgflags & (TDB_COREDUMPREQ |
2893 TDB_SCREMOTEREQ)) !=
2894 (TDB_COREDUMPREQ | TDB_SCREMOTEREQ));
2895 PROC_SUNLOCK(p);
2896 ptrace_remotereq(td, td->td_dbgflags &
2897 (TDB_COREDUMPREQ | TDB_SCREMOTEREQ));
2898 PROC_SLOCK(p);
2899 goto stopme;
2900 }
2901 if (p->p_xthread == td)
2902 p->p_xthread = NULL;
2903 if (!(p->p_flag & P_TRACED))
2904 break;
2905 if (td->td_dbgflags & TDB_SUSPEND) {
2906 if (p->p_flag & P_SINGLE_EXIT)
2907 break;
2908 goto stopme;
2909 }
2910 }
2911 PROC_SUNLOCK(p);
2912 }
2913
2914 if (si != NULL && sig == td->td_xsig) {
2915 /* Parent wants us to take the original signal unchanged. */
2916 si->ksi_flags |= KSI_HEAD;
2917 if (sigqueue_add(&td->td_sigqueue, sig, si) != 0)
2918 si->ksi_signo = 0;
2919 } else if (td->td_xsig != 0) {
2920 /*
2921 * If parent wants us to take a new signal, then it will leave
2922 * it in td->td_xsig; otherwise we just look for signals again.
2923 */
2924 ksiginfo_init(&ksi);
2925 ksi.ksi_signo = td->td_xsig;
2926 ksi.ksi_flags |= KSI_PTRACE;
2927 td2 = sigtd(p, td->td_xsig, false);
2928 tdsendsignal(p, td2, td->td_xsig, &ksi);
2929 if (td != td2)
2930 return (0);
2931 }
2932
2933 return (td->td_xsig);
2934 }
2935
2936 static void
2937 reschedule_signals(struct proc *p, sigset_t block, int flags)
2938 {
2939 struct sigacts *ps;
2940 struct thread *td;
2941 int sig;
2942 bool fastblk, pslocked;
2943
2944 PROC_LOCK_ASSERT(p, MA_OWNED);
2945 ps = p->p_sigacts;
2946 pslocked = (flags & SIGPROCMASK_PS_LOCKED) != 0;
2947 mtx_assert(&ps->ps_mtx, pslocked ? MA_OWNED : MA_NOTOWNED);
2948 if (SIGISEMPTY(p->p_siglist))
2949 return;
2950 SIGSETAND(block, p->p_siglist);
2951 fastblk = (flags & SIGPROCMASK_FASTBLK) != 0;
2952 SIG_FOREACH(sig, &block) {
2953 td = sigtd(p, sig, fastblk);
2954
2955 /*
2956 * If sigtd() selected us even though sigfastblock is
2957 * blocking, do not activate the AST or wake us, to
2958 * avoid a loop in the AST handler.
2959 */
2960 if (fastblk && td == curthread)
2961 continue;
2962
2963 signotify(td);
2964 if (!pslocked)
2965 mtx_lock(&ps->ps_mtx);
2966 if (p->p_flag & P_TRACED ||
2967 (SIGISMEMBER(ps->ps_sigcatch, sig) &&
2968 !SIGISMEMBER(td->td_sigmask, sig))) {
2969 tdsigwakeup(td, sig, SIG_CATCH,
2970 (SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR :
2971 ERESTART));
2972 }
2973 if (!pslocked)
2974 mtx_unlock(&ps->ps_mtx);
2975 }
2976 }
2977
2978 void
2979 tdsigcleanup(struct thread *td)
2980 {
2981 struct proc *p;
2982 sigset_t unblocked;
2983
2984 p = td->td_proc;
2985 PROC_LOCK_ASSERT(p, MA_OWNED);
2986
2987 sigqueue_flush(&td->td_sigqueue);
2988 if (p->p_numthreads == 1)
2989 return;
2990
2991 /*
2992 * Since we cannot handle signals, notify signal post code
2993 * about this by filling the sigmask.
2994 *
2995 * Also, if needed, wake up thread(s) that do not block the
2996 * same signals as the exiting thread, since the thread might
2997 * have been selected for delivery and woken up.
2998 */
2999 SIGFILLSET(unblocked);
3000 SIGSETNAND(unblocked, td->td_sigmask);
3001 SIGFILLSET(td->td_sigmask);
3002 reschedule_signals(p, unblocked, 0);
3003
3004 }
3005
3006 static int
3007 sigdeferstop_curr_flags(int cflags)
3008 {
3009
3010 MPASS((cflags & (TDF_SEINTR | TDF_SERESTART)) == 0 ||
3011 (cflags & TDF_SBDRY) != 0);
3012 return (cflags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART));
3013 }
3014
3015 /*
3016 * Defer the delivery of SIGSTOP for the current thread, according to
3017 * the requested mode. Returns previous flags, which must be restored
3018 * by sigallowstop().
3019 *
3020 * TDF_SBDRY, TDF_SEINTR, and TDF_SERESTART flags are only set and
3021 * cleared by the current thread, which allow the lock-less read-only
3022 * accesses below.
3023 */
3024 int
3025 sigdeferstop_impl(int mode)
3026 {
3027 struct thread *td;
3028 int cflags, nflags;
3029
3030 td = curthread;
3031 cflags = sigdeferstop_curr_flags(td->td_flags);
3032 switch (mode) {
3033 case SIGDEFERSTOP_NOP:
3034 nflags = cflags;
3035 break;
3036 case SIGDEFERSTOP_OFF:
3037 nflags = 0;
3038 break;
3039 case SIGDEFERSTOP_SILENT:
3040 nflags = (cflags | TDF_SBDRY) & ~(TDF_SEINTR | TDF_SERESTART);
3041 break;
3042 case SIGDEFERSTOP_EINTR:
3043 nflags = (cflags | TDF_SBDRY | TDF_SEINTR) & ~TDF_SERESTART;
3044 break;
3045 case SIGDEFERSTOP_ERESTART:
3046 nflags = (cflags | TDF_SBDRY | TDF_SERESTART) & ~TDF_SEINTR;
3047 break;
3048 default:
3049 panic("sigdeferstop: invalid mode %x", mode);
3050 break;
3051 }
3052 if (cflags == nflags)
3053 return (SIGDEFERSTOP_VAL_NCHG);
3054 thread_lock(td);
3055 td->td_flags = (td->td_flags & ~cflags) | nflags;
3056 thread_unlock(td);
3057 return (cflags);
3058 }
3059
3060 /*
3061 * Restores the STOP handling mode, typically permitting the delivery
3062 * of SIGSTOP for the current thread. This does not immediately
3063 * suspend if a stop was posted. Instead, the thread will suspend
3064 * either via ast() or a subsequent interruptible sleep.
3065 */
3066 void
3067 sigallowstop_impl(int prev)
3068 {
3069 struct thread *td;
3070 int cflags;
3071
3072 KASSERT(prev != SIGDEFERSTOP_VAL_NCHG, ("failed sigallowstop"));
3073 KASSERT((prev & ~(TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
3074 ("sigallowstop: incorrect previous mode %x", prev));
3075 td = curthread;
3076 cflags = sigdeferstop_curr_flags(td->td_flags);
3077 if (cflags != prev) {
3078 thread_lock(td);
3079 td->td_flags = (td->td_flags & ~cflags) | prev;
3080 thread_unlock(td);
3081 }
3082 }
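/*
 * Usage sketch (for illustration only), assuming the sigdeferstop() and
 * sigallowstop() wrappers from <sys/signalvar.h>: a thread that must not
 * be stopped across a sleep brackets the region with the returned cookie.
 * some_interruptible_operation() is a hypothetical placeholder.
 *
 *	int sds;
 *
 *	sds = sigdeferstop(SIGDEFERSTOP_SILENT);
 *	error = some_interruptible_operation();	// may sleep, won't stop
 *	sigallowstop(sds);
 */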
3083
3084 enum sigstatus {
3085 SIGSTATUS_HANDLE,
3086 SIGSTATUS_HANDLED,
3087 SIGSTATUS_IGNORE,
3088 SIGSTATUS_SBDRY_STOP,
3089 };
3090
3091 /*
3092 * The thread has signal "sig" pending. Figure out what to do with it:
3093 *
3094 * _HANDLE -> the caller should handle the signal
3095 * _HANDLED -> handled internally, reload pending signal set
3096 * _IGNORE -> ignored, remove from the set of pending signals and try the
3097 * next pending signal
3098 * _SBDRY_STOP -> the signal should stop the thread but this is not
3099 * permitted in the current context
3100 */
3101 static enum sigstatus
3102 sigprocess(struct thread *td, int sig)
3103 {
3104 struct proc *p;
3105 struct sigacts *ps;
3106 struct sigqueue *queue;
3107 ksiginfo_t ksi;
3108 int prop;
3109
3110 KASSERT(_SIG_VALID(sig), ("%s: invalid signal %d", __func__, sig));
3111
3112 p = td->td_proc;
3113 ps = p->p_sigacts;
3114 mtx_assert(&ps->ps_mtx, MA_OWNED);
3115 PROC_LOCK_ASSERT(p, MA_OWNED);
3116
3117 /*
3118 * We should allow pending but ignored signals below
3119 * if a sigwait() is active, or if P_TRACED was
3120 * set when they were posted.
3121 */
3122 if (SIGISMEMBER(ps->ps_sigignore, sig) &&
3123 (p->p_flag & P_TRACED) == 0 &&
3124 (td->td_flags & TDF_SIGWAIT) == 0) {
3125 return (SIGSTATUS_IGNORE);
3126 }
3127
3128 /*
3129 * If the process is going to single-thread mode to prepare
3130 * for exit, there is no sense in delivering any signal
3131 * to usermode. Another important consequence is that
3132 * msleep(..., PCATCH, ...) now is only interruptible by a
3133 * suspend request.
3134 */
3135 if ((p->p_flag2 & P2_WEXIT) != 0)
3136 return (SIGSTATUS_IGNORE);
3137
3138 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED) {
3139 /*
3140 * If traced, always stop.
3141 * Remove old signal from queue before the stop.
3142 * XXX shrug off debugger, it causes siginfo to
3143 * be thrown away.
3144 */
3145 queue = &td->td_sigqueue;
3146 ksiginfo_init(&ksi);
3147 if (sigqueue_get(queue, sig, &ksi) == 0) {
3148 queue = &p->p_sigqueue;
3149 sigqueue_get(queue, sig, &ksi);
3150 }
3151 td->td_si = ksi.ksi_info;
3152
3153 mtx_unlock(&ps->ps_mtx);
3154 sig = ptracestop(td, sig, &ksi);
3155 mtx_lock(&ps->ps_mtx);
3156
3157 td->td_si.si_signo = 0;
3158
3159 /*
3160 * Keep looking if the debugger discarded or
3161 * replaced the signal.
3162 */
3163 if (sig == 0)
3164 return (SIGSTATUS_HANDLED);
3165
3166 /*
3167 * If the signal became masked, re-queue it.
3168 */
3169 if (SIGISMEMBER(td->td_sigmask, sig)) {
3170 ksi.ksi_flags |= KSI_HEAD;
3171 sigqueue_add(&p->p_sigqueue, sig, &ksi);
3172 return (SIGSTATUS_HANDLED);
3173 }
3174
3175 /*
3176 * If the traced bit got turned off, requeue the signal and
3177 * reload the set of pending signals. This ensures that p_sig*
3178 * and p_sigact are consistent.
3179 */
3180 if ((p->p_flag & P_TRACED) == 0) {
3181 if ((ksi.ksi_flags & KSI_PTRACE) == 0) {
3182 ksi.ksi_flags |= KSI_HEAD;
3183 sigqueue_add(queue, sig, &ksi);
3184 }
3185 return (SIGSTATUS_HANDLED);
3186 }
3187 }
3188
3189 /*
3190 * Decide whether the signal should be returned.
3191 * Return the signal's number, or fall through
3192 * to clear it from the pending mask.
3193 */
3194 switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
3195 case (intptr_t)SIG_DFL:
3196 /*
3197 * Don't take default actions on system processes.
3198 */
3199 if (p->p_pid <= 1) {
3200 #ifdef DIAGNOSTIC
3201 /*
3202 * Are you sure you want to ignore SIGSEGV
3203 * in init? XXX
3204 */
3205 printf("Process (pid %lu) got signal %d\n",
3206 (u_long)p->p_pid, sig);
3207 #endif
3208 return (SIGSTATUS_IGNORE);
3209 }
3210
3211 /*
3212 * If there is a pending stop signal to process with
3213 * default action, stop here, then clear the signal.
3214 * Traced or exiting processes should ignore stops.
3215 * Additionally, a member of an orphaned process group
3216 * should ignore tty stops.
3217 */
3218 prop = sigprop(sig);
3219 if (prop & SIGPROP_STOP) {
3220 mtx_unlock(&ps->ps_mtx);
3221 if ((p->p_flag & (P_TRACED | P_WEXIT |
3222 P_SINGLE_EXIT)) != 0 || ((p->p_pgrp->
3223 pg_flags & PGRP_ORPHANED) != 0 &&
3224 (prop & SIGPROP_TTYSTOP) != 0)) {
3225 mtx_lock(&ps->ps_mtx);
3226 return (SIGSTATUS_IGNORE);
3227 }
3228 if (TD_SBDRY_INTR(td)) {
3229 KASSERT((td->td_flags & TDF_SBDRY) != 0,
3230 ("lost TDF_SBDRY"));
3231 mtx_lock(&ps->ps_mtx);
3232 return (SIGSTATUS_SBDRY_STOP);
3233 }
3234 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
3235 &p->p_mtx.lock_object, "Catching SIGSTOP");
3236 sigqueue_delete(&td->td_sigqueue, sig);
3237 sigqueue_delete(&p->p_sigqueue, sig);
3238 p->p_flag |= P_STOPPED_SIG;
3239 p->p_xsig = sig;
3240 PROC_SLOCK(p);
3241 sig_suspend_threads(td, p);
3242 thread_suspend_switch(td, p);
3243 PROC_SUNLOCK(p);
3244 mtx_lock(&ps->ps_mtx);
3245 return (SIGSTATUS_HANDLED);
3246 } else if ((prop & SIGPROP_IGNORE) != 0 &&
3247 (td->td_flags & TDF_SIGWAIT) == 0) {
3248 /*
3249 * Default action is to ignore; drop it if
3250 * not in kern_sigtimedwait().
3251 */
3252 return (SIGSTATUS_IGNORE);
3253 } else {
3254 return (SIGSTATUS_HANDLE);
3255 }
3256
3257 case (intptr_t)SIG_IGN:
3258 if ((td->td_flags & TDF_SIGWAIT) == 0)
3259 return (SIGSTATUS_IGNORE);
3260 else
3261 return (SIGSTATUS_HANDLE);
3262
3263 default:
3264 /*
3265 * This signal has an action, let postsig() process it.
3266 */
3267 return (SIGSTATUS_HANDLE);
3268 }
3269 }
3270
3271 /*
3272 * If the current process has received a signal (should be caught or cause
3273 * termination, should interrupt current syscall), return the signal number.
3274 * Stop signals with default action are processed immediately, then cleared;
3275 * they aren't returned. This is checked after each entry to the system for
3276 * a syscall or trap (though this can usually be done without calling
3277 * issignal by checking the pending signal masks in cursig.) The normal call
3278 * sequence is
3279 *
3280 * while (sig = cursig(curthread))
3281 * postsig(sig);
3282 */
3283 static int
3284 issignal(struct thread *td)
3285 {
3286 struct proc *p;
3287 sigset_t sigpending;
3288 int sig;
3289
3290 p = td->td_proc;
3291 PROC_LOCK_ASSERT(p, MA_OWNED);
3292
3293 for (;;) {
3294 sigpending = td->td_sigqueue.sq_signals;
3295 SIGSETOR(sigpending, p->p_sigqueue.sq_signals);
3296 SIGSETNAND(sigpending, td->td_sigmask);
3297
3298 if ((p->p_flag & P_PPWAIT) != 0 || (td->td_flags &
3299 (TDF_SBDRY | TDF_SERESTART | TDF_SEINTR)) == TDF_SBDRY)
3300 SIG_STOPSIGMASK(sigpending);
3301 if (SIGISEMPTY(sigpending)) /* no signal to send */
3302 return (0);
3303
3304 /*
3305 * Do fast sigblock if requested by usermode. Since
3306 * we do know that there was a signal pending at this
3307 * point, set the FAST_SIGBLOCK_PEND as indicator for
3308 * usermode to perform a dummy call to
3309 * FAST_SIGBLOCK_UNBLOCK, which causes immediate
3310 * delivery of postponed pending signal.
3311 */
3312 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
3313 if (td->td_sigblock_val != 0)
3314 SIGSETNAND(sigpending, fastblock_mask);
3315 if (SIGISEMPTY(sigpending)) {
3316 td->td_pflags |= TDP_SIGFASTPENDING;
3317 return (0);
3318 }
3319 }
3320
3321 if ((p->p_flag & (P_TRACED | P_PPTRACE)) == P_TRACED &&
3322 (p->p_flag2 & P2_PTRACE_FSTP) != 0 &&
3323 SIGISMEMBER(sigpending, SIGSTOP)) {
3324 /*
3325 * If debugger just attached, always consume
3326 * SIGSTOP from ptrace(PT_ATTACH) first, to
3327 * execute the debugger attach ritual in
3328 * order.
3329 */
3330 td->td_dbgflags |= TDB_FSTP;
3331 SIGEMPTYSET(sigpending);
3332 SIGADDSET(sigpending, SIGSTOP);
3333 }
3334
3335 SIG_FOREACH(sig, &sigpending) {
3336 switch (sigprocess(td, sig)) {
3337 case SIGSTATUS_HANDLE:
3338 return (sig);
3339 case SIGSTATUS_HANDLED:
3340 goto next;
3341 case SIGSTATUS_IGNORE:
3342 sigqueue_delete(&td->td_sigqueue, sig);
3343 sigqueue_delete(&p->p_sigqueue, sig);
3344 break;
3345 case SIGSTATUS_SBDRY_STOP:
3346 return (-1);
3347 }
3348 }
3349 next:;
3350 }
3351 }
3352
3353 void
3354 thread_stopped(struct proc *p)
3355 {
3356 int n;
3357
3358 PROC_LOCK_ASSERT(p, MA_OWNED);
3359 PROC_SLOCK_ASSERT(p, MA_OWNED);
3360 n = p->p_suspcount;
3361 if (p == curproc)
3362 n++;
3363 if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
3364 PROC_SUNLOCK(p);
3365 p->p_flag &= ~P_WAITED;
3366 PROC_LOCK(p->p_pptr);
3367 childproc_stopped(p, (p->p_flag & P_TRACED) ?
3368 CLD_TRAPPED : CLD_STOPPED);
3369 PROC_UNLOCK(p->p_pptr);
3370 PROC_SLOCK(p);
3371 }
3372 }
3373
3374 /*
3375 * Take the action for the specified signal
3376 * from the current set of pending signals.
3377 */
3378 int
3379 postsig(int sig)
3380 {
3381 struct thread *td;
3382 struct proc *p;
3383 struct sigacts *ps;
3384 sig_t action;
3385 ksiginfo_t ksi;
3386 sigset_t returnmask;
3387
3388 KASSERT(sig != 0, ("postsig"));
3389
3390 td = curthread;
3391 p = td->td_proc;
3392 PROC_LOCK_ASSERT(p, MA_OWNED);
3393 ps = p->p_sigacts;
3394 mtx_assert(&ps->ps_mtx, MA_OWNED);
3395 ksiginfo_init(&ksi);
3396 if (sigqueue_get(&td->td_sigqueue, sig, &ksi) == 0 &&
3397 sigqueue_get(&p->p_sigqueue, sig, &ksi) == 0)
3398 return (0);
3399 ksi.ksi_signo = sig;
3400 if (ksi.ksi_code == SI_TIMER)
3401 itimer_accept(p, ksi.ksi_timerid, &ksi);
3402 action = ps->ps_sigact[_SIG_IDX(sig)];
3403 #ifdef KTRACE
3404 if (KTRPOINT(td, KTR_PSIG))
3405 ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ?
3406 &td->td_oldsigmask : &td->td_sigmask, ksi.ksi_code);
3407 #endif
3408
3409 if (action == SIG_DFL) {
3410 /*
3411 * Default action, where the default is to kill
3412 * the process. (Other cases were ignored above.)
3413 */
3414 mtx_unlock(&ps->ps_mtx);
3415 proc_td_siginfo_capture(td, &ksi.ksi_info);
3416 sigexit(td, sig);
3417 /* NOTREACHED */
3418 } else {
3419 /*
3420 * If we get here, the signal must be caught.
3421 */
3422 KASSERT(action != SIG_IGN, ("postsig action %p", action));
3423 KASSERT(!SIGISMEMBER(td->td_sigmask, sig),
3424 ("postsig action: blocked sig %d", sig));
3425
3426 /*
3427 * Set the new mask value and also defer further
3428 * occurrences of this signal.
3429 *
3430 * Special case: user has done a sigsuspend. Here the
3431 * current mask is not of interest, but rather the
3432 * mask from before the sigsuspend is what we want
3433 * restored after the signal processing is completed.
3434 */
3435 if (td->td_pflags & TDP_OLDMASK) {
3436 returnmask = td->td_oldsigmask;
3437 td->td_pflags &= ~TDP_OLDMASK;
3438 } else
3439 returnmask = td->td_sigmask;
3440
3441 if (p->p_sig == sig) {
3442 p->p_sig = 0;
3443 }
3444 (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
3445 postsig_done(sig, td, ps);
3446 }
3447 return (1);
3448 }
3449
3450 int
3451 sig_ast_checksusp(struct thread *td)
3452 {
3453 struct proc *p __diagused;
3454 int ret;
3455
3456 p = td->td_proc;
3457 PROC_LOCK_ASSERT(p, MA_OWNED);
3458
3459 if (!td_ast_pending(td, TDA_SUSPEND))
3460 return (0);
3461
3462 ret = thread_suspend_check(1);
3463 MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
3464 return (ret);
3465 }
3466
3467 int
3468 sig_ast_needsigchk(struct thread *td)
3469 {
3470 struct proc *p;
3471 struct sigacts *ps;
3472 int ret, sig;
3473
3474 p = td->td_proc;
3475 PROC_LOCK_ASSERT(p, MA_OWNED);
3476
3477 if (!td_ast_pending(td, TDA_SIG))
3478 return (0);
3479
3480 ps = p->p_sigacts;
3481 mtx_lock(&ps->ps_mtx);
3482 sig = cursig(td);
3483 if (sig == -1) {
3484 mtx_unlock(&ps->ps_mtx);
3485 KASSERT((td->td_flags & TDF_SBDRY) != 0, ("lost TDF_SBDRY"));
3486 KASSERT(TD_SBDRY_INTR(td),
3487 ("lost TDF_SERESTART of TDF_SEINTR"));
3488 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
3489 (TDF_SEINTR | TDF_SERESTART),
3490 ("both TDF_SEINTR and TDF_SERESTART"));
3491 ret = TD_SBDRY_ERRNO(td);
3492 } else if (sig != 0) {
3493 ret = SIGISMEMBER(ps->ps_sigintr, sig) ? EINTR : ERESTART;
3494 mtx_unlock(&ps->ps_mtx);
3495 } else {
3496 mtx_unlock(&ps->ps_mtx);
3497 ret = 0;
3498 }
3499
3500 /*
3501 * Do not go into sleep if this thread was the ptrace(2)
3502 * attach leader. cursig() consumed SIGSTOP from PT_ATTACH,
3503 * but we usually act on the signal by interrupting sleep, and
3504 * should do that here as well.
3505 */
3506 if ((td->td_dbgflags & TDB_FSTP) != 0) {
3507 if (ret == 0)
3508 ret = EINTR;
3509 td->td_dbgflags &= ~TDB_FSTP;
3510 }
3511
3512 return (ret);
3513 }
3514
3515 int
3516 sig_intr(void)
3517 {
3518 struct thread *td;
3519 struct proc *p;
3520 int ret;
3521
3522 td = curthread;
3523 if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND))
3524 return (0);
3525
3526 p = td->td_proc;
3527
3528 PROC_LOCK(p);
3529 ret = sig_ast_checksusp(td);
3530 if (ret == 0)
3531 ret = sig_ast_needsigchk(td);
3532 PROC_UNLOCK(p);
3533 return (ret);
3534 }
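/*
 * Usage sketch (for illustration only): long-running kernel loops that do
 * not otherwise sleep interruptibly can poll sig_intr() to bail out early
 * when a signal or suspension is pending.  work_remains() and
 * do_one_chunk() are hypothetical placeholders.
 *
 *	while (work_remains()) {
 *		error = sig_intr();
 *		if (error != 0)
 *			break;			// EINTR or ERESTART
 *		do_one_chunk();
 *	}
 */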
3535
3536 bool
3537 curproc_sigkilled(void)
3538 {
3539 struct thread *td;
3540 struct proc *p;
3541 struct sigacts *ps;
3542 bool res;
3543
3544 td = curthread;
3545 if (!td_ast_pending(td, TDA_SIG))
3546 return (false);
3547
3548 p = td->td_proc;
3549 PROC_LOCK(p);
3550 ps = p->p_sigacts;
3551 mtx_lock(&ps->ps_mtx);
3552 res = SIGISMEMBER(td->td_sigqueue.sq_signals, SIGKILL) ||
3553 SIGISMEMBER(p->p_sigqueue.sq_signals, SIGKILL);
3554 mtx_unlock(&ps->ps_mtx);
3555 PROC_UNLOCK(p);
3556 return (res);
3557 }
3558
3559 void
3560 proc_wkilled(struct proc *p)
3561 {
3562
3563 PROC_LOCK_ASSERT(p, MA_OWNED);
3564 if ((p->p_flag & P_WKILLED) == 0)
3565 p->p_flag |= P_WKILLED;
3566 }
3567
3568 /*
3569 * Kill the current process for stated reason.
3570 */
3571 void
3572 killproc(struct proc *p, const char *why)
3573 {
3574
3575 PROC_LOCK_ASSERT(p, MA_OWNED);
3576 CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid,
3577 p->p_comm);
3578 log(LOG_ERR, "pid %d (%s), jid %d, uid %d, was killed: %s\n",
3579 p->p_pid, p->p_comm, p->p_ucred->cr_prison->pr_id,
3580 p->p_ucred->cr_uid, why);
3581 proc_wkilled(p);
3582 kern_psignal(p, SIGKILL);
3583 }
3584
3585 /*
3586 * Force the current process to exit with the specified signal, dumping core
3587 * if appropriate. We bypass the normal tests for masked and caught signals,
3588 * allowing unrecoverable failures to terminate the process without changing
3589 * signal state. Mark the accounting record with the signal termination.
3590 * If dumping core, save the signal number for the debugger. Calls exit and
3591 * does not return.
3592 */
3593 void
3594 sigexit(struct thread *td, int sig)
3595 {
3596 struct proc *p = td->td_proc;
3597 const char *coreinfo;
3598 int rv;
3599
3600 PROC_LOCK_ASSERT(p, MA_OWNED);
3601 proc_set_p2_wexit(p);
3602
3603 p->p_acflag |= AXSIG;
3604 /*
3605 * We must be single-threading to generate a core dump. This
3606 * ensures that the registers in the core file are up-to-date.
3607 * Also, the ELF dump handler assumes that the thread list doesn't
3608 * change out from under it.
3609 *
3610 * XXX If another thread attempts to single-thread before us
3611 * (e.g. via fork()), we won't get a dump at all.
3612 */
3613 if ((sigprop(sig) & SIGPROP_CORE) &&
3614 thread_single(p, SINGLE_NO_EXIT) == 0) {
3615 p->p_sig = sig;
3616 /*
3617 * Log signals which would cause core dumps
3618 * (Log as LOG_INFO to appease those who don't want
3619 * these messages.)
3620 * XXX : Todo, as well as euid, write out ruid too
3621 * Note that coredump() drops proc lock.
3622 */
3623 rv = coredump(td);
3624 switch (rv) {
3625 case 0:
3626 sig |= WCOREFLAG;
3627 coreinfo = " (core dumped)";
3628 break;
3629 case EFAULT:
3630 coreinfo = " (no core dump - bad address)";
3631 break;
3632 case EINVAL:
3633 coreinfo = " (no core dump - invalid argument)";
3634 break;
3635 case EFBIG:
3636 coreinfo = " (no core dump - too large)";
3637 break;
3638 default:
3639 coreinfo = " (no core dump - other error)";
3640 break;
3641 }
3642 if (kern_logsigexit)
3643 log(LOG_INFO,
3644 "pid %d (%s), jid %d, uid %d: exited on "
3645 "signal %d%s\n", p->p_pid, p->p_comm,
3646 p->p_ucred->cr_prison->pr_id,
3647 td->td_ucred->cr_uid,
3648 sig &~ WCOREFLAG, coreinfo);
3649 } else
3650 PROC_UNLOCK(p);
3651 exit1(td, 0, sig);
3652 /* NOTREACHED */
3653 }
3654
3655 /*
3656 * Send queued SIGCHLD to parent when child process's state
3657 * is changed.
3658 */
3659 static void
3660 sigparent(struct proc *p, int reason, int status)
3661 {
3662 PROC_LOCK_ASSERT(p, MA_OWNED);
3663 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3664
3665 if (p->p_ksi != NULL) {
3666 p->p_ksi->ksi_signo = SIGCHLD;
3667 p->p_ksi->ksi_code = reason;
3668 p->p_ksi->ksi_status = status;
3669 p->p_ksi->ksi_pid = p->p_pid;
3670 p->p_ksi->ksi_uid = p->p_ucred->cr_ruid;
3671 if (KSI_ONQ(p->p_ksi))
3672 return;
3673 }
3674 pksignal(p->p_pptr, SIGCHLD, p->p_ksi);
3675 }
3676
3677 static void
3678 childproc_jobstate(struct proc *p, int reason, int sig)
3679 {
3680 struct sigacts *ps;
3681
3682 PROC_LOCK_ASSERT(p, MA_OWNED);
3683 PROC_LOCK_ASSERT(p->p_pptr, MA_OWNED);
3684
3685 /*
3686 * Wake up the parent sleeping in kern_wait(), and also send
3687 * SIGCHLD to the parent; SIGCHLD alone does not guarantee
3688 * that the parent will wake up, because the parent may have
3689 * masked the signal.
3690 */
3691 p->p_pptr->p_flag |= P_STATCHILD;
3692 wakeup(p->p_pptr);
3693
3694 ps = p->p_pptr->p_sigacts;
3695 mtx_lock(&ps->ps_mtx);
3696 if ((ps->ps_flag & PS_NOCLDSTOP) == 0) {
3697 mtx_unlock(&ps->ps_mtx);
3698 sigparent(p, reason, sig);
3699 } else
3700 mtx_unlock(&ps->ps_mtx);
3701 }
3702
3703 void
3704 childproc_stopped(struct proc *p, int reason)
3705 {
3706
3707 childproc_jobstate(p, reason, p->p_xsig);
3708 }
3709
3710 void
3711 childproc_continued(struct proc *p)
3712 {
3713 childproc_jobstate(p, CLD_CONTINUED, SIGCONT);
3714 }
3715
3716 void
3717 childproc_exited(struct proc *p)
3718 {
3719 int reason, status;
3720
3721 if (WCOREDUMP(p->p_xsig)) {
3722 reason = CLD_DUMPED;
3723 status = WTERMSIG(p->p_xsig);
3724 } else if (WIFSIGNALED(p->p_xsig)) {
3725 reason = CLD_KILLED;
3726 status = WTERMSIG(p->p_xsig);
3727 } else {
3728 reason = CLD_EXITED;
3729 status = p->p_xexit;
3730 }
3731 /*
3732 * XXX avoid calling wakeup(p->p_pptr), the work is
3733 * done in exit1().
3734 */
3735 sigparent(p, reason, status);
3736 }
3737
3738 #define MAX_NUM_CORE_FILES 100000
3739 #ifndef NUM_CORE_FILES
3740 #define NUM_CORE_FILES 5
3741 #endif
3742 CTASSERT(NUM_CORE_FILES >= 0 && NUM_CORE_FILES <= MAX_NUM_CORE_FILES);
3743 static int num_cores = NUM_CORE_FILES;
3744
3745 static int
3746 sysctl_debug_num_cores_check(SYSCTL_HANDLER_ARGS)
3747 {
3748 int error;
3749 int new_val;
3750
3751 new_val = num_cores;
3752 error = sysctl_handle_int(oidp, &new_val, 0, req);
3753 if (error != 0 || req->newptr == NULL)
3754 return (error);
3755 if (new_val > MAX_NUM_CORE_FILES)
3756 new_val = MAX_NUM_CORE_FILES;
3757 if (new_val < 0)
3758 new_val = 0;
3759 num_cores = new_val;
3760 return (0);
3761 }
3762 SYSCTL_PROC(_debug, OID_AUTO, ncores,
3763 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(int),
3764 sysctl_debug_num_cores_check, "I",
3765 "Maximum number of generated process corefiles while using index format");
3766
3767 #define GZIP_SUFFIX ".gz"
3768 #define ZSTD_SUFFIX ".zst"
3769
3770 int compress_user_cores = 0;
3771
3772 static int
3773 sysctl_compress_user_cores(SYSCTL_HANDLER_ARGS)
3774 {
3775 int error, val;
3776
3777 val = compress_user_cores;
3778 error = sysctl_handle_int(oidp, &val, 0, req);
3779 if (error != 0 || req->newptr == NULL)
3780 return (error);
3781 if (val != 0 && !compressor_avail(val))
3782 return (EINVAL);
3783 compress_user_cores = val;
3784 return (error);
3785 }
3786 SYSCTL_PROC(_kern, OID_AUTO, compress_user_cores,
3787 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int),
3788 sysctl_compress_user_cores, "I",
3789 "Enable compression of user corefiles ("
3790 __XSTRING(COMPRESS_GZIP) " = gzip, "
3791 __XSTRING(COMPRESS_ZSTD) " = zstd)");
3792
3793 int compress_user_cores_level = 6;
3794 SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_level, CTLFLAG_RWTUN,
3795 &compress_user_cores_level, 0,
3796 "Corefile compression level");
3797
3798 /*
3799 * Protect the access to corefilename[] by allproc_lock.
3800 */
3801 #define corefilename_lock allproc_lock
3802
3803 static char corefilename[MAXPATHLEN] = {"%N.core"};
3804 TUNABLE_STR("kern.corefile", corefilename, sizeof(corefilename));
3805
3806 static int
3807 sysctl_kern_corefile(SYSCTL_HANDLER_ARGS)
3808 {
3809 int error;
3810
3811 sx_xlock(&corefilename_lock);
3812 error = sysctl_handle_string(oidp, corefilename, sizeof(corefilename),
3813 req);
3814 sx_xunlock(&corefilename_lock);
3815
3816 return (error);
3817 }
3818 SYSCTL_PROC(_kern, OID_AUTO, corefile, CTLTYPE_STRING | CTLFLAG_RW |
3819 CTLFLAG_MPSAFE, 0, 0, sysctl_kern_corefile, "A",
3820 "Process corefile name format string");
3821
3822 static void
3823 vnode_close_locked(struct thread *td, struct vnode *vp)
3824 {
3825
3826 VOP_UNLOCK(vp);
3827 vn_close(vp, FWRITE, td->td_ucred, td);
3828 }
3829
3830 /*
3831 * If the core format has a %I in it, then we need to check
3832 * for existing corefiles before defining a name.
3833 * To do this we iterate over 0..ncores to find a
3834 * non-existing core file name to use. If all core files are
3835 * already used we choose the oldest one.
3836 */
3837 static int
3838 corefile_open_last(struct thread *td, char *name, int indexpos,
3839 int indexlen, int ncores, struct vnode **vpp)
3840 {
3841 struct vnode *oldvp, *nextvp, *vp;
3842 struct vattr vattr;
3843 struct nameidata nd;
3844 int error, i, flags, oflags, cmode;
3845 char ch;
3846 struct timespec lasttime;
3847
3848 nextvp = oldvp = NULL;
3849 cmode = S_IRUSR | S_IWUSR;
3850 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
3851 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
3852
3853 for (i = 0; i < ncores; i++) {
3854 flags = O_CREAT | FWRITE | O_NOFOLLOW;
3855
3856 ch = name[indexpos + indexlen];
3857 (void)snprintf(name + indexpos, indexlen + 1, "%.*u", indexlen,
3858 i);
3859 name[indexpos + indexlen] = ch;
3860
3861 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name);
3862 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
3863 NULL);
3864 if (error != 0)
3865 break;
3866
3867 vp = nd.ni_vp;
3868 NDFREE_PNBUF(&nd);
3869 if ((flags & O_CREAT) == O_CREAT) {
3870 nextvp = vp;
3871 break;
3872 }
3873
3874 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3875 if (error != 0) {
3876 vnode_close_locked(td, vp);
3877 break;
3878 }
3879
3880 if (oldvp == NULL ||
3881 lasttime.tv_sec > vattr.va_mtime.tv_sec ||
3882 (lasttime.tv_sec == vattr.va_mtime.tv_sec &&
3883 lasttime.tv_nsec >= vattr.va_mtime.tv_nsec)) {
3884 if (oldvp != NULL)
3885 vn_close(oldvp, FWRITE, td->td_ucred, td);
3886 oldvp = vp;
3887 VOP_UNLOCK(oldvp);
3888 lasttime = vattr.va_mtime;
3889 } else {
3890 vnode_close_locked(td, vp);
3891 }
3892 }
3893
3894 if (oldvp != NULL) {
3895 if (nextvp == NULL) {
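			/*
			 * Every index is already in use.  Set-id processes
			 * may not reuse an existing core file; everyone else
			 * overwrites the oldest one found above.
			 */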
3896 if ((td->td_proc->p_flag & P_SUGID) != 0) {
3897 error = EFAULT;
3898 vn_close(oldvp, FWRITE, td->td_ucred, td);
3899 } else {
3900 nextvp = oldvp;
3901 error = vn_lock(nextvp, LK_EXCLUSIVE);
3902 if (error != 0) {
3903 vn_close(nextvp, FWRITE, td->td_ucred,
3904 td);
3905 nextvp = NULL;
3906 }
3907 }
3908 } else {
3909 vn_close(oldvp, FWRITE, td->td_ucred, td);
3910 }
3911 }
3912 if (error != 0) {
3913 if (nextvp != NULL)
3914 			vnode_close_locked(td, nextvp);
3915 } else {
3916 *vpp = nextvp;
3917 }
3918
3919 return (error);
3920 }
3921
3922 /*
3923  * corefile_open(comm, uid, pid, td, compress, signum, vpp, namep)
3924  * Expand the name described in corefilename, using comm, uid, pid and
3925  * signum, and open/create the core file.
3926  * corefilename is a printf-like string; the format specifiers handled
3927  * below are %% (literal '%'), %H (hostname), %I (autoincrementing index),
3928  * %N (process name), %P (process id), %S (signal number) and %U (user id).
3929  * For example, "%N.core" is the default; core dumps can be disabled
3930  * completely by using "/dev/null", or all core files can be stored in
3931  * "/cores/%U/%N-%P".
3932  * This is controlled by the sysctl variable kern.corefile (see above).
3933 */
3934 static int
3935 corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td,
3936 int compress, int signum, struct vnode **vpp, char **namep)
3937 {
3938 struct sbuf sb;
3939 struct nameidata nd;
3940 const char *format;
3941 char *hostname, *name;
3942 int cmode, error, flags, i, indexpos, indexlen, oflags, ncores;
3943
3944 hostname = NULL;
3945 format = corefilename;
3946 name = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_ZERO);
3947 indexlen = 0;
3948 indexpos = -1;
3949 ncores = num_cores;
3950 (void)sbuf_new(&sb, name, MAXPATHLEN, SBUF_FIXEDLEN);
3951 sx_slock(&corefilename_lock);
3952 for (i = 0; format[i] != '\0'; i++) {
3953 switch (format[i]) {
3954 case '%': /* Format character */
3955 i++;
3956 switch (format[i]) {
3957 case '%':
3958 sbuf_putc(&sb, '%');
3959 break;
3960 case 'H': /* hostname */
3961 if (hostname == NULL) {
3962 hostname = malloc(MAXHOSTNAMELEN,
3963 M_TEMP, M_WAITOK);
3964 }
3965 getcredhostname(td->td_ucred, hostname,
3966 MAXHOSTNAMELEN);
3967 sbuf_cat(&sb, hostname);
3968 break;
3969 case 'I': /* autoincrementing index */
3970 if (indexpos != -1) {
3971 sbuf_printf(&sb, "%%I");
3972 break;
3973 }
3974
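				/*
				 * Reserve the widest field the index can
				 * need by formatting ncores - 1 now; record
				 * its position and width so that
				 * corefile_open_last() can substitute the
				 * real index later.
				 */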
3975 indexpos = sbuf_len(&sb);
3976 sbuf_printf(&sb, "%u", ncores - 1);
3977 indexlen = sbuf_len(&sb) - indexpos;
3978 break;
3979 case 'N': /* process name */
3980 sbuf_printf(&sb, "%s", comm);
3981 break;
3982 case 'P': /* process id */
3983 sbuf_printf(&sb, "%u", pid);
3984 break;
3985 case 'S': /* signal number */
3986 sbuf_printf(&sb, "%i", signum);
3987 break;
3988 case 'U': /* user id */
3989 sbuf_printf(&sb, "%u", uid);
3990 break;
3991 default:
3992 log(LOG_ERR,
3993 "Unknown format character %c in "
3994 "corename `%s'\n", format[i], format);
3995 break;
3996 }
3997 break;
3998 default:
3999 sbuf_putc(&sb, format[i]);
4000 break;
4001 }
4002 }
4003 sx_sunlock(&corefilename_lock);
4004 free(hostname, M_TEMP);
4005 if (compress == COMPRESS_GZIP)
4006 sbuf_cat(&sb, GZIP_SUFFIX);
4007 else if (compress == COMPRESS_ZSTD)
4008 sbuf_cat(&sb, ZSTD_SUFFIX);
4009 if (sbuf_error(&sb) != 0) {
4010 log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too "
4011 "long\n", (long)pid, comm, (u_long)uid);
4012 sbuf_delete(&sb);
4013 free(name, M_TEMP);
4014 return (ENOMEM);
4015 }
4016 sbuf_finish(&sb);
4017 sbuf_delete(&sb);
4018
4019 if (indexpos != -1) {
4020 error = corefile_open_last(td, name, indexpos, indexlen, ncores,
4021 vpp);
4022 if (error != 0) {
4023 log(LOG_ERR,
4024 "pid %d (%s), uid (%u): Path `%s' failed "
4025 "on initial open test, error = %d\n",
4026 pid, comm, uid, name, error);
4027 }
4028 } else {
4029 cmode = S_IRUSR | S_IWUSR;
4030 oflags = VN_OPEN_NOAUDIT | VN_OPEN_NAMECACHE |
4031 (capmode_coredump ? VN_OPEN_NOCAPCHECK : 0);
4032 flags = O_CREAT | FWRITE | O_NOFOLLOW;
4033 if ((td->td_proc->p_flag & P_SUGID) != 0)
4034 flags |= O_EXCL;
4035
4036 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name);
4037 error = vn_open_cred(&nd, &flags, cmode, oflags, td->td_ucred,
4038 NULL);
4039 if (error == 0) {
4040 *vpp = nd.ni_vp;
4041 NDFREE_PNBUF(&nd);
4042 }
4043 }
4044
4045 if (error != 0) {
4046 #ifdef AUDIT
4047 audit_proc_coredump(td, name, error);
4048 #endif
4049 free(name, M_TEMP);
4050 return (error);
4051 }
4052 *namep = name;
4053 return (0);
4054 }
4055
4056 /*
4057 * Dump a process' core. The main routine does some
4058 * policy checking, and creates the name of the coredump;
4059 * then it passes on a vnode and a size limit to the process-specific
4060 * coredump routine if there is one; if there _is not_ one, it returns
4061 * ENOSYS; otherwise it returns the error from the process-specific routine.
4062 */
4063
4064 static int
4065 coredump(struct thread *td)
4066 {
4067 struct proc *p = td->td_proc;
4068 struct ucred *cred = td->td_ucred;
4069 struct vnode *vp;
4070 struct flock lf;
4071 struct vattr vattr;
4072 size_t fullpathsize;
4073 int error, error1, locked;
4074 char *name; /* name of corefile */
4075 void *rl_cookie;
4076 off_t limit;
4077 char *fullpath, *freepath = NULL;
4078 struct sbuf *sb;
4079
4080 PROC_LOCK_ASSERT(p, MA_OWNED);
4081 MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td);
4082
4083 if (!do_coredump || (!sugid_coredump && (p->p_flag & P_SUGID) != 0) ||
4084 (p->p_flag2 & P2_NOTRACE) != 0) {
4085 PROC_UNLOCK(p);
4086 return (EFAULT);
4087 }
4088
4089 /*
4090 * Note that the bulk of limit checking is done after
4091 * the corefile is created. The exception is if the limit
4092 * for corefiles is 0, in which case we don't bother
4093 * creating the corefile at all. This layout means that
4094 * a corefile is truncated instead of not being created,
4095 * if it is larger than the limit.
4096 */
4097 limit = (off_t)lim_cur(td, RLIMIT_CORE);
4098 if (limit == 0 || racct_get_available(p, RACCT_CORE) == 0) {
4099 PROC_UNLOCK(p);
4100 return (EFBIG);
4101 }
4102 PROC_UNLOCK(p);
4103
4104 error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td,
4105 compress_user_cores, p->p_sig, &vp, &name);
4106 if (error != 0)
4107 return (error);
4108
4109 /*
4110 * Don't dump to non-regular files or files with links.
4111 * Do not dump into system files. Effective user must own the corefile.
4112 */
4113 if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred) != 0 ||
4114 vattr.va_nlink != 1 || (vp->v_vflag & VV_SYSTEM) != 0 ||
4115 vattr.va_uid != cred->cr_uid) {
4116 VOP_UNLOCK(vp);
4117 error = EFAULT;
4118 goto out;
4119 }
4120
4121 VOP_UNLOCK(vp);
4122
4123 /* Postpone other writers, including core dumps of other processes. */
4124 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
4125
4126 lf.l_whence = SEEK_SET;
4127 lf.l_start = 0;
4128 lf.l_len = 0;
4129 lf.l_type = F_WRLCK;
4130 locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0);
4131
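	/*
	 * Truncate any existing contents of the core file and, if
	 * set_core_nodump_flag is enabled, mark it UF_NODUMP so that
	 * backups skip it.
	 */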
4132 VATTR_NULL(&vattr);
4133 vattr.va_size = 0;
4134 if (set_core_nodump_flag)
4135 vattr.va_flags = UF_NODUMP;
4136 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4137 VOP_SETATTR(vp, &vattr, cred);
4138 VOP_UNLOCK(vp);
4139 PROC_LOCK(p);
4140 p->p_acflag |= ACORE;
4141 PROC_UNLOCK(p);
4142
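	/*
	 * Hand the actual dumping off to the ABI-specific routine from the
	 * sysentvec (e.g. the ELF coredump writer); without one there is
	 * nothing we can do.
	 */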
4143 if (p->p_sysent->sv_coredump != NULL) {
4144 error = p->p_sysent->sv_coredump(td, vp, limit, 0);
4145 } else {
4146 error = ENOSYS;
4147 }
4148
4149 if (locked) {
4150 lf.l_type = F_UNLCK;
4151 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK);
4152 }
4153 vn_rangelock_unlock(vp, rl_cookie);
4154
4155 /*
4156 * Notify the userland helper that a process triggered a core dump.
4157 * This allows the helper to run an automated debugging session.
4158 */
4159 if (error != 0 || coredump_devctl == 0)
4160 goto out;
4161 sb = sbuf_new_auto();
4162 if (vn_fullpath_global(p->p_textvp, &fullpath, &freepath) != 0)
4163 goto out2;
4164 sbuf_cat(sb, "comm=\"");
4165 devctl_safe_quote_sb(sb, fullpath);
4166 free(freepath, M_TEMP);
4167 sbuf_cat(sb, "\" core=\"");
4168
4169 /*
4170 	 * We can't look up the core file vp directly.  When we're replacing a core,
4171 	 * and at other times, we flush the name cache, so the lookup would fail.
4172 	 * Instead, if the path of the core is relative, prepend the current dir.
4173 */
4174 if (name[0] != '/') {
4175 fullpathsize = MAXPATHLEN;
4176 freepath = malloc(fullpathsize, M_TEMP, M_WAITOK);
4177 if (vn_getcwd(freepath, &fullpath, &fullpathsize) != 0) {
4178 free(freepath, M_TEMP);
4179 goto out2;
4180 }
4181 devctl_safe_quote_sb(sb, fullpath);
4182 free(freepath, M_TEMP);
4183 sbuf_putc(sb, '/');
4184 }
4185 devctl_safe_quote_sb(sb, name);
4186 sbuf_putc(sb, '"');
4187 if (sbuf_finish(sb) == 0)
4188 devctl_notify("kernel", "signal", "coredump", sbuf_data(sb));
4189 out2:
4190 sbuf_delete(sb);
4191 out:
4192 error1 = vn_close(vp, FWRITE, cred, td);
4193 if (error == 0)
4194 error = error1;
4195 #ifdef AUDIT
4196 audit_proc_coredump(td, name, error);
4197 #endif
4198 free(name, M_TEMP);
4199 return (error);
4200 }
4201
4202 /*
4203 * Nonexistent system call-- signal process (may want to handle it). Flag
4204 * error in case process won't see signal immediately (blocked or ignored).
4205 */
4206 #ifndef _SYS_SYSPROTO_H_
4207 struct nosys_args {
4208 int dummy;
4209 };
4210 #endif
4211 /* ARGSUSED */
4212 int
4213 nosys(struct thread *td, struct nosys_args *args)
4214 {
4215 struct proc *p;
4216
4217 p = td->td_proc;
4218
4219 if (SV_PROC_FLAG(p, SV_SIGSYS) != 0 && kern_signosys) {
4220 PROC_LOCK(p);
4221 tdsignal(td, SIGSYS);
4222 PROC_UNLOCK(p);
4223 }
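	/*
	 * kern.lognosys selects where the diagnostic goes: 1 prints to the
	 * invoking process' terminal, 2 to the console, 3 to both.  Unknown
	 * syscalls from init (pid 1) go to the console even when neither
	 * mode is enabled.
	 */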
4224 if (kern_lognosys == 1 || kern_lognosys == 3) {
4225 uprintf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4226 td->td_sa.code);
4227 }
4228 if (kern_lognosys == 2 || kern_lognosys == 3 ||
4229 (p->p_pid == 1 && (kern_lognosys & 3) == 0)) {
4230 printf("pid %d comm %s: nosys %d\n", p->p_pid, p->p_comm,
4231 td->td_sa.code);
4232 }
4233 return (ENOSYS);
4234 }
4235
4236 /*
4237 * Send a SIGIO or SIGURG signal to a process or process group using stored
4238 * credentials rather than those of the current process.
4239 */
4240 void
4241 pgsigio(struct sigio **sigiop, int sig, int checkctty)
4242 {
4243 ksiginfo_t ksi;
4244 struct sigio *sigio;
4245
4246 ksiginfo_init(&ksi);
4247 ksi.ksi_signo = sig;
4248 ksi.ksi_code = SI_KERNEL;
4249
4250 SIGIO_LOCK();
4251 sigio = *sigiop;
4252 if (sigio == NULL) {
4253 SIGIO_UNLOCK();
4254 return;
4255 }
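	/*
	 * A positive sio_pgid names a single process; a negative value is
	 * the negated id of a process group.
	 */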
4256 if (sigio->sio_pgid > 0) {
4257 PROC_LOCK(sigio->sio_proc);
4258 if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
4259 kern_psignal(sigio->sio_proc, sig);
4260 PROC_UNLOCK(sigio->sio_proc);
4261 } else if (sigio->sio_pgid < 0) {
4262 struct proc *p;
4263
4264 PGRP_LOCK(sigio->sio_pgrp);
4265 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
4266 PROC_LOCK(p);
4267 if (p->p_state == PRS_NORMAL &&
4268 CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
4269 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
4270 kern_psignal(p, sig);
4271 PROC_UNLOCK(p);
4272 }
4273 PGRP_UNLOCK(sigio->sio_pgrp);
4274 }
4275 SIGIO_UNLOCK();
4276 }
4277
4278 static int
4279 filt_sigattach(struct knote *kn)
4280 {
4281 struct proc *p = curproc;
4282
4283 kn->kn_ptr.p_proc = p;
4284 kn->kn_flags |= EV_CLEAR; /* automatically set */
4285
4286 knlist_add(p->p_klist, kn, 0);
4287
4288 return (0);
4289 }
4290
4291 static void
4292 filt_sigdetach(struct knote *kn)
4293 {
4294 knlist_remove(kn->kn_knlist, kn, 0);
4295 }
4296
4297 /*
4298 * signal knotes are shared with proc knotes, so we apply a mask to
4299 * the hint in order to differentiate them from process hints. This
4300 * could be avoided by using a signal-specific knote list, but probably
4301 * isn't worth the trouble.
4302 */
4303 static int
4304 filt_signal(struct knote *kn, long hint)
4305 {
4306
4307 if (hint & NOTE_SIGNAL) {
4308 hint &= ~NOTE_SIGNAL;
4309
4310 if (kn->kn_id == hint)
4311 kn->kn_data++;
4312 }
4313 return (kn->kn_data != 0);
4314 }
4315
4316 struct sigacts *
4317 sigacts_alloc(void)
4318 {
4319 struct sigacts *ps;
4320
4321 ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO);
4322 refcount_init(&ps->ps_refcnt, 1);
4323 mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF);
4324 return (ps);
4325 }
4326
4327 void
4328 sigacts_free(struct sigacts *ps)
4329 {
4330
4331 if (refcount_release(&ps->ps_refcnt) == 0)
4332 return;
4333 mtx_destroy(&ps->ps_mtx);
4334 free(ps, M_SUBPROC);
4335 }
4336
4337 struct sigacts *
4338 sigacts_hold(struct sigacts *ps)
4339 {
4340
4341 refcount_acquire(&ps->ps_refcnt);
4342 return (ps);
4343 }
4344
4345 void
4346 sigacts_copy(struct sigacts *dest, struct sigacts *src)
4347 {
4348
4349 KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest"));
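	/*
	 * Copy the signal state up to, but not including, ps_refcnt; the
	 * destination's reference count and everything after it (including
	 * the mutex) are left intact.
	 */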
4350 mtx_lock(&src->ps_mtx);
4351 bcopy(src, dest, offsetof(struct sigacts, ps_refcnt));
4352 mtx_unlock(&src->ps_mtx);
4353 }
4354
4355 int
4356 sigacts_shared(struct sigacts *ps)
4357 {
4358
4359 return (ps->ps_refcnt > 1);
4360 }
4361
4362 void
4363 sig_drop_caught(struct proc *p)
4364 {
4365 int sig;
4366 struct sigacts *ps;
4367
4368 ps = p->p_sigacts;
4369 PROC_LOCK_ASSERT(p, MA_OWNED);
4370 mtx_assert(&ps->ps_mtx, MA_OWNED);
4371 SIG_FOREACH(sig, &ps->ps_sigcatch) {
4372 sigdflt(ps, sig);
4373 if ((sigprop(sig) & SIGPROP_IGNORE) != 0)
4374 sigqueue_delete_proc(p, sig);
4375 }
4376 }
4377
4378 static void
4379 sigfastblock_failed(struct thread *td, bool sendsig, bool write)
4380 {
4381 ksiginfo_t ksi;
4382
4383 /*
4384 * Prevent further fetches and SIGSEGVs, allowing thread to
4385 * issue syscalls despite corruption.
4386 */
4387 sigfastblock_clear(td);
4388
4389 if (!sendsig)
4390 return;
4391 ksiginfo_init_trap(&ksi);
4392 ksi.ksi_signo = SIGSEGV;
4393 ksi.ksi_code = write ? SEGV_ACCERR : SEGV_MAPERR;
4394 ksi.ksi_addr = td->td_sigblock_ptr;
4395 trapsignal(td, &ksi);
4396 }
4397
4398 static bool
4399 sigfastblock_fetch_sig(struct thread *td, bool sendsig, uint32_t *valp)
4400 {
4401 uint32_t res;
4402
4403 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4404 return (true);
4405 if (fueword32((void *)td->td_sigblock_ptr, &res) == -1) {
4406 sigfastblock_failed(td, sendsig, false);
4407 return (false);
4408 }
4409 *valp = res;
4410 td->td_sigblock_val = res & ~SIGFASTBLOCK_FLAGS;
4411 return (true);
4412 }
4413
4414 static void
4415 sigfastblock_resched(struct thread *td, bool resched)
4416 {
4417 struct proc *p;
4418
4419 if (resched) {
4420 p = td->td_proc;
4421 PROC_LOCK(p);
4422 reschedule_signals(p, td->td_sigmask, 0);
4423 PROC_UNLOCK(p);
4424 }
4425 ast_sched(td, TDA_SIG);
4426 }
4427
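/*
 * The sigfastblock(2) system call.  SIGFASTBLOCK_SETPTR registers an
 * aligned 32-bit word in the calling thread's address space that userspace
 * can write to block asynchronous signal delivery cheaply;
 * SIGFASTBLOCK_UNBLOCK clears the kernel-maintained pending flag in that
 * word and lets any signals held while it was set be delivered;
 * SIGFASTBLOCK_UNSETPTR deregisters the word.
 */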
4428 int
4429 sys_sigfastblock(struct thread *td, struct sigfastblock_args *uap)
4430 {
4431 struct proc *p;
4432 int error, res;
4433 uint32_t oldval;
4434
4435 error = 0;
4436 p = td->td_proc;
4437 switch (uap->cmd) {
4438 case SIGFASTBLOCK_SETPTR:
4439 if ((td->td_pflags & TDP_SIGFASTBLOCK) != 0) {
4440 error = EBUSY;
4441 break;
4442 }
4443 if (((uintptr_t)(uap->ptr) & (sizeof(uint32_t) - 1)) != 0) {
4444 error = EINVAL;
4445 break;
4446 }
4447 td->td_pflags |= TDP_SIGFASTBLOCK;
4448 td->td_sigblock_ptr = uap->ptr;
4449 break;
4450
4451 case SIGFASTBLOCK_UNBLOCK:
4452 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4453 error = EINVAL;
4454 break;
4455 }
4456
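		/*
		 * Atomically swap the user word from SIGFASTBLOCK_PEND to 0.
		 * casueword32() returns -1 on fault, 1 when the stored value
		 * did not match (retry after checking for suspension), and 0
		 * on success; any other stored value means userspace misused
		 * the interface.
		 */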
4457 for (;;) {
4458 res = casueword32(td->td_sigblock_ptr,
4459 SIGFASTBLOCK_PEND, &oldval, 0);
4460 if (res == -1) {
4461 error = EFAULT;
4462 sigfastblock_failed(td, false, true);
4463 break;
4464 }
4465 if (res == 0)
4466 break;
4467 MPASS(res == 1);
4468 if (oldval != SIGFASTBLOCK_PEND) {
4469 error = EBUSY;
4470 break;
4471 }
4472 error = thread_check_susp(td, false);
4473 if (error != 0)
4474 break;
4475 }
4476 if (error != 0)
4477 break;
4478
4479 /*
4480 		 * td_sigblock_val is cleared here, but not on
4481 		 * syscall exit.  The end effect is that a single
4482 		 * interruptible sleep, while the user sigblock word
4483 		 * is set, might return EINTR or ERESTART to usermode
4484 		 * without delivering a signal.  All further sleeps,
4485 		 * until userspace clears the word and does
4486 		 * sigfastblock(UNBLOCK), observe the current word and
4487 		 * no longer get interrupted.  This is a slight
4488 		 * non-conformance; the alternative would be to read
4489 		 * the sigblock word on each syscall entry.
4490 */
4491 td->td_sigblock_val = 0;
4492
4493 /*
4494 * Rely on normal ast mechanism to deliver pending
4495 * signals to current thread. But notify others about
4496 * fake unblock.
4497 */
4498 sigfastblock_resched(td, error == 0 && p->p_numthreads != 1);
4499
4500 break;
4501
4502 case SIGFASTBLOCK_UNSETPTR:
4503 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
4504 error = EINVAL;
4505 break;
4506 }
4507 if (!sigfastblock_fetch_sig(td, false, &oldval)) {
4508 error = EFAULT;
4509 break;
4510 }
4511 if (oldval != 0 && oldval != SIGFASTBLOCK_PEND) {
4512 error = EBUSY;
4513 break;
4514 }
4515 sigfastblock_clear(td);
4516 break;
4517
4518 default:
4519 error = EINVAL;
4520 break;
4521 }
4522 return (error);
4523 }
4524
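/*
 * Stop tracking the thread's user sigfastblock word.  If signals became
 * pending while the word was treated as set, force rescheduling so they
 * are delivered through the normal path.
 */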
4525 void
4526 sigfastblock_clear(struct thread *td)
4527 {
4528 bool resched;
4529
4530 if ((td->td_pflags & TDP_SIGFASTBLOCK) == 0)
4531 return;
4532 td->td_sigblock_val = 0;
4533 resched = (td->td_pflags & TDP_SIGFASTPENDING) != 0 ||
4534 SIGPENDING(td);
4535 td->td_pflags &= ~(TDP_SIGFASTBLOCK | TDP_SIGFASTPENDING);
4536 sigfastblock_resched(td, resched);
4537 }
4538
4539 void
4540 sigfastblock_fetch(struct thread *td)
4541 {
4542 uint32_t val;
4543
4544 (void)sigfastblock_fetch_sig(td, true, &val);
4545 }
4546
4547 static void
4548 sigfastblock_setpend1(struct thread *td)
4549 {
4550 int res;
4551 uint32_t oldval;
4552
4553 if ((td->td_pflags & TDP_SIGFASTPENDING) == 0)
4554 return;
4555 res = fueword32((void *)td->td_sigblock_ptr, &oldval);
4556 if (res == -1) {
4557 sigfastblock_failed(td, true, false);
4558 return;
4559 }
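	/*
	 * Set SIGFASTBLOCK_PEND in the user word so that userspace notices,
	 * when it clears its block, that it must call
	 * sigfastblock(SIGFASTBLOCK_UNBLOCK) to get pending signals
	 * delivered.  Retry the CAS if the word changed underneath us.
	 */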
4560 for (;;) {
4561 res = casueword32(td->td_sigblock_ptr, oldval, &oldval,
4562 oldval | SIGFASTBLOCK_PEND);
4563 if (res == -1) {
4564 sigfastblock_failed(td, true, true);
4565 return;
4566 }
4567 if (res == 0) {
4568 td->td_sigblock_val = oldval & ~SIGFASTBLOCK_FLAGS;
4569 td->td_pflags &= ~TDP_SIGFASTPENDING;
4570 break;
4571 }
4572 MPASS(res == 1);
4573 if (thread_check_susp(td, false) != 0)
4574 break;
4575 }
4576 }
4577
4578 static void
4579 sigfastblock_setpend(struct thread *td, bool resched)
4580 {
4581 struct proc *p;
4582
4583 sigfastblock_setpend1(td);
4584 if (resched) {
4585 p = td->td_proc;
4586 PROC_LOCK(p);
4587 reschedule_signals(p, fastblock_mask, SIGPROCMASK_FASTBLK);
4588 PROC_UNLOCK(p);
4589 }
4590 }
4591