xref: /qemu/linux-user/signal.c (revision 727385c4)
1 /*
2  *  Emulation of Linux signals
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include "exec/gdbstub.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 
24 #include <sys/ucontext.h>
25 #include <sys/resource.h>
26 
27 #include "qemu.h"
28 #include "user-internals.h"
29 #include "strace.h"
30 #include "loader.h"
31 #include "trace.h"
32 #include "signal-common.h"
33 #include "host-signal.h"
34 
35 static struct target_sigaction sigact_table[TARGET_NSIG];
36 
37 static void host_signal_handler(int host_signum, siginfo_t *info,
38                                 void *puc);
39 
40 /* Fallback addresses into sigtramp page. */
41 abi_ulong default_sigreturn;
42 abi_ulong default_rt_sigreturn;
43 
44 /*
45  * System includes define _NSIG as SIGRTMAX + 1,
46  * but qemu (like the kernel) defines TARGET_NSIG as TARGET_SIGRTMAX,
47  * and the first signal is SIGHUP, defined as 1.
48  * Signal number 0 is reserved for use as kill(pid, 0), to test whether
49  * a process exists without sending it a signal.
50  */
51 #ifdef __SIGRTMAX
52 QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
53 #endif
54 static uint8_t host_to_target_signal_table[_NSIG] = {
55     [SIGHUP] = TARGET_SIGHUP,
56     [SIGINT] = TARGET_SIGINT,
57     [SIGQUIT] = TARGET_SIGQUIT,
58     [SIGILL] = TARGET_SIGILL,
59     [SIGTRAP] = TARGET_SIGTRAP,
60     [SIGABRT] = TARGET_SIGABRT,
61 /*    [SIGIOT] = TARGET_SIGIOT,*/
62     [SIGBUS] = TARGET_SIGBUS,
63     [SIGFPE] = TARGET_SIGFPE,
64     [SIGKILL] = TARGET_SIGKILL,
65     [SIGUSR1] = TARGET_SIGUSR1,
66     [SIGSEGV] = TARGET_SIGSEGV,
67     [SIGUSR2] = TARGET_SIGUSR2,
68     [SIGPIPE] = TARGET_SIGPIPE,
69     [SIGALRM] = TARGET_SIGALRM,
70     [SIGTERM] = TARGET_SIGTERM,
71 #ifdef SIGSTKFLT
72     [SIGSTKFLT] = TARGET_SIGSTKFLT,
73 #endif
74     [SIGCHLD] = TARGET_SIGCHLD,
75     [SIGCONT] = TARGET_SIGCONT,
76     [SIGSTOP] = TARGET_SIGSTOP,
77     [SIGTSTP] = TARGET_SIGTSTP,
78     [SIGTTIN] = TARGET_SIGTTIN,
79     [SIGTTOU] = TARGET_SIGTTOU,
80     [SIGURG] = TARGET_SIGURG,
81     [SIGXCPU] = TARGET_SIGXCPU,
82     [SIGXFSZ] = TARGET_SIGXFSZ,
83     [SIGVTALRM] = TARGET_SIGVTALRM,
84     [SIGPROF] = TARGET_SIGPROF,
85     [SIGWINCH] = TARGET_SIGWINCH,
86     [SIGIO] = TARGET_SIGIO,
87     [SIGPWR] = TARGET_SIGPWR,
88     [SIGSYS] = TARGET_SIGSYS,
89     /* next signals stay the same */
90 };
91 
92 static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
93 
94 /* valid sig is between 1 and _NSIG - 1 */
95 int host_to_target_signal(int sig)
96 {
97     if (sig < 1 || sig >= _NSIG) {
98         return sig;
99     }
100     return host_to_target_signal_table[sig];
101 }
102 
103 /* valid sig is between 1 and TARGET_NSIG */
104 int target_to_host_signal(int sig)
105 {
106     if (sig < 1 || sig > TARGET_NSIG) {
107         return sig;
108     }
109     return target_to_host_signal_table[sig];
110 }
111 
112 static inline void target_sigaddset(target_sigset_t *set, int signum)
113 {
114     signum--;
115     abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
116     set->sig[signum / TARGET_NSIG_BPW] |= mask;
117 }
118 
119 static inline int target_sigismember(const target_sigset_t *set, int signum)
120 {
121     signum--;
122     abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
123     return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
124 }
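/*
 * Worked example of the bit arithmetic above (illustrative, not part of
 * the build): with TARGET_NSIG_BPW == 64, target signal 65 lands in
 * word 1, bit 0, because signal numbers are 1-based:
 *
 *     int signum = 65 - 1;                                  // 64
 *     int word = signum / TARGET_NSIG_BPW;                  // 1
 *     abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);  // 1
 *
 * so target_sigaddset() performs set->sig[1] |= 1.
 */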
125 
126 void host_to_target_sigset_internal(target_sigset_t *d,
127                                     const sigset_t *s)
128 {
129     int host_sig, target_sig;
130     target_sigemptyset(d);
131     for (host_sig = 1; host_sig < _NSIG; host_sig++) {
132         target_sig = host_to_target_signal(host_sig);
133         if (target_sig < 1 || target_sig > TARGET_NSIG) {
134             continue;
135         }
136         if (sigismember(s, host_sig)) {
137             target_sigaddset(d, target_sig);
138         }
139     }
140 }
141 
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
143 {
144     target_sigset_t d1;
145     int i;
146 
147     host_to_target_sigset_internal(&d1, s);
148     for (i = 0; i < TARGET_NSIG_WORDS; i++)
149         d->sig[i] = tswapal(d1.sig[i]);
150 }
151 
152 void target_to_host_sigset_internal(sigset_t *d,
153                                     const target_sigset_t *s)
154 {
155     int host_sig, target_sig;
156     sigemptyset(d);
157     for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
158         host_sig = target_to_host_signal(target_sig);
159         if (host_sig < 1 || host_sig >= _NSIG) {
160             continue;
161         }
162         if (target_sigismember(s, target_sig)) {
163             sigaddset(d, host_sig);
164         }
165     }
166 }
167 
168 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
169 {
170     target_sigset_t s1;
171     int i;
172 
173     for (i = 0; i < TARGET_NSIG_WORDS; i++)
174         s1.sig[i] = tswapal(s->sig[i]);
175     target_to_host_sigset_internal(d, &s1);
176 }
177 
178 void host_to_target_old_sigset(abi_ulong *old_sigset,
179                                const sigset_t *sigset)
180 {
181     target_sigset_t d;
182     host_to_target_sigset(&d, sigset);
183     *old_sigset = d.sig[0];
184 }
185 
186 void target_to_host_old_sigset(sigset_t *sigset,
187                                const abi_ulong *old_sigset)
188 {
189     target_sigset_t d;
190     int i;
191 
192     d.sig[0] = *old_sigset;
193     for (i = 1; i < TARGET_NSIG_WORDS; i++)
194         d.sig[i] = 0;
195     target_to_host_sigset(sigset, &d);
196 }
197 
198 int block_signals(void)
199 {
200     TaskState *ts = (TaskState *)thread_cpu->opaque;
201     sigset_t set;
202 
203     /* It's OK to block everything including SIGSEGV, because we won't
204      * run any further guest code before unblocking signals in
205      * process_pending_signals().
206      */
207     sigfillset(&set);
208     sigprocmask(SIG_SETMASK, &set, 0);
209 
210     return qatomic_xchg(&ts->signal_pending, 1);
211 }
212 
213 /* Wrapper for the sigprocmask function
214  * Emulates sigprocmask in a safe way for the guest. Note that set and oldset
215  * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
216  * a signal was already pending and the syscall must be restarted, or
217  * 0 on success.
218  * If set is NULL, this is guaranteed not to fail.
219  */
220 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
221 {
222     TaskState *ts = (TaskState *)thread_cpu->opaque;
223 
224     if (oldset) {
225         *oldset = ts->signal_mask;
226     }
227 
228     if (set) {
229         int i;
230 
231         if (block_signals()) {
232             return -TARGET_ERESTARTSYS;
233         }
234 
235         switch (how) {
236         case SIG_BLOCK:
237             sigorset(&ts->signal_mask, &ts->signal_mask, set);
238             break;
239         case SIG_UNBLOCK:
240             for (i = 1; i <= NSIG; ++i) {
241                 if (sigismember(set, i)) {
242                     sigdelset(&ts->signal_mask, i);
243                 }
244             }
245             break;
246         case SIG_SETMASK:
247             ts->signal_mask = *set;
248             break;
249         default:
250             g_assert_not_reached();
251         }
252 
253         /* Silently ignore attempts to change blocking status of KILL or STOP */
254         sigdelset(&ts->signal_mask, SIGKILL);
255         sigdelset(&ts->signal_mask, SIGSTOP);
256     }
257     return 0;
258 }
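/*
 * For reference, the guest-side pattern being emulated (a hypothetical
 * guest snippet; only ts->signal_mask changes here, the real host mask
 * is managed separately):
 *
 *     sigset_t s, old;
 *     sigemptyset(&s);
 *     sigaddset(&s, SIGUSR1);
 *     sigprocmask(SIG_BLOCK, &s, &old);      // add SIGUSR1 to the mask
 *     ... critical section ...
 *     sigprocmask(SIG_SETMASK, &old, NULL);  // restore the old mask
 *
 * As in the kernel, attempts to block SIGKILL or SIGSTOP are silently
 * dropped.
 */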
259 
260 #if !defined(TARGET_NIOS2)
261 /* Just set the guest's signal mask to the specified value; the
262  * caller is assumed to have called block_signals() already.
263  */
264 void set_sigmask(const sigset_t *set)
265 {
266     TaskState *ts = (TaskState *)thread_cpu->opaque;
267 
268     ts->signal_mask = *set;
269 }
270 #endif
271 
272 /* sigaltstack management */
273 
274 int on_sig_stack(unsigned long sp)
275 {
276     TaskState *ts = (TaskState *)thread_cpu->opaque;
277 
278     return (sp - ts->sigaltstack_used.ss_sp
279             < ts->sigaltstack_used.ss_size);
280 }
281 
282 int sas_ss_flags(unsigned long sp)
283 {
284     TaskState *ts = (TaskState *)thread_cpu->opaque;
285 
286     return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
287             : on_sig_stack(sp) ? SS_ONSTACK : 0);
288 }
289 
290 abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
291 {
292     /*
293      * This is the X/Open sanctioned signal stack switching.
294      */
295     TaskState *ts = (TaskState *)thread_cpu->opaque;
296 
297     if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
298         return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
299     }
300     return sp;
301 }
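/*
 * Worked example of the arithmetic above (illustrative values): with
 * ss_sp = 0x40000000 and ss_size = 0x8000, a handler registered with
 * SA_ONSTACK whose current sp is not already on the alternate stack gets
 * 0x40000000 + 0x8000 = 0x40008000, i.e. the top of the alternate stack;
 * per-arch code then builds the signal frame downward from there.
 */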
302 
303 void target_save_altstack(target_stack_t *uss, CPUArchState *env)
304 {
305     TaskState *ts = (TaskState *)thread_cpu->opaque;
306 
307     __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
308     __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
309     __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
310 }
311 
312 abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
313 {
314     TaskState *ts = (TaskState *)thread_cpu->opaque;
315     size_t minstacksize = TARGET_MINSIGSTKSZ;
316     target_stack_t ss;
317 
318 #if defined(TARGET_PPC64)
319     /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
320     struct image_info *image = ts->info;
321     if (get_ppc64_abi(image) > 1) {
322         minstacksize = 4096;
323     }
324 #endif
325 
326     __get_user(ss.ss_sp, &uss->ss_sp);
327     __get_user(ss.ss_size, &uss->ss_size);
328     __get_user(ss.ss_flags, &uss->ss_flags);
329 
330     if (on_sig_stack(get_sp_from_cpustate(env))) {
331         return -TARGET_EPERM;
332     }
333 
334     switch (ss.ss_flags) {
335     default:
336         return -TARGET_EINVAL;
337 
338     case TARGET_SS_DISABLE:
339         ss.ss_size = 0;
340         ss.ss_sp = 0;
341         break;
342 
343     case TARGET_SS_ONSTACK:
344     case 0:
345         if (ss.ss_size < minstacksize) {
346             return -TARGET_ENOMEM;
347         }
348         break;
349     }
350 
351     ts->sigaltstack_used.ss_sp = ss.ss_sp;
352     ts->sigaltstack_used.ss_size = ss.ss_size;
353     return 0;
354 }
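/*
 * For reference, the guest-side API being emulated (a minimal,
 * hypothetical guest program):
 *
 *     #include <signal.h>
 *     #include <stdlib.h>
 *
 *     stack_t ss;
 *     ss.ss_sp = malloc(SIGSTKSZ);
 *     ss.ss_size = SIGSTKSZ;
 *     ss.ss_flags = 0;
 *     sigaltstack(&ss, NULL);   // ENOMEM if ss_size < MINSIGSTKSZ
 *
 * The EPERM case above matches the kernel: the alternate stack cannot
 * be replaced while the thread is running on it.
 */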
355 
356 /* siginfo conversion */
357 
358 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
359                                                  const siginfo_t *info)
360 {
361     int sig = host_to_target_signal(info->si_signo);
362     int si_code = info->si_code;
363     int si_type;
364     tinfo->si_signo = sig;
365     tinfo->si_errno = 0;
366     tinfo->si_code = info->si_code;
367 
368     /* This memset serves two purposes:
369      * (1) ensure we don't leak random junk to the guest later
370      * (2) placate false positives from gcc about fields
371      *     being used uninitialized if it chooses to inline both this
372      *     function and tswap_siginfo() into host_to_target_siginfo().
373      */
374     memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
375 
376     /* This is awkward, because we have to use a combination of
377      * the si_code and si_signo to figure out which of the union's
378      * members are valid. (Within the host kernel it is always possible
379      * to tell, but the kernel carefully avoids giving userspace the
380      * high 16 bits of si_code, so we don't have the information to
381      * do this the easy way...) We therefore make our best guess,
382      * bearing in mind that a guest can spoof most of the si_codes
383      * via rt_sigqueueinfo() if it likes.
384      *
385      * Once we have made our guess, we record it in the top 16 bits of
386      * the si_code, so that tswap_siginfo() later can use it.
387      * tswap_siginfo() will strip these top bits out before writing
388      * si_code to the guest (sign-extending the lower bits).
389      */
390 
391     switch (si_code) {
392     case SI_USER:
393     case SI_TKILL:
394     case SI_KERNEL:
395         /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
396          * These are the only unspoofable si_code values.
397          */
398         tinfo->_sifields._kill._pid = info->si_pid;
399         tinfo->_sifields._kill._uid = info->si_uid;
400         si_type = QEMU_SI_KILL;
401         break;
402     default:
403         /* Everything else is spoofable. Make best guess based on signal */
404         switch (sig) {
405         case TARGET_SIGCHLD:
406             tinfo->_sifields._sigchld._pid = info->si_pid;
407             tinfo->_sifields._sigchld._uid = info->si_uid;
408             tinfo->_sifields._sigchld._status = info->si_status;
409             tinfo->_sifields._sigchld._utime = info->si_utime;
410             tinfo->_sifields._sigchld._stime = info->si_stime;
411             si_type = QEMU_SI_CHLD;
412             break;
413         case TARGET_SIGIO:
414             tinfo->_sifields._sigpoll._band = info->si_band;
415             tinfo->_sifields._sigpoll._fd = info->si_fd;
416             si_type = QEMU_SI_POLL;
417             break;
418         default:
419             /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
420             tinfo->_sifields._rt._pid = info->si_pid;
421             tinfo->_sifields._rt._uid = info->si_uid;
422             /* XXX: potential problem if 64 bit */
423             tinfo->_sifields._rt._sigval.sival_ptr
424                 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
425             si_type = QEMU_SI_RT;
426             break;
427         }
428         break;
429     }
430 
431     tinfo->si_code = deposit32(si_code, 16, 16, si_type);
432 }
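/*
 * The si_type packing above is a plain bitfield insert; a sketch of the
 * round trip using QEMU's bitops helpers:
 *
 *     int code = deposit32(si_code, 16, 16, QEMU_SI_FAULT); // pack type
 *     int si_type = extract32(code, 16, 16);   // QEMU_SI_FAULT again
 *     int lower = sextract32(code, 0, 16);     // original si_code,
 *                                              // sign-extended
 *
 * tswap_siginfo() below performs exactly this unpacking.
 */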
433 
434 void tswap_siginfo(target_siginfo_t *tinfo,
435                    const target_siginfo_t *info)
436 {
437     int si_type = extract32(info->si_code, 16, 16);
438     int si_code = sextract32(info->si_code, 0, 16);
439 
440     __put_user(info->si_signo, &tinfo->si_signo);
441     __put_user(info->si_errno, &tinfo->si_errno);
442     __put_user(si_code, &tinfo->si_code);
443 
444     /* We can use our internal marker of which fields in the structure
445      * are valid, rather than duplicating the guesswork of
446      * host_to_target_siginfo_noswap() here.
447      */
448     switch (si_type) {
449     case QEMU_SI_KILL:
450         __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
451         __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
452         break;
453     case QEMU_SI_TIMER:
454         __put_user(info->_sifields._timer._timer1,
455                    &tinfo->_sifields._timer._timer1);
456         __put_user(info->_sifields._timer._timer2,
457                    &tinfo->_sifields._timer._timer2);
458         break;
459     case QEMU_SI_POLL:
460         __put_user(info->_sifields._sigpoll._band,
461                    &tinfo->_sifields._sigpoll._band);
462         __put_user(info->_sifields._sigpoll._fd,
463                    &tinfo->_sifields._sigpoll._fd);
464         break;
465     case QEMU_SI_FAULT:
466         __put_user(info->_sifields._sigfault._addr,
467                    &tinfo->_sifields._sigfault._addr);
468         break;
469     case QEMU_SI_CHLD:
470         __put_user(info->_sifields._sigchld._pid,
471                    &tinfo->_sifields._sigchld._pid);
472         __put_user(info->_sifields._sigchld._uid,
473                    &tinfo->_sifields._sigchld._uid);
474         __put_user(info->_sifields._sigchld._status,
475                    &tinfo->_sifields._sigchld._status);
476         __put_user(info->_sifields._sigchld._utime,
477                    &tinfo->_sifields._sigchld._utime);
478         __put_user(info->_sifields._sigchld._stime,
479                    &tinfo->_sifields._sigchld._stime);
480         break;
481     case QEMU_SI_RT:
482         __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
483         __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
484         __put_user(info->_sifields._rt._sigval.sival_ptr,
485                    &tinfo->_sifields._rt._sigval.sival_ptr);
486         break;
487     default:
488         g_assert_not_reached();
489     }
490 }
491 
492 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
493 {
494     target_siginfo_t tgt_tmp;
495     host_to_target_siginfo_noswap(&tgt_tmp, info);
496     tswap_siginfo(tinfo, &tgt_tmp);
497 }
498 
499 /* XXX: we support only the case where POSIX RT signals are used. */
500 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
501 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
502 {
503     /* This conversion is used only for the rt_sigqueueinfo syscall,
504      * and so we know that the _rt fields are the valid ones.
505      */
506     abi_ulong sival_ptr;
507 
508     __get_user(info->si_signo, &tinfo->si_signo);
509     __get_user(info->si_errno, &tinfo->si_errno);
510     __get_user(info->si_code, &tinfo->si_code);
511     __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
512     __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
513     __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
514     info->si_value.sival_ptr = (void *)(long)sival_ptr;
515 }
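/*
 * Guest-side counterpart, for reference (hypothetical snippet): this is
 * the path taken by sigqueue(), which glibc implements on top of
 * rt_sigqueueinfo():
 *
 *     union sigval v;
 *     v.sival_ptr = some_pointer;
 *     sigqueue(pid, SIGUSR1, v);    // arrives with si_code == SI_QUEUE
 *
 * Only the _rt member of the union is converted above, which is all
 * rt_sigqueueinfo() needs.
 */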
516 
517 static int fatal_signal(int sig)
518 {
519     switch (sig) {
520     case TARGET_SIGCHLD:
521     case TARGET_SIGURG:
522     case TARGET_SIGWINCH:
523         /* Ignored by default.  */
524         return 0;
525     case TARGET_SIGCONT:
526     case TARGET_SIGSTOP:
527     case TARGET_SIGTSTP:
528     case TARGET_SIGTTIN:
529     case TARGET_SIGTTOU:
530         /* Job control signals.  */
531         return 0;
532     default:
533         return 1;
534     }
535 }
536 
537 /* returns 1 if the given signal should dump core if not handled */
538 static int core_dump_signal(int sig)
539 {
540     switch (sig) {
541     case TARGET_SIGABRT:
542     case TARGET_SIGFPE:
543     case TARGET_SIGILL:
544     case TARGET_SIGQUIT:
545     case TARGET_SIGSEGV:
546     case TARGET_SIGTRAP:
547     case TARGET_SIGBUS:
548         return 1;
549     default:
550         return 0;
551     }
552 }
553 
554 static void signal_table_init(void)
555 {
556     int host_sig, target_sig, count;
557 
558     /*
559      * Signals are supported starting from TARGET_SIGRTMIN and going up
560      * until we run out of host realtime signals.
561      * glibc at least uses only the lower 2 rt signals and probably
562      * nobody's using the upper ones.
563      * That is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
564      * To fix this properly we would need to do manual signal delivery
565      * multiplexed over a single host signal.
566      * Attempts to configure "missing" signals via sigaction will be
567      * silently ignored.
568      */
569     for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
570         target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
571         if (target_sig <= TARGET_NSIG) {
572             host_to_target_signal_table[host_sig] = target_sig;
573         }
574     }
575 
576     /* generate signal conversion tables */
577     for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
578         target_to_host_signal_table[target_sig] = _NSIG; /* poison */
579     }
580     for (host_sig = 1; host_sig < _NSIG; host_sig++) {
581         if (host_to_target_signal_table[host_sig] == 0) {
582             host_to_target_signal_table[host_sig] = host_sig;
583         }
584         target_sig = host_to_target_signal_table[host_sig];
585         if (target_sig <= TARGET_NSIG) {
586             target_to_host_signal_table[target_sig] = host_sig;
587         }
588     }
589 
590     if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
591         for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
592             if (target_to_host_signal_table[target_sig] == _NSIG) {
593                 count++;
594             }
595         }
596         trace_signal_table_init(count);
597     }
598 }
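/*
 * Worked example of the rt mapping above (values depend on the host
 * libc): with glibc reserving two rt signals internally, SIGRTMIN is 34
 * while __SIGRTMIN is 32, so host signal 34 maps to TARGET_SIGRTMIN,
 * host 35 to TARGET_SIGRTMIN + 1, and so on until SIGRTMAX or
 * TARGET_NSIG runs out; unmapped target rt signals keep the _NSIG
 * poison value and are counted by the trace point above.
 */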
599 
600 void signal_init(void)
601 {
602     TaskState *ts = (TaskState *)thread_cpu->opaque;
603     struct sigaction act;
604     struct sigaction oact;
605     int i;
606     int host_sig;
607 
608     /* initialize signal conversion tables */
609     signal_table_init();
610 
611     /* Set the signal mask from the host mask. */
612     sigprocmask(0, 0, &ts->signal_mask);
613 
614     sigfillset(&act.sa_mask);
615     act.sa_flags = SA_SIGINFO;
616     act.sa_sigaction = host_signal_handler;
617     for (i = 1; i <= TARGET_NSIG; i++) {
618 #ifdef CONFIG_GPROF
619         if (i == TARGET_SIGPROF) {
620             continue;
621         }
622 #endif
623         host_sig = target_to_host_signal(i);
624         sigaction(host_sig, NULL, &oact);
625         if (oact.sa_sigaction == (void *)SIG_IGN) {
626             sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
627         } else if (oact.sa_sigaction == (void *)SIG_DFL) {
628             sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
629         }
630         /* If there's already a handler installed then something has
631            gone horribly wrong, so don't even try to handle that case.  */
632         /* Install some handlers for our own use.  We need at least
633            SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
634            trap all signals because it affects syscall interrupt
635            behavior.  But do trap all default-fatal signals.  */
636         if (fatal_signal(i))
637             sigaction(host_sig, &act, NULL);
638     }
639 }
640 
641 /* Force a synchronously taken signal. The kernel force_sig() function
642  * also forces the signal to "not blocked, not ignored", but for QEMU
643  * that work is done in process_pending_signals().
644  */
645 void force_sig(int sig)
646 {
647     CPUState *cpu = thread_cpu;
648     CPUArchState *env = cpu->env_ptr;
649     target_siginfo_t info = {};
650 
651     info.si_signo = sig;
652     info.si_errno = 0;
653     info.si_code = TARGET_SI_KERNEL;
654     info._sifields._kill._pid = 0;
655     info._sifields._kill._uid = 0;
656     queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
657 }
658 
659 /*
660  * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
661  * 'force' part is handled in process_pending_signals().
662  */
663 void force_sig_fault(int sig, int code, abi_ulong addr)
664 {
665     CPUState *cpu = thread_cpu;
666     CPUArchState *env = cpu->env_ptr;
667     target_siginfo_t info = {};
668 
669     info.si_signo = sig;
670     info.si_errno = 0;
671     info.si_code = code;
672     info._sifields._sigfault._addr = addr;
673     queue_signal(env, sig, QEMU_SI_FAULT, &info);
674 }
675 
676 /* Force a SIGSEGV if we couldn't write to memory trying to set
677  * up the signal frame. oldsig is the signal we were trying to handle
678  * at the point of failure.
679  */
680 #if !defined(TARGET_RISCV)
681 void force_sigsegv(int oldsig)
682 {
683     if (oldsig == SIGSEGV) {
684         /* Make sure we don't try to deliver the signal again; this will
685          * end up with handle_pending_signal() calling dump_core_and_abort().
686          */
687         sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
688     }
689     force_sig(TARGET_SIGSEGV);
690 }
691 #endif
692 
693 void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
694                            MMUAccessType access_type, bool maperr, uintptr_t ra)
695 {
696     const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
697 
698     if (tcg_ops->record_sigsegv) {
699         tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
700     }
701 
702     force_sig_fault(TARGET_SIGSEGV,
703                     maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
704                     addr);
705     cpu->exception_index = EXCP_INTERRUPT;
706     cpu_loop_exit_restore(cpu, ra);
707 }
708 
709 void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
710                           MMUAccessType access_type, uintptr_t ra)
711 {
712     const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
713 
714     if (tcg_ops->record_sigbus) {
715         tcg_ops->record_sigbus(cpu, addr, access_type, ra);
716     }
717 
718     force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
719     cpu->exception_index = EXCP_INTERRUPT;
720     cpu_loop_exit_restore(cpu, ra);
721 }
722 
723 /* abort execution with signal */
724 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
725 {
726     CPUState *cpu = thread_cpu;
727     CPUArchState *env = cpu->env_ptr;
728     TaskState *ts = (TaskState *)cpu->opaque;
729     int host_sig, core_dumped = 0;
730     struct sigaction act;
731 
732     host_sig = target_to_host_signal(target_sig);
733     trace_user_force_sig(env, target_sig, host_sig);
734     gdb_signalled(env, target_sig);
735 
736     /* dump core if supported by target binary format */
737     if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
738         stop_all_tasks();
739         core_dumped =
740             ((*ts->bprm->core_dump)(target_sig, env) == 0);
741     }
742     if (core_dumped) {
743         /* we already dumped the core of the target process; we don't
744          * want a coredump of qemu itself */
745         struct rlimit nodump;
746         getrlimit(RLIMIT_CORE, &nodump);
747         nodump.rlim_cur = 0;
748         setrlimit(RLIMIT_CORE, &nodump);
749         (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
750             target_sig, strsignal(host_sig), "core dumped");
751     }
752 
753     /* The proper exit code for dying from an uncaught signal is
754      * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
755      * a negative value.  To get the proper exit code we need to
756      * actually die from an uncaught signal.  Here we install the
757      * default signal handler, send ourselves the signal and wait
758      * for it to arrive. */
759     sigfillset(&act.sa_mask);
760     act.sa_handler = SIG_DFL;
761     act.sa_flags = 0;
762     sigaction(host_sig, &act, NULL);
763 
764     /* For some reason raise(host_sig) doesn't send the signal when
765      * statically linked on x86-64. */
766     kill(getpid(), host_sig);
767 
768     /* Make sure the signal isn't masked (just reuse the mask inside
769      * of act) */
770     sigdelset(&act.sa_mask, host_sig);
771     sigsuspend(&act.sa_mask);
772 
773     /* unreachable */
774     abort();
775 }
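/*
 * The effect, as seen by a (hypothetical) parent process:
 *
 *     int status;
 *     waitpid(pid, &status, 0);
 *     if (WIFSIGNALED(status)) {
 *         int sig = WTERMSIG(status);       // the uncaught signal
 *         int dumped = WCOREDUMP(status);   // avoided via RLIMIT_CORE
 *     }
 *
 * which is what a shell uses to print e.g. "Segmentation fault" and
 * report an exit status of 128 + sig.
 */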
776 
777 /* queue a signal so that it will be sent to the virtual CPU as soon
778    as possible */
779 int queue_signal(CPUArchState *env, int sig, int si_type,
780                  target_siginfo_t *info)
781 {
782     CPUState *cpu = env_cpu(env);
783     TaskState *ts = cpu->opaque;
784 
785     trace_user_queue_signal(env, sig);
786 
787     info->si_code = deposit32(info->si_code, 16, 16, si_type);
788 
789     ts->sync_signal.info = *info;
790     ts->sync_signal.pending = sig;
791     /* signal that a new signal is pending */
792     qatomic_set(&ts->signal_pending, 1);
793     return 1; /* indicates that the signal was queued */
794 }
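/*
 * Typical call chain, for orientation (a sketch; the exact per-target
 * path varies): a guest integer divide by zero caught in a cpu_loop
 * ends up as
 *
 *     force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, addr);
 *         -> queue_signal(env, sig, QEMU_SI_FAULT, &info)
 *         -> process_pending_signals() delivers it on the way out
 */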
795 
796 #ifndef HAVE_SAFE_SYSCALL
797 static inline void rewind_if_in_safe_syscall(void *puc)
798 {
799     /* Default version: never rewind */
800 }
801 #endif
802 
803 static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
804 {
805     CPUArchState *env = thread_cpu->env_ptr;
806     CPUState *cpu = env_cpu(env);
807     TaskState *ts = cpu->opaque;
808     target_siginfo_t tinfo;
809     ucontext_t *uc = puc;
810     struct emulated_sigtable *k;
811     int guest_sig;
812     uintptr_t pc = 0;
813     bool sync_sig = false;
814 
815     /*
816      * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
817      * handling wrt signal blocking and unwinding.
818      */
819     if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
820         MMUAccessType access_type;
821         uintptr_t host_addr;
822         abi_ptr guest_addr;
823         bool is_write;
824 
825         host_addr = (uintptr_t)info->si_addr;
826 
827         /*
828          * Convert forcefully to guest address space: addresses outside
829          * reserved_va are still valid to report via SEGV_MAPERR.
830          */
831         guest_addr = h2g_nocheck(host_addr);
832 
833         pc = host_signal_pc(uc);
834         is_write = host_signal_write(info, uc);
835         access_type = adjust_signal_pc(&pc, is_write);
836 
837         if (host_sig == SIGSEGV) {
838             bool maperr = true;
839 
840             if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
841                 /* If this was a write to a TB protected page, restart. */
842                 if (is_write &&
843                     handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
844                                                 pc, guest_addr)) {
845                     return;
846                 }
847 
848                 /*
849                  * With reserved_va, the whole address space is PROT_NONE,
850                  * which means that we may get ACCERR when we want MAPERR.
851                  */
852                 if (page_get_flags(guest_addr) & PAGE_VALID) {
853                     maperr = false;
854                 } else {
855                     info->si_code = SEGV_MAPERR;
856                 }
857             }
858 
859             sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
860             cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
861         } else {
862             sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
863             if (info->si_code == BUS_ADRALN) {
864                 cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
865             }
866         }
867 
868         sync_sig = true;
869     }
870 
871     /* get target signal number */
872     guest_sig = host_to_target_signal(host_sig);
873     if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
874         return;
875     }
876     trace_user_host_signal(env, host_sig, guest_sig);
877 
878     host_to_target_siginfo_noswap(&tinfo, info);
879     k = &ts->sigtab[guest_sig - 1];
880     k->info = tinfo;
881     k->pending = guest_sig;
882     ts->signal_pending = 1;
883 
884     /*
885      * For synchronous signals, unwind the cpu state to the faulting
886      * insn and then exit back to the main loop so that the signal
887      * is delivered immediately.
888      */
889     if (sync_sig) {
890         cpu->exception_index = EXCP_INTERRUPT;
891         cpu_loop_exit_restore(cpu, pc);
892     }
893 
894     rewind_if_in_safe_syscall(puc);
895 
896     /*
897      * Block host signals until the target signal handler is entered. We
898      * can't block SIGSEGV or SIGBUS while we're executing guest
899      * code in case the guest code provokes one in the window between
900      * now and it getting out to the main loop. Signals will be
901      * unblocked again in process_pending_signals().
902      *
903      * WARNING: we cannot use sigfillset() here because the uc_sigmask
904      * field is a kernel sigset_t, which is much smaller than the
905      * libc sigset_t which sigfillset() operates on. Using sigfillset()
906      * would write 0xff bytes off the end of the structure and trash
907      * adjacent data.
908      * We can't use sizeof(uc->uc_sigmask) either, because the libc
909      * headers define the struct field with the wrong (too large) type.
910      */
911     memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
912     sigdelset(&uc->uc_sigmask, SIGSEGV);
913     sigdelset(&uc->uc_sigmask, SIGBUS);
914 
915     /* interrupt the virtual CPU as soon as possible */
916     cpu_exit(thread_cpu);
917 }
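/*
 * Illustration of the si_code test at the top of host_signal_handler()
 * (hypothetical examples): kernel-generated faults carry positive
 * codes, user-generated signals do not:
 *
 *     kill(pid, SIGSEGV);           // si_code == SI_USER  (0)
 *     sigqueue(pid, SIGSEGV, val);  // si_code == SI_QUEUE (< 0)
 *     *(volatile int *)0 = 0;       // si_code == SEGV_MAPERR (> 0)
 *
 * Only the last, genuinely synchronous case takes the unwind-and-exit
 * path above.
 */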
918 
919 /* do_sigaltstack() returns target values and errnos. */
920 /* compare linux/kernel/signal.c:do_sigaltstack() */
921 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
922                         CPUArchState *env)
923 {
924     target_stack_t oss, *uoss = NULL;
925     abi_long ret = -TARGET_EFAULT;
926 
927     if (uoss_addr) {
928         /* Verify writability now, but do not alter user memory yet. */
929         if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
930             goto out;
931         }
932         target_save_altstack(&oss, env);
933     }
934 
935     if (uss_addr) {
936         target_stack_t *uss;
937 
938         if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
939             goto out;
940         }
941         ret = target_restore_altstack(uss, env);
942         if (ret) {
943             goto out;
944         }
945     }
946 
947     if (uoss_addr) {
948         memcpy(uoss, &oss, sizeof(oss));
949         unlock_user_struct(uoss, uoss_addr, 1);
950         uoss = NULL;
951     }
952     ret = 0;
953 
954  out:
955     if (uoss) {
956         unlock_user_struct(uoss, uoss_addr, 0);
957     }
958     return ret;
959 }
960 
961 /* do_sigaction() returns target values and host errnos */
962 int do_sigaction(int sig, const struct target_sigaction *act,
963                  struct target_sigaction *oact, abi_ulong ka_restorer)
964 {
965     struct target_sigaction *k;
966     struct sigaction act1;
967     int host_sig;
968     int ret = 0;
969 
970     trace_signal_do_sigaction_guest(sig, TARGET_NSIG);
971 
972     if (sig < 1 || sig > TARGET_NSIG) {
973         return -TARGET_EINVAL;
974     }
975 
976     if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
977         return -TARGET_EINVAL;
978     }
979 
980     if (block_signals()) {
981         return -TARGET_ERESTARTSYS;
982     }
983 
984     k = &sigact_table[sig - 1];
985     if (oact) {
986         __put_user(k->_sa_handler, &oact->_sa_handler);
987         __put_user(k->sa_flags, &oact->sa_flags);
988 #ifdef TARGET_ARCH_HAS_SA_RESTORER
989         __put_user(k->sa_restorer, &oact->sa_restorer);
990 #endif
991         /* Not swapped.  */
992         oact->sa_mask = k->sa_mask;
993     }
994     if (act) {
995         /* FIXME: This is not threadsafe.  */
996         __get_user(k->_sa_handler, &act->_sa_handler);
997         __get_user(k->sa_flags, &act->sa_flags);
998 #ifdef TARGET_ARCH_HAS_SA_RESTORER
999         __get_user(k->sa_restorer, &act->sa_restorer);
1000 #endif
1001 #ifdef TARGET_ARCH_HAS_KA_RESTORER
1002         k->ka_restorer = ka_restorer;
1003 #endif
1004         /* To be swapped in target_to_host_sigset.  */
1005         k->sa_mask = act->sa_mask;
1006 
1007         /* we update the host linux signal state */
1008         host_sig = target_to_host_signal(sig);
1009         trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
1010         if (host_sig > SIGRTMAX) {
1011             /* we don't have enough host signals to map all target signals */
1012             qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
1013                           sig);
1014             /*
1015              * we don't return an error here because some programs try to
1016              * register a handler for all possible rt signals even if they
1017              * don't need it.
1018              * An error here would abort them, whereas not having the
1019              * signal available later causes no problem.
1020              * This is the case for golang,
1021              *   see https://github.com/golang/go/issues/33746,
1022              * so we silently ignore the error.
1023              */
1024             return 0;
1025         }
1026         if (host_sig != SIGSEGV && host_sig != SIGBUS) {
1027             sigfillset(&act1.sa_mask);
1028             act1.sa_flags = SA_SIGINFO;
1029             if (k->sa_flags & TARGET_SA_RESTART)
1030                 act1.sa_flags |= SA_RESTART;
1031             /* NOTE: it is important to update the host kernel signal
1032                ignore state to avoid getting unexpectedly interrupted
1033                syscalls */
1034             if (k->_sa_handler == TARGET_SIG_IGN) {
1035                 act1.sa_sigaction = (void *)SIG_IGN;
1036             } else if (k->_sa_handler == TARGET_SIG_DFL) {
1037                 if (fatal_signal(sig))
1038                     act1.sa_sigaction = host_signal_handler;
1039                 else
1040                     act1.sa_sigaction = (void *)SIG_DFL;
1041             } else {
1042                 act1.sa_sigaction = host_signal_handler;
1043             }
1044             ret = sigaction(host_sig, &act1, NULL);
1045         }
1046     }
1047     return ret;
1048 }
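/*
 * Why the SA_RESTART propagation above matters, from the guest's point
 * of view (hypothetical guest snippet):
 *
 *     struct sigaction sa = { 0 };
 *     sa.sa_handler = handler;
 *     sa.sa_flags = SA_RESTART;
 *     sigaction(SIGUSR1, &sa, NULL);
 *     read(fd, buf, len);   // restarted after SIGUSR1 rather than
 *                           // failing with EINTR
 *
 * If the flag were not mirrored onto the host sigaction, host syscalls
 * issued on the guest's behalf would show the wrong interruption
 * behaviour.
 */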
1049 
1050 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
1051                                   struct emulated_sigtable *k)
1052 {
1053     CPUState *cpu = env_cpu(cpu_env);
1054     abi_ulong handler;
1055     sigset_t set;
1056     target_sigset_t target_old_set;
1057     struct target_sigaction *sa;
1058     TaskState *ts = cpu->opaque;
1059 
1060     trace_user_handle_signal(cpu_env, sig);
1061     /* dequeue signal */
1062     k->pending = 0;
1063 
1064     sig = gdb_handlesig(cpu, sig);
1065     if (!sig) {
1066         sa = NULL;
1067         handler = TARGET_SIG_IGN;
1068     } else {
1069         sa = &sigact_table[sig - 1];
1070         handler = sa->_sa_handler;
1071     }
1072 
1073     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
1074         print_taken_signal(sig, &k->info);
1075     }
1076 
1077     if (handler == TARGET_SIG_DFL) {
1078         /* default handler: some signals are ignored; the others are job control or fatal */
1079         if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
1080             kill(getpid(), SIGSTOP);
1081         } else if (sig != TARGET_SIGCHLD &&
1082                    sig != TARGET_SIGURG &&
1083                    sig != TARGET_SIGWINCH &&
1084                    sig != TARGET_SIGCONT) {
1085             dump_core_and_abort(sig);
1086         }
1087     } else if (handler == TARGET_SIG_IGN) {
1088         /* ignore sig */
1089     } else if (handler == TARGET_SIG_ERR) {
1090         dump_core_and_abort(sig);
1091     } else {
1092         /* compute the blocked signals during the handler execution */
1093         sigset_t *blocked_set;
1094 
1095         target_to_host_sigset(&set, &sa->sa_mask);
1096         /* SA_NODEFER indicates that the current signal should not be
1097            blocked during the handler */
1098         if (!(sa->sa_flags & TARGET_SA_NODEFER))
1099             sigaddset(&set, target_to_host_signal(sig));
1100 
1101         /* save the previous blocked signal state to restore it at the
1102            end of the signal execution (see do_sigreturn) */
1103         host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
1104 
1105         /* block signals in the handler */
1106         blocked_set = ts->in_sigsuspend ?
1107             &ts->sigsuspend_mask : &ts->signal_mask;
1108         sigorset(&ts->signal_mask, blocked_set, &set);
1109         ts->in_sigsuspend = 0;
1110 
1111         /* if the CPU is in VM86 mode, we restore the 32 bit values */
1112 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
1113         {
1114             CPUX86State *env = cpu_env;
1115             if (env->eflags & VM_MASK)
1116                 save_v86_state(env);
1117         }
1118 #endif
1119         /* prepare the stack frame of the virtual CPU */
1120 #if defined(TARGET_ARCH_HAS_SETUP_FRAME)
1121         if (sa->sa_flags & TARGET_SA_SIGINFO) {
1122             setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
1123         } else {
1124             setup_frame(sig, sa, &target_old_set, cpu_env);
1125         }
1126 #else
1127         /* These targets do not have traditional signals.  */
1128         setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
1129 #endif
1130         if (sa->sa_flags & TARGET_SA_RESETHAND) {
1131             sa->_sa_handler = TARGET_SIG_DFL;
1132         }
1133     }
1134 }
1135 
1136 void process_pending_signals(CPUArchState *cpu_env)
1137 {
1138     CPUState *cpu = env_cpu(cpu_env);
1139     int sig;
1140     TaskState *ts = cpu->opaque;
1141     sigset_t set;
1142     sigset_t *blocked_set;
1143 
1144     while (qatomic_read(&ts->signal_pending)) {
1145         /* FIXME: This is not threadsafe.  */
1146         sigfillset(&set);
1147         sigprocmask(SIG_SETMASK, &set, 0);
1148 
1149     restart_scan:
1150         sig = ts->sync_signal.pending;
1151         if (sig) {
1152             /* Synchronous signals are forced,
1153              * see force_sig_info() and its callers in Linux.
1154              * Note that not all of our queue_signal() calls in QEMU correspond
1155              * to force_sig_info() calls in Linux (some are send_sig_info()).
1156              * However it seems like a kernel bug to me to allow the process
1157              * to block a synchronous signal since it could then just end up
1158              * looping round and round indefinitely.
1159              */
1160             if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
1161                 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
1162                 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
1163                 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
1164             }
1165 
1166             handle_pending_signal(cpu_env, sig, &ts->sync_signal);
1167         }
1168 
1169         for (sig = 1; sig <= TARGET_NSIG; sig++) {
1170             blocked_set = ts->in_sigsuspend ?
1171                 &ts->sigsuspend_mask : &ts->signal_mask;
1172 
1173             if (ts->sigtab[sig - 1].pending &&
1174                 (!sigismember(blocked_set,
1175                               target_to_host_signal_table[sig]))) {
1176                 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
1177                 /* Restart scan from the beginning, as handle_pending_signal
1178                  * might have resulted in a new synchronous signal (eg SIGSEGV).
1179                  */
1180                 goto restart_scan;
1181             }
1182         }
1183 
1184         /* if no signal is pending, unblock signals and recheck (the act
1185          * of unblocking might cause us to take another host signal which
1186          * will set signal_pending again).
1187          */
1188         qatomic_set(&ts->signal_pending, 0);
1189         ts->in_sigsuspend = 0;
1190         set = ts->signal_mask;
1191         sigdelset(&set, SIGSEGV);
1192         sigdelset(&set, SIGBUS);
1193         sigprocmask(SIG_SETMASK, &set, 0);
1194     }
1195     ts->in_sigsuspend = 0;
1196 }
1197