/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "trace.h"
#include "signal-common.h"

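/*
 * Guest-visible signal dispositions: entry [sig - 1] holds the
 * target_sigaction that the guest installed for target signal sig.
 */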
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal
 * is SIGHUP, defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

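/*
 * Reverse mapping, filled in by signal_table_init(). Entries that are
 * still _NSIG afterwards mark target signals that have no usable host
 * signal (see the "poison" value below).
 */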
static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

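/*
 * Guest sigsets are arrays of TARGET_NSIG_WORDS abi_ulongs: target
 * signal n occupies bit (n - 1) % TARGET_NSIG_BPW of word
 * (n - 1) / TARGET_NSIG_BPW. For example, TARGET_SIGINT (2) is bit 1
 * of word 0.
 */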
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

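/*
 * Block all host signals and mark a guest signal as pending. Returns the
 * previous value of signal_pending: a non-zero return means a signal was
 * already pending, and the caller is expected to back out and return
 * -TARGET_ERESTARTSYS so the guest syscall is restarted (see
 * do_sigprocmask() below for the canonical usage).
 */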
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

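/*
 * Note that the single unsigned comparison below covers both bounds: if
 * sp is below ss_sp, the subtraction wraps around to a huge value and
 * fails the ss_size test.
 */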
int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
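    /*
     * Concretely: the deposit32(si_code, 16, 16, si_type) at the end of
     * this function packs si_type into bits [16..31] while keeping the
     * kernel's si_code in bits [0..15]; tswap_siginfo() undoes this with
     * extract32()/sextract32().
     */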

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status = info->si_status;
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we assume that only POSIX RT signals are used here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  glibc at least uses
     * only the lower 2 rt signals and probably nobody's using the upper
     * ones.  This is why SIGRTMIN (34) is generally greater than
     * __SIGRTMIN (32).  To fix this properly we need to do manual signal
     * delivery multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     */
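    /*
     * Worked example (typical glibc host, which reserves the first two
     * RT signals for its own use so SIGRTMIN == 34, with SIGRTMAX == 64,
     * TARGET_SIGRTMIN == 32 and TARGET_NSIG == 64): host signal 34 maps
     * to target signal 32, host 64 to target 62, and target signals 63
     * and 64 are left unmapped (poisoned below).
     */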
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
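
/*
 * Note that queue_signal() only ever fills the single sync_signal slot:
 * it is used for synchronously raised signals (e.g. from a CPU fault,
 * via force_sig() above), while asynchronous host signals land in
 * ts->sigtab[] via host_signal_handler() below.
 */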

#ifndef HAVE_SAFE_SYSCALL
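/*
 * When HAVE_SAFE_SYSCALL is set, an arch-specific version of this hook
 * rewinds the PC to restart the interrupted safe-syscall sequence, so
 * that -TARGET_ERESTARTSYS handling sees a consistent state. This is a
 * summary of the mechanism; the details live in the safe-syscall code.
 */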
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions;
       we forward those signals to it here. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.
             * An error here could abort them, whereas there is no harm in
             * the signal simply not being available later.
             * This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are either
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
1081