xref: /qemu/linux-user/syscall.c (revision 3fb356cc)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83 
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
89 #include <linux/kd.h>
90 #include <linux/mtio.h>
91 #include <linux/fs.h>
92 #include <linux/fd.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include <linux/if_alg.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 #include "qemu/guest-random.h"
115 #include "user/syscall-trace.h"
116 #include "qapi/error.h"
117 #include "fd-trans.h"
118 
119 #ifndef CLONE_IO
120 #define CLONE_IO                0x80000000      /* Clone io context */
121 #endif
122 
123 /* We can't directly call the host clone syscall, because this will
124  * badly confuse libc (breaking mutexes, for example). So we must
125  * divide clone flags into:
126  *  * flag combinations that look like pthread_create()
127  *  * flag combinations that look like fork()
128  *  * flags we can implement within QEMU itself
129  *  * flags we can't support and will return an error for
130  */
131 /* For thread creation, all these flags must be present; for
132  * fork, none must be present.
133  */
134 #define CLONE_THREAD_FLAGS                              \
135     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
136      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
137 
138 /* These flags are ignored:
139  * CLONE_DETACHED is now ignored by the kernel;
140  * CLONE_IO is just an optimisation hint to the I/O scheduler
141  */
142 #define CLONE_IGNORED_FLAGS                     \
143     (CLONE_DETACHED | CLONE_IO)
144 
145 /* Flags for fork which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_FORK_FLAGS               \
147     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
148      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
149 
150 /* Flags for thread creation which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
152     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
153      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
154 
155 #define CLONE_INVALID_FORK_FLAGS                                        \
156     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
157 
158 #define CLONE_INVALID_THREAD_FLAGS                                      \
159     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
160        CLONE_IGNORED_FLAGS))
161 
162 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
163  * have almost all been allocated. We cannot support any of
164  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
165  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
166  * The checks against the invalid thread masks above will catch these.
167  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168  */
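/*
 * Illustrative example (assuming typical glibc behaviour): glibc's
 * pthread_create() issues clone() with roughly
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * i.e. all of CLONE_THREAD_FLAGS plus only CLONE_OPTIONAL_THREAD_FLAGS bits,
 * so it passes the thread checks above. A plain fork() passes little more
 * than SIGCHLD in the CSIGNAL field (perhaps plus CLONE_CHILD_SETTID /
 * CLONE_CHILD_CLEARTID), all within CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS,
 * so it passes the fork checks.
 */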
169 
170 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
171  * once. This exercises the codepaths for restart.
172  */
173 //#define DEBUG_ERESTARTSYS
174 
175 //#include <linux/msdos_fs.h>
176 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
177 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
178 
179 #undef _syscall0
180 #undef _syscall1
181 #undef _syscall2
182 #undef _syscall3
183 #undef _syscall4
184 #undef _syscall5
185 #undef _syscall6
186 
187 #define _syscall0(type,name)		\
188 static type name (void)			\
189 {					\
190 	return syscall(__NR_##name);	\
191 }
192 
193 #define _syscall1(type,name,type1,arg1)		\
194 static type name (type1 arg1)			\
195 {						\
196 	return syscall(__NR_##name, arg1);	\
197 }
198 
199 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
200 static type name (type1 arg1,type2 arg2)		\
201 {							\
202 	return syscall(__NR_##name, arg1, arg2);	\
203 }
204 
205 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
206 static type name (type1 arg1,type2 arg2,type3 arg3)		\
207 {								\
208 	return syscall(__NR_##name, arg1, arg2, arg3);		\
209 }
210 
211 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
212 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
213 {										\
214 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
215 }
216 
217 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
218 		  type5,arg5)							\
219 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
220 {										\
221 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
222 }
223 
224 
225 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
226 		  type5,arg5,type6,arg6)					\
227 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
228                   type6 arg6)							\
229 {										\
230 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
231 }
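/*
 * For illustration, _syscall0(int, sys_gettid) further down expands to
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * and __NR_sys_gettid is #defined back to __NR_gettid below, giving us a
 * direct host syscall wrapper regardless of what the host libc exposes.
 */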
232 
233 
234 #define __NR_sys_uname __NR_uname
235 #define __NR_sys_getcwd1 __NR_getcwd
236 #define __NR_sys_getdents __NR_getdents
237 #define __NR_sys_getdents64 __NR_getdents64
238 #define __NR_sys_getpriority __NR_getpriority
239 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
240 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
241 #define __NR_sys_syslog __NR_syslog
242 #define __NR_sys_futex __NR_futex
243 #define __NR_sys_inotify_init __NR_inotify_init
244 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
245 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
246 #define __NR_sys_statx __NR_statx
247 
248 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
249 #define __NR__llseek __NR_lseek
250 #endif
251 
252 /* Newer kernel ports have llseek() instead of _llseek() */
253 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
254 #define TARGET_NR__llseek TARGET_NR_llseek
255 #endif
256 
257 #define __NR_sys_gettid __NR_gettid
258 _syscall0(int, sys_gettid)
259 
260 /* For the 64-bit guest on 32-bit host case we must emulate
261  * getdents using getdents64, because otherwise the host
262  * might hand us back more dirent records than we can fit
263  * into the guest buffer after structure format conversion.
264  * Otherwise we implement the guest's getdents with the host getdents, if available.
265  */
266 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
267 #define EMULATE_GETDENTS_WITH_GETDENTS
268 #endif
269 
270 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
271 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
272 #endif
273 #if (defined(TARGET_NR_getdents) && \
274       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
275     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
276 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
277 #endif
278 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
279 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
280           loff_t *, res, uint, wh);
281 #endif
282 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
283 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
284           siginfo_t *, uinfo)
285 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
286 #ifdef __NR_exit_group
287 _syscall1(int,exit_group,int,error_code)
288 #endif
289 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
290 _syscall1(int,set_tid_address,int *,tidptr)
291 #endif
292 #if defined(TARGET_NR_futex) && defined(__NR_futex)
293 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
294           const struct timespec *,timeout,int *,uaddr2,int,val3)
295 #endif
296 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
297 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
298           unsigned long *, user_mask_ptr);
299 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
300 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
301           unsigned long *, user_mask_ptr);
302 #define __NR_sys_getcpu __NR_getcpu
303 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
304 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
305           void *, arg);
306 _syscall2(int, capget, struct __user_cap_header_struct *, header,
307           struct __user_cap_data_struct *, data);
308 _syscall2(int, capset, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
311 _syscall2(int, ioprio_get, int, which, int, who)
312 #endif
313 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
314 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
315 #endif
316 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
317 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
318 #endif
319 
320 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
321 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
322           unsigned long, idx1, unsigned long, idx2)
323 #endif
324 
325 /*
326  * It is assumed that struct statx is architecture independent.
327  */
328 #if defined(TARGET_NR_statx) && defined(__NR_statx)
329 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
330           unsigned int, mask, struct target_statx *, statxbuf)
331 #endif
332 
333 static bitmask_transtbl fcntl_flags_tbl[] = {
334   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
335   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
336   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
337   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
338   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
339   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
340   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
341   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
342   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
343   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
344   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
345   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
346   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
347 #if defined(O_DIRECT)
348   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
349 #endif
350 #if defined(O_NOATIME)
351   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
352 #endif
353 #if defined(O_CLOEXEC)
354   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
355 #endif
356 #if defined(O_PATH)
357   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
358 #endif
359 #if defined(O_TMPFILE)
360   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
361 #endif
362   /* Don't terminate the list prematurely on 64-bit host+guest.  */
363 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
364   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
365 #endif
366   { 0, 0, 0, 0 }
367 };
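/*
 * This table is consumed by the bitmask translation helpers elsewhere in this
 * file (target_to_host_bitmask() and friends). Illustrative example: a guest
 * open() flag word of TARGET_O_CREAT | TARGET_O_NONBLOCK is rewritten field
 * by field into the host's O_CREAT | O_NONBLOCK, which matters on targets
 * whose flag encodings differ from the host's.
 */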
368 
369 static int sys_getcwd1(char *buf, size_t size)
370 {
371   if (getcwd(buf, size) == NULL) {
372       /* getcwd() sets errno */
373       return (-1);
374   }
375   return strlen(buf)+1;
376 }
377 
378 #ifdef TARGET_NR_utimensat
379 #if defined(__NR_utimensat)
380 #define __NR_sys_utimensat __NR_utimensat
381 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
382           const struct timespec *,tsp,int,flags)
383 #else
384 static int sys_utimensat(int dirfd, const char *pathname,
385                          const struct timespec times[2], int flags)
386 {
387     errno = ENOSYS;
388     return -1;
389 }
390 #endif
391 #endif /* TARGET_NR_utimensat */
392 
393 #ifdef TARGET_NR_renameat2
394 #if defined(__NR_renameat2)
395 #define __NR_sys_renameat2 __NR_renameat2
396 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
397           const char *, new, unsigned int, flags)
398 #else
399 static int sys_renameat2(int oldfd, const char *old,
400                          int newfd, const char *new, int flags)
401 {
402     if (flags == 0) {
403         return renameat(oldfd, old, newfd, new);
404     }
405     errno = ENOSYS;
406     return -1;
407 }
408 #endif
409 #endif /* TARGET_NR_renameat2 */
410 
411 #ifdef CONFIG_INOTIFY
412 #include <sys/inotify.h>
413 
414 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
415 static int sys_inotify_init(void)
416 {
417   return (inotify_init());
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
421 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
422 {
423   return (inotify_add_watch(fd, pathname, mask));
424 }
425 #endif
426 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
427 static int sys_inotify_rm_watch(int fd, int32_t wd)
428 {
429   return (inotify_rm_watch(fd, wd));
430 }
431 #endif
432 #ifdef CONFIG_INOTIFY1
433 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
434 static int sys_inotify_init1(int flags)
435 {
436   return (inotify_init1(flags));
437 }
438 #endif
439 #endif
440 #else
441 /* Userspace can usually survive at runtime without inotify */
442 #undef TARGET_NR_inotify_init
443 #undef TARGET_NR_inotify_init1
444 #undef TARGET_NR_inotify_add_watch
445 #undef TARGET_NR_inotify_rm_watch
446 #endif /* CONFIG_INOTIFY  */
447 
448 #if defined(TARGET_NR_prlimit64)
449 #ifndef __NR_prlimit64
450 # define __NR_prlimit64 -1
451 #endif
452 #define __NR_sys_prlimit64 __NR_prlimit64
453 /* The glibc rlimit structure may not match the one used by the underlying syscall */
454 struct host_rlimit64 {
455     uint64_t rlim_cur;
456     uint64_t rlim_max;
457 };
458 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
459           const struct host_rlimit64 *, new_limit,
460           struct host_rlimit64 *, old_limit)
461 #endif
462 
463 
464 #if defined(TARGET_NR_timer_create)
465 /* Maximum of 32 active POSIX timers allowed at any one time. */
466 static timer_t g_posix_timers[32] = { 0, } ;
467 
468 static inline int next_free_host_timer(void)
469 {
470     int k ;
471     /* FIXME: Does finding the next free slot require a lock? */
472     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
473         if (g_posix_timers[k] == 0) {
474             g_posix_timers[k] = (timer_t) 1;
475             return k;
476         }
477     }
478     return -1;
479 }
480 #endif
481 
482 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
483 #ifdef TARGET_ARM
484 static inline int regpairs_aligned(void *cpu_env, int num)
485 {
486     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
487 }
488 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
489 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
490 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
491 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
492  * of registers which translates to the same as ARM/MIPS, because we start with
493  * r3 as arg1 */
494 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
495 #elif defined(TARGET_SH4)
496 /* SH4 doesn't align register pairs, except for p{read,write}64 */
497 static inline int regpairs_aligned(void *cpu_env, int num)
498 {
499     switch (num) {
500     case TARGET_NR_pread64:
501     case TARGET_NR_pwrite64:
502         return 1;
503 
504     default:
505         return 0;
506     }
507 }
508 #elif defined(TARGET_XTENSA)
509 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
510 #else
511 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
512 #endif
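/*
 * Illustrative example of the effect: on 32-bit ARM EABI, pread64() places
 * its 64-bit offset in an aligned (even/odd) register pair, so the guest
 * argument list effectively becomes (fd, buf, count, pad, offset_lo,
 * offset_hi) and the syscall dispatch code uses regpairs_aligned() to know
 * it must skip the padding argument.
 */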
513 
514 #define ERRNO_TABLE_SIZE 1200
515 
516 /* target_to_host_errno_table[] is initialized from
517  * host_to_target_errno_table[] in syscall_init(). */
518 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
519 };
520 
521 /*
522  * This list is the union of errno values overridden in asm-<arch>/errno.h
523  * minus the errnos that are not actually generic to all archs.
524  */
525 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
526     [EAGAIN]		= TARGET_EAGAIN,
527     [EIDRM]		= TARGET_EIDRM,
528     [ECHRNG]		= TARGET_ECHRNG,
529     [EL2NSYNC]		= TARGET_EL2NSYNC,
530     [EL3HLT]		= TARGET_EL3HLT,
531     [EL3RST]		= TARGET_EL3RST,
532     [ELNRNG]		= TARGET_ELNRNG,
533     [EUNATCH]		= TARGET_EUNATCH,
534     [ENOCSI]		= TARGET_ENOCSI,
535     [EL2HLT]		= TARGET_EL2HLT,
536     [EDEADLK]		= TARGET_EDEADLK,
537     [ENOLCK]		= TARGET_ENOLCK,
538     [EBADE]		= TARGET_EBADE,
539     [EBADR]		= TARGET_EBADR,
540     [EXFULL]		= TARGET_EXFULL,
541     [ENOANO]		= TARGET_ENOANO,
542     [EBADRQC]		= TARGET_EBADRQC,
543     [EBADSLT]		= TARGET_EBADSLT,
544     [EBFONT]		= TARGET_EBFONT,
545     [ENOSTR]		= TARGET_ENOSTR,
546     [ENODATA]		= TARGET_ENODATA,
547     [ETIME]		= TARGET_ETIME,
548     [ENOSR]		= TARGET_ENOSR,
549     [ENONET]		= TARGET_ENONET,
550     [ENOPKG]		= TARGET_ENOPKG,
551     [EREMOTE]		= TARGET_EREMOTE,
552     [ENOLINK]		= TARGET_ENOLINK,
553     [EADV]		= TARGET_EADV,
554     [ESRMNT]		= TARGET_ESRMNT,
555     [ECOMM]		= TARGET_ECOMM,
556     [EPROTO]		= TARGET_EPROTO,
557     [EDOTDOT]		= TARGET_EDOTDOT,
558     [EMULTIHOP]		= TARGET_EMULTIHOP,
559     [EBADMSG]		= TARGET_EBADMSG,
560     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
561     [EOVERFLOW]		= TARGET_EOVERFLOW,
562     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
563     [EBADFD]		= TARGET_EBADFD,
564     [EREMCHG]		= TARGET_EREMCHG,
565     [ELIBACC]		= TARGET_ELIBACC,
566     [ELIBBAD]		= TARGET_ELIBBAD,
567     [ELIBSCN]		= TARGET_ELIBSCN,
568     [ELIBMAX]		= TARGET_ELIBMAX,
569     [ELIBEXEC]		= TARGET_ELIBEXEC,
570     [EILSEQ]		= TARGET_EILSEQ,
571     [ENOSYS]		= TARGET_ENOSYS,
572     [ELOOP]		= TARGET_ELOOP,
573     [ERESTART]		= TARGET_ERESTART,
574     [ESTRPIPE]		= TARGET_ESTRPIPE,
575     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
576     [EUSERS]		= TARGET_EUSERS,
577     [ENOTSOCK]		= TARGET_ENOTSOCK,
578     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
579     [EMSGSIZE]		= TARGET_EMSGSIZE,
580     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
581     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
582     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
583     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
584     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
585     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
586     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
587     [EADDRINUSE]	= TARGET_EADDRINUSE,
588     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
589     [ENETDOWN]		= TARGET_ENETDOWN,
590     [ENETUNREACH]	= TARGET_ENETUNREACH,
591     [ENETRESET]		= TARGET_ENETRESET,
592     [ECONNABORTED]	= TARGET_ECONNABORTED,
593     [ECONNRESET]	= TARGET_ECONNRESET,
594     [ENOBUFS]		= TARGET_ENOBUFS,
595     [EISCONN]		= TARGET_EISCONN,
596     [ENOTCONN]		= TARGET_ENOTCONN,
597     [EUCLEAN]		= TARGET_EUCLEAN,
598     [ENOTNAM]		= TARGET_ENOTNAM,
599     [ENAVAIL]		= TARGET_ENAVAIL,
600     [EISNAM]		= TARGET_EISNAM,
601     [EREMOTEIO]		= TARGET_EREMOTEIO,
602     [EDQUOT]            = TARGET_EDQUOT,
603     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
604     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
605     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
606     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
607     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
608     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
609     [EALREADY]		= TARGET_EALREADY,
610     [EINPROGRESS]	= TARGET_EINPROGRESS,
611     [ESTALE]		= TARGET_ESTALE,
612     [ECANCELED]		= TARGET_ECANCELED,
613     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
614     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
615 #ifdef ENOKEY
616     [ENOKEY]		= TARGET_ENOKEY,
617 #endif
618 #ifdef EKEYEXPIRED
619     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
620 #endif
621 #ifdef EKEYREVOKED
622     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
623 #endif
624 #ifdef EKEYREJECTED
625     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
626 #endif
627 #ifdef EOWNERDEAD
628     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
629 #endif
630 #ifdef ENOTRECOVERABLE
631     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
632 #endif
633 #ifdef ENOMSG
634     [ENOMSG]            = TARGET_ENOMSG,
635 #endif
636 #ifdef ERFKILL
637     [ERFKILL]           = TARGET_ERFKILL,
638 #endif
639 #ifdef EHWPOISON
640     [EHWPOISON]         = TARGET_EHWPOISON,
641 #endif
642 };
643 
644 static inline int host_to_target_errno(int err)
645 {
646     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
647         host_to_target_errno_table[err]) {
648         return host_to_target_errno_table[err];
649     }
650     return err;
651 }
652 
653 static inline int target_to_host_errno(int err)
654 {
655     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
656         target_to_host_errno_table[err]) {
657         return target_to_host_errno_table[err];
658     }
659     return err;
660 }
661 
662 static inline abi_long get_errno(abi_long ret)
663 {
664     if (ret == -1)
665         return -host_to_target_errno(errno);
666     else
667         return ret;
668 }
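/*
 * Typical usage pattern (for illustration; some_host_call is a placeholder):
 *     ret = get_errno(some_host_call(...));
 *     if (is_error(ret)) {
 *         ... ret already holds the negated target errno ...
 *     }
 * so failures reach the guest in its own errno encoding.
 */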
669 
670 const char *target_strerror(int err)
671 {
672     if (err == TARGET_ERESTARTSYS) {
673         return "To be restarted";
674     }
675     if (err == TARGET_QEMU_ESIGRETURN) {
676         return "Successful exit from sigreturn";
677     }
678 
679     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
680         return NULL;
681     }
682     return strerror(target_to_host_errno(err));
683 }
684 
685 #define safe_syscall0(type, name) \
686 static type safe_##name(void) \
687 { \
688     return safe_syscall(__NR_##name); \
689 }
690 
691 #define safe_syscall1(type, name, type1, arg1) \
692 static type safe_##name(type1 arg1) \
693 { \
694     return safe_syscall(__NR_##name, arg1); \
695 }
696 
697 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
698 static type safe_##name(type1 arg1, type2 arg2) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2); \
701 }
702 
703 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
705 { \
706     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
707 }
708 
709 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
710     type4, arg4) \
711 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
714 }
715 
716 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
717     type4, arg4, type5, arg5) \
718 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
719     type5 arg5) \
720 { \
721     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
722 }
723 
724 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
725     type4, arg4, type5, arg5, type6, arg6) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
727     type5 arg5, type6 arg6) \
728 { \
729     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
730 }
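/*
 * Example (illustrative): safe_syscall3(ssize_t, read, int, fd, void *, buff,
 * size_t, count) just below generates a safe_read() wrapper, used as
 *     ret = get_errno(safe_read(fd, host_buf, count));
 * The safe_* wrappers go through safe_syscall(), which, unlike a plain
 * syscall(), can be interrupted cleanly by a pending guest signal so the
 * call can be reported back to the guest as restartable.
 */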
731 
732 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
733 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
734 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
735               int, flags, mode_t, mode)
736 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
737               struct rusage *, rusage)
738 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
739               int, options, struct rusage *, rusage)
740 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
744               struct timespec *, tsp, const sigset_t *, sigmask,
745               size_t, sigsetsize)
746 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
747               int, maxevents, int, timeout, const sigset_t *, sigmask,
748               size_t, sigsetsize)
749 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
750               const struct timespec *,timeout,int *,uaddr2,int,val3)
751 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
752 safe_syscall2(int, kill, pid_t, pid, int, sig)
753 safe_syscall2(int, tkill, int, tid, int, sig)
754 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
755 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
758               unsigned long, pos_l, unsigned long, pos_h)
759 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
760               unsigned long, pos_l, unsigned long, pos_h)
761 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
762               socklen_t, addrlen)
763 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
764               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
765 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
766               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
767 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
768 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
769 safe_syscall2(int, flock, int, fd, int, operation)
770 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
771               const struct timespec *, uts, size_t, sigsetsize)
772 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
773               int, flags)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775               struct timespec *, rem)
776 #ifdef TARGET_NR_clock_nanosleep
777 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
778               const struct timespec *, req, struct timespec *, rem)
779 #endif
780 #ifdef __NR_ipc
781 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
782               void *, ptr, long, fifth)
783 #endif
784 #ifdef __NR_msgsnd
785 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
786               int, flags)
787 #endif
788 #ifdef __NR_msgrcv
789 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
790               long, msgtype, int, flags)
791 #endif
792 #ifdef __NR_semtimedop
793 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
794               unsigned, nsops, const struct timespec *, timeout)
795 #endif
796 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
797 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
798               size_t, len, unsigned, prio, const struct timespec *, timeout)
799 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
800               size_t, len, unsigned *, prio, const struct timespec *, timeout)
801 #endif
802 /* We do ioctl like this rather than via safe_syscall3 to preserve the
803  * "third argument might be integer or pointer or not present" behaviour of
804  * the libc function.
805  */
806 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
807 /* Similarly for fcntl. Note that callers must always:
808  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
809  *  use the flock64 struct rather than unsuffixed flock
810  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
811  */
812 #ifdef __NR_fcntl64
813 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
814 #else
815 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
816 #endif
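/*
 * Illustrative example of the rule above: to query a lock, callers do
 *     struct flock64 fl64;
 *     ret = safe_fcntl(fd, F_GETLK64, &fl64);
 * rather than F_GETLK with struct flock, so the same 64-bit-offset layout is
 * used whether __NR_fcntl64 or __NR_fcntl is the backing host syscall.
 */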
817 
818 static inline int host_to_target_sock_type(int host_type)
819 {
820     int target_type;
821 
822     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
823     case SOCK_DGRAM:
824         target_type = TARGET_SOCK_DGRAM;
825         break;
826     case SOCK_STREAM:
827         target_type = TARGET_SOCK_STREAM;
828         break;
829     default:
830         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
831         break;
832     }
833 
834 #if defined(SOCK_CLOEXEC)
835     if (host_type & SOCK_CLOEXEC) {
836         target_type |= TARGET_SOCK_CLOEXEC;
837     }
838 #endif
839 
840 #if defined(SOCK_NONBLOCK)
841     if (host_type & SOCK_NONBLOCK) {
842         target_type |= TARGET_SOCK_NONBLOCK;
843     }
844 #endif
845 
846     return target_type;
847 }
848 
849 static abi_ulong target_brk;
850 static abi_ulong target_original_brk;
851 static abi_ulong brk_page;
852 
853 void target_set_brk(abi_ulong new_brk)
854 {
855     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
856     brk_page = HOST_PAGE_ALIGN(target_brk);
857 }
858 
859 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
860 #define DEBUGF_BRK(message, args...)
861 
862 /* do_brk() must return target values and target errnos. */
863 abi_long do_brk(abi_ulong new_brk)
864 {
865     abi_long mapped_addr;
866     abi_ulong new_alloc_size;
867 
868     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
869 
870     if (!new_brk) {
871         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
872         return target_brk;
873     }
874     if (new_brk < target_original_brk) {
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
876                    target_brk);
877         return target_brk;
878     }
879 
880     /* If the new brk is less than the highest page reserved to the
881      * target heap allocation, set it and we're almost done...  */
882     if (new_brk <= brk_page) {
883         /* Heap contents are initialized to zero, as for anonymous
884          * mapped pages.  */
885         if (new_brk > target_brk) {
886             memset(g2h(target_brk), 0, new_brk - target_brk);
887         }
888         target_brk = new_brk;
889         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
890         return target_brk;
891     }
892 
893     /* We need to allocate more memory after the brk... Note that
894      * we don't use MAP_FIXED because that will map over the top of
895      * any existing mapping (like the one with the host libc or qemu
896      * itself); instead we treat "mapped but at wrong address" as
897      * a failure and unmap again.
898      */
899     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
900     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
901                                         PROT_READ|PROT_WRITE,
902                                         MAP_ANON|MAP_PRIVATE, 0, 0));
903 
904     if (mapped_addr == brk_page) {
905         /* Heap contents are initialized to zero, as for anonymous
906          * mapped pages.  Technically the new pages are already
907          * initialized to zero since they *are* anonymous mapped
908          * pages, however we have to take care with the contents that
909          * come from the remaining part of the previous page: it may
910          * contain garbage data due to a previous heap usage (grown
911          * then shrunk).  */
912         memset(g2h(target_brk), 0, brk_page - target_brk);
913 
914         target_brk = new_brk;
915         brk_page = HOST_PAGE_ALIGN(target_brk);
916         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
917             target_brk);
918         return target_brk;
919     } else if (mapped_addr != -1) {
920         /* Mapped but at wrong address, meaning there wasn't actually
921          * enough space for this brk.
922          */
923         target_munmap(mapped_addr, new_alloc_size);
924         mapped_addr = -1;
925         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
926     }
927     else {
928         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
929     }
930 
931 #if defined(TARGET_ALPHA)
932     /* We (partially) emulate OSF/1 on Alpha, which requires we
933        return a proper errno, not an unchanged brk value.  */
934     return -TARGET_ENOMEM;
935 #endif
936     /* For everything else, return the previous break. */
937     return target_brk;
938 }
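/*
 * Worked example (illustrative numbers): with target_brk == brk_page ==
 * 0x10000 and 4 KiB host pages, a brk(0x12000) request needs
 * HOST_PAGE_ALIGN(0x2000) == 0x2000 bytes of fresh anonymous memory mapped
 * exactly at brk_page; only if target_mmap() returns that address do
 * target_brk and brk_page advance, otherwise the old break (or
 * -TARGET_ENOMEM on Alpha) is returned.
 */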
939 
940 static inline abi_long copy_from_user_fdset(fd_set *fds,
941                                             abi_ulong target_fds_addr,
942                                             int n)
943 {
944     int i, nw, j, k;
945     abi_ulong b, *target_fds;
946 
947     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
948     if (!(target_fds = lock_user(VERIFY_READ,
949                                  target_fds_addr,
950                                  sizeof(abi_ulong) * nw,
951                                  1)))
952         return -TARGET_EFAULT;
953 
954     FD_ZERO(fds);
955     k = 0;
956     for (i = 0; i < nw; i++) {
957         /* grab the abi_ulong */
958         __get_user(b, &target_fds[i]);
959         for (j = 0; j < TARGET_ABI_BITS; j++) {
960             /* check the bit inside the abi_ulong */
961             if ((b >> j) & 1)
962                 FD_SET(k, fds);
963             k++;
964         }
965     }
966 
967     unlock_user(target_fds, target_fds_addr, 0);
968 
969     return 0;
970 }
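/*
 * Worked example (illustrative): with TARGET_ABI_BITS == 32 and n == 40,
 * nw == DIV_ROUND_UP(40, 32) == 2, so two guest abi_ulong words are locked
 * and their bits 0..63 are replayed one at a time into the host fd_set via
 * FD_SET(), independent of how the host lays out its own fd_set bitmap.
 */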
971 
972 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
973                                                  abi_ulong target_fds_addr,
974                                                  int n)
975 {
976     if (target_fds_addr) {
977         if (copy_from_user_fdset(fds, target_fds_addr, n))
978             return -TARGET_EFAULT;
979         *fds_ptr = fds;
980     } else {
981         *fds_ptr = NULL;
982     }
983     return 0;
984 }
985 
986 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
987                                           const fd_set *fds,
988                                           int n)
989 {
990     int i, nw, j, k;
991     abi_long v;
992     abi_ulong *target_fds;
993 
994     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
995     if (!(target_fds = lock_user(VERIFY_WRITE,
996                                  target_fds_addr,
997                                  sizeof(abi_ulong) * nw,
998                                  0)))
999         return -TARGET_EFAULT;
1000 
1001     k = 0;
1002     for (i = 0; i < nw; i++) {
1003         v = 0;
1004         for (j = 0; j < TARGET_ABI_BITS; j++) {
1005             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1006             k++;
1007         }
1008         __put_user(v, &target_fds[i]);
1009     }
1010 
1011     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1012 
1013     return 0;
1014 }
1015 
1016 #if defined(__alpha__)
1017 #define HOST_HZ 1024
1018 #else
1019 #define HOST_HZ 100
1020 #endif
1021 
1022 static inline abi_long host_to_target_clock_t(long ticks)
1023 {
1024 #if HOST_HZ == TARGET_HZ
1025     return ticks;
1026 #else
1027     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1028 #endif
1029 }
1030 
1031 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1032                                              const struct rusage *rusage)
1033 {
1034     struct target_rusage *target_rusage;
1035 
1036     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1037         return -TARGET_EFAULT;
1038     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1039     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1040     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1041     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1042     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1043     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1044     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1045     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1046     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1047     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1048     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1049     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1050     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1051     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1052     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1053     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1054     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1055     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1056     unlock_user_struct(target_rusage, target_addr, 1);
1057 
1058     return 0;
1059 }
1060 
1061 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1062 {
1063     abi_ulong target_rlim_swap;
1064     rlim_t result;
1065 
1066     target_rlim_swap = tswapal(target_rlim);
1067     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1068         return RLIM_INFINITY;
1069 
1070     result = target_rlim_swap;
1071     if (target_rlim_swap != (rlim_t)result)
1072         return RLIM_INFINITY;
1073 
1074     return result;
1075 }
1076 
1077 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1078 {
1079     abi_ulong target_rlim_swap;
1080     abi_ulong result;
1081 
1082     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1083         target_rlim_swap = TARGET_RLIM_INFINITY;
1084     else
1085         target_rlim_swap = rlim;
1086     result = tswapal(target_rlim_swap);
1087 
1088     return result;
1089 }
1090 
1091 static inline int target_to_host_resource(int code)
1092 {
1093     switch (code) {
1094     case TARGET_RLIMIT_AS:
1095         return RLIMIT_AS;
1096     case TARGET_RLIMIT_CORE:
1097         return RLIMIT_CORE;
1098     case TARGET_RLIMIT_CPU:
1099         return RLIMIT_CPU;
1100     case TARGET_RLIMIT_DATA:
1101         return RLIMIT_DATA;
1102     case TARGET_RLIMIT_FSIZE:
1103         return RLIMIT_FSIZE;
1104     case TARGET_RLIMIT_LOCKS:
1105         return RLIMIT_LOCKS;
1106     case TARGET_RLIMIT_MEMLOCK:
1107         return RLIMIT_MEMLOCK;
1108     case TARGET_RLIMIT_MSGQUEUE:
1109         return RLIMIT_MSGQUEUE;
1110     case TARGET_RLIMIT_NICE:
1111         return RLIMIT_NICE;
1112     case TARGET_RLIMIT_NOFILE:
1113         return RLIMIT_NOFILE;
1114     case TARGET_RLIMIT_NPROC:
1115         return RLIMIT_NPROC;
1116     case TARGET_RLIMIT_RSS:
1117         return RLIMIT_RSS;
1118     case TARGET_RLIMIT_RTPRIO:
1119         return RLIMIT_RTPRIO;
1120     case TARGET_RLIMIT_SIGPENDING:
1121         return RLIMIT_SIGPENDING;
1122     case TARGET_RLIMIT_STACK:
1123         return RLIMIT_STACK;
1124     default:
1125         return code;
1126     }
1127 }
1128 
1129 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1130                                               abi_ulong target_tv_addr)
1131 {
1132     struct target_timeval *target_tv;
1133 
1134     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1135         return -TARGET_EFAULT;
1136     }
1137 
1138     __get_user(tv->tv_sec, &target_tv->tv_sec);
1139     __get_user(tv->tv_usec, &target_tv->tv_usec);
1140 
1141     unlock_user_struct(target_tv, target_tv_addr, 0);
1142 
1143     return 0;
1144 }
1145 
1146 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1147                                             const struct timeval *tv)
1148 {
1149     struct target_timeval *target_tv;
1150 
1151     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1152         return -TARGET_EFAULT;
1153     }
1154 
1155     __put_user(tv->tv_sec, &target_tv->tv_sec);
1156     __put_user(tv->tv_usec, &target_tv->tv_usec);
1157 
1158     unlock_user_struct(target_tv, target_tv_addr, 1);
1159 
1160     return 0;
1161 }
1162 
1163 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1164                                              const struct timeval *tv)
1165 {
1166     struct target__kernel_sock_timeval *target_tv;
1167 
1168     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1169         return -TARGET_EFAULT;
1170     }
1171 
1172     __put_user(tv->tv_sec, &target_tv->tv_sec);
1173     __put_user(tv->tv_usec, &target_tv->tv_usec);
1174 
1175     unlock_user_struct(target_tv, target_tv_addr, 1);
1176 
1177     return 0;
1178 }
1179 
1180 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1181                                                abi_ulong target_addr)
1182 {
1183     struct target_timespec *target_ts;
1184 
1185     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1186         return -TARGET_EFAULT;
1187     }
1188     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1189     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1190     unlock_user_struct(target_ts, target_addr, 0);
1191     return 0;
1192 }
1193 
1194 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1195                                                struct timespec *host_ts)
1196 {
1197     struct target_timespec *target_ts;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1203     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1204     unlock_user_struct(target_ts, target_addr, 1);
1205     return 0;
1206 }
1207 
1208 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1209                                                  struct timespec *host_ts)
1210 {
1211     struct target__kernel_timespec *target_ts;
1212 
1213     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1214         return -TARGET_EFAULT;
1215     }
1216     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1217     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1218     unlock_user_struct(target_ts, target_addr, 1);
1219     return 0;
1220 }
1221 
1222 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1223                                                abi_ulong target_tz_addr)
1224 {
1225     struct target_timezone *target_tz;
1226 
1227     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1228         return -TARGET_EFAULT;
1229     }
1230 
1231     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1232     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1233 
1234     unlock_user_struct(target_tz, target_tz_addr, 0);
1235 
1236     return 0;
1237 }
1238 
1239 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1240 #include <mqueue.h>
1241 
1242 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1243                                               abi_ulong target_mq_attr_addr)
1244 {
1245     struct target_mq_attr *target_mq_attr;
1246 
1247     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1248                           target_mq_attr_addr, 1))
1249         return -TARGET_EFAULT;
1250 
1251     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1252     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1253     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1254     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1255 
1256     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1257 
1258     return 0;
1259 }
1260 
1261 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1262                                             const struct mq_attr *attr)
1263 {
1264     struct target_mq_attr *target_mq_attr;
1265 
1266     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1267                           target_mq_attr_addr, 0))
1268         return -TARGET_EFAULT;
1269 
1270     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1271     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1272     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1273     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1274 
1275     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1276 
1277     return 0;
1278 }
1279 #endif
1280 
1281 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1282 /* do_select() must return target values and target errnos. */
1283 static abi_long do_select(int n,
1284                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1285                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1286 {
1287     fd_set rfds, wfds, efds;
1288     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1289     struct timeval tv;
1290     struct timespec ts, *ts_ptr;
1291     abi_long ret;
1292 
1293     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1298     if (ret) {
1299         return ret;
1300     }
1301     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1302     if (ret) {
1303         return ret;
1304     }
1305 
1306     if (target_tv_addr) {
1307         if (copy_from_user_timeval(&tv, target_tv_addr))
1308             return -TARGET_EFAULT;
1309         ts.tv_sec = tv.tv_sec;
1310         ts.tv_nsec = tv.tv_usec * 1000;
1311         ts_ptr = &ts;
1312     } else {
1313         ts_ptr = NULL;
1314     }
1315 
1316     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1317                                   ts_ptr, NULL));
1318 
1319     if (!is_error(ret)) {
1320         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1321             return -TARGET_EFAULT;
1322         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1323             return -TARGET_EFAULT;
1324         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1325             return -TARGET_EFAULT;
1326 
1327         if (target_tv_addr) {
1328             tv.tv_sec = ts.tv_sec;
1329             tv.tv_usec = ts.tv_nsec / 1000;
1330             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1331                 return -TARGET_EFAULT;
1332             }
1333         }
1334     }
1335 
1336     return ret;
1337 }
1338 
1339 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1340 static abi_long do_old_select(abi_ulong arg1)
1341 {
1342     struct target_sel_arg_struct *sel;
1343     abi_ulong inp, outp, exp, tvp;
1344     long nsel;
1345 
1346     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1347         return -TARGET_EFAULT;
1348     }
1349 
1350     nsel = tswapal(sel->n);
1351     inp = tswapal(sel->inp);
1352     outp = tswapal(sel->outp);
1353     exp = tswapal(sel->exp);
1354     tvp = tswapal(sel->tvp);
1355 
1356     unlock_user_struct(sel, arg1, 0);
1357 
1358     return do_select(nsel, inp, outp, exp, tvp);
1359 }
1360 #endif
1361 #endif
1362 
1363 static abi_long do_pipe2(int host_pipe[], int flags)
1364 {
1365 #ifdef CONFIG_PIPE2
1366     return pipe2(host_pipe, flags);
1367 #else
1368     return -ENOSYS;
1369 #endif
1370 }
1371 
1372 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1373                         int flags, int is_pipe2)
1374 {
1375     int host_pipe[2];
1376     abi_long ret;
1377     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1378 
1379     if (is_error(ret))
1380         return get_errno(ret);
1381 
1382     /* Several targets have special calling conventions for the original
1383        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1384     if (!is_pipe2) {
1385 #if defined(TARGET_ALPHA)
1386         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1387         return host_pipe[0];
1388 #elif defined(TARGET_MIPS)
1389         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1390         return host_pipe[0];
1391 #elif defined(TARGET_SH4)
1392         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1393         return host_pipe[0];
1394 #elif defined(TARGET_SPARC)
1395         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1396         return host_pipe[0];
1397 #endif
1398     }
1399 
1400     if (put_user_s32(host_pipe[0], pipedes)
1401         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1402         return -TARGET_EFAULT;
1403     return get_errno(ret);
1404 }
1405 
1406 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1407                                               abi_ulong target_addr,
1408                                               socklen_t len)
1409 {
1410     struct target_ip_mreqn *target_smreqn;
1411 
1412     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1413     if (!target_smreqn)
1414         return -TARGET_EFAULT;
1415     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1416     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1417     if (len == sizeof(struct target_ip_mreqn))
1418         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1419     unlock_user(target_smreqn, target_addr, 0);
1420 
1421     return 0;
1422 }
1423 
1424 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1425                                                abi_ulong target_addr,
1426                                                socklen_t len)
1427 {
1428     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1429     sa_family_t sa_family;
1430     struct target_sockaddr *target_saddr;
1431 
1432     if (fd_trans_target_to_host_addr(fd)) {
1433         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1434     }
1435 
1436     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1437     if (!target_saddr)
1438         return -TARGET_EFAULT;
1439 
1440     sa_family = tswap16(target_saddr->sa_family);
1441 
1442     /* Oops. The caller might send an incomplete sun_path; sun_path
1443      * must be terminated by \0 (see the manual page), but
1444      * unfortunately it is quite common to specify sockaddr_un
1445      * length as "strlen(x->sun_path)" while it should be
1446      * "strlen(...) + 1". We'll fix that here if needed.
1447      * The Linux kernel applies a similar fixup.
1448      */
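    /*
     * For example (illustrative): a guest that passes
     *     len == offsetof(struct sockaddr_un, sun_path) + strlen(path)
     * leaves the terminating NUL just outside the reported length; the check
     * below sees that cp[len-1] is non-zero while cp[len] is zero and widens
     * len by one byte so the host kernel gets a properly terminated sun_path.
     */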
1449 
1450     if (sa_family == AF_UNIX) {
1451         if (len < unix_maxlen && len > 0) {
1452             char *cp = (char*)target_saddr;
1453 
1454             if ( cp[len-1] && !cp[len] )
1455                 len++;
1456         }
1457         if (len > unix_maxlen)
1458             len = unix_maxlen;
1459     }
1460 
1461     memcpy(addr, target_saddr, len);
1462     addr->sa_family = sa_family;
1463     if (sa_family == AF_NETLINK) {
1464         struct sockaddr_nl *nladdr;
1465 
1466         nladdr = (struct sockaddr_nl *)addr;
1467         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1468         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1469     } else if (sa_family == AF_PACKET) {
1470         struct target_sockaddr_ll *lladdr;
1471 
1472         lladdr = (struct target_sockaddr_ll *)addr;
1473         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1474         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1475     }
1476     unlock_user(target_saddr, target_addr, 0);
1477 
1478     return 0;
1479 }
1480 
1481 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1482                                                struct sockaddr *addr,
1483                                                socklen_t len)
1484 {
1485     struct target_sockaddr *target_saddr;
1486 
1487     if (len == 0) {
1488         return 0;
1489     }
1490     assert(addr);
1491 
1492     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1493     if (!target_saddr)
1494         return -TARGET_EFAULT;
1495     memcpy(target_saddr, addr, len);
1496     if (len >= offsetof(struct target_sockaddr, sa_family) +
1497         sizeof(target_saddr->sa_family)) {
1498         target_saddr->sa_family = tswap16(addr->sa_family);
1499     }
1500     if (addr->sa_family == AF_NETLINK &&
1501         len >= sizeof(struct target_sockaddr_nl)) {
1502         struct target_sockaddr_nl *target_nl =
1503                (struct target_sockaddr_nl *)target_saddr;
1504         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1505         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1506     } else if (addr->sa_family == AF_PACKET) {
1507         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1508         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1509         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1510     } else if (addr->sa_family == AF_INET6 &&
1511                len >= sizeof(struct target_sockaddr_in6)) {
1512         struct target_sockaddr_in6 *target_in6 =
1513                (struct target_sockaddr_in6 *)target_saddr;
1514         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1515     }
1516     unlock_user(target_saddr, target_addr, len);
1517 
1518     return 0;
1519 }
1520 
1521 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1522                                            struct target_msghdr *target_msgh)
1523 {
1524     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1525     abi_long msg_controllen;
1526     abi_ulong target_cmsg_addr;
1527     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1528     socklen_t space = 0;
1529 
1530     msg_controllen = tswapal(target_msgh->msg_controllen);
1531     if (msg_controllen < sizeof (struct target_cmsghdr))
1532         goto the_end;
1533     target_cmsg_addr = tswapal(target_msgh->msg_control);
1534     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1535     target_cmsg_start = target_cmsg;
1536     if (!target_cmsg)
1537         return -TARGET_EFAULT;
1538 
1539     while (cmsg && target_cmsg) {
1540         void *data = CMSG_DATA(cmsg);
1541         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1542 
1543         int len = tswapal(target_cmsg->cmsg_len)
1544             - sizeof(struct target_cmsghdr);
1545 
1546         space += CMSG_SPACE(len);
1547         if (space > msgh->msg_controllen) {
1548             space -= CMSG_SPACE(len);
1549             /* This is a QEMU bug, since we allocated the payload
1550              * area ourselves (unlike overflow in host-to-target
1551              * conversion, which is just the guest giving us a buffer
1552              * that's too small). It can't happen for the payload types
1553              * we currently support; if it becomes an issue in future
1554              * we would need to improve our allocation strategy to
1555              * something more intelligent than "twice the size of the
1556              * target buffer we're reading from".
1557              */
1558             gemu_log("Host cmsg overflow\n");
1559             break;
1560         }
1561 
1562         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1563             cmsg->cmsg_level = SOL_SOCKET;
1564         } else {
1565             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1566         }
1567         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1568         cmsg->cmsg_len = CMSG_LEN(len);
1569 
1570         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1571             int *fd = (int *)data;
1572             int *target_fd = (int *)target_data;
1573             int i, numfds = len / sizeof(int);
1574 
1575             for (i = 0; i < numfds; i++) {
1576                 __get_user(fd[i], target_fd + i);
1577             }
1578         } else if (cmsg->cmsg_level == SOL_SOCKET
1579                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1580             struct ucred *cred = (struct ucred *)data;
1581             struct target_ucred *target_cred =
1582                 (struct target_ucred *)target_data;
1583 
1584             __get_user(cred->pid, &target_cred->pid);
1585             __get_user(cred->uid, &target_cred->uid);
1586             __get_user(cred->gid, &target_cred->gid);
1587         } else {
1588             gemu_log("Unsupported ancillary data: %d/%d\n",
1589                                         cmsg->cmsg_level, cmsg->cmsg_type);
1590             memcpy(data, target_data, len);
1591         }
1592 
1593         cmsg = CMSG_NXTHDR(msgh, cmsg);
1594         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1595                                          target_cmsg_start);
1596     }
1597     unlock_user(target_cmsg, target_cmsg_addr, 0);
1598  the_end:
1599     msgh->msg_controllen = space;
1600     return 0;
1601 }
1602 
1603 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1604                                            struct msghdr *msgh)
1605 {
1606     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1607     abi_long msg_controllen;
1608     abi_ulong target_cmsg_addr;
1609     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1610     socklen_t space = 0;
1611 
1612     msg_controllen = tswapal(target_msgh->msg_controllen);
1613     if (msg_controllen < sizeof (struct target_cmsghdr))
1614         goto the_end;
1615     target_cmsg_addr = tswapal(target_msgh->msg_control);
1616     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1617     target_cmsg_start = target_cmsg;
1618     if (!target_cmsg)
1619         return -TARGET_EFAULT;
1620 
1621     while (cmsg && target_cmsg) {
1622         void *data = CMSG_DATA(cmsg);
1623         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1624 
1625         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1626         int tgt_len, tgt_space;
1627 
1628         /* We never copy a half-header but may copy half-data;
1629          * this is Linux's behaviour in put_cmsg(). Note that
1630          * truncation here is a guest problem (which we report
1631          * to the guest via the CTRUNC bit), unlike truncation
1632          * in target_to_host_cmsg, which is a QEMU bug.
1633          */
1634         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1635             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1636             break;
1637         }
1638 
1639         if (cmsg->cmsg_level == SOL_SOCKET) {
1640             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1641         } else {
1642             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1643         }
1644         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1645 
1646         /* Payload types which need a different size of payload on
1647          * the target must adjust tgt_len here.
1648          */
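             /* For example, a 64-bit host's struct timeval is 16 bytes while
              * a 32-bit target's struct target_timeval is 8 bytes, which is
              * why SO_TIMESTAMP overrides tgt_len below.
              */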
1649         tgt_len = len;
1650         switch (cmsg->cmsg_level) {
1651         case SOL_SOCKET:
1652             switch (cmsg->cmsg_type) {
1653             case SO_TIMESTAMP:
1654                 tgt_len = sizeof(struct target_timeval);
1655                 break;
1656             default:
1657                 break;
1658             }
1659             break;
1660         default:
1661             break;
1662         }
1663 
1664         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1665             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1666             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1667         }
1668 
1669         /* We must now copy-and-convert len bytes of payload
1670          * into tgt_len bytes of destination space. Bear in mind
1671          * that in both source and destination we may be dealing
1672          * with a truncated value!
1673          */
1674         switch (cmsg->cmsg_level) {
1675         case SOL_SOCKET:
1676             switch (cmsg->cmsg_type) {
1677             case SCM_RIGHTS:
1678             {
1679                 int *fd = (int *)data;
1680                 int *target_fd = (int *)target_data;
1681                 int i, numfds = tgt_len / sizeof(int);
1682 
1683                 for (i = 0; i < numfds; i++) {
1684                     __put_user(fd[i], target_fd + i);
1685                 }
1686                 break;
1687             }
1688             case SO_TIMESTAMP:
1689             {
1690                 struct timeval *tv = (struct timeval *)data;
1691                 struct target_timeval *target_tv =
1692                     (struct target_timeval *)target_data;
1693 
1694                 if (len != sizeof(struct timeval) ||
1695                     tgt_len != sizeof(struct target_timeval)) {
1696                     goto unimplemented;
1697                 }
1698 
1699                 /* copy struct timeval to target */
1700                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1701                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1702                 break;
1703             }
1704             case SCM_CREDENTIALS:
1705             {
1706                 struct ucred *cred = (struct ucred *)data;
1707                 struct target_ucred *target_cred =
1708                     (struct target_ucred *)target_data;
1709 
1710                 __put_user(cred->pid, &target_cred->pid);
1711                 __put_user(cred->uid, &target_cred->uid);
1712                 __put_user(cred->gid, &target_cred->gid);
1713                 break;
1714             }
1715             default:
1716                 goto unimplemented;
1717             }
1718             break;
1719 
1720         case SOL_IP:
1721             switch (cmsg->cmsg_type) {
1722             case IP_TTL:
1723             {
1724                 uint32_t *v = (uint32_t *)data;
1725                 uint32_t *t_int = (uint32_t *)target_data;
1726 
1727                 if (len != sizeof(uint32_t) ||
1728                     tgt_len != sizeof(uint32_t)) {
1729                     goto unimplemented;
1730                 }
1731                 __put_user(*v, t_int);
1732                 break;
1733             }
1734             case IP_RECVERR:
1735             {
1736                 struct errhdr_t {
1737                    struct sock_extended_err ee;
1738                    struct sockaddr_in offender;
1739                 };
1740                 struct errhdr_t *errh = (struct errhdr_t *)data;
1741                 struct errhdr_t *target_errh =
1742                     (struct errhdr_t *)target_data;
1743 
1744                 if (len != sizeof(struct errhdr_t) ||
1745                     tgt_len != sizeof(struct errhdr_t)) {
1746                     goto unimplemented;
1747                 }
1748                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1749                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1750                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1751                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1752                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1753                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1754                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1755                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1756                     (void *) &errh->offender, sizeof(errh->offender));
1757                 break;
1758             }
1759             default:
1760                 goto unimplemented;
1761             }
1762             break;
1763 
1764         case SOL_IPV6:
1765             switch (cmsg->cmsg_type) {
1766             case IPV6_HOPLIMIT:
1767             {
1768                 uint32_t *v = (uint32_t *)data;
1769                 uint32_t *t_int = (uint32_t *)target_data;
1770 
1771                 if (len != sizeof(uint32_t) ||
1772                     tgt_len != sizeof(uint32_t)) {
1773                     goto unimplemented;
1774                 }
1775                 __put_user(*v, t_int);
1776                 break;
1777             }
1778             case IPV6_RECVERR:
1779             {
1780                 struct errhdr6_t {
1781                    struct sock_extended_err ee;
1782                    struct sockaddr_in6 offender;
1783                 };
1784                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1785                 struct errhdr6_t *target_errh =
1786                     (struct errhdr6_t *)target_data;
1787 
1788                 if (len != sizeof(struct errhdr6_t) ||
1789                     tgt_len != sizeof(struct errhdr6_t)) {
1790                     goto unimplemented;
1791                 }
1792                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1793                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1794                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1795                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1796                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1797                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1798                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1799                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1800                     (void *) &errh->offender, sizeof(errh->offender));
1801                 break;
1802             }
1803             default:
1804                 goto unimplemented;
1805             }
1806             break;
1807 
1808         default:
1809         unimplemented:
1810             gemu_log("Unsupported ancillary data: %d/%d\n",
1811                                         cmsg->cmsg_level, cmsg->cmsg_type);
1812             memcpy(target_data, data, MIN(len, tgt_len));
1813             if (tgt_len > len) {
1814                 memset(target_data + len, 0, tgt_len - len);
1815             }
1816         }
1817 
1818         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1819         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1820         if (msg_controllen < tgt_space) {
1821             tgt_space = msg_controllen;
1822         }
1823         msg_controllen -= tgt_space;
1824         space += tgt_space;
1825         cmsg = CMSG_NXTHDR(msgh, cmsg);
1826         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1827                                          target_cmsg_start);
1828     }
1829     unlock_user(target_cmsg, target_cmsg_addr, space);
1830  the_end:
1831     target_msgh->msg_controllen = tswapal(space);
1832     return 0;
1833 }
1834 
1835 /* do_setsockopt() Must return target values and target errnos. */
1836 static abi_long do_setsockopt(int sockfd, int level, int optname,
1837                               abi_ulong optval_addr, socklen_t optlen)
1838 {
1839     abi_long ret;
1840     int val;
1841     struct ip_mreqn *ip_mreq;
1842     struct ip_mreq_source *ip_mreq_source;
1843 
1844     switch(level) {
1845     case SOL_TCP:
1846         /* TCP options all take an 'int' value.  */
1847         if (optlen < sizeof(uint32_t))
1848             return -TARGET_EINVAL;
1849 
1850         if (get_user_u32(val, optval_addr))
1851             return -TARGET_EFAULT;
1852         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1853         break;
1854     case SOL_IP:
1855         switch(optname) {
1856         case IP_TOS:
1857         case IP_TTL:
1858         case IP_HDRINCL:
1859         case IP_ROUTER_ALERT:
1860         case IP_RECVOPTS:
1861         case IP_RETOPTS:
1862         case IP_PKTINFO:
1863         case IP_MTU_DISCOVER:
1864         case IP_RECVERR:
1865         case IP_RECVTTL:
1866         case IP_RECVTOS:
1867 #ifdef IP_FREEBIND
1868         case IP_FREEBIND:
1869 #endif
1870         case IP_MULTICAST_TTL:
1871         case IP_MULTICAST_LOOP:
1872             val = 0;
1873             if (optlen >= sizeof(uint32_t)) {
1874                 if (get_user_u32(val, optval_addr))
1875                     return -TARGET_EFAULT;
1876             } else if (optlen >= 1) {
1877                 if (get_user_u8(val, optval_addr))
1878                     return -TARGET_EFAULT;
1879             }
1880             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1881             break;
1882         case IP_ADD_MEMBERSHIP:
1883         case IP_DROP_MEMBERSHIP:
1884             if (optlen < sizeof (struct target_ip_mreq) ||
1885                 optlen > sizeof (struct target_ip_mreqn))
1886                 return -TARGET_EINVAL;
1887 
1888             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1889             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1890             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1891             break;
1892 
1893         case IP_BLOCK_SOURCE:
1894         case IP_UNBLOCK_SOURCE:
1895         case IP_ADD_SOURCE_MEMBERSHIP:
1896         case IP_DROP_SOURCE_MEMBERSHIP:
1897             if (optlen != sizeof (struct target_ip_mreq_source))
1898                 return -TARGET_EINVAL;
1899 
1900             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
1901             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1902             unlock_user(ip_mreq_source, optval_addr, 0);
1903             break;
1904 
1905         default:
1906             goto unimplemented;
1907         }
1908         break;
1909     case SOL_IPV6:
1910         switch (optname) {
1911         case IPV6_MTU_DISCOVER:
1912         case IPV6_MTU:
1913         case IPV6_V6ONLY:
1914         case IPV6_RECVPKTINFO:
1915         case IPV6_UNICAST_HOPS:
1916         case IPV6_MULTICAST_HOPS:
1917         case IPV6_MULTICAST_LOOP:
1918         case IPV6_RECVERR:
1919         case IPV6_RECVHOPLIMIT:
1920         case IPV6_2292HOPLIMIT:
1921         case IPV6_CHECKSUM:
1922         case IPV6_ADDRFORM:
1923         case IPV6_2292PKTINFO:
1924         case IPV6_RECVTCLASS:
1925         case IPV6_RECVRTHDR:
1926         case IPV6_2292RTHDR:
1927         case IPV6_RECVHOPOPTS:
1928         case IPV6_2292HOPOPTS:
1929         case IPV6_RECVDSTOPTS:
1930         case IPV6_2292DSTOPTS:
1931         case IPV6_TCLASS:
1932 #ifdef IPV6_RECVPATHMTU
1933         case IPV6_RECVPATHMTU:
1934 #endif
1935 #ifdef IPV6_TRANSPARENT
1936         case IPV6_TRANSPARENT:
1937 #endif
1938 #ifdef IPV6_FREEBIND
1939         case IPV6_FREEBIND:
1940 #endif
1941 #ifdef IPV6_RECVORIGDSTADDR
1942         case IPV6_RECVORIGDSTADDR:
1943 #endif
1944             val = 0;
1945             if (optlen < sizeof(uint32_t)) {
1946                 return -TARGET_EINVAL;
1947             }
1948             if (get_user_u32(val, optval_addr)) {
1949                 return -TARGET_EFAULT;
1950             }
1951             ret = get_errno(setsockopt(sockfd, level, optname,
1952                                        &val, sizeof(val)));
1953             break;
1954         case IPV6_PKTINFO:
1955         {
1956             struct in6_pktinfo pki;
1957 
1958             if (optlen < sizeof(pki)) {
1959                 return -TARGET_EINVAL;
1960             }
1961 
1962             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1963                 return -TARGET_EFAULT;
1964             }
1965 
1966             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1967 
1968             ret = get_errno(setsockopt(sockfd, level, optname,
1969                                        &pki, sizeof(pki)));
1970             break;
1971         }
1972         case IPV6_ADD_MEMBERSHIP:
1973         case IPV6_DROP_MEMBERSHIP:
1974         {
1975             struct ipv6_mreq ipv6mreq;
1976 
1977             if (optlen < sizeof(ipv6mreq)) {
1978                 return -TARGET_EINVAL;
1979             }
1980 
1981             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1982                 return -TARGET_EFAULT;
1983             }
1984 
1985             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1986 
1987             ret = get_errno(setsockopt(sockfd, level, optname,
1988                                        &ipv6mreq, sizeof(ipv6mreq)));
1989             break;
1990         }
1991         default:
1992             goto unimplemented;
1993         }
1994         break;
1995     case SOL_ICMPV6:
1996         switch (optname) {
1997         case ICMPV6_FILTER:
1998         {
1999             struct icmp6_filter icmp6f;
2000 
2001             if (optlen > sizeof(icmp6f)) {
2002                 optlen = sizeof(icmp6f);
2003             }
2004 
2005             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2006                 return -TARGET_EFAULT;
2007             }
2008 
2009             for (val = 0; val < 8; val++) {
2010                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2011             }
2012 
2013             ret = get_errno(setsockopt(sockfd, level, optname,
2014                                        &icmp6f, optlen));
2015             break;
2016         }
2017         default:
2018             goto unimplemented;
2019         }
2020         break;
2021     case SOL_RAW:
2022         switch (optname) {
2023         case ICMP_FILTER:
2024         case IPV6_CHECKSUM:
2025             /* these take a u32 value */
2026             if (optlen < sizeof(uint32_t)) {
2027                 return -TARGET_EINVAL;
2028             }
2029 
2030             if (get_user_u32(val, optval_addr)) {
2031                 return -TARGET_EFAULT;
2032             }
2033             ret = get_errno(setsockopt(sockfd, level, optname,
2034                                        &val, sizeof(val)));
2035             break;
2036 
2037         default:
2038             goto unimplemented;
2039         }
2040         break;
2041 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2042     case SOL_ALG:
2043         switch (optname) {
2044         case ALG_SET_KEY:
2045         {
2046             char *alg_key = g_malloc(optlen);
2047 
2048             if (!alg_key) {
2049                 return -TARGET_ENOMEM;
2050             }
2051             if (copy_from_user(alg_key, optval_addr, optlen)) {
2052                 g_free(alg_key);
2053                 return -TARGET_EFAULT;
2054             }
2055             ret = get_errno(setsockopt(sockfd, level, optname,
2056                                        alg_key, optlen));
2057             g_free(alg_key);
2058             break;
2059         }
2060         case ALG_SET_AEAD_AUTHSIZE:
2061         {
2062             ret = get_errno(setsockopt(sockfd, level, optname,
2063                                        NULL, optlen));
2064             break;
2065         }
2066         default:
2067             goto unimplemented;
2068         }
2069         break;
2070 #endif
2071     case TARGET_SOL_SOCKET:
2072         switch (optname) {
2073         case TARGET_SO_RCVTIMEO:
2074         {
2075                 struct timeval tv;
2076 
2077                 optname = SO_RCVTIMEO;
2078 
2079 set_timeout:
2080                 if (optlen != sizeof(struct target_timeval)) {
2081                     return -TARGET_EINVAL;
2082                 }
2083 
2084                 if (copy_from_user_timeval(&tv, optval_addr)) {
2085                     return -TARGET_EFAULT;
2086                 }
2087 
2088                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2089                                 &tv, sizeof(tv)));
2090                 return ret;
2091         }
2092         case TARGET_SO_SNDTIMEO:
2093                 optname = SO_SNDTIMEO;
2094                 goto set_timeout;
2095         case TARGET_SO_ATTACH_FILTER:
2096         {
2097                 struct target_sock_fprog *tfprog;
2098                 struct target_sock_filter *tfilter;
2099                 struct sock_fprog fprog;
2100                 struct sock_filter *filter;
2101                 int i;
2102 
2103                 if (optlen != sizeof(*tfprog)) {
2104                     return -TARGET_EINVAL;
2105                 }
2106                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2107                     return -TARGET_EFAULT;
2108                 }
2109                 if (!lock_user_struct(VERIFY_READ, tfilter,
2110                                       tswapal(tfprog->filter), 0)) {
2111                     unlock_user_struct(tfprog, optval_addr, 1);
2112                     return -TARGET_EFAULT;
2113                 }
2114 
2115                 fprog.len = tswap16(tfprog->len);
2116                 filter = g_try_new(struct sock_filter, fprog.len);
2117                 if (filter == NULL) {
2118                     unlock_user_struct(tfilter, tfprog->filter, 1);
2119                     unlock_user_struct(tfprog, optval_addr, 1);
2120                     return -TARGET_ENOMEM;
2121                 }
2122                 for (i = 0; i < fprog.len; i++) {
2123                     filter[i].code = tswap16(tfilter[i].code);
2124                     filter[i].jt = tfilter[i].jt;
2125                     filter[i].jf = tfilter[i].jf;
2126                     filter[i].k = tswap32(tfilter[i].k);
2127                 }
2128                 fprog.filter = filter;
2129 
2130                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2131                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2132                 g_free(filter);
2133 
2134                 unlock_user_struct(tfilter, tfprog->filter, 1);
2135                 unlock_user_struct(tfprog, optval_addr, 1);
2136                 return ret;
2137         }
2138 	case TARGET_SO_BINDTODEVICE:
2139 	{
2140 		char *dev_ifname, *addr_ifname;
2141 
2142 		if (optlen > IFNAMSIZ - 1) {
2143 		    optlen = IFNAMSIZ - 1;
2144 		}
2145 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2146 		if (!dev_ifname) {
2147 		    return -TARGET_EFAULT;
2148 		}
2149 		optname = SO_BINDTODEVICE;
2150 		addr_ifname = alloca(IFNAMSIZ);
2151 		memcpy(addr_ifname, dev_ifname, optlen);
2152 		addr_ifname[optlen] = 0;
2153 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2154                                            addr_ifname, optlen));
2155 		unlock_user (dev_ifname, optval_addr, 0);
2156 		return ret;
2157 	}
2158         case TARGET_SO_LINGER:
2159         {
2160                 struct linger lg;
2161                 struct target_linger *tlg;
2162 
2163                 if (optlen != sizeof(struct target_linger)) {
2164                     return -TARGET_EINVAL;
2165                 }
2166                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2167                     return -TARGET_EFAULT;
2168                 }
2169                 __get_user(lg.l_onoff, &tlg->l_onoff);
2170                 __get_user(lg.l_linger, &tlg->l_linger);
2171                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2172                                 &lg, sizeof(lg)));
2173                 unlock_user_struct(tlg, optval_addr, 0);
2174                 return ret;
2175         }
2176             /* Options with 'int' argument.  */
2177         case TARGET_SO_DEBUG:
2178 		optname = SO_DEBUG;
2179 		break;
2180         case TARGET_SO_REUSEADDR:
2181 		optname = SO_REUSEADDR;
2182 		break;
2183 #ifdef SO_REUSEPORT
2184         case TARGET_SO_REUSEPORT:
2185                 optname = SO_REUSEPORT;
2186                 break;
2187 #endif
2188         case TARGET_SO_TYPE:
2189 		optname = SO_TYPE;
2190 		break;
2191         case TARGET_SO_ERROR:
2192 		optname = SO_ERROR;
2193 		break;
2194         case TARGET_SO_DONTROUTE:
2195 		optname = SO_DONTROUTE;
2196 		break;
2197         case TARGET_SO_BROADCAST:
2198 		optname = SO_BROADCAST;
2199 		break;
2200         case TARGET_SO_SNDBUF:
2201 		optname = SO_SNDBUF;
2202 		break;
2203         case TARGET_SO_SNDBUFFORCE:
2204                 optname = SO_SNDBUFFORCE;
2205                 break;
2206         case TARGET_SO_RCVBUF:
2207 		optname = SO_RCVBUF;
2208 		break;
2209         case TARGET_SO_RCVBUFFORCE:
2210                 optname = SO_RCVBUFFORCE;
2211                 break;
2212         case TARGET_SO_KEEPALIVE:
2213 		optname = SO_KEEPALIVE;
2214 		break;
2215         case TARGET_SO_OOBINLINE:
2216 		optname = SO_OOBINLINE;
2217 		break;
2218         case TARGET_SO_NO_CHECK:
2219 		optname = SO_NO_CHECK;
2220 		break;
2221         case TARGET_SO_PRIORITY:
2222 		optname = SO_PRIORITY;
2223 		break;
2224 #ifdef SO_BSDCOMPAT
2225         case TARGET_SO_BSDCOMPAT:
2226 		optname = SO_BSDCOMPAT;
2227 		break;
2228 #endif
2229         case TARGET_SO_PASSCRED:
2230 		optname = SO_PASSCRED;
2231 		break;
2232         case TARGET_SO_PASSSEC:
2233                 optname = SO_PASSSEC;
2234                 break;
2235         case TARGET_SO_TIMESTAMP:
2236 		optname = SO_TIMESTAMP;
2237 		break;
2238         case TARGET_SO_RCVLOWAT:
2239 		optname = SO_RCVLOWAT;
2240 		break;
2241         default:
2242             goto unimplemented;
2243         }
2244 	if (optlen < sizeof(uint32_t))
2245             return -TARGET_EINVAL;
2246 
2247 	if (get_user_u32(val, optval_addr))
2248             return -TARGET_EFAULT;
2249 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2250         break;
2251 #ifdef SOL_NETLINK
2252     case SOL_NETLINK:
2253         switch (optname) {
2254         case NETLINK_PKTINFO:
2255         case NETLINK_ADD_MEMBERSHIP:
2256         case NETLINK_DROP_MEMBERSHIP:
2257         case NETLINK_BROADCAST_ERROR:
2258         case NETLINK_NO_ENOBUFS:
2259 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2260         case NETLINK_LISTEN_ALL_NSID:
2261         case NETLINK_CAP_ACK:
2262 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2263 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2264         case NETLINK_EXT_ACK:
2265 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2266 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2267         case NETLINK_GET_STRICT_CHK:
2268 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2269             break;
2270         default:
2271             goto unimplemented;
2272         }
2273         val = 0;
2274         if (optlen < sizeof(uint32_t)) {
2275             return -TARGET_EINVAL;
2276         }
2277         if (get_user_u32(val, optval_addr)) {
2278             return -TARGET_EFAULT;
2279         }
2280         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2281                                    sizeof(val)));
2282         break;
2283 #endif /* SOL_NETLINK */
2284     default:
2285     unimplemented:
2286         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2287         ret = -TARGET_ENOPROTOOPT;
2288     }
2289     return ret;
2290 }
2291 
2292 /* do_getsockopt() Must return target values and target errnos. */
2293 static abi_long do_getsockopt(int sockfd, int level, int optname,
2294                               abi_ulong optval_addr, abi_ulong optlen)
2295 {
2296     abi_long ret;
2297     int len, val;
2298     socklen_t lv;
2299 
2300     switch(level) {
2301     case TARGET_SOL_SOCKET:
2302         level = SOL_SOCKET;
2303         switch (optname) {
2304         /* These don't just return a single integer */
2305         case TARGET_SO_RCVTIMEO:
2306         case TARGET_SO_SNDTIMEO:
2307         case TARGET_SO_PEERNAME:
2308             goto unimplemented;
2309         case TARGET_SO_PEERCRED: {
2310             struct ucred cr;
2311             socklen_t crlen;
2312             struct target_ucred *tcr;
2313 
2314             if (get_user_u32(len, optlen)) {
2315                 return -TARGET_EFAULT;
2316             }
2317             if (len < 0) {
2318                 return -TARGET_EINVAL;
2319             }
2320 
2321             crlen = sizeof(cr);
2322             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2323                                        &cr, &crlen));
2324             if (ret < 0) {
2325                 return ret;
2326             }
2327             if (len > crlen) {
2328                 len = crlen;
2329             }
2330             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2331                 return -TARGET_EFAULT;
2332             }
2333             __put_user(cr.pid, &tcr->pid);
2334             __put_user(cr.uid, &tcr->uid);
2335             __put_user(cr.gid, &tcr->gid);
2336             unlock_user_struct(tcr, optval_addr, 1);
2337             if (put_user_u32(len, optlen)) {
2338                 return -TARGET_EFAULT;
2339             }
2340             break;
2341         }
2342         case TARGET_SO_LINGER:
2343         {
2344             struct linger lg;
2345             socklen_t lglen;
2346             struct target_linger *tlg;
2347 
2348             if (get_user_u32(len, optlen)) {
2349                 return -TARGET_EFAULT;
2350             }
2351             if (len < 0) {
2352                 return -TARGET_EINVAL;
2353             }
2354 
2355             lglen = sizeof(lg);
2356             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2357                                        &lg, &lglen));
2358             if (ret < 0) {
2359                 return ret;
2360             }
2361             if (len > lglen) {
2362                 len = lglen;
2363             }
2364             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2365                 return -TARGET_EFAULT;
2366             }
2367             __put_user(lg.l_onoff, &tlg->l_onoff);
2368             __put_user(lg.l_linger, &tlg->l_linger);
2369             unlock_user_struct(tlg, optval_addr, 1);
2370             if (put_user_u32(len, optlen)) {
2371                 return -TARGET_EFAULT;
2372             }
2373             break;
2374         }
2375         /* Options with 'int' argument.  */
2376         case TARGET_SO_DEBUG:
2377             optname = SO_DEBUG;
2378             goto int_case;
2379         case TARGET_SO_REUSEADDR:
2380             optname = SO_REUSEADDR;
2381             goto int_case;
2382 #ifdef SO_REUSEPORT
2383         case TARGET_SO_REUSEPORT:
2384             optname = SO_REUSEPORT;
2385             goto int_case;
2386 #endif
2387         case TARGET_SO_TYPE:
2388             optname = SO_TYPE;
2389             goto int_case;
2390         case TARGET_SO_ERROR:
2391             optname = SO_ERROR;
2392             goto int_case;
2393         case TARGET_SO_DONTROUTE:
2394             optname = SO_DONTROUTE;
2395             goto int_case;
2396         case TARGET_SO_BROADCAST:
2397             optname = SO_BROADCAST;
2398             goto int_case;
2399         case TARGET_SO_SNDBUF:
2400             optname = SO_SNDBUF;
2401             goto int_case;
2402         case TARGET_SO_RCVBUF:
2403             optname = SO_RCVBUF;
2404             goto int_case;
2405         case TARGET_SO_KEEPALIVE:
2406             optname = SO_KEEPALIVE;
2407             goto int_case;
2408         case TARGET_SO_OOBINLINE:
2409             optname = SO_OOBINLINE;
2410             goto int_case;
2411         case TARGET_SO_NO_CHECK:
2412             optname = SO_NO_CHECK;
2413             goto int_case;
2414         case TARGET_SO_PRIORITY:
2415             optname = SO_PRIORITY;
2416             goto int_case;
2417 #ifdef SO_BSDCOMPAT
2418         case TARGET_SO_BSDCOMPAT:
2419             optname = SO_BSDCOMPAT;
2420             goto int_case;
2421 #endif
2422         case TARGET_SO_PASSCRED:
2423             optname = SO_PASSCRED;
2424             goto int_case;
2425         case TARGET_SO_TIMESTAMP:
2426             optname = SO_TIMESTAMP;
2427             goto int_case;
2428         case TARGET_SO_RCVLOWAT:
2429             optname = SO_RCVLOWAT;
2430             goto int_case;
2431         case TARGET_SO_ACCEPTCONN:
2432             optname = SO_ACCEPTCONN;
2433             goto int_case;
2434         default:
2435             goto int_case;
2436         }
2437         break;
2438     case SOL_TCP:
2439         /* TCP options all take an 'int' value.  */
2440     int_case:
2441         if (get_user_u32(len, optlen))
2442             return -TARGET_EFAULT;
2443         if (len < 0)
2444             return -TARGET_EINVAL;
2445         lv = sizeof(lv);
2446         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2447         if (ret < 0)
2448             return ret;
2449         if (optname == SO_TYPE) {
2450             val = host_to_target_sock_type(val);
2451         }
2452         if (len > lv)
2453             len = lv;
2454         if (len == 4) {
2455             if (put_user_u32(val, optval_addr))
2456                 return -TARGET_EFAULT;
2457         } else {
2458             if (put_user_u8(val, optval_addr))
2459                 return -TARGET_EFAULT;
2460         }
2461         if (put_user_u32(len, optlen))
2462             return -TARGET_EFAULT;
2463         break;
2464     case SOL_IP:
2465         switch(optname) {
2466         case IP_TOS:
2467         case IP_TTL:
2468         case IP_HDRINCL:
2469         case IP_ROUTER_ALERT:
2470         case IP_RECVOPTS:
2471         case IP_RETOPTS:
2472         case IP_PKTINFO:
2473         case IP_MTU_DISCOVER:
2474         case IP_RECVERR:
2475         case IP_RECVTOS:
2476 #ifdef IP_FREEBIND
2477         case IP_FREEBIND:
2478 #endif
2479         case IP_MULTICAST_TTL:
2480         case IP_MULTICAST_LOOP:
2481             if (get_user_u32(len, optlen))
2482                 return -TARGET_EFAULT;
2483             if (len < 0)
2484                 return -TARGET_EINVAL;
2485             lv = sizeof(lv);
2486             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2487             if (ret < 0)
2488                 return ret;
2489             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2490                 len = 1;
2491                 if (put_user_u32(len, optlen)
2492                     || put_user_u8(val, optval_addr))
2493                     return -TARGET_EFAULT;
2494             } else {
2495                 if (len > sizeof(int))
2496                     len = sizeof(int);
2497                 if (put_user_u32(len, optlen)
2498                     || put_user_u32(val, optval_addr))
2499                     return -TARGET_EFAULT;
2500             }
2501             break;
2502         default:
2503             ret = -TARGET_ENOPROTOOPT;
2504             break;
2505         }
2506         break;
2507     case SOL_IPV6:
2508         switch (optname) {
2509         case IPV6_MTU_DISCOVER:
2510         case IPV6_MTU:
2511         case IPV6_V6ONLY:
2512         case IPV6_RECVPKTINFO:
2513         case IPV6_UNICAST_HOPS:
2514         case IPV6_MULTICAST_HOPS:
2515         case IPV6_MULTICAST_LOOP:
2516         case IPV6_RECVERR:
2517         case IPV6_RECVHOPLIMIT:
2518         case IPV6_2292HOPLIMIT:
2519         case IPV6_CHECKSUM:
2520         case IPV6_ADDRFORM:
2521         case IPV6_2292PKTINFO:
2522         case IPV6_RECVTCLASS:
2523         case IPV6_RECVRTHDR:
2524         case IPV6_2292RTHDR:
2525         case IPV6_RECVHOPOPTS:
2526         case IPV6_2292HOPOPTS:
2527         case IPV6_RECVDSTOPTS:
2528         case IPV6_2292DSTOPTS:
2529         case IPV6_TCLASS:
2530 #ifdef IPV6_RECVPATHMTU
2531         case IPV6_RECVPATHMTU:
2532 #endif
2533 #ifdef IPV6_TRANSPARENT
2534         case IPV6_TRANSPARENT:
2535 #endif
2536 #ifdef IPV6_FREEBIND
2537         case IPV6_FREEBIND:
2538 #endif
2539 #ifdef IPV6_RECVORIGDSTADDR
2540         case IPV6_RECVORIGDSTADDR:
2541 #endif
2542             if (get_user_u32(len, optlen))
2543                 return -TARGET_EFAULT;
2544             if (len < 0)
2545                 return -TARGET_EINVAL;
2546             lv = sizeof(lv);
2547             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2548             if (ret < 0)
2549                 return ret;
2550             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2551                 len = 1;
2552                 if (put_user_u32(len, optlen)
2553                     || put_user_u8(val, optval_addr))
2554                     return -TARGET_EFAULT;
2555             } else {
2556                 if (len > sizeof(int))
2557                     len = sizeof(int);
2558                 if (put_user_u32(len, optlen)
2559                     || put_user_u32(val, optval_addr))
2560                     return -TARGET_EFAULT;
2561             }
2562             break;
2563         default:
2564             ret = -TARGET_ENOPROTOOPT;
2565             break;
2566         }
2567         break;
2568 #ifdef SOL_NETLINK
2569     case SOL_NETLINK:
2570         switch (optname) {
2571         case NETLINK_PKTINFO:
2572         case NETLINK_BROADCAST_ERROR:
2573         case NETLINK_NO_ENOBUFS:
2574 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2575         case NETLINK_LISTEN_ALL_NSID:
2576         case NETLINK_CAP_ACK:
2577 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2578 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2579         case NETLINK_EXT_ACK:
2580 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2581 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2582         case NETLINK_GET_STRICT_CHK:
2583 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2584             if (get_user_u32(len, optlen)) {
2585                 return -TARGET_EFAULT;
2586             }
2587             if (len != sizeof(val)) {
2588                 return -TARGET_EINVAL;
2589             }
2590             lv = len;
2591             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2592             if (ret < 0) {
2593                 return ret;
2594             }
2595             if (put_user_u32(lv, optlen)
2596                 || put_user_u32(val, optval_addr)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             break;
2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2601         case NETLINK_LIST_MEMBERSHIPS:
2602         {
2603             uint32_t *results;
2604             int i;
2605             if (get_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             if (len < 0) {
2609                 return -TARGET_EINVAL;
2610             }
2611             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2612             if (!results) {
2613                 return -TARGET_EFAULT;
2614             }
2615             lv = len;
2616             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2617             if (ret < 0) {
2618                 unlock_user(results, optval_addr, 0);
2619                 return ret;
2620             }
2621             /* swap host endianness to target endianness. */
2622             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2623                 results[i] = tswap32(results[i]);
2624             }
2625             if (put_user_u32(lv, optlen)) {
2626                 return -TARGET_EFAULT;
2627             }
2628             unlock_user(results, optval_addr, 0);
2629             break;
2630         }
2631 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2632         default:
2633             goto unimplemented;
2634         }
             break;
2635 #endif /* SOL_NETLINK */
2636     default:
2637     unimplemented:
2638         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2639                  level, optname);
2640         ret = -TARGET_EOPNOTSUPP;
2641         break;
2642     }
2643     return ret;
2644 }
2645 
2646 /* Convert target low/high pair representing file offset into the host
2647  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2648  * as the kernel doesn't handle them either.
2649  */
2650 static void target_to_host_low_high(abi_ulong tlow,
2651                                     abi_ulong thigh,
2652                                     unsigned long *hlow,
2653                                     unsigned long *hhigh)
2654 {
2655     uint64_t off = tlow |
2656         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2657         TARGET_LONG_BITS / 2;
2658 
2659     *hlow = off;
2660     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2661 }
2662 
2663 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2664                                 abi_ulong count, int copy)
2665 {
2666     struct target_iovec *target_vec;
2667     struct iovec *vec;
2668     abi_ulong total_len, max_len;
2669     int i;
2670     int err = 0;
2671     bool bad_address = false;
2672 
2673     if (count == 0) {
2674         errno = 0;
2675         return NULL;
2676     }
2677     if (count > IOV_MAX) {
2678         errno = EINVAL;
2679         return NULL;
2680     }
2681 
2682     vec = g_try_new0(struct iovec, count);
2683     if (vec == NULL) {
2684         errno = ENOMEM;
2685         return NULL;
2686     }
2687 
2688     target_vec = lock_user(VERIFY_READ, target_addr,
2689                            count * sizeof(struct target_iovec), 1);
2690     if (target_vec == NULL) {
2691         err = EFAULT;
2692         goto fail2;
2693     }
2694 
2695     /* ??? If host page size > target page size, this will result in a
2696        value larger than what we can actually support.  */
2697     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2698     total_len = 0;
2699 
2700     for (i = 0; i < count; i++) {
2701         abi_ulong base = tswapal(target_vec[i].iov_base);
2702         abi_long len = tswapal(target_vec[i].iov_len);
2703 
2704         if (len < 0) {
2705             err = EINVAL;
2706             goto fail;
2707         } else if (len == 0) {
2708             /* Zero length pointer is ignored.  */
2709             vec[i].iov_base = 0;
2710         } else {
2711             vec[i].iov_base = lock_user(type, base, len, copy);
2712             /* If the first buffer pointer is bad, this is a fault.  But
2713              * subsequent bad buffers will result in a partial write; this
2714              * is realized by filling the vector with null pointers and
2715              * zero lengths. */
2716             if (!vec[i].iov_base) {
2717                 if (i == 0) {
2718                     err = EFAULT;
2719                     goto fail;
2720                 } else {
2721                     bad_address = true;
2722                 }
2723             }
2724             if (bad_address) {
2725                 len = 0;
2726             }
2727             if (len > max_len - total_len) {
2728                 len = max_len - total_len;
2729             }
2730         }
2731         vec[i].iov_len = len;
2732         total_len += len;
2733     }
2734 
2735     unlock_user(target_vec, target_addr, 0);
2736     return vec;
2737 
2738  fail:
2739     while (--i >= 0) {
2740         if (tswapal(target_vec[i].iov_len) > 0) {
2741             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2742         }
2743     }
2744     unlock_user(target_vec, target_addr, 0);
2745  fail2:
2746     g_free(vec);
2747     errno = err;
2748     return NULL;
2749 }
2750 
2751 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2752                          abi_ulong count, int copy)
2753 {
2754     struct target_iovec *target_vec;
2755     int i;
2756 
2757     target_vec = lock_user(VERIFY_READ, target_addr,
2758                            count * sizeof(struct target_iovec), 1);
2759     if (target_vec) {
2760         for (i = 0; i < count; i++) {
2761             abi_ulong base = tswapal(target_vec[i].iov_base);
2762             abi_long len = tswapal(target_vec[i].iov_len);
2763             if (len < 0) {
2764                 break;
2765             }
2766             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2767         }
2768         unlock_user(target_vec, target_addr, 0);
2769     }
2770 
2771     g_free(vec);
2772 }
2773 
2774 static inline int target_to_host_sock_type(int *type)
2775 {
2776     int host_type = 0;
2777     int target_type = *type;
2778 
2779     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2780     case TARGET_SOCK_DGRAM:
2781         host_type = SOCK_DGRAM;
2782         break;
2783     case TARGET_SOCK_STREAM:
2784         host_type = SOCK_STREAM;
2785         break;
2786     default:
2787         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2788         break;
2789     }
2790     if (target_type & TARGET_SOCK_CLOEXEC) {
2791 #if defined(SOCK_CLOEXEC)
2792         host_type |= SOCK_CLOEXEC;
2793 #else
2794         return -TARGET_EINVAL;
2795 #endif
2796     }
2797     if (target_type & TARGET_SOCK_NONBLOCK) {
2798 #if defined(SOCK_NONBLOCK)
2799         host_type |= SOCK_NONBLOCK;
2800 #elif !defined(O_NONBLOCK)
2801         return -TARGET_EINVAL;
2802 #endif
2803     }
2804     *type = host_type;
2805     return 0;
2806 }
2807 
2808 /* Try to emulate socket type flags after socket creation.  */
2809 static int sock_flags_fixup(int fd, int target_type)
2810 {
2811 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2812     if (target_type & TARGET_SOCK_NONBLOCK) {
2813         int flags = fcntl(fd, F_GETFL);
2814         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2815             close(fd);
2816             return -TARGET_EINVAL;
2817         }
2818     }
2819 #endif
2820     return fd;
2821 }
2822 
2823 /* do_socket() Must return target values and target errnos. */
2824 static abi_long do_socket(int domain, int type, int protocol)
2825 {
2826     int target_type = type;
2827     int ret;
2828 
2829     ret = target_to_host_sock_type(&type);
2830     if (ret) {
2831         return ret;
2832     }
2833 
2834     if (domain == PF_NETLINK && !(
2835 #ifdef CONFIG_RTNETLINK
2836          protocol == NETLINK_ROUTE ||
2837 #endif
2838          protocol == NETLINK_KOBJECT_UEVENT ||
2839          protocol == NETLINK_AUDIT)) {
2840         return -EPFNOSUPPORT;
2841     }
2842 
2843     if (domain == AF_PACKET ||
2844         (domain == AF_INET && type == SOCK_PACKET)) {
2845         protocol = tswap16(protocol);
2846     }
2847 
2848     ret = get_errno(socket(domain, type, protocol));
2849     if (ret >= 0) {
2850         ret = sock_flags_fixup(ret, target_type);
2851         if (type == SOCK_PACKET) {
2852             /* Handle the obsolete SOCK_PACKET case: these sockets
2853              * bind to the interface by name.
2854              */
2855             fd_trans_register(ret, &target_packet_trans);
2856         } else if (domain == PF_NETLINK) {
2857             switch (protocol) {
2858 #ifdef CONFIG_RTNETLINK
2859             case NETLINK_ROUTE:
2860                 fd_trans_register(ret, &target_netlink_route_trans);
2861                 break;
2862 #endif
2863             case NETLINK_KOBJECT_UEVENT:
2864                 /* nothing to do: messages are strings */
2865                 break;
2866             case NETLINK_AUDIT:
2867                 fd_trans_register(ret, &target_netlink_audit_trans);
2868                 break;
2869             default:
2870                 g_assert_not_reached();
2871             }
2872         }
2873     }
2874     return ret;
2875 }
2876 
2877 /* do_bind() Must return target values and target errnos. */
2878 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2879                         socklen_t addrlen)
2880 {
2881     void *addr;
2882     abi_long ret;
2883 
2884     if ((int)addrlen < 0) {
2885         return -TARGET_EINVAL;
2886     }
2887 
2888     addr = alloca(addrlen+1);
2889 
2890     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2891     if (ret)
2892         return ret;
2893 
2894     return get_errno(bind(sockfd, addr, addrlen));
2895 }
2896 
2897 /* do_connect() Must return target values and target errnos. */
2898 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2899                            socklen_t addrlen)
2900 {
2901     void *addr;
2902     abi_long ret;
2903 
2904     if ((int)addrlen < 0) {
2905         return -TARGET_EINVAL;
2906     }
2907 
2908     addr = alloca(addrlen+1);
2909 
2910     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2911     if (ret)
2912         return ret;
2913 
2914     return get_errno(safe_connect(sockfd, addr, addrlen));
2915 }
2916 
2917 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2918 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2919                                       int flags, int send)
2920 {
2921     abi_long ret, len;
2922     struct msghdr msg;
2923     abi_ulong count;
2924     struct iovec *vec;
2925     abi_ulong target_vec;
2926 
2927     if (msgp->msg_name) {
2928         msg.msg_namelen = tswap32(msgp->msg_namelen);
2929         msg.msg_name = alloca(msg.msg_namelen+1);
2930         ret = target_to_host_sockaddr(fd, msg.msg_name,
2931                                       tswapal(msgp->msg_name),
2932                                       msg.msg_namelen);
2933         if (ret == -TARGET_EFAULT) {
2934             /* For connected sockets msg_name and msg_namelen must
2935              * be ignored, so returning EFAULT immediately is wrong.
2936              * Instead, pass a bad msg_name to the host kernel, and
2937              * let it decide whether to return EFAULT or not.
2938              */
2939             msg.msg_name = (void *)-1;
2940         } else if (ret) {
2941             goto out2;
2942         }
2943     } else {
2944         msg.msg_name = NULL;
2945         msg.msg_namelen = 0;
2946     }
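         /* The control area is allocated at twice the guest-specified size,
          * presumably to leave headroom for host cmsg header and alignment
          * overhead; target_to_host_cmsg() treats an overflow of this area
          * as a QEMU bug (see the comment there).
          */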
2947     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2948     msg.msg_control = alloca(msg.msg_controllen);
2949     memset(msg.msg_control, 0, msg.msg_controllen);
2950 
2951     msg.msg_flags = tswap32(msgp->msg_flags);
2952 
2953     count = tswapal(msgp->msg_iovlen);
2954     target_vec = tswapal(msgp->msg_iov);
2955 
2956     if (count > IOV_MAX) {
2957         /* sendmsg/recvmsg return a different errno for this condition than
2958          * readv/writev, so we must catch it here before lock_iovec() does.
2959          */
2960         ret = -TARGET_EMSGSIZE;
2961         goto out2;
2962     }
2963 
2964     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2965                      target_vec, count, send);
2966     if (vec == NULL) {
2967         ret = -host_to_target_errno(errno);
2968         goto out2;
2969     }
2970     msg.msg_iovlen = count;
2971     msg.msg_iov = vec;
2972 
2973     if (send) {
2974         if (fd_trans_target_to_host_data(fd)) {
2975             void *host_msg;
2976 
2977             host_msg = g_malloc(msg.msg_iov->iov_len);
2978             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2979             ret = fd_trans_target_to_host_data(fd)(host_msg,
2980                                                    msg.msg_iov->iov_len);
2981             if (ret >= 0) {
2982                 msg.msg_iov->iov_base = host_msg;
2983                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2984             }
2985             g_free(host_msg);
2986         } else {
2987             ret = target_to_host_cmsg(&msg, msgp);
2988             if (ret == 0) {
2989                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2990             }
2991         }
2992     } else {
2993         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2994         if (!is_error(ret)) {
2995             len = ret;
2996             if (fd_trans_host_to_target_data(fd)) {
2997                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2998                                                MIN(msg.msg_iov->iov_len, len));
2999             } else {
3000                 ret = host_to_target_cmsg(msgp, &msg);
3001             }
3002             if (!is_error(ret)) {
3003                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3004                 msgp->msg_flags = tswap32(msg.msg_flags);
3005                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3006                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3007                                     msg.msg_name, msg.msg_namelen);
3008                     if (ret) {
3009                         goto out;
3010                     }
3011                 }
3012 
3013                 ret = len;
3014             }
3015         }
3016     }
3017 
3018 out:
3019     unlock_iovec(vec, target_vec, count, !send);
3020 out2:
3021     return ret;
3022 }
3023 
3024 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3025                                int flags, int send)
3026 {
3027     abi_long ret;
3028     struct target_msghdr *msgp;
3029 
3030     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3031                           msgp,
3032                           target_msg,
3033                           send ? 1 : 0)) {
3034         return -TARGET_EFAULT;
3035     }
3036     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3037     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3038     return ret;
3039 }
3040 
3041 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3042  * so it might not have this *mmsg-specific flag either.
3043  */
3044 #ifndef MSG_WAITFORONE
3045 #define MSG_WAITFORONE 0x10000
3046 #endif
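     /* Editorial note: 0x10000 matches the kernel's own MSG_WAITFORONE value
      * from <linux/socket.h>, so defining it here is safe with older libcs.
      */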
3047 
3048 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3049                                 unsigned int vlen, unsigned int flags,
3050                                 int send)
3051 {
3052     struct target_mmsghdr *mmsgp;
3053     abi_long ret = 0;
3054     int i;
3055 
3056     if (vlen > UIO_MAXIOV) {
3057         vlen = UIO_MAXIOV;
3058     }
3059 
3060     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3061     if (!mmsgp) {
3062         return -TARGET_EFAULT;
3063     }
3064 
3065     for (i = 0; i < vlen; i++) {
3066         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3067         if (is_error(ret)) {
3068             break;
3069         }
3070         mmsgp[i].msg_len = tswap32(ret);
3071         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3072         if (flags & MSG_WAITFORONE) {
3073             flags |= MSG_DONTWAIT;
3074         }
3075     }
3076 
3077     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3078 
3079     /* Return number of datagrams sent if we sent any at all;
3080      * otherwise return the error.
3081      */
3082     if (i) {
3083         return i;
3084     }
3085     return ret;
3086 }
3087 
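/* do_accept4(), do_getpeername() and do_getsockname() below all follow the
 * same pattern for returning a socket address to the guest: read the guest's
 * addrlen, reject negative values, check that the guest buffer is writable,
 * call the host syscall with a scratch sockaddr, then copy back at most
 * MIN(addrlen, ret_addrlen) bytes and store the length reported by the kernel
 * back into the guest's addrlen word.
 */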
3088 /* do_accept4() must return target values and target errnos. */
3089 static abi_long do_accept4(int fd, abi_ulong target_addr,
3090                            abi_ulong target_addrlen_addr, int flags)
3091 {
3092     socklen_t addrlen, ret_addrlen;
3093     void *addr;
3094     abi_long ret;
3095     int host_flags;
3096 
3097     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3098 
3099     if (target_addr == 0) {
3100         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3101     }
3102 
3103     /* Linux returns EINVAL if addrlen pointer is invalid */
3104     if (get_user_u32(addrlen, target_addrlen_addr))
3105         return -TARGET_EINVAL;
3106 
3107     if ((int)addrlen < 0) {
3108         return -TARGET_EINVAL;
3109     }
3110 
3111     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3112         return -TARGET_EINVAL;
3113 
3114     addr = alloca(addrlen);
3115 
3116     ret_addrlen = addrlen;
3117     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3118     if (!is_error(ret)) {
3119         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3120         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3121             ret = -TARGET_EFAULT;
3122         }
3123     }
3124     return ret;
3125 }
3126 
3127 /* do_getpeername() must return target values and target errnos. */
3128 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3129                                abi_ulong target_addrlen_addr)
3130 {
3131     socklen_t addrlen, ret_addrlen;
3132     void *addr;
3133     abi_long ret;
3134 
3135     if (get_user_u32(addrlen, target_addrlen_addr))
3136         return -TARGET_EFAULT;
3137 
3138     if ((int)addrlen < 0) {
3139         return -TARGET_EINVAL;
3140     }
3141 
3142     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3143         return -TARGET_EFAULT;
3144 
3145     addr = alloca(addrlen);
3146 
3147     ret_addrlen = addrlen;
3148     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3149     if (!is_error(ret)) {
3150         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3151         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3152             ret = -TARGET_EFAULT;
3153         }
3154     }
3155     return ret;
3156 }
3157 
3158 /* do_getsockname() must return target values and target errnos. */
3159 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3160                                abi_ulong target_addrlen_addr)
3161 {
3162     socklen_t addrlen, ret_addrlen;
3163     void *addr;
3164     abi_long ret;
3165 
3166     if (get_user_u32(addrlen, target_addrlen_addr))
3167         return -TARGET_EFAULT;
3168 
3169     if ((int)addrlen < 0) {
3170         return -TARGET_EINVAL;
3171     }
3172 
3173     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3174         return -TARGET_EFAULT;
3175 
3176     addr = alloca(addrlen);
3177 
3178     ret_addrlen = addrlen;
3179     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3180     if (!is_error(ret)) {
3181         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3182         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3183             ret = -TARGET_EFAULT;
3184         }
3185     }
3186     return ret;
3187 }
3188 
3189 /* do_socketpair() must return target values and target errnos. */
3190 static abi_long do_socketpair(int domain, int type, int protocol,
3191                               abi_ulong target_tab_addr)
3192 {
3193     int tab[2];
3194     abi_long ret;
3195 
3196     target_to_host_sock_type(&type);
3197 
3198     ret = get_errno(socketpair(domain, type, protocol, tab));
3199     if (!is_error(ret)) {
3200         if (put_user_s32(tab[0], target_tab_addr)
3201             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3202             ret = -TARGET_EFAULT;
3203     }
3204     return ret;
3205 }
3206 
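/* Both do_sendto() and do_recvfrom() honour the per-fd payload translation
 * hooks (fd_trans_target_to_host_data()/fd_trans_host_to_target_data()),
 * which are registered for special socket types (netlink sockets, for
 * example) whose message contents need byte-swapping in flight.  On the send
 * side the guest buffer is first copied into a scratch allocation so the
 * translator can rewrite the data without modifying guest memory that was
 * only locked for reading.
 */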
3207 /* do_sendto() must return target values and target errnos. */
3208 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3209                           abi_ulong target_addr, socklen_t addrlen)
3210 {
3211     void *addr;
3212     void *host_msg;
3213     void *copy_msg = NULL;
3214     abi_long ret;
3215 
3216     if ((int)addrlen < 0) {
3217         return -TARGET_EINVAL;
3218     }
3219 
3220     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3221     if (!host_msg)
3222         return -TARGET_EFAULT;
3223     if (fd_trans_target_to_host_data(fd)) {
3224         copy_msg = host_msg;
3225         host_msg = g_malloc(len);
3226         memcpy(host_msg, copy_msg, len);
3227         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3228         if (ret < 0) {
3229             goto fail;
3230         }
3231     }
3232     if (target_addr) {
3233         addr = alloca(addrlen+1);
3234         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3235         if (ret) {
3236             goto fail;
3237         }
3238         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3239     } else {
3240         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3241     }
3242 fail:
3243     if (copy_msg) {
3244         g_free(host_msg);
3245         host_msg = copy_msg;
3246     }
3247     unlock_user(host_msg, msg, 0);
3248     return ret;
3249 }
3250 
3251 /* do_recvfrom() must return target values and target errnos. */
3252 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3253                             abi_ulong target_addr,
3254                             abi_ulong target_addrlen)
3255 {
3256     socklen_t addrlen, ret_addrlen;
3257     void *addr;
3258     void *host_msg;
3259     abi_long ret;
3260 
3261     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3262     if (!host_msg)
3263         return -TARGET_EFAULT;
3264     if (target_addr) {
3265         if (get_user_u32(addrlen, target_addrlen)) {
3266             ret = -TARGET_EFAULT;
3267             goto fail;
3268         }
3269         if ((int)addrlen < 0) {
3270             ret = -TARGET_EINVAL;
3271             goto fail;
3272         }
3273         addr = alloca(addrlen);
3274         ret_addrlen = addrlen;
3275         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3276                                       addr, &ret_addrlen));
3277     } else {
3278         addr = NULL; /* To keep compiler quiet.  */
3279         addrlen = 0; /* To keep compiler quiet.  */
3280         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3281     }
3282     if (!is_error(ret)) {
3283         if (fd_trans_host_to_target_data(fd)) {
3284             abi_long trans;
3285             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3286             if (is_error(trans)) {
3287                 ret = trans;
3288                 goto fail;
3289             }
3290         }
3291         if (target_addr) {
3292             host_to_target_sockaddr(target_addr, addr,
3293                                     MIN(addrlen, ret_addrlen));
3294             if (put_user_u32(ret_addrlen, target_addrlen)) {
3295                 ret = -TARGET_EFAULT;
3296                 goto fail;
3297             }
3298         }
3299         unlock_user(host_msg, msg, len);
3300     } else {
3301 fail:
3302         unlock_user(host_msg, msg, 0);
3303     }
3304     return ret;
3305 }
3306 
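/* On targets that provide the multiplexed socketcall(2) entry point, the
 * guest passes an operation number plus a pointer to an array of arguments in
 * guest memory; do_socketcall() below reads nargs[num] words from that array
 * and forwards them to the per-operation helpers above.  A hypothetical
 * guest-side call (illustrative only, not part of this file) would look like:
 *
 *     unsigned long args[3] = { fd, (unsigned long)&sa, sizeof(sa) };
 *     syscall(SYS_socketcall, SYS_CONNECT, args);
 */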
3307 #ifdef TARGET_NR_socketcall
3308 /* do_socketcall() must return target values and target errnos. */
3309 static abi_long do_socketcall(int num, abi_ulong vptr)
3310 {
3311     static const unsigned nargs[] = { /* number of arguments per operation */
3312         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3313         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3314         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3315         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3316         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3317         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3318         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3319         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3320         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3321         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3322         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3323         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3324         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3325         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3326         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3327         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3328         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3329         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3330         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3331         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3332     };
3333     abi_long a[6]; /* max 6 args */
3334     unsigned i;
3335 
3336     /* check the range of the first argument num */
3337     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3338     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3339         return -TARGET_EINVAL;
3340     }
3341     /* ensure we have space for args */
3342     if (nargs[num] > ARRAY_SIZE(a)) {
3343         return -TARGET_EINVAL;
3344     }
3345     /* collect the arguments in a[] according to nargs[] */
3346     for (i = 0; i < nargs[num]; ++i) {
3347         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3348             return -TARGET_EFAULT;
3349         }
3350     }
3351     /* now when we have the args, invoke the appropriate underlying function */
3352     switch (num) {
3353     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3354         return do_socket(a[0], a[1], a[2]);
3355     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3356         return do_bind(a[0], a[1], a[2]);
3357     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3358         return do_connect(a[0], a[1], a[2]);
3359     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3360         return get_errno(listen(a[0], a[1]));
3361     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3362         return do_accept4(a[0], a[1], a[2], 0);
3363     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3364         return do_getsockname(a[0], a[1], a[2]);
3365     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3366         return do_getpeername(a[0], a[1], a[2]);
3367     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3368         return do_socketpair(a[0], a[1], a[2], a[3]);
3369     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3370         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3371     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3372         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3373     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3374         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3375     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3376         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3377     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3378         return get_errno(shutdown(a[0], a[1]));
3379     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3380         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3381     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3382         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3383     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3384         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3385     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3386         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3387     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3388         return do_accept4(a[0], a[1], a[2], a[3]);
3389     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3390         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3391     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3392         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3393     default:
3394         gemu_log("Unsupported socketcall: %d\n", num);
3395         return -TARGET_EINVAL;
3396     }
3397 }
3398 #endif
3399 
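/* Bookkeeping for shmat()/shmdt(): each successful attach records its guest
 * start address and size in shm_regions[], so that do_shmdt() can later clear
 * the page flags for exactly the attached range.  Only N_SHM_REGIONS
 * simultaneous attaches are tracked.
 */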
3400 #define N_SHM_REGIONS	32
3401 
3402 static struct shm_region {
3403     abi_ulong start;
3404     abi_ulong size;
3405     bool in_use;
3406 } shm_regions[N_SHM_REGIONS];
3407 
3408 #ifndef TARGET_SEMID64_DS
3409 /* asm-generic version of this struct */
3410 struct target_semid64_ds
3411 {
3412   struct target_ipc_perm sem_perm;
3413   abi_ulong sem_otime;
3414 #if TARGET_ABI_BITS == 32
3415   abi_ulong __unused1;
3416 #endif
3417   abi_ulong sem_ctime;
3418 #if TARGET_ABI_BITS == 32
3419   abi_ulong __unused2;
3420 #endif
3421   abi_ulong sem_nsems;
3422   abi_ulong __unused3;
3423   abi_ulong __unused4;
3424 };
3425 #endif
3426 
3427 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3428                                                abi_ulong target_addr)
3429 {
3430     struct target_ipc_perm *target_ip;
3431     struct target_semid64_ds *target_sd;
3432 
3433     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3434         return -TARGET_EFAULT;
3435     target_ip = &(target_sd->sem_perm);
3436     host_ip->__key = tswap32(target_ip->__key);
3437     host_ip->uid = tswap32(target_ip->uid);
3438     host_ip->gid = tswap32(target_ip->gid);
3439     host_ip->cuid = tswap32(target_ip->cuid);
3440     host_ip->cgid = tswap32(target_ip->cgid);
3441 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3442     host_ip->mode = tswap32(target_ip->mode);
3443 #else
3444     host_ip->mode = tswap16(target_ip->mode);
3445 #endif
3446 #if defined(TARGET_PPC)
3447     host_ip->__seq = tswap32(target_ip->__seq);
3448 #else
3449     host_ip->__seq = tswap16(target_ip->__seq);
3450 #endif
3451     unlock_user_struct(target_sd, target_addr, 0);
3452     return 0;
3453 }
3454 
3455 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3456                                                struct ipc_perm *host_ip)
3457 {
3458     struct target_ipc_perm *target_ip;
3459     struct target_semid64_ds *target_sd;
3460 
3461     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3462         return -TARGET_EFAULT;
3463     target_ip = &(target_sd->sem_perm);
3464     target_ip->__key = tswap32(host_ip->__key);
3465     target_ip->uid = tswap32(host_ip->uid);
3466     target_ip->gid = tswap32(host_ip->gid);
3467     target_ip->cuid = tswap32(host_ip->cuid);
3468     target_ip->cgid = tswap32(host_ip->cgid);
3469 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3470     target_ip->mode = tswap32(host_ip->mode);
3471 #else
3472     target_ip->mode = tswap16(host_ip->mode);
3473 #endif
3474 #if defined(TARGET_PPC)
3475     target_ip->__seq = tswap32(host_ip->__seq);
3476 #else
3477     target_ip->__seq = tswap16(host_ip->__seq);
3478 #endif
3479     unlock_user_struct(target_sd, target_addr, 1);
3480     return 0;
3481 }
3482 
3483 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3484                                                abi_ulong target_addr)
3485 {
3486     struct target_semid64_ds *target_sd;
3487 
3488     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3489         return -TARGET_EFAULT;
3490     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3491         return -TARGET_EFAULT;
3492     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3493     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3494     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3495     unlock_user_struct(target_sd, target_addr, 0);
3496     return 0;
3497 }
3498 
3499 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3500                                                struct semid_ds *host_sd)
3501 {
3502     struct target_semid64_ds *target_sd;
3503 
3504     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3505         return -TARGET_EFAULT;
3506     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3507         return -TARGET_EFAULT;
3508     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3509     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3510     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3511     unlock_user_struct(target_sd, target_addr, 1);
3512     return 0;
3513 }
3514 
3515 struct target_seminfo {
3516     int semmap;
3517     int semmni;
3518     int semmns;
3519     int semmnu;
3520     int semmsl;
3521     int semopm;
3522     int semume;
3523     int semusz;
3524     int semvmx;
3525     int semaem;
3526 };
3527 
3528 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3529                                               struct seminfo *host_seminfo)
3530 {
3531     struct target_seminfo *target_seminfo;
3532     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3533         return -TARGET_EFAULT;
3534     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3535     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3536     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3537     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3538     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3539     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3540     __put_user(host_seminfo->semume, &target_seminfo->semume);
3541     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3542     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3543     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3544     unlock_user_struct(target_seminfo, target_addr, 1);
3545     return 0;
3546 }
3547 
3548 union semun {
3549 	int val;
3550 	struct semid_ds *buf;
3551 	unsigned short *array;
3552 	struct seminfo *__buf;
3553 };
3554 
3555 union target_semun {
3556 	int val;
3557 	abi_ulong buf;
3558 	abi_ulong array;
3559 	abi_ulong __buf;
3560 };
3561 
3562 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3563                                                abi_ulong target_addr)
3564 {
3565     int nsems;
3566     unsigned short *array;
3567     union semun semun;
3568     struct semid_ds semid_ds;
3569     int i, ret;
3570 
3571     semun.buf = &semid_ds;
3572 
3573     ret = semctl(semid, 0, IPC_STAT, semun);
3574     if (ret == -1)
3575         return get_errno(ret);
3576 
3577     nsems = semid_ds.sem_nsems;
3578 
3579     *host_array = g_try_new(unsigned short, nsems);
3580     if (!*host_array) {
3581         return -TARGET_ENOMEM;
3582     }
3583     array = lock_user(VERIFY_READ, target_addr,
3584                       nsems*sizeof(unsigned short), 1);
3585     if (!array) {
3586         g_free(*host_array);
3587         return -TARGET_EFAULT;
3588     }
3589 
3590     for(i=0; i<nsems; i++) {
3591         __get_user((*host_array)[i], &array[i]);
3592     }
3593     unlock_user(array, target_addr, 0);
3594 
3595     return 0;
3596 }
3597 
3598 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3599                                                unsigned short **host_array)
3600 {
3601     int nsems;
3602     unsigned short *array;
3603     union semun semun;
3604     struct semid_ds semid_ds;
3605     int i, ret;
3606 
3607     semun.buf = &semid_ds;
3608 
3609     ret = semctl(semid, 0, IPC_STAT, semun);
3610     if (ret == -1)
3611         return get_errno(ret);
3612 
3613     nsems = semid_ds.sem_nsems;
3614 
3615     array = lock_user(VERIFY_WRITE, target_addr,
3616                       nsems*sizeof(unsigned short), 0);
3617     if (!array)
3618         return -TARGET_EFAULT;
3619 
3620     for(i=0; i<nsems; i++) {
3621         __put_user((*host_array)[i], &array[i]);
3622     }
3623     g_free(*host_array);
3624     unlock_user(array, target_addr, 1);
3625 
3626     return 0;
3627 }
3628 
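/* do_semctl() marshals the semctl() argument in whichever form the command
 * requires: an integer value for SETVAL/GETVAL, an array of unsigned shorts
 * for GETALL/SETALL, a semid_ds for IPC_STAT/IPC_SET/SEM_STAT, or a seminfo
 * for IPC_INFO/SEM_INFO.  The guest-side union arrives as an abi_ulong
 * (target_semun), so pointer members are guest addresses that still need the
 * target-to-host conversion helpers above.
 */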
3629 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3630                                  abi_ulong target_arg)
3631 {
3632     union target_semun target_su = { .buf = target_arg };
3633     union semun arg;
3634     struct semid_ds dsarg;
3635     unsigned short *array = NULL;
3636     struct seminfo seminfo;
3637     abi_long ret = -TARGET_EINVAL;
3638     abi_long err;
3639     cmd &= 0xff;
3640 
3641     switch( cmd ) {
3642 	case GETVAL:
3643 	case SETVAL:
3644             /* In 64-bit cross-endian situations, we will erroneously pick up
3645              * the wrong half of the union for the "val" element.  To rectify
3646              * this, the entire 8-byte structure is byteswapped, followed by
3647              * a swap of the 4-byte val field. In other cases, the data is
3648              * already in proper host byte order. */
3649 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3650 		target_su.buf = tswapal(target_su.buf);
3651 		arg.val = tswap32(target_su.val);
3652 	    } else {
3653 		arg.val = target_su.val;
3654 	    }
3655             ret = get_errno(semctl(semid, semnum, cmd, arg));
3656             break;
3657 	case GETALL:
3658 	case SETALL:
3659             err = target_to_host_semarray(semid, &array, target_su.array);
3660             if (err)
3661                 return err;
3662             arg.array = array;
3663             ret = get_errno(semctl(semid, semnum, cmd, arg));
3664             err = host_to_target_semarray(semid, target_su.array, &array);
3665             if (err)
3666                 return err;
3667             break;
3668 	case IPC_STAT:
3669 	case IPC_SET:
3670 	case SEM_STAT:
3671             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3672             if (err)
3673                 return err;
3674             arg.buf = &dsarg;
3675             ret = get_errno(semctl(semid, semnum, cmd, arg));
3676             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3677             if (err)
3678                 return err;
3679             break;
3680 	case IPC_INFO:
3681 	case SEM_INFO:
3682             arg.__buf = &seminfo;
3683             ret = get_errno(semctl(semid, semnum, cmd, arg));
3684             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3685             if (err)
3686                 return err;
3687             break;
3688 	case IPC_RMID:
3689 	case GETPID:
3690 	case GETNCNT:
3691 	case GETZCNT:
3692             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3693             break;
3694     }
3695 
3696     return ret;
3697 }
3698 
3699 struct target_sembuf {
3700     unsigned short sem_num;
3701     short sem_op;
3702     short sem_flg;
3703 };
3704 
3705 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3706                                              abi_ulong target_addr,
3707                                              unsigned nsops)
3708 {
3709     struct target_sembuf *target_sembuf;
3710     int i;
3711 
3712     target_sembuf = lock_user(VERIFY_READ, target_addr,
3713                               nsops*sizeof(struct target_sembuf), 1);
3714     if (!target_sembuf)
3715         return -TARGET_EFAULT;
3716 
3717     for(i=0; i<nsops; i++) {
3718         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3719         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3720         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3721     }
3722 
3723     unlock_user(target_sembuf, target_addr, 0);
3724 
3725     return 0;
3726 }
3727 
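/* do_semop() prefers the host's dedicated semtimedop syscall and falls back
 * to the multiplexed ipc(2) syscall when only the latter exists, so the same
 * build works on hosts exposing either interface.  A NULL timeout makes
 * semtimedop behave like plain semop.
 */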
3728 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3729 {
3730     struct sembuf sops[nsops];
3731     abi_long ret;
3732 
3733     if (target_to_host_sembuf(sops, ptr, nsops))
3734         return -TARGET_EFAULT;
3735 
3736     ret = -TARGET_ENOSYS;
3737 #ifdef __NR_semtimedop
3738     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3739 #endif
3740 #ifdef __NR_ipc
3741     if (ret == -TARGET_ENOSYS) {
3742         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3743     }
3744 #endif
3745     return ret;
3746 }
3747 
3748 struct target_msqid_ds
3749 {
3750     struct target_ipc_perm msg_perm;
3751     abi_ulong msg_stime;
3752 #if TARGET_ABI_BITS == 32
3753     abi_ulong __unused1;
3754 #endif
3755     abi_ulong msg_rtime;
3756 #if TARGET_ABI_BITS == 32
3757     abi_ulong __unused2;
3758 #endif
3759     abi_ulong msg_ctime;
3760 #if TARGET_ABI_BITS == 32
3761     abi_ulong __unused3;
3762 #endif
3763     abi_ulong __msg_cbytes;
3764     abi_ulong msg_qnum;
3765     abi_ulong msg_qbytes;
3766     abi_ulong msg_lspid;
3767     abi_ulong msg_lrpid;
3768     abi_ulong __unused4;
3769     abi_ulong __unused5;
3770 };
3771 
3772 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3773                                                abi_ulong target_addr)
3774 {
3775     struct target_msqid_ds *target_md;
3776 
3777     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3778         return -TARGET_EFAULT;
3779     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3780         return -TARGET_EFAULT;
3781     host_md->msg_stime = tswapal(target_md->msg_stime);
3782     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3783     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3784     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3785     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3786     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3787     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3788     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3789     unlock_user_struct(target_md, target_addr, 0);
3790     return 0;
3791 }
3792 
3793 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3794                                                struct msqid_ds *host_md)
3795 {
3796     struct target_msqid_ds *target_md;
3797 
3798     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3799         return -TARGET_EFAULT;
3800     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3801         return -TARGET_EFAULT;
3802     target_md->msg_stime = tswapal(host_md->msg_stime);
3803     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3804     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3805     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3806     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3807     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3808     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3809     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3810     unlock_user_struct(target_md, target_addr, 1);
3811     return 0;
3812 }
3813 
3814 struct target_msginfo {
3815     int msgpool;
3816     int msgmap;
3817     int msgmax;
3818     int msgmnb;
3819     int msgmni;
3820     int msgssz;
3821     int msgtql;
3822     unsigned short int msgseg;
3823 };
3824 
3825 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3826                                               struct msginfo *host_msginfo)
3827 {
3828     struct target_msginfo *target_msginfo;
3829     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3830         return -TARGET_EFAULT;
3831     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3832     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3833     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3834     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3835     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3836     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3837     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3838     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3839     unlock_user_struct(target_msginfo, target_addr, 1);
3840     return 0;
3841 }
3842 
3843 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3844 {
3845     struct msqid_ds dsarg;
3846     struct msginfo msginfo;
3847     abi_long ret = -TARGET_EINVAL;
3848 
3849     cmd &= 0xff;
3850 
3851     switch (cmd) {
3852     case IPC_STAT:
3853     case IPC_SET:
3854     case MSG_STAT:
3855         if (target_to_host_msqid_ds(&dsarg,ptr))
3856             return -TARGET_EFAULT;
3857         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3858         if (host_to_target_msqid_ds(ptr,&dsarg))
3859             return -TARGET_EFAULT;
3860         break;
3861     case IPC_RMID:
3862         ret = get_errno(msgctl(msgid, cmd, NULL));
3863         break;
3864     case IPC_INFO:
3865     case MSG_INFO:
3866         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3867         if (host_to_target_msginfo(ptr, &msginfo))
3868             return -TARGET_EFAULT;
3869         break;
3870     }
3871 
3872     return ret;
3873 }
3874 
3875 struct target_msgbuf {
3876     abi_long mtype;
3877     char	mtext[1];
3878 };
3879 
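/* The host's struct msgbuf starts with a native 'long' mtype while the target
 * layout uses abi_long, so do_msgsnd()/do_msgrcv() stage the message in a
 * heap buffer of msgsz + sizeof(long) bytes and byte-swap mtype separately
 * from the opaque mtext payload.  Like do_semop(), they try the dedicated
 * msgsnd/msgrcv syscalls first and fall back to ipc(2).
 */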
3880 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3881                                  ssize_t msgsz, int msgflg)
3882 {
3883     struct target_msgbuf *target_mb;
3884     struct msgbuf *host_mb;
3885     abi_long ret = 0;
3886 
3887     if (msgsz < 0) {
3888         return -TARGET_EINVAL;
3889     }
3890 
3891     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3892         return -TARGET_EFAULT;
3893     host_mb = g_try_malloc(msgsz + sizeof(long));
3894     if (!host_mb) {
3895         unlock_user_struct(target_mb, msgp, 0);
3896         return -TARGET_ENOMEM;
3897     }
3898     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3899     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3900     ret = -TARGET_ENOSYS;
3901 #ifdef __NR_msgsnd
3902     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3903 #endif
3904 #ifdef __NR_ipc
3905     if (ret == -TARGET_ENOSYS) {
3906         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3907                                  host_mb, 0));
3908     }
3909 #endif
3910     g_free(host_mb);
3911     unlock_user_struct(target_mb, msgp, 0);
3912 
3913     return ret;
3914 }
3915 
3916 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3917                                  ssize_t msgsz, abi_long msgtyp,
3918                                  int msgflg)
3919 {
3920     struct target_msgbuf *target_mb;
3921     char *target_mtext;
3922     struct msgbuf *host_mb;
3923     abi_long ret = 0;
3924 
3925     if (msgsz < 0) {
3926         return -TARGET_EINVAL;
3927     }
3928 
3929     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3930         return -TARGET_EFAULT;
3931 
3932     host_mb = g_try_malloc(msgsz + sizeof(long));
3933     if (!host_mb) {
3934         ret = -TARGET_ENOMEM;
3935         goto end;
3936     }
3937     ret = -TARGET_ENOSYS;
3938 #ifdef __NR_msgrcv
3939     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3940 #endif
3941 #ifdef __NR_ipc
3942     if (ret == -TARGET_ENOSYS) {
3943         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3944                         msgflg, host_mb, msgtyp));
3945     }
3946 #endif
3947 
3948     if (ret > 0) {
3949         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3950         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3951         if (!target_mtext) {
3952             ret = -TARGET_EFAULT;
3953             goto end;
3954         }
3955         memcpy(target_mb->mtext, host_mb->mtext, ret);
3956         unlock_user(target_mtext, target_mtext_addr, ret);
3957     }
3958 
3959     target_mb->mtype = tswapal(host_mb->mtype);
3960 
3961 end:
3962     if (target_mb)
3963         unlock_user_struct(target_mb, msgp, 1);
3964     g_free(host_mb);
3965     return ret;
3966 }
3967 
3968 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3969                                                abi_ulong target_addr)
3970 {
3971     struct target_shmid_ds *target_sd;
3972 
3973     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3974         return -TARGET_EFAULT;
3975     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3976         return -TARGET_EFAULT;
3977     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3978     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3979     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3980     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3981     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3982     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3983     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3984     unlock_user_struct(target_sd, target_addr, 0);
3985     return 0;
3986 }
3987 
3988 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3989                                                struct shmid_ds *host_sd)
3990 {
3991     struct target_shmid_ds *target_sd;
3992 
3993     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3994         return -TARGET_EFAULT;
3995     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3996         return -TARGET_EFAULT;
3997     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3998     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3999     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4000     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4001     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4002     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4003     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4004     unlock_user_struct(target_sd, target_addr, 1);
4005     return 0;
4006 }
4007 
4008 struct  target_shminfo {
4009     abi_ulong shmmax;
4010     abi_ulong shmmin;
4011     abi_ulong shmmni;
4012     abi_ulong shmseg;
4013     abi_ulong shmall;
4014 };
4015 
4016 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4017                                               struct shminfo *host_shminfo)
4018 {
4019     struct target_shminfo *target_shminfo;
4020     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4021         return -TARGET_EFAULT;
4022     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4023     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4024     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4025     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4026     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4027     unlock_user_struct(target_shminfo, target_addr, 1);
4028     return 0;
4029 }
4030 
4031 struct target_shm_info {
4032     int used_ids;
4033     abi_ulong shm_tot;
4034     abi_ulong shm_rss;
4035     abi_ulong shm_swp;
4036     abi_ulong swap_attempts;
4037     abi_ulong swap_successes;
4038 };
4039 
4040 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4041                                                struct shm_info *host_shm_info)
4042 {
4043     struct target_shm_info *target_shm_info;
4044     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4045         return -TARGET_EFAULT;
4046     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4047     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4048     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4049     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4050     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4051     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4052     unlock_user_struct(target_shm_info, target_addr, 1);
4053     return 0;
4054 }
4055 
4056 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4057 {
4058     struct shmid_ds dsarg;
4059     struct shminfo shminfo;
4060     struct shm_info shm_info;
4061     abi_long ret = -TARGET_EINVAL;
4062 
4063     cmd &= 0xff;
4064 
4065     switch(cmd) {
4066     case IPC_STAT:
4067     case IPC_SET:
4068     case SHM_STAT:
4069         if (target_to_host_shmid_ds(&dsarg, buf))
4070             return -TARGET_EFAULT;
4071         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4072         if (host_to_target_shmid_ds(buf, &dsarg))
4073             return -TARGET_EFAULT;
4074         break;
4075     case IPC_INFO:
4076         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4077         if (host_to_target_shminfo(buf, &shminfo))
4078             return -TARGET_EFAULT;
4079         break;
4080     case SHM_INFO:
4081         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4082         if (host_to_target_shm_info(buf, &shm_info))
4083             return -TARGET_EFAULT;
4084         break;
4085     case IPC_RMID:
4086     case SHM_LOCK:
4087     case SHM_UNLOCK:
4088         ret = get_errno(shmctl(shmid, cmd, NULL));
4089         break;
4090     }
4091 
4092     return ret;
4093 }
4094 
4095 #ifndef TARGET_FORCE_SHMLBA
4096 /* For most architectures, SHMLBA is the same as the page size;
4097  * some architectures have larger values, in which case they should
4098  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4099  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4100  * and defining its own value for SHMLBA.
4101  *
4102  * The kernel also permits SHMLBA to be set by the architecture to a
4103  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4104  * this means that addresses are rounded to the large size if
4105  * SHM_RND is set but addresses not aligned to that size are not rejected
4106  * as long as they are at least page-aligned. Since the only architecture
4107  * which uses this is ia64 this code doesn't provide for that oddity.
4108  */
4109 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4110 {
4111     return TARGET_PAGE_SIZE;
4112 }
4113 #endif
4114 
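/* do_shmat(): when the guest supplies an address it must satisfy the target's
 * SHMLBA alignment (optionally rounded down with SHM_RND); when it does not,
 * a free guest range is reserved with mmap_find_vma() using the larger of the
 * host SHMLBA and the target shmlba, and the host attach is forced over that
 * reservation with SHM_REMAP.  The guest page flags are then updated so the
 * emulated MMU sees the new mapping.
 */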
4115 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4116                                  int shmid, abi_ulong shmaddr, int shmflg)
4117 {
4118     abi_long raddr;
4119     void *host_raddr;
4120     struct shmid_ds shm_info;
4121     int i,ret;
4122     abi_ulong shmlba;
4123 
4124     /* find out the length of the shared memory segment */
4125     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4126     if (is_error(ret)) {
4127         /* can't get length, bail out */
4128         return ret;
4129     }
4130 
4131     shmlba = target_shmlba(cpu_env);
4132 
4133     if (shmaddr & (shmlba - 1)) {
4134         if (shmflg & SHM_RND) {
4135             shmaddr &= ~(shmlba - 1);
4136         } else {
4137             return -TARGET_EINVAL;
4138         }
4139     }
4140     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4141         return -TARGET_EINVAL;
4142     }
4143 
4144     mmap_lock();
4145 
4146     if (shmaddr)
4147         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4148     else {
4149         abi_ulong mmap_start;
4150 
4151         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4152         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4153 
4154         if (mmap_start == -1) {
4155             errno = ENOMEM;
4156             host_raddr = (void *)-1;
4157         } else
4158             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4159     }
4160 
4161     if (host_raddr == (void *)-1) {
4162         mmap_unlock();
4163         return get_errno((long)host_raddr);
4164     }
4165     raddr=h2g((unsigned long)host_raddr);
4166 
4167     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4168                    PAGE_VALID | PAGE_READ |
4169                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4170 
4171     for (i = 0; i < N_SHM_REGIONS; i++) {
4172         if (!shm_regions[i].in_use) {
4173             shm_regions[i].in_use = true;
4174             shm_regions[i].start = raddr;
4175             shm_regions[i].size = shm_info.shm_segsz;
4176             break;
4177         }
4178     }
4179 
4180     mmap_unlock();
4181     return raddr;
4182 
4183 }
4184 
4185 static inline abi_long do_shmdt(abi_ulong shmaddr)
4186 {
4187     int i;
4188     abi_long rv;
4189 
4190     mmap_lock();
4191 
4192     for (i = 0; i < N_SHM_REGIONS; ++i) {
4193         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4194             shm_regions[i].in_use = false;
4195             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4196             break;
4197         }
4198     }
4199     rv = get_errno(shmdt(g2h(shmaddr)));
4200 
4201     mmap_unlock();
4202 
4203     return rv;
4204 }
4205 
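/* The ipc(2) multiplexer encodes an interface version in the top 16 bits of
 * 'call'; version 0 of IPCOP_msgrcv passes msgp and msgtyp indirectly through
 * a small kludge structure in guest memory, which is why do_ipc() below has a
 * separate path for it.
 */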
4206 #ifdef TARGET_NR_ipc
4207 /* ??? This only works with linear mappings.  */
4208 /* do_ipc() must return target values and target errnos. */
4209 static abi_long do_ipc(CPUArchState *cpu_env,
4210                        unsigned int call, abi_long first,
4211                        abi_long second, abi_long third,
4212                        abi_long ptr, abi_long fifth)
4213 {
4214     int version;
4215     abi_long ret = 0;
4216 
4217     version = call >> 16;
4218     call &= 0xffff;
4219 
4220     switch (call) {
4221     case IPCOP_semop:
4222         ret = do_semop(first, ptr, second);
4223         break;
4224 
4225     case IPCOP_semget:
4226         ret = get_errno(semget(first, second, third));
4227         break;
4228 
4229     case IPCOP_semctl: {
4230         /* The semun argument to semctl is passed by value, so dereference the
4231          * ptr argument. */
4232         abi_ulong atptr;
4233         get_user_ual(atptr, ptr);
4234         ret = do_semctl(first, second, third, atptr);
4235         break;
4236     }
4237 
4238     case IPCOP_msgget:
4239         ret = get_errno(msgget(first, second));
4240         break;
4241 
4242     case IPCOP_msgsnd:
4243         ret = do_msgsnd(first, ptr, second, third);
4244         break;
4245 
4246     case IPCOP_msgctl:
4247         ret = do_msgctl(first, second, ptr);
4248         break;
4249 
4250     case IPCOP_msgrcv:
4251         switch (version) {
4252         case 0:
4253             {
4254                 struct target_ipc_kludge {
4255                     abi_long msgp;
4256                     abi_long msgtyp;
4257                 } *tmp;
4258 
4259                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4260                     ret = -TARGET_EFAULT;
4261                     break;
4262                 }
4263 
4264                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4265 
4266                 unlock_user_struct(tmp, ptr, 0);
4267                 break;
4268             }
4269         default:
4270             ret = do_msgrcv(first, ptr, second, fifth, third);
4271         }
4272         break;
4273 
4274     case IPCOP_shmat:
4275         switch (version) {
4276         default:
4277         {
4278             abi_ulong raddr;
4279             raddr = do_shmat(cpu_env, first, ptr, second);
4280             if (is_error(raddr))
4281                 return get_errno(raddr);
4282             if (put_user_ual(raddr, third))
4283                 return -TARGET_EFAULT;
4284             break;
4285         }
4286         case 1:
4287             ret = -TARGET_EINVAL;
4288             break;
4289         }
4290         break;
4291     case IPCOP_shmdt:
4292         ret = do_shmdt(ptr);
4293         break;
4294 
4295     case IPCOP_shmget:
4296         /* IPC_* flag values are the same on all Linux platforms */
4297         ret = get_errno(shmget(first, second, third));
4298         break;
4299 
4300     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4301     case IPCOP_shmctl:
4302         ret = do_shmctl(first, second, ptr);
4303         break;
4304     default:
4305         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4306         ret = -TARGET_ENOSYS;
4307         break;
4308     }
4309     return ret;
4310 }
4311 #endif
4312 
4313 /* kernel structure types definitions */
4314 
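/* syscall_types.h is an X-macro header: it is included twice below, first to
 * build the STRUCT_xxx enum and then to emit a struct_xxx_def[] argtype
 * description for each structure, which the thunk code uses to convert ioctl
 * arguments between target and host layouts.  A hypothetical entry
 * (illustrative only) such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * would expand on the second inclusion to
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 */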
4315 #define STRUCT(name, ...) STRUCT_ ## name,
4316 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4317 enum {
4318 #include "syscall_types.h"
4319 STRUCT_MAX
4320 };
4321 #undef STRUCT
4322 #undef STRUCT_SPECIAL
4323 
4324 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4325 #define STRUCT_SPECIAL(name)
4326 #include "syscall_types.h"
4327 #undef STRUCT
4328 #undef STRUCT_SPECIAL
4329 
4330 typedef struct IOCTLEntry IOCTLEntry;
4331 
4332 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4333                              int fd, int cmd, abi_long arg);
4334 
4335 struct IOCTLEntry {
4336     int target_cmd;
4337     unsigned int host_cmd;
4338     const char *name;
4339     int access;
4340     do_ioctl_fn *do_ioctl;
4341     const argtype arg_type[5];
4342 };
4343 
4344 #define IOC_R 0x0001
4345 #define IOC_W 0x0002
4346 #define IOC_RW (IOC_R | IOC_W)
4347 
4348 #define MAX_STRUCT_SIZE 4096
4349 
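/* Each IOCTLEntry maps a target ioctl number to its host counterpart.  For
 * simple requests the IOC_R/IOC_W access flags plus the argtype description
 * are enough for generic thunk-based conversion; requests with variable-sized
 * or indirect payloads install a custom do_ioctl_fn instead, such as
 * do_ioctl_fs_ioc_fiemap(), do_ioctl_ifconf() and do_ioctl_dm() below.
 */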
4350 #ifdef CONFIG_FIEMAP
4351 /* So fiemap access checks don't overflow on 32 bit systems.
4352  * This is very slightly smaller than the limit imposed by
4353  * the underlying kernel.
4354  */
4355 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4356                             / sizeof(struct fiemap_extent))
4357 
4358 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4359                                        int fd, int cmd, abi_long arg)
4360 {
4361     /* The parameter for this ioctl is a struct fiemap followed
4362      * by an array of struct fiemap_extent whose size is set
4363      * in fiemap->fm_extent_count. The array is filled in by the
4364      * ioctl.
4365      */
4366     int target_size_in, target_size_out;
4367     struct fiemap *fm;
4368     const argtype *arg_type = ie->arg_type;
4369     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4370     void *argptr, *p;
4371     abi_long ret;
4372     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4373     uint32_t outbufsz;
4374     int free_fm = 0;
4375 
4376     assert(arg_type[0] == TYPE_PTR);
4377     assert(ie->access == IOC_RW);
4378     arg_type++;
4379     target_size_in = thunk_type_size(arg_type, 0);
4380     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4381     if (!argptr) {
4382         return -TARGET_EFAULT;
4383     }
4384     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4385     unlock_user(argptr, arg, 0);
4386     fm = (struct fiemap *)buf_temp;
4387     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4388         return -TARGET_EINVAL;
4389     }
4390 
4391     outbufsz = sizeof (*fm) +
4392         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4393 
4394     if (outbufsz > MAX_STRUCT_SIZE) {
4395         /* We can't fit all the extents into the fixed size buffer.
4396          * Allocate one that is large enough and use it instead.
4397          */
4398         fm = g_try_malloc(outbufsz);
4399         if (!fm) {
4400             return -TARGET_ENOMEM;
4401         }
4402         memcpy(fm, buf_temp, sizeof(struct fiemap));
4403         free_fm = 1;
4404     }
4405     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4406     if (!is_error(ret)) {
4407         target_size_out = target_size_in;
4408         /* An extent_count of 0 means we were only counting the extents
4409          * so there are no structs to copy
4410          */
4411         if (fm->fm_extent_count != 0) {
4412             target_size_out += fm->fm_mapped_extents * extent_size;
4413         }
4414         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4415         if (!argptr) {
4416             ret = -TARGET_EFAULT;
4417         } else {
4418             /* Convert the struct fiemap */
4419             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4420             if (fm->fm_extent_count != 0) {
4421                 p = argptr + target_size_in;
4422                 /* ...and then all the struct fiemap_extents */
4423                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4424                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4425                                   THUNK_TARGET);
4426                     p += extent_size;
4427                 }
4428             }
4429             unlock_user(argptr, arg, target_size_out);
4430         }
4431     }
4432     if (free_fm) {
4433         g_free(fm);
4434     }
4435     return ret;
4436 }
4437 #endif
4438 
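/* SIOCGIFCONF needs special handling because the target's struct ifreq can
 * differ in size from the host's: ifc_len is rescaled in both directions and
 * each returned ifreq is thunk-converted individually into the guest buffer.
 * A NULL ifc_buf (the kernel's length-query form) is forwarded as-is, with
 * only the returned length rescaled.
 */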
4439 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4440                                 int fd, int cmd, abi_long arg)
4441 {
4442     const argtype *arg_type = ie->arg_type;
4443     int target_size;
4444     void *argptr;
4445     int ret;
4446     struct ifconf *host_ifconf;
4447     uint32_t outbufsz;
4448     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4449     int target_ifreq_size;
4450     int nb_ifreq;
4451     int free_buf = 0;
4452     int i;
4453     int target_ifc_len;
4454     abi_long target_ifc_buf;
4455     int host_ifc_len;
4456     char *host_ifc_buf;
4457 
4458     assert(arg_type[0] == TYPE_PTR);
4459     assert(ie->access == IOC_RW);
4460 
4461     arg_type++;
4462     target_size = thunk_type_size(arg_type, 0);
4463 
4464     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4465     if (!argptr)
4466         return -TARGET_EFAULT;
4467     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4468     unlock_user(argptr, arg, 0);
4469 
4470     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4471     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4472     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4473 
4474     if (target_ifc_buf != 0) {
4475         target_ifc_len = host_ifconf->ifc_len;
4476         nb_ifreq = target_ifc_len / target_ifreq_size;
4477         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4478 
4479         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4480         if (outbufsz > MAX_STRUCT_SIZE) {
4481             /*
4482              * We can't fit all the ifreq entries into the fixed size buffer.
4483              * Allocate one that is large enough and use it instead.
4484              */
4485             host_ifconf = malloc(outbufsz);
4486             if (!host_ifconf) {
4487                 return -TARGET_ENOMEM;
4488             }
4489             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4490             free_buf = 1;
4491         }
4492         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4493 
4494         host_ifconf->ifc_len = host_ifc_len;
4495     } else {
4496       host_ifc_buf = NULL;
4497     }
4498     host_ifconf->ifc_buf = host_ifc_buf;
4499 
4500     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4501     if (!is_error(ret)) {
4502         /* convert host ifc_len to target ifc_len */
4503 
4504         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4505         target_ifc_len = nb_ifreq * target_ifreq_size;
4506         host_ifconf->ifc_len = target_ifc_len;
4507 
4508         /* restore target ifc_buf */
4509 
4510         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4511 
4512         /* copy struct ifconf to target user */
4513 
4514         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4515         if (!argptr)
4516             return -TARGET_EFAULT;
4517         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4518         unlock_user(argptr, arg, target_size);
4519 
4520         if (target_ifc_buf != 0) {
4521             /* copy ifreq[] to target user */
4522             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4523             for (i = 0; i < nb_ifreq ; i++) {
4524                 thunk_convert(argptr + i * target_ifreq_size,
4525                               host_ifc_buf + i * sizeof(struct ifreq),
4526                               ifreq_arg_type, THUNK_TARGET);
4527             }
4528             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4529         }
4530     }
4531 
4532     if (free_buf) {
4533         free(host_ifconf);
4534     }
4535 
4536     return ret;
4537 }
4538 
4539 #if defined(CONFIG_USBFS)
4540 #if HOST_LONG_BITS > 64
4541 #error USBDEVFS thunks do not support >64 bit hosts yet.
4542 #endif
4543 struct live_urb {
4544     uint64_t target_urb_adr;
4545     uint64_t target_buf_adr;
4546     char *target_buf_ptr;
4547     struct usbdevfs_urb host_urb;
4548 };
4549 
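/* Asynchronous USB requests need per-URB bookkeeping: each submitted URB gets
 * a live_urb wrapper holding the host urb together with the guest URB and
 * buffer addresses.  The guest URB address keys a hash table so that
 * USBDEVFS_DISCARDURB can find the host urb again, while USBDEVFS_REAPURB
 * recovers the wrapper from the host urb pointer via offsetof() arithmetic.
 */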
4550 static GHashTable *usbdevfs_urb_hashtable(void)
4551 {
4552     static GHashTable *urb_hashtable;
4553 
4554     if (!urb_hashtable) {
4555         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4556     }
4557     return urb_hashtable;
4558 }
4559 
4560 static void urb_hashtable_insert(struct live_urb *urb)
4561 {
4562     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4563     g_hash_table_insert(urb_hashtable, urb, urb);
4564 }
4565 
4566 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4567 {
4568     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4569     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4570 }
4571 
4572 static void urb_hashtable_remove(struct live_urb *urb)
4573 {
4574     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4575     g_hash_table_remove(urb_hashtable, urb);
4576 }
4577 
4578 static abi_long
4579 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4580                           int fd, int cmd, abi_long arg)
4581 {
4582     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4583     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4584     struct live_urb *lurb;
4585     void *argptr;
4586     uint64_t hurb;
4587     int target_size;
4588     uintptr_t target_urb_adr;
4589     abi_long ret;
4590 
4591     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4592 
4593     memset(buf_temp, 0, sizeof(uint64_t));
4594     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4595     if (is_error(ret)) {
4596         return ret;
4597     }
4598 
4599     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4600     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4601     if (!lurb->target_urb_adr) {
4602         return -TARGET_EFAULT;
4603     }
4604     urb_hashtable_remove(lurb);
4605     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4606         lurb->host_urb.buffer_length);
4607     lurb->target_buf_ptr = NULL;
4608 
4609     /* restore the guest buffer pointer */
4610     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4611 
4612     /* update the guest urb struct */
4613     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4614     if (!argptr) {
4615         g_free(lurb);
4616         return -TARGET_EFAULT;
4617     }
4618     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4619     unlock_user(argptr, lurb->target_urb_adr, target_size);
4620 
4621     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4622     /* write back the urb handle */
4623     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4624     if (!argptr) {
4625         g_free(lurb);
4626         return -TARGET_EFAULT;
4627     }
4628 
4629     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4630     target_urb_adr = lurb->target_urb_adr;
4631     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4632     unlock_user(argptr, arg, target_size);
4633 
4634     g_free(lurb);
4635     return ret;
4636 }
4637 
4638 static abi_long
4639 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4640                              uint8_t *buf_temp __attribute__((unused)),
4641                              int fd, int cmd, abi_long arg)
4642 {
4643     struct live_urb *lurb;
4644 
4645     /* map target address back to host URB with metadata. */
4646     lurb = urb_hashtable_lookup(arg);
4647     if (!lurb) {
4648         return -TARGET_EFAULT;
4649     }
4650     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4651 }
4652 
4653 static abi_long
4654 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4655                             int fd, int cmd, abi_long arg)
4656 {
4657     const argtype *arg_type = ie->arg_type;
4658     int target_size;
4659     abi_long ret;
4660     void *argptr;
4661     int rw_dir;
4662     struct live_urb *lurb;
4663 
4664     /*
4665      * Each submitted URB needs to map to a unique ID for the
4666      * kernel, and that unique ID needs to be a pointer to
4667      * host memory.  Hence, we need to malloc for each URB.
4668      * Isochronous transfers have a variable-length struct.
4669      */
4670     arg_type++;
4671     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4672 
4673     /* construct host copy of urb and metadata */
4674     lurb = g_try_malloc0(sizeof(struct live_urb));
4675     if (!lurb) {
4676         return -TARGET_ENOMEM;
4677     }
4678 
4679     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4680     if (!argptr) {
4681         g_free(lurb);
4682         return -TARGET_EFAULT;
4683     }
4684     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4685     unlock_user(argptr, arg, 0);
4686 
4687     lurb->target_urb_adr = arg;
4688     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4689 
4690     /* buffer space used depends on endpoint type so lock the entire buffer */
4691     /* control type urbs should check the buffer contents for true direction */
4692     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4693     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4694         lurb->host_urb.buffer_length, 1);
4695     if (lurb->target_buf_ptr == NULL) {
4696         g_free(lurb);
4697         return -TARGET_EFAULT;
4698     }
4699 
4700     /* update buffer pointer in host copy */
4701     lurb->host_urb.buffer = lurb->target_buf_ptr;
4702 
4703     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4704     if (is_error(ret)) {
4705         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4706         g_free(lurb);
4707     } else {
4708         urb_hashtable_insert(lurb);
4709     }
4710 
4711     return ret;
4712 }
4713 #endif /* CONFIG_USBFS */
4714 
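/*
 * Device-mapper ioctls carry a variable-sized payload that follows
 * struct dm_ioctl at data_start.  buf_temp cannot hold it, so the whole
 * request is staged in a larger temporary buffer: the payload is
 * converted to host layout per command before the ioctl, and the reply
 * payload is converted back to target layout afterwards.
 */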
4715 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4716                             int cmd, abi_long arg)
4717 {
4718     void *argptr;
4719     struct dm_ioctl *host_dm;
4720     abi_long guest_data;
4721     uint32_t guest_data_size;
4722     int target_size;
4723     const argtype *arg_type = ie->arg_type;
4724     abi_long ret;
4725     void *big_buf = NULL;
4726     char *host_data;
4727 
4728     arg_type++;
4729     target_size = thunk_type_size(arg_type, 0);
4730     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4731     if (!argptr) {
4732         ret = -TARGET_EFAULT;
4733         goto out;
4734     }
4735     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4736     unlock_user(argptr, arg, 0);
4737 
4738     /* buf_temp is too small, so fetch things into a bigger buffer */
4739     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4740     memcpy(big_buf, buf_temp, target_size);
4741     buf_temp = big_buf;
4742     host_dm = big_buf;
4743 
4744     guest_data = arg + host_dm->data_start;
4745     if ((guest_data - arg) < 0) {
4746         ret = -TARGET_EINVAL;
4747         goto out;
4748     }
4749     guest_data_size = host_dm->data_size - host_dm->data_start;
4750     host_data = (char*)host_dm + host_dm->data_start;
4751 
4752     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4753     if (!argptr) {
4754         ret = -TARGET_EFAULT;
4755         goto out;
4756     }
4757 
4758     switch (ie->host_cmd) {
4759     case DM_REMOVE_ALL:
4760     case DM_LIST_DEVICES:
4761     case DM_DEV_CREATE:
4762     case DM_DEV_REMOVE:
4763     case DM_DEV_SUSPEND:
4764     case DM_DEV_STATUS:
4765     case DM_DEV_WAIT:
4766     case DM_TABLE_STATUS:
4767     case DM_TABLE_CLEAR:
4768     case DM_TABLE_DEPS:
4769     case DM_LIST_VERSIONS:
4770         /* no input data */
4771         break;
4772     case DM_DEV_RENAME:
4773     case DM_DEV_SET_GEOMETRY:
4774         /* data contains only strings */
4775         memcpy(host_data, argptr, guest_data_size);
4776         break;
4777     case DM_TARGET_MSG:
4778         memcpy(host_data, argptr, guest_data_size);
4779         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4780         break;
4781     case DM_TABLE_LOAD:
4782     {
4783         void *gspec = argptr;
4784         void *cur_data = host_data;
4785         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4786         int spec_size = thunk_type_size(arg_type, 0);
4787         int i;
4788 
4789         for (i = 0; i < host_dm->target_count; i++) {
4790             struct dm_target_spec *spec = cur_data;
4791             uint32_t next;
4792             int slen;
4793 
4794             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4795             slen = strlen((char*)gspec + spec_size) + 1;
4796             next = spec->next;
4797             spec->next = sizeof(*spec) + slen;
4798             strcpy((char*)&spec[1], gspec + spec_size);
4799             gspec += next;
4800             cur_data += spec->next;
4801         }
4802         break;
4803     }
4804     default:
4805         ret = -TARGET_EINVAL;
4806         unlock_user(argptr, guest_data, 0);
4807         goto out;
4808     }
4809     unlock_user(argptr, guest_data, 0);
4810 
4811     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4812     if (!is_error(ret)) {
4813         guest_data = arg + host_dm->data_start;
4814         guest_data_size = host_dm->data_size - host_dm->data_start;
4815         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4816         switch (ie->host_cmd) {
4817         case DM_REMOVE_ALL:
4818         case DM_DEV_CREATE:
4819         case DM_DEV_REMOVE:
4820         case DM_DEV_RENAME:
4821         case DM_DEV_SUSPEND:
4822         case DM_DEV_STATUS:
4823         case DM_TABLE_LOAD:
4824         case DM_TABLE_CLEAR:
4825         case DM_TARGET_MSG:
4826         case DM_DEV_SET_GEOMETRY:
4827             /* no return data */
4828             break;
4829         case DM_LIST_DEVICES:
4830         {
4831             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4832             uint32_t remaining_data = guest_data_size;
4833             void *cur_data = argptr;
4834             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4835             int nl_size = 12; /* can't use thunk_size due to alignment */
4836 
4837             while (1) {
4838                 uint32_t next = nl->next;
4839                 if (next) {
4840                     nl->next = nl_size + (strlen(nl->name) + 1);
4841                 }
4842                 if (remaining_data < nl->next) {
4843                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4844                     break;
4845                 }
4846                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4847                 strcpy(cur_data + nl_size, nl->name);
4848                 cur_data += nl->next;
4849                 remaining_data -= nl->next;
4850                 if (!next) {
4851                     break;
4852                 }
4853                 nl = (void*)nl + next;
4854             }
4855             break;
4856         }
4857         case DM_DEV_WAIT:
4858         case DM_TABLE_STATUS:
4859         {
4860             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4861             void *cur_data = argptr;
4862             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4863             int spec_size = thunk_type_size(arg_type, 0);
4864             int i;
4865 
4866             for (i = 0; i < host_dm->target_count; i++) {
4867                 uint32_t next = spec->next;
4868                 int slen = strlen((char*)&spec[1]) + 1;
4869                 spec->next = (cur_data - argptr) + spec_size + slen;
4870                 if (guest_data_size < spec->next) {
4871                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4872                     break;
4873                 }
4874                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4875                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4876                 cur_data = argptr + spec->next;
4877                 spec = (void*)host_dm + host_dm->data_start + next;
4878             }
4879             break;
4880         }
4881         case DM_TABLE_DEPS:
4882         {
4883             void *hdata = (void*)host_dm + host_dm->data_start;
4884             int count = *(uint32_t*)hdata;
4885             uint64_t *hdev = hdata + 8;
4886             uint64_t *gdev = argptr + 8;
4887             int i;
4888 
4889             *(uint32_t*)argptr = tswap32(count);
4890             for (i = 0; i < count; i++) {
4891                 *gdev = tswap64(*hdev);
4892                 gdev++;
4893                 hdev++;
4894             }
4895             break;
4896         }
4897         case DM_LIST_VERSIONS:
4898         {
4899             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4900             uint32_t remaining_data = guest_data_size;
4901             void *cur_data = argptr;
4902             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4903             int vers_size = thunk_type_size(arg_type, 0);
4904 
4905             while (1) {
4906                 uint32_t next = vers->next;
4907                 if (next) {
4908                     vers->next = vers_size + (strlen(vers->name) + 1);
4909                 }
4910                 if (remaining_data < vers->next) {
4911                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4912                     break;
4913                 }
4914                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4915                 strcpy(cur_data + vers_size, vers->name);
4916                 cur_data += vers->next;
4917                 remaining_data -= vers->next;
4918                 if (!next) {
4919                     break;
4920                 }
4921                 vers = (void*)vers + next;
4922             }
4923             break;
4924         }
4925         default:
4926             unlock_user(argptr, guest_data, 0);
4927             ret = -TARGET_EINVAL;
4928             goto out;
4929         }
4930         unlock_user(argptr, guest_data, guest_data_size);
4931 
4932         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4933         if (!argptr) {
4934             ret = -TARGET_EFAULT;
4935             goto out;
4936         }
4937         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4938         unlock_user(argptr, arg, target_size);
4939     }
4940 out:
4941     g_free(big_buf);
4942     return ret;
4943 }
4944 
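/*
 * BLKPG: the argument is a struct blkpg_ioctl_arg whose data member
 * points to a struct blkpg_partition.  Convert the outer struct, fetch
 * and convert the partition record it references, then call the host
 * ioctl with the data pointer redirected to the local copy.
 */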
4945 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4946                                int cmd, abi_long arg)
4947 {
4948     void *argptr;
4949     int target_size;
4950     const argtype *arg_type = ie->arg_type;
4951     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4952     abi_long ret;
4953 
4954     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4955     struct blkpg_partition host_part;
4956 
4957     /* Read and convert blkpg */
4958     arg_type++;
4959     target_size = thunk_type_size(arg_type, 0);
4960     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4961     if (!argptr) {
4962         ret = -TARGET_EFAULT;
4963         goto out;
4964     }
4965     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4966     unlock_user(argptr, arg, 0);
4967 
4968     switch (host_blkpg->op) {
4969     case BLKPG_ADD_PARTITION:
4970     case BLKPG_DEL_PARTITION:
4971         /* payload is struct blkpg_partition */
4972         break;
4973     default:
4974         /* Unknown opcode */
4975         ret = -TARGET_EINVAL;
4976         goto out;
4977     }
4978 
4979     /* Read and convert blkpg->data */
4980     arg = (abi_long)(uintptr_t)host_blkpg->data;
4981     target_size = thunk_type_size(part_arg_type, 0);
4982     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4983     if (!argptr) {
4984         ret = -TARGET_EFAULT;
4985         goto out;
4986     }
4987     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4988     unlock_user(argptr, arg, 0);
4989 
4990     /* Swizzle the data pointer to our local copy and call! */
4991     host_blkpg->data = &host_part;
4992     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4993 
4994 out:
4995     return ret;
4996 }
4997 
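/*
 * Routing table ioctls (struct rtentry): the struct embeds an rt_dev
 * string pointer, so it is converted field by field and the rt_dev
 * pointer is swapped for a locked copy of the guest string for the
 * duration of the host ioctl.
 */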
4998 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4999                                 int fd, int cmd, abi_long arg)
5000 {
5001     const argtype *arg_type = ie->arg_type;
5002     const StructEntry *se;
5003     const argtype *field_types;
5004     const int *dst_offsets, *src_offsets;
5005     int target_size;
5006     void *argptr;
5007     abi_ulong *target_rt_dev_ptr = NULL;
5008     unsigned long *host_rt_dev_ptr = NULL;
5009     abi_long ret;
5010     int i;
5011 
5012     assert(ie->access == IOC_W);
5013     assert(*arg_type == TYPE_PTR);
5014     arg_type++;
5015     assert(*arg_type == TYPE_STRUCT);
5016     target_size = thunk_type_size(arg_type, 0);
5017     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5018     if (!argptr) {
5019         return -TARGET_EFAULT;
5020     }
5021     arg_type++;
5022     assert(*arg_type == (int)STRUCT_rtentry);
5023     se = struct_entries + *arg_type++;
5024     assert(se->convert[0] == NULL);
5025     /* convert struct here to be able to catch rt_dev string */
5026     field_types = se->field_types;
5027     dst_offsets = se->field_offsets[THUNK_HOST];
5028     src_offsets = se->field_offsets[THUNK_TARGET];
5029     for (i = 0; i < se->nb_fields; i++) {
5030         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5031             assert(*field_types == TYPE_PTRVOID);
5032             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5033             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5034             if (*target_rt_dev_ptr != 0) {
5035                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5036                                                   tswapal(*target_rt_dev_ptr));
5037                 if (!*host_rt_dev_ptr) {
5038                     unlock_user(argptr, arg, 0);
5039                     return -TARGET_EFAULT;
5040                 }
5041             } else {
5042                 *host_rt_dev_ptr = 0;
5043             }
5044             field_types++;
5045             continue;
5046         }
5047         field_types = thunk_convert(buf_temp + dst_offsets[i],
5048                                     argptr + src_offsets[i],
5049                                     field_types, THUNK_HOST);
5050     }
5051     unlock_user(argptr, arg, 0);
5052 
5053     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5054 
5055     assert(host_rt_dev_ptr != NULL);
5056     assert(target_rt_dev_ptr != NULL);
5057     if (*host_rt_dev_ptr != 0) {
5058         unlock_user((void *)*host_rt_dev_ptr,
5059                     *target_rt_dev_ptr, 0);
5060     }
5061     return ret;
5062 }
5063 
5064 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5065                                      int fd, int cmd, abi_long arg)
5066 {
5067     int sig = target_to_host_signal(arg);
5068     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5069 }
5070 
5071 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5072                                     int fd, int cmd, abi_long arg)
5073 {
5074     struct timeval tv;
5075     abi_long ret;
5076 
5077     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5078     if (is_error(ret)) {
5079         return ret;
5080     }
5081 
5082     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5083         if (copy_to_user_timeval(arg, &tv)) {
5084             return -TARGET_EFAULT;
5085         }
5086     } else {
5087         if (copy_to_user_timeval64(arg, &tv)) {
5088             return -TARGET_EFAULT;
5089         }
5090     }
5091 
5092     return ret;
5093 }
5094 
5095 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5096                                       int fd, int cmd, abi_long arg)
5097 {
5098     struct timespec ts;
5099     abi_long ret;
5100 
5101     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5102     if (is_error(ret)) {
5103         return ret;
5104     }
5105 
5106     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5107         if (host_to_target_timespec(arg, &ts)) {
5108             return -TARGET_EFAULT;
5109         }
5110     } else {
5111         if (host_to_target_timespec64(arg, &ts)) {
5112             return -TARGET_EFAULT;
5113         }
5114     }
5115 
5116     return ret;
5117 }
5118 
5119 #ifdef TIOCGPTPEER
5120 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5121                                      int fd, int cmd, abi_long arg)
5122 {
5123     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5124     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5125 }
5126 #endif
5127 
5128 static IOCTLEntry ioctl_entries[] = {
5129 #define IOCTL(cmd, access, ...) \
5130     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5131 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5132     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5133 #define IOCTL_IGNORE(cmd) \
5134     { TARGET_ ## cmd, 0, #cmd },
5135 #include "ioctls.h"
5136     { 0, 0, },
5137 };
5138 
5139 /* ??? Implement proper locking for ioctls.  */
5140 /* do_ioctl() must return target values and target errnos. */
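/*
 * Generic ioctl dispatch: look the command up in ioctl_entries[] (built
 * from ioctls.h).  Entries with a do_ioctl callback are handled by that
 * helper; otherwise the argument is marshalled according to its argtype
 * description: copied in for IOC_W, copied out for IOC_R, and both ways
 * for IOC_RW.
 */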
5141 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5142 {
5143     const IOCTLEntry *ie;
5144     const argtype *arg_type;
5145     abi_long ret;
5146     uint8_t buf_temp[MAX_STRUCT_SIZE];
5147     int target_size;
5148     void *argptr;
5149 
5150     ie = ioctl_entries;
5151     for(;;) {
5152         if (ie->target_cmd == 0) {
5153             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5154             return -TARGET_ENOSYS;
5155         }
5156         if (ie->target_cmd == cmd)
5157             break;
5158         ie++;
5159     }
5160     arg_type = ie->arg_type;
5161     if (ie->do_ioctl) {
5162         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5163     } else if (!ie->host_cmd) {
5164         /* Some architectures define BSD ioctls in their headers
5165            that are not implemented in Linux.  */
5166         return -TARGET_ENOSYS;
5167     }
5168 
5169     switch(arg_type[0]) {
5170     case TYPE_NULL:
5171         /* no argument */
5172         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5173         break;
5174     case TYPE_PTRVOID:
5175     case TYPE_INT:
5176         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5177         break;
5178     case TYPE_PTR:
5179         arg_type++;
5180         target_size = thunk_type_size(arg_type, 0);
5181         switch(ie->access) {
5182         case IOC_R:
5183             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5184             if (!is_error(ret)) {
5185                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5186                 if (!argptr)
5187                     return -TARGET_EFAULT;
5188                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5189                 unlock_user(argptr, arg, target_size);
5190             }
5191             break;
5192         case IOC_W:
5193             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5194             if (!argptr)
5195                 return -TARGET_EFAULT;
5196             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5197             unlock_user(argptr, arg, 0);
5198             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5199             break;
5200         default:
5201         case IOC_RW:
5202             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5203             if (!argptr)
5204                 return -TARGET_EFAULT;
5205             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5206             unlock_user(argptr, arg, 0);
5207             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5208             if (!is_error(ret)) {
5209                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5210                 if (!argptr)
5211                     return -TARGET_EFAULT;
5212                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5213                 unlock_user(argptr, arg, target_size);
5214             }
5215             break;
5216         }
5217         break;
5218     default:
5219         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5220                  (long)cmd, arg_type[0]);
5221         ret = -TARGET_ENOSYS;
5222         break;
5223     }
5224     return ret;
5225 }
5226 
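/*
 * Termios flag translation tables: each entry pairs a (mask, bits)
 * combination in the target encoding with its host counterpart, and is
 * consumed by target_to_host_bitmask()/host_to_target_bitmask() below.
 */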
5227 static const bitmask_transtbl iflag_tbl[] = {
5228         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5229         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5230         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5231         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5232         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5233         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5234         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5235         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5236         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5237         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5238         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5239         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5240         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5241         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5242         { 0, 0, 0, 0 }
5243 };
5244 
5245 static const bitmask_transtbl oflag_tbl[] = {
5246 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5247 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5248 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5249 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5250 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5251 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5252 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5253 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5254 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5255 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5256 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5257 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5258 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5259 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5260 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5261 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5262 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5263 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5264 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5265 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5266 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5267 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5268 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5269 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5270 	{ 0, 0, 0, 0 }
5271 };
5272 
5273 static const bitmask_transtbl cflag_tbl[] = {
5274 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5275 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5276 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5277 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5278 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5279 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5280 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5281 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5282 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5283 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5284 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5285 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5286 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5287 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5288 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5289 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5290 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5291 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5292 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5293 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5294 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5295 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5296 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5297 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5298 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5299 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5300 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5301 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5302 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5303 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5304 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5305 	{ 0, 0, 0, 0 }
5306 };
5307 
5308 static const bitmask_transtbl lflag_tbl[] = {
5309 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5310 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5311 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5312 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5313 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5314 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5315 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5316 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5317 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5318 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5319 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5320 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5321 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5322 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5323 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5324 	{ 0, 0, 0, 0 }
5325 };
5326 
5327 static void target_to_host_termios (void *dst, const void *src)
5328 {
5329     struct host_termios *host = dst;
5330     const struct target_termios *target = src;
5331 
5332     host->c_iflag =
5333         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5334     host->c_oflag =
5335         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5336     host->c_cflag =
5337         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5338     host->c_lflag =
5339         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5340     host->c_line = target->c_line;
5341 
5342     memset(host->c_cc, 0, sizeof(host->c_cc));
5343     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5344     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5345     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5346     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5347     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5348     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5349     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5350     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5351     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5352     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5353     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5354     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5355     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5356     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5357     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5358     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5359     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5360 }
5361 
5362 static void host_to_target_termios (void *dst, const void *src)
5363 {
5364     struct target_termios *target = dst;
5365     const struct host_termios *host = src;
5366 
5367     target->c_iflag =
5368         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5369     target->c_oflag =
5370         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5371     target->c_cflag =
5372         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5373     target->c_lflag =
5374         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5375     target->c_line = host->c_line;
5376 
5377     memset(target->c_cc, 0, sizeof(target->c_cc));
5378     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5379     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5380     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5381     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5382     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5383     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5384     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5385     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5386     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5387     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5388     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5389     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5390     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5391     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5392     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5393     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5394     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5395 }
5396 
5397 static const StructEntry struct_termios_def = {
5398     .convert = { host_to_target_termios, target_to_host_termios },
5399     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5400     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5401 };
5402 
5403 static bitmask_transtbl mmap_flags_tbl[] = {
5404     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5405     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5406     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5407     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5408       MAP_ANONYMOUS, MAP_ANONYMOUS },
5409     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5410       MAP_GROWSDOWN, MAP_GROWSDOWN },
5411     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5412       MAP_DENYWRITE, MAP_DENYWRITE },
5413     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5414       MAP_EXECUTABLE, MAP_EXECUTABLE },
5415     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5416     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5417       MAP_NORESERVE, MAP_NORESERVE },
5418     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5419     /* MAP_STACK has been ignored by the kernel for quite some time.
5420        Recognize it for the target insofar as we do not want to pass
5421        it through to the host.  */
5422     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5423     { 0, 0, 0, 0 }
5424 };
5425 
5426 #if defined(TARGET_I386)
5427 
5428 /* NOTE: there is really only one LDT shared by all the threads */
5429 static uint8_t *ldt_table;
5430 
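/* Copy the raw LDT bytes back to the guest buffer, at most bytecount,
   and return the number of bytes copied (0 if no LDT was allocated). */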
5431 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5432 {
5433     int size;
5434     void *p;
5435 
5436     if (!ldt_table)
5437         return 0;
5438     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5439     if (size > bytecount)
5440         size = bytecount;
5441     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5442     if (!p)
5443         return -TARGET_EFAULT;
5444     /* ??? Should this be byteswapped?  */
5445     memcpy(p, ldt_table, size);
5446     unlock_user(p, ptr, size);
5447     return size;
5448 }
5449 
5450 /* XXX: add locking support */
5451 static abi_long write_ldt(CPUX86State *env,
5452                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5453 {
5454     struct target_modify_ldt_ldt_s ldt_info;
5455     struct target_modify_ldt_ldt_s *target_ldt_info;
5456     int seg_32bit, contents, read_exec_only, limit_in_pages;
5457     int seg_not_present, useable, lm;
5458     uint32_t *lp, entry_1, entry_2;
5459 
5460     if (bytecount != sizeof(ldt_info))
5461         return -TARGET_EINVAL;
5462     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5463         return -TARGET_EFAULT;
5464     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5465     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5466     ldt_info.limit = tswap32(target_ldt_info->limit);
5467     ldt_info.flags = tswap32(target_ldt_info->flags);
5468     unlock_user_struct(target_ldt_info, ptr, 0);
5469 
5470     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5471         return -TARGET_EINVAL;
5472     seg_32bit = ldt_info.flags & 1;
5473     contents = (ldt_info.flags >> 1) & 3;
5474     read_exec_only = (ldt_info.flags >> 3) & 1;
5475     limit_in_pages = (ldt_info.flags >> 4) & 1;
5476     seg_not_present = (ldt_info.flags >> 5) & 1;
5477     useable = (ldt_info.flags >> 6) & 1;
5478 #ifdef TARGET_ABI32
5479     lm = 0;
5480 #else
5481     lm = (ldt_info.flags >> 7) & 1;
5482 #endif
5483     if (contents == 3) {
5484         if (oldmode)
5485             return -TARGET_EINVAL;
5486         if (seg_not_present == 0)
5487             return -TARGET_EINVAL;
5488     }
5489     /* allocate the LDT */
5490     if (!ldt_table) {
5491         env->ldt.base = target_mmap(0,
5492                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5493                                     PROT_READ|PROT_WRITE,
5494                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5495         if (env->ldt.base == -1)
5496             return -TARGET_ENOMEM;
5497         memset(g2h(env->ldt.base), 0,
5498                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5499         env->ldt.limit = 0xffff;
5500         ldt_table = g2h(env->ldt.base);
5501     }
5502 
5503     /* NOTE: same code as Linux kernel */
5504     /* Allow LDTs to be cleared by the user. */
5505     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5506         if (oldmode ||
5507             (contents == 0		&&
5508              read_exec_only == 1	&&
5509              seg_32bit == 0		&&
5510              limit_in_pages == 0	&&
5511              seg_not_present == 1	&&
5512              useable == 0 )) {
5513             entry_1 = 0;
5514             entry_2 = 0;
5515             goto install;
5516         }
5517     }
5518 
5519     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5520         (ldt_info.limit & 0x0ffff);
5521     entry_2 = (ldt_info.base_addr & 0xff000000) |
5522         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5523         (ldt_info.limit & 0xf0000) |
5524         ((read_exec_only ^ 1) << 9) |
5525         (contents << 10) |
5526         ((seg_not_present ^ 1) << 15) |
5527         (seg_32bit << 22) |
5528         (limit_in_pages << 23) |
5529         (lm << 21) |
5530         0x7000;
5531     if (!oldmode)
5532         entry_2 |= (useable << 20);
5533 
5534     /* Install the new entry ...  */
5535 install:
5536     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5537     lp[0] = tswap32(entry_1);
5538     lp[1] = tswap32(entry_2);
5539     return 0;
5540 }
5541 
5542 /* specific and weird i386 syscalls */
5543 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5544                               unsigned long bytecount)
5545 {
5546     abi_long ret;
5547 
5548     switch (func) {
5549     case 0:
5550         ret = read_ldt(ptr, bytecount);
5551         break;
5552     case 1:
5553         ret = write_ldt(env, ptr, bytecount, 1);
5554         break;
5555     case 0x11:
5556         ret = write_ldt(env, ptr, bytecount, 0);
5557         break;
5558     default:
5559         ret = -TARGET_ENOSYS;
5560         break;
5561     }
5562     return ret;
5563 }
5564 
5565 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5566 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5567 {
5568     uint64_t *gdt_table = g2h(env->gdt.base);
5569     struct target_modify_ldt_ldt_s ldt_info;
5570     struct target_modify_ldt_ldt_s *target_ldt_info;
5571     int seg_32bit, contents, read_exec_only, limit_in_pages;
5572     int seg_not_present, useable, lm;
5573     uint32_t *lp, entry_1, entry_2;
5574     int i;
5575 
5576     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5577     if (!target_ldt_info)
5578         return -TARGET_EFAULT;
5579     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5580     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5581     ldt_info.limit = tswap32(target_ldt_info->limit);
5582     ldt_info.flags = tswap32(target_ldt_info->flags);
5583     if (ldt_info.entry_number == -1) {
5584         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5585             if (gdt_table[i] == 0) {
5586                 ldt_info.entry_number = i;
5587                 target_ldt_info->entry_number = tswap32(i);
5588                 break;
5589             }
5590         }
5591     }
5592     unlock_user_struct(target_ldt_info, ptr, 1);
5593 
5594     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5595         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5596            return -TARGET_EINVAL;
5597     seg_32bit = ldt_info.flags & 1;
5598     contents = (ldt_info.flags >> 1) & 3;
5599     read_exec_only = (ldt_info.flags >> 3) & 1;
5600     limit_in_pages = (ldt_info.flags >> 4) & 1;
5601     seg_not_present = (ldt_info.flags >> 5) & 1;
5602     useable = (ldt_info.flags >> 6) & 1;
5603 #ifdef TARGET_ABI32
5604     lm = 0;
5605 #else
5606     lm = (ldt_info.flags >> 7) & 1;
5607 #endif
5608 
5609     if (contents == 3) {
5610         if (seg_not_present == 0)
5611             return -TARGET_EINVAL;
5612     }
5613 
5614     /* NOTE: same code as Linux kernel */
5615     /* Allow LDTs to be cleared by the user. */
5616     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5617         if ((contents == 0             &&
5618              read_exec_only == 1       &&
5619              seg_32bit == 0            &&
5620              limit_in_pages == 0       &&
5621              seg_not_present == 1      &&
5622              useable == 0 )) {
5623             entry_1 = 0;
5624             entry_2 = 0;
5625             goto install;
5626         }
5627     }
5628 
5629     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5630         (ldt_info.limit & 0x0ffff);
5631     entry_2 = (ldt_info.base_addr & 0xff000000) |
5632         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5633         (ldt_info.limit & 0xf0000) |
5634         ((read_exec_only ^ 1) << 9) |
5635         (contents << 10) |
5636         ((seg_not_present ^ 1) << 15) |
5637         (seg_32bit << 22) |
5638         (limit_in_pages << 23) |
5639         (useable << 20) |
5640         (lm << 21) |
5641         0x7000;
5642 
5643     /* Install the new entry ...  */
5644 install:
5645     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5646     lp[0] = tswap32(entry_1);
5647     lp[1] = tswap32(entry_2);
5648     return 0;
5649 }
5650 
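/* Read back a TLS descriptor: unpack the GDT entry selected by
   entry_number into the base/limit/flags layout of
   struct target_modify_ldt_ldt_s. */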
5651 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5652 {
5653     struct target_modify_ldt_ldt_s *target_ldt_info;
5654     uint64_t *gdt_table = g2h(env->gdt.base);
5655     uint32_t base_addr, limit, flags;
5656     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5657     int seg_not_present, useable, lm;
5658     uint32_t *lp, entry_1, entry_2;
5659 
5660     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5661     if (!target_ldt_info)
5662         return -TARGET_EFAULT;
5663     idx = tswap32(target_ldt_info->entry_number);
5664     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5665         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5666         unlock_user_struct(target_ldt_info, ptr, 1);
5667         return -TARGET_EINVAL;
5668     }
5669     lp = (uint32_t *)(gdt_table + idx);
5670     entry_1 = tswap32(lp[0]);
5671     entry_2 = tswap32(lp[1]);
5672 
5673     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5674     contents = (entry_2 >> 10) & 3;
5675     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5676     seg_32bit = (entry_2 >> 22) & 1;
5677     limit_in_pages = (entry_2 >> 23) & 1;
5678     useable = (entry_2 >> 20) & 1;
5679 #ifdef TARGET_ABI32
5680     lm = 0;
5681 #else
5682     lm = (entry_2 >> 21) & 1;
5683 #endif
5684     flags = (seg_32bit << 0) | (contents << 1) |
5685         (read_exec_only << 3) | (limit_in_pages << 4) |
5686         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5687     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5688     base_addr = (entry_1 >> 16) |
5689         (entry_2 & 0xff000000) |
5690         ((entry_2 & 0xff) << 16);
5691     target_ldt_info->base_addr = tswapal(base_addr);
5692     target_ldt_info->limit = tswap32(limit);
5693     target_ldt_info->flags = tswap32(flags);
5694     unlock_user_struct(target_ldt_info, ptr, 1);
5695     return 0;
5696 }
5697 #endif /* TARGET_I386 && TARGET_ABI32 */
5698 
5699 #ifndef TARGET_ABI32
5700 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5701 {
5702     abi_long ret = 0;
5703     abi_ulong val;
5704     int idx;
5705 
5706     switch(code) {
5707     case TARGET_ARCH_SET_GS:
5708     case TARGET_ARCH_SET_FS:
5709         if (code == TARGET_ARCH_SET_GS)
5710             idx = R_GS;
5711         else
5712             idx = R_FS;
5713         cpu_x86_load_seg(env, idx, 0);
5714         env->segs[idx].base = addr;
5715         break;
5716     case TARGET_ARCH_GET_GS:
5717     case TARGET_ARCH_GET_FS:
5718         if (code == TARGET_ARCH_GET_GS)
5719             idx = R_GS;
5720         else
5721             idx = R_FS;
5722         val = env->segs[idx].base;
5723         if (put_user(val, addr, abi_ulong))
5724             ret = -TARGET_EFAULT;
5725         break;
5726     default:
5727         ret = -TARGET_EINVAL;
5728         break;
5729     }
5730     return ret;
5731 }
5732 #endif
5733 
5734 #endif /* defined(TARGET_I386) */
5735 
5736 #define NEW_STACK_SIZE 0x40000
5737 
5738 
5739 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5740 typedef struct {
5741     CPUArchState *env;
5742     pthread_mutex_t mutex;
5743     pthread_cond_t cond;
5744     pthread_t thread;
5745     uint32_t tid;
5746     abi_ulong child_tidptr;
5747     abi_ulong parent_tidptr;
5748     sigset_t sigmask;
5749 } new_thread_info;
5750 
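/*
 * Thread start routine used by do_fork() for CLONE_VM clones: register
 * the new thread with RCU and TCG, publish its TID where requested,
 * restore the signal mask, signal the parent that setup is complete,
 * then wait on clone_lock before entering the guest cpu_loop().
 */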
5751 static void *clone_func(void *arg)
5752 {
5753     new_thread_info *info = arg;
5754     CPUArchState *env;
5755     CPUState *cpu;
5756     TaskState *ts;
5757 
5758     rcu_register_thread();
5759     tcg_register_thread();
5760     env = info->env;
5761     cpu = env_cpu(env);
5762     thread_cpu = cpu;
5763     ts = (TaskState *)cpu->opaque;
5764     info->tid = sys_gettid();
5765     task_settid(ts);
5766     if (info->child_tidptr)
5767         put_user_u32(info->tid, info->child_tidptr);
5768     if (info->parent_tidptr)
5769         put_user_u32(info->tid, info->parent_tidptr);
5770     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5771     /* Enable signals.  */
5772     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5773     /* Signal to the parent that we're ready.  */
5774     pthread_mutex_lock(&info->mutex);
5775     pthread_cond_broadcast(&info->cond);
5776     pthread_mutex_unlock(&info->mutex);
5777     /* Wait until the parent has finished initializing the tls state.  */
5778     pthread_mutex_lock(&clone_lock);
5779     pthread_mutex_unlock(&clone_lock);
5780     cpu_loop(env);
5781     /* never exits */
5782     return NULL;
5783 }
5784 
5785 /* do_fork() must return host values and target errnos (unlike most
5786    do_*() functions). */
5787 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5788                    abi_ulong parent_tidptr, target_ulong newtls,
5789                    abi_ulong child_tidptr)
5790 {
5791     CPUState *cpu = env_cpu(env);
5792     int ret;
5793     TaskState *ts;
5794     CPUState *new_cpu;
5795     CPUArchState *new_env;
5796     sigset_t sigmask;
5797 
5798     flags &= ~CLONE_IGNORED_FLAGS;
5799 
5800     /* Emulate vfork() with fork() */
5801     if (flags & CLONE_VFORK)
5802         flags &= ~(CLONE_VFORK | CLONE_VM);
5803 
5804     if (flags & CLONE_VM) {
5805         TaskState *parent_ts = (TaskState *)cpu->opaque;
5806         new_thread_info info;
5807         pthread_attr_t attr;
5808 
5809         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5810             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5811             return -TARGET_EINVAL;
5812         }
5813 
5814         ts = g_new0(TaskState, 1);
5815         init_task_state(ts);
5816 
5817         /* Grab a mutex so that thread setup appears atomic.  */
5818         pthread_mutex_lock(&clone_lock);
5819 
5820         /* we create a new CPU instance. */
5821         new_env = cpu_copy(env);
5822         /* Init regs that differ from the parent.  */
5823         cpu_clone_regs_child(new_env, newsp, flags);
5824         cpu_clone_regs_parent(env, flags);
5825         new_cpu = env_cpu(new_env);
5826         new_cpu->opaque = ts;
5827         ts->bprm = parent_ts->bprm;
5828         ts->info = parent_ts->info;
5829         ts->signal_mask = parent_ts->signal_mask;
5830 
5831         if (flags & CLONE_CHILD_CLEARTID) {
5832             ts->child_tidptr = child_tidptr;
5833         }
5834 
5835         if (flags & CLONE_SETTLS) {
5836             cpu_set_tls (new_env, newtls);
5837         }
5838 
5839         memset(&info, 0, sizeof(info));
5840         pthread_mutex_init(&info.mutex, NULL);
5841         pthread_mutex_lock(&info.mutex);
5842         pthread_cond_init(&info.cond, NULL);
5843         info.env = new_env;
5844         if (flags & CLONE_CHILD_SETTID) {
5845             info.child_tidptr = child_tidptr;
5846         }
5847         if (flags & CLONE_PARENT_SETTID) {
5848             info.parent_tidptr = parent_tidptr;
5849         }
5850 
5851         ret = pthread_attr_init(&attr);
5852         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5853         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5854         /* It is not safe to deliver signals until the child has finished
5855            initializing, so temporarily block all signals.  */
5856         sigfillset(&sigmask);
5857         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5858         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5859 
5860         /* If this is our first additional thread, we need to ensure we
5861          * generate code for parallel execution and flush old translations.
5862          */
5863         if (!parallel_cpus) {
5864             parallel_cpus = true;
5865             tb_flush(cpu);
5866         }
5867 
5868         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5869         /* TODO: Free new CPU state if thread creation failed.  */
5870 
5871         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5872         pthread_attr_destroy(&attr);
5873         if (ret == 0) {
5874             /* Wait for the child to initialize.  */
5875             pthread_cond_wait(&info.cond, &info.mutex);
5876             ret = info.tid;
5877         } else {
5878             ret = -1;
5879         }
5880         pthread_mutex_unlock(&info.mutex);
5881         pthread_cond_destroy(&info.cond);
5882         pthread_mutex_destroy(&info.mutex);
5883         pthread_mutex_unlock(&clone_lock);
5884     } else {
5885         /* if there is no CLONE_VM, we consider it a fork */
5886         if (flags & CLONE_INVALID_FORK_FLAGS) {
5887             return -TARGET_EINVAL;
5888         }
5889 
5890         /* We can't support custom termination signals */
5891         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5892             return -TARGET_EINVAL;
5893         }
5894 
5895         if (block_signals()) {
5896             return -TARGET_ERESTARTSYS;
5897         }
5898 
5899         fork_start();
5900         ret = fork();
5901         if (ret == 0) {
5902             /* Child Process.  */
5903             cpu_clone_regs_child(env, newsp, flags);
5904             fork_end(1);
5905             /* There is a race condition here.  The parent process could
5906                theoretically read the TID in the child process before the child
5907                tid is set.  This would require using either ptrace
5908                (not implemented) or having *_tidptr point at a shared memory
5909                mapping.  We can't repeat the spinlock hack used above because
5910                the child process gets its own copy of the lock.  */
5911             if (flags & CLONE_CHILD_SETTID)
5912                 put_user_u32(sys_gettid(), child_tidptr);
5913             if (flags & CLONE_PARENT_SETTID)
5914                 put_user_u32(sys_gettid(), parent_tidptr);
5915             ts = (TaskState *)cpu->opaque;
5916             if (flags & CLONE_SETTLS)
5917                 cpu_set_tls (env, newtls);
5918             if (flags & CLONE_CHILD_CLEARTID)
5919                 ts->child_tidptr = child_tidptr;
5920         } else {
5921             cpu_clone_regs_parent(env, flags);
5922             fork_end(0);
5923         }
5924     }
5925     return ret;
5926 }
5927 
5928 /* warning: doesn't handle Linux-specific flags... */
5929 static int target_to_host_fcntl_cmd(int cmd)
5930 {
5931     int ret;
5932 
5933     switch(cmd) {
5934     case TARGET_F_DUPFD:
5935     case TARGET_F_GETFD:
5936     case TARGET_F_SETFD:
5937     case TARGET_F_GETFL:
5938     case TARGET_F_SETFL:
5939         ret = cmd;
5940         break;
5941     case TARGET_F_GETLK:
5942         ret = F_GETLK64;
5943         break;
5944     case TARGET_F_SETLK:
5945         ret = F_SETLK64;
5946         break;
5947     case TARGET_F_SETLKW:
5948         ret = F_SETLKW64;
5949         break;
5950     case TARGET_F_GETOWN:
5951         ret = F_GETOWN;
5952         break;
5953     case TARGET_F_SETOWN:
5954         ret = F_SETOWN;
5955         break;
5956     case TARGET_F_GETSIG:
5957         ret = F_GETSIG;
5958         break;
5959     case TARGET_F_SETSIG:
5960         ret = F_SETSIG;
5961         break;
5962 #if TARGET_ABI_BITS == 32
5963     case TARGET_F_GETLK64:
5964         ret = F_GETLK64;
5965         break;
5966     case TARGET_F_SETLK64:
5967         ret = F_SETLK64;
5968         break;
5969     case TARGET_F_SETLKW64:
5970         ret = F_SETLKW64;
5971         break;
5972 #endif
5973     case TARGET_F_SETLEASE:
5974         ret = F_SETLEASE;
5975         break;
5976     case TARGET_F_GETLEASE:
5977         ret = F_GETLEASE;
5978         break;
5979 #ifdef F_DUPFD_CLOEXEC
5980     case TARGET_F_DUPFD_CLOEXEC:
5981         ret = F_DUPFD_CLOEXEC;
5982         break;
5983 #endif
5984     case TARGET_F_NOTIFY:
5985         ret = F_NOTIFY;
5986         break;
5987 #ifdef F_GETOWN_EX
5988     case TARGET_F_GETOWN_EX:
5989         ret = F_GETOWN_EX;
5990         break;
5991 #endif
5992 #ifdef F_SETOWN_EX
5993     case TARGET_F_SETOWN_EX:
5994         ret = F_SETOWN_EX;
5995         break;
5996 #endif
5997 #ifdef F_SETPIPE_SZ
5998     case TARGET_F_SETPIPE_SZ:
5999         ret = F_SETPIPE_SZ;
6000         break;
6001     case TARGET_F_GETPIPE_SZ:
6002         ret = F_GETPIPE_SZ;
6003         break;
6004 #endif
6005     default:
6006         ret = -TARGET_EINVAL;
6007         break;
6008     }
6009 
6010 #if defined(__powerpc64__)
6011     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which are
6012      * not supported by the kernel. The glibc fcntl call actually adjusts
6013      * them to 5, 6 and 7 before making the syscall(). Since we make the
6014      * syscall directly, adjust to what is supported by the kernel.
6015      */
6016     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6017         ret -= F_GETLK64 - 5;
6018     }
6019 #endif
6020 
6021     return ret;
6022 }
6023 
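/*
 * struct flock l_type values differ between target and host ABIs.
 * FLOCK_TRANSTBL expands to a switch over those values; each caller
 * defines TRANSTBL_CONVERT to translate in the direction it needs.
 */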
6024 #define FLOCK_TRANSTBL \
6025     switch (type) { \
6026     TRANSTBL_CONVERT(F_RDLCK); \
6027     TRANSTBL_CONVERT(F_WRLCK); \
6028     TRANSTBL_CONVERT(F_UNLCK); \
6029     TRANSTBL_CONVERT(F_EXLCK); \
6030     TRANSTBL_CONVERT(F_SHLCK); \
6031     }
6032 
6033 static int target_to_host_flock(int type)
6034 {
6035 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6036     FLOCK_TRANSTBL
6037 #undef  TRANSTBL_CONVERT
6038     return -TARGET_EINVAL;
6039 }
6040 
6041 static int host_to_target_flock(int type)
6042 {
6043 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6044     FLOCK_TRANSTBL
6045 #undef  TRANSTBL_CONVERT
6046     /* If we don't know how to convert the value coming
6047      * from the host, we copy it to the target field as-is.
6048      */
6049     return type;
6050 }
6051 
6052 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6053                                             abi_ulong target_flock_addr)
6054 {
6055     struct target_flock *target_fl;
6056     int l_type;
6057 
6058     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6059         return -TARGET_EFAULT;
6060     }
6061 
6062     __get_user(l_type, &target_fl->l_type);
6063     l_type = target_to_host_flock(l_type);
6064     if (l_type < 0) {
6065         return l_type;
6066     }
6067     fl->l_type = l_type;
6068     __get_user(fl->l_whence, &target_fl->l_whence);
6069     __get_user(fl->l_start, &target_fl->l_start);
6070     __get_user(fl->l_len, &target_fl->l_len);
6071     __get_user(fl->l_pid, &target_fl->l_pid);
6072     unlock_user_struct(target_fl, target_flock_addr, 0);
6073     return 0;
6074 }
6075 
6076 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6077                                           const struct flock64 *fl)
6078 {
6079     struct target_flock *target_fl;
6080     short l_type;
6081 
6082     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6083         return -TARGET_EFAULT;
6084     }
6085 
6086     l_type = host_to_target_flock(fl->l_type);
6087     __put_user(l_type, &target_fl->l_type);
6088     __put_user(fl->l_whence, &target_fl->l_whence);
6089     __put_user(fl->l_start, &target_fl->l_start);
6090     __put_user(fl->l_len, &target_fl->l_len);
6091     __put_user(fl->l_pid, &target_fl->l_pid);
6092     unlock_user_struct(target_fl, target_flock_addr, 1);
6093     return 0;
6094 }
6095 
6096 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6097 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6098 
6099 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6100 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6101                                                    abi_ulong target_flock_addr)
6102 {
6103     struct target_oabi_flock64 *target_fl;
6104     int l_type;
6105 
6106     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6107         return -TARGET_EFAULT;
6108     }
6109 
6110     __get_user(l_type, &target_fl->l_type);
6111     l_type = target_to_host_flock(l_type);
6112     if (l_type < 0) {
6113         return l_type;
6114     }
6115     fl->l_type = l_type;
6116     __get_user(fl->l_whence, &target_fl->l_whence);
6117     __get_user(fl->l_start, &target_fl->l_start);
6118     __get_user(fl->l_len, &target_fl->l_len);
6119     __get_user(fl->l_pid, &target_fl->l_pid);
6120     unlock_user_struct(target_fl, target_flock_addr, 0);
6121     return 0;
6122 }
6123 
6124 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6125                                                  const struct flock64 *fl)
6126 {
6127     struct target_oabi_flock64 *target_fl;
6128     short l_type;
6129 
6130     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6131         return -TARGET_EFAULT;
6132     }
6133 
6134     l_type = host_to_target_flock(fl->l_type);
6135     __put_user(l_type, &target_fl->l_type);
6136     __put_user(fl->l_whence, &target_fl->l_whence);
6137     __put_user(fl->l_start, &target_fl->l_start);
6138     __put_user(fl->l_len, &target_fl->l_len);
6139     __put_user(fl->l_pid, &target_fl->l_pid);
6140     unlock_user_struct(target_fl, target_flock_addr, 1);
6141     return 0;
6142 }
6143 #endif
6144 
6145 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6146                                               abi_ulong target_flock_addr)
6147 {
6148     struct target_flock64 *target_fl;
6149     int l_type;
6150 
6151     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6152         return -TARGET_EFAULT;
6153     }
6154 
6155     __get_user(l_type, &target_fl->l_type);
6156     l_type = target_to_host_flock(l_type);
6157     if (l_type < 0) {
6158         return l_type;
6159     }
6160     fl->l_type = l_type;
6161     __get_user(fl->l_whence, &target_fl->l_whence);
6162     __get_user(fl->l_start, &target_fl->l_start);
6163     __get_user(fl->l_len, &target_fl->l_len);
6164     __get_user(fl->l_pid, &target_fl->l_pid);
6165     unlock_user_struct(target_fl, target_flock_addr, 0);
6166     return 0;
6167 }
6168 
6169 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6170                                             const struct flock64 *fl)
6171 {
6172     struct target_flock64 *target_fl;
6173     short l_type;
6174 
6175     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6176         return -TARGET_EFAULT;
6177     }
6178 
6179     l_type = host_to_target_flock(fl->l_type);
6180     __put_user(l_type, &target_fl->l_type);
6181     __put_user(fl->l_whence, &target_fl->l_whence);
6182     __put_user(fl->l_start, &target_fl->l_start);
6183     __put_user(fl->l_len, &target_fl->l_len);
6184     __put_user(fl->l_pid, &target_fl->l_pid);
6185     unlock_user_struct(target_fl, target_flock_addr, 1);
6186     return 0;
6187 }
6188 
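/* Emulate fcntl(2): translate the guest command and any flock or f_owner_ex
   argument into the host representation, issue the host fcntl, and convert
   the results back for the guest. */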
6189 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6190 {
6191     struct flock64 fl64;
6192 #ifdef F_GETOWN_EX
6193     struct f_owner_ex fox;
6194     struct target_f_owner_ex *target_fox;
6195 #endif
6196     abi_long ret;
6197     int host_cmd = target_to_host_fcntl_cmd(cmd);
6198 
6199     if (host_cmd == -TARGET_EINVAL)
6200         return host_cmd;
6201 
6202     switch(cmd) {
6203     case TARGET_F_GETLK:
6204         ret = copy_from_user_flock(&fl64, arg);
6205         if (ret) {
6206             return ret;
6207         }
6208         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6209         if (ret == 0) {
6210             ret = copy_to_user_flock(arg, &fl64);
6211         }
6212         break;
6213 
6214     case TARGET_F_SETLK:
6215     case TARGET_F_SETLKW:
6216         ret = copy_from_user_flock(&fl64, arg);
6217         if (ret) {
6218             return ret;
6219         }
6220         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6221         break;
6222 
6223     case TARGET_F_GETLK64:
6224         ret = copy_from_user_flock64(&fl64, arg);
6225         if (ret) {
6226             return ret;
6227         }
6228         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6229         if (ret == 0) {
6230             ret = copy_to_user_flock64(arg, &fl64);
6231         }
6232         break;
6233     case TARGET_F_SETLK64:
6234     case TARGET_F_SETLKW64:
6235         ret = copy_from_user_flock64(&fl64, arg);
6236         if (ret) {
6237             return ret;
6238         }
6239         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6240         break;
6241 
6242     case TARGET_F_GETFL:
6243         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6244         if (ret >= 0) {
6245             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6246         }
6247         break;
6248 
6249     case TARGET_F_SETFL:
6250         ret = get_errno(safe_fcntl(fd, host_cmd,
6251                                    target_to_host_bitmask(arg,
6252                                                           fcntl_flags_tbl)));
6253         break;
6254 
6255 #ifdef F_GETOWN_EX
6256     case TARGET_F_GETOWN_EX:
6257         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6258         if (ret >= 0) {
6259             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6260                 return -TARGET_EFAULT;
6261             target_fox->type = tswap32(fox.type);
6262             target_fox->pid = tswap32(fox.pid);
6263             unlock_user_struct(target_fox, arg, 1);
6264         }
6265         break;
6266 #endif
6267 
6268 #ifdef F_SETOWN_EX
6269     case TARGET_F_SETOWN_EX:
6270         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6271             return -TARGET_EFAULT;
6272         fox.type = tswap32(target_fox->type);
6273         fox.pid = tswap32(target_fox->pid);
6274         unlock_user_struct(target_fox, arg, 0);
6275         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6276         break;
6277 #endif
6278 
6279     case TARGET_F_SETOWN:
6280     case TARGET_F_GETOWN:
6281     case TARGET_F_SETSIG:
6282     case TARGET_F_GETSIG:
6283     case TARGET_F_SETLEASE:
6284     case TARGET_F_GETLEASE:
6285     case TARGET_F_SETPIPE_SZ:
6286     case TARGET_F_GETPIPE_SZ:
6287         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6288         break;
6289 
6290     default:
6291         ret = get_errno(safe_fcntl(fd, cmd, arg));
6292         break;
6293     }
6294     return ret;
6295 }
6296 
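/* Helpers for targets with 16-bit uid_t/gid_t (USE_UID16): IDs above 65535
   are clamped to 65534 when narrowing, and the -1 sentinel is preserved
   when widening. */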
6297 #ifdef USE_UID16
6298 
6299 static inline int high2lowuid(int uid)
6300 {
6301     if (uid > 65535)
6302         return 65534;
6303     else
6304         return uid;
6305 }
6306 
6307 static inline int high2lowgid(int gid)
6308 {
6309     if (gid > 65535)
6310         return 65534;
6311     else
6312         return gid;
6313 }
6314 
6315 static inline int low2highuid(int uid)
6316 {
6317     if ((int16_t)uid == -1)
6318         return -1;
6319     else
6320         return uid;
6321 }
6322 
6323 static inline int low2highgid(int gid)
6324 {
6325     if ((int16_t)gid == -1)
6326         return -1;
6327     else
6328         return gid;
6329 }
6330 static inline int tswapid(int id)
6331 {
6332     return tswap16(id);
6333 }
6334 
6335 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6336 
6337 #else /* !USE_UID16 */
6338 static inline int high2lowuid(int uid)
6339 {
6340     return uid;
6341 }
6342 static inline int high2lowgid(int gid)
6343 {
6344     return gid;
6345 }
6346 static inline int low2highuid(int uid)
6347 {
6348     return uid;
6349 }
6350 static inline int low2highgid(int gid)
6351 {
6352     return gid;
6353 }
6354 static inline int tswapid(int id)
6355 {
6356     return tswap32(id);
6357 }
6358 
6359 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6360 
6361 #endif /* USE_UID16 */
6362 
6363 /* We must do direct syscalls for setting UID/GID, because we want to
6364  * implement the Linux system call semantics of "change only for this thread",
6365  * not the libc/POSIX semantics of "change for all threads in process".
6366  * (See http://ewontfix.com/17/ for more details.)
6367  * We use the 32-bit version of the syscalls if present; if it is not
6368  * then either the host architecture supports 32-bit UIDs natively with
6369  * the standard syscall, or the 16-bit UID is the best we can do.
6370  */
6371 #ifdef __NR_setuid32
6372 #define __NR_sys_setuid __NR_setuid32
6373 #else
6374 #define __NR_sys_setuid __NR_setuid
6375 #endif
6376 #ifdef __NR_setgid32
6377 #define __NR_sys_setgid __NR_setgid32
6378 #else
6379 #define __NR_sys_setgid __NR_setgid
6380 #endif
6381 #ifdef __NR_setresuid32
6382 #define __NR_sys_setresuid __NR_setresuid32
6383 #else
6384 #define __NR_sys_setresuid __NR_setresuid
6385 #endif
6386 #ifdef __NR_setresgid32
6387 #define __NR_sys_setresgid __NR_setresgid32
6388 #else
6389 #define __NR_sys_setresgid __NR_setresgid
6390 #endif
6391 
6392 _syscall1(int, sys_setuid, uid_t, uid)
6393 _syscall1(int, sys_setgid, gid_t, gid)
6394 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6395 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6396 
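/* One-time initialisation: register the structure layouts used by the thunk
   conversion code, build the errno reverse-mapping table, and patch the
   computed argument size into ioctl table entries that request it. */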
6397 void syscall_init(void)
6398 {
6399     IOCTLEntry *ie;
6400     const argtype *arg_type;
6401     int size;
6402     int i;
6403 
6404     thunk_init(STRUCT_MAX);
6405 
6406 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6407 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6408 #include "syscall_types.h"
6409 #undef STRUCT
6410 #undef STRUCT_SPECIAL
6411 
6412     /* Build the target_to_host_errno_table[] from
6413      * host_to_target_errno_table[]. */
6414     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6415         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6416     }
6417 
6418     /* We patch the ioctl size if necessary. We rely on the fact that
6419        no ioctl has all bits set to '1' in the size field. */
6420     ie = ioctl_entries;
6421     while (ie->target_cmd != 0) {
6422         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6423             TARGET_IOC_SIZEMASK) {
6424             arg_type = ie->arg_type;
6425             if (arg_type[0] != TYPE_PTR) {
6426                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6427                         ie->target_cmd);
6428                 exit(1);
6429             }
6430             arg_type++;
6431             size = thunk_type_size(arg_type, 0);
6432             ie->target_cmd = (ie->target_cmd &
6433                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6434                 (size << TARGET_IOC_SIZESHIFT);
6435         }
6436 
6437         /* automatic consistency check if same arch */
6438 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6439     (defined(__x86_64__) && defined(TARGET_X86_64))
6440         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6441             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6442                     ie->name, ie->target_cmd, ie->host_cmd);
6443         }
6444 #endif
6445         ie++;
6446     }
6447 }
6448 
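/* Assemble a 64-bit offset from the two guest words that 32-bit ABIs use to
   pass it, honouring target endianness; e.g. on a little-endian target
   word0=0x89abcdef, word1=0x01234567 yields 0x0123456789abcdef.  On 64-bit
   ABIs the first word already holds the whole offset. */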
6449 #if TARGET_ABI_BITS == 32
6450 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6451 {
6452 #ifdef TARGET_WORDS_BIGENDIAN
6453     return ((uint64_t)word0 << 32) | word1;
6454 #else
6455     return ((uint64_t)word1 << 32) | word0;
6456 #endif
6457 }
6458 #else /* TARGET_ABI_BITS == 32 */
6459 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6460 {
6461     return word0;
6462 }
6463 #endif /* TARGET_ABI_BITS != 32 */
6464 
6465 #ifdef TARGET_NR_truncate64
6466 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6467                                          abi_long arg2,
6468                                          abi_long arg3,
6469                                          abi_long arg4)
6470 {
6471     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6472         arg2 = arg3;
6473         arg3 = arg4;
6474     }
6475     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6476 }
6477 #endif
6478 
6479 #ifdef TARGET_NR_ftruncate64
6480 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6481                                           abi_long arg2,
6482                                           abi_long arg3,
6483                                           abi_long arg4)
6484 {
6485     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6486         arg2 = arg3;
6487         arg3 = arg4;
6488     }
6489     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6490 }
6491 #endif
6492 
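/* Copy a struct itimerspec between guest and host memory, byte-swapping the
   tv_sec/tv_nsec fields as needed (both directions below). */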
6493 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6494                                                  abi_ulong target_addr)
6495 {
6496     struct target_itimerspec *target_itspec;
6497 
6498     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6499         return -TARGET_EFAULT;
6500     }
6501 
6502     host_itspec->it_interval.tv_sec =
6503                             tswapal(target_itspec->it_interval.tv_sec);
6504     host_itspec->it_interval.tv_nsec =
6505                             tswapal(target_itspec->it_interval.tv_nsec);
6506     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6507     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6508 
6509     unlock_user_struct(target_itspec, target_addr, 1);
6510     return 0;
6511 }
6512 
6513 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6514                                                struct itimerspec *host_its)
6515 {
6516     struct target_itimerspec *target_itspec;
6517 
6518     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6519         return -TARGET_EFAULT;
6520     }
6521 
6522     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6523     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6524 
6525     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6526     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6527 
6528     unlock_user_struct(target_itspec, target_addr, 0);
6529     return 0;
6530 }
6531 
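/* Field-by-field conversion of struct timex between guest and host memory,
   in both directions. */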
6532 static inline abi_long target_to_host_timex(struct timex *host_tx,
6533                                             abi_long target_addr)
6534 {
6535     struct target_timex *target_tx;
6536 
6537     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6538         return -TARGET_EFAULT;
6539     }
6540 
6541     __get_user(host_tx->modes, &target_tx->modes);
6542     __get_user(host_tx->offset, &target_tx->offset);
6543     __get_user(host_tx->freq, &target_tx->freq);
6544     __get_user(host_tx->maxerror, &target_tx->maxerror);
6545     __get_user(host_tx->esterror, &target_tx->esterror);
6546     __get_user(host_tx->status, &target_tx->status);
6547     __get_user(host_tx->constant, &target_tx->constant);
6548     __get_user(host_tx->precision, &target_tx->precision);
6549     __get_user(host_tx->tolerance, &target_tx->tolerance);
6550     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6551     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6552     __get_user(host_tx->tick, &target_tx->tick);
6553     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6554     __get_user(host_tx->jitter, &target_tx->jitter);
6555     __get_user(host_tx->shift, &target_tx->shift);
6556     __get_user(host_tx->stabil, &target_tx->stabil);
6557     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6558     __get_user(host_tx->calcnt, &target_tx->calcnt);
6559     __get_user(host_tx->errcnt, &target_tx->errcnt);
6560     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6561     __get_user(host_tx->tai, &target_tx->tai);
6562 
6563     unlock_user_struct(target_tx, target_addr, 0);
6564     return 0;
6565 }
6566 
6567 static inline abi_long host_to_target_timex(abi_long target_addr,
6568                                             struct timex *host_tx)
6569 {
6570     struct target_timex *target_tx;
6571 
6572     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6573         return -TARGET_EFAULT;
6574     }
6575 
6576     __put_user(host_tx->modes, &target_tx->modes);
6577     __put_user(host_tx->offset, &target_tx->offset);
6578     __put_user(host_tx->freq, &target_tx->freq);
6579     __put_user(host_tx->maxerror, &target_tx->maxerror);
6580     __put_user(host_tx->esterror, &target_tx->esterror);
6581     __put_user(host_tx->status, &target_tx->status);
6582     __put_user(host_tx->constant, &target_tx->constant);
6583     __put_user(host_tx->precision, &target_tx->precision);
6584     __put_user(host_tx->tolerance, &target_tx->tolerance);
6585     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6586     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6587     __put_user(host_tx->tick, &target_tx->tick);
6588     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6589     __put_user(host_tx->jitter, &target_tx->jitter);
6590     __put_user(host_tx->shift, &target_tx->shift);
6591     __put_user(host_tx->stabil, &target_tx->stabil);
6592     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6593     __put_user(host_tx->calcnt, &target_tx->calcnt);
6594     __put_user(host_tx->errcnt, &target_tx->errcnt);
6595     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6596     __put_user(host_tx->tai, &target_tx->tai);
6597 
6598     unlock_user_struct(target_tx, target_addr, 1);
6599     return 0;
6600 }
6601 
6602 
6603 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6604                                                abi_ulong target_addr)
6605 {
6606     struct target_sigevent *target_sevp;
6607 
6608     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6609         return -TARGET_EFAULT;
6610     }
6611 
6612     /* This union is awkward on 64 bit systems because it has a 32 bit
6613      * integer and a pointer in it; we follow the conversion approach
6614      * used for handling sigval types in signal.c so the guest should get
6615      * the correct value back even if we did a 64 bit byteswap and it's
6616      * using the 32 bit integer.
6617      */
6618     host_sevp->sigev_value.sival_ptr =
6619         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6620     host_sevp->sigev_signo =
6621         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6622     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6623     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6624 
6625     unlock_user_struct(target_sevp, target_addr, 1);
6626     return 0;
6627 }
6628 
6629 #if defined(TARGET_NR_mlockall)
6630 static inline int target_to_host_mlockall_arg(int arg)
6631 {
6632     int result = 0;
6633 
6634     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6635         result |= MCL_CURRENT;
6636     }
6637     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6638         result |= MCL_FUTURE;
6639     }
6640     return result;
6641 }
6642 #endif
6643 
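/* Convert a host struct stat into the target's stat64 layout, using the
   separate target_eabi_stat64 layout when a 32-bit Arm guest is using the
   EABI. */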
6644 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6645      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6646      defined(TARGET_NR_newfstatat))
6647 static inline abi_long host_to_target_stat64(void *cpu_env,
6648                                              abi_ulong target_addr,
6649                                              struct stat *host_st)
6650 {
6651 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6652     if (((CPUARMState *)cpu_env)->eabi) {
6653         struct target_eabi_stat64 *target_st;
6654 
6655         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6656             return -TARGET_EFAULT;
6657         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6658         __put_user(host_st->st_dev, &target_st->st_dev);
6659         __put_user(host_st->st_ino, &target_st->st_ino);
6660 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6661         __put_user(host_st->st_ino, &target_st->__st_ino);
6662 #endif
6663         __put_user(host_st->st_mode, &target_st->st_mode);
6664         __put_user(host_st->st_nlink, &target_st->st_nlink);
6665         __put_user(host_st->st_uid, &target_st->st_uid);
6666         __put_user(host_st->st_gid, &target_st->st_gid);
6667         __put_user(host_st->st_rdev, &target_st->st_rdev);
6668         __put_user(host_st->st_size, &target_st->st_size);
6669         __put_user(host_st->st_blksize, &target_st->st_blksize);
6670         __put_user(host_st->st_blocks, &target_st->st_blocks);
6671         __put_user(host_st->st_atime, &target_st->target_st_atime);
6672         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6673         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6674 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6675         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6676         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6677         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6678 #endif
6679         unlock_user_struct(target_st, target_addr, 1);
6680     } else
6681 #endif
6682     {
6683 #if defined(TARGET_HAS_STRUCT_STAT64)
6684         struct target_stat64 *target_st;
6685 #else
6686         struct target_stat *target_st;
6687 #endif
6688 
6689         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6690             return -TARGET_EFAULT;
6691         memset(target_st, 0, sizeof(*target_st));
6692         __put_user(host_st->st_dev, &target_st->st_dev);
6693         __put_user(host_st->st_ino, &target_st->st_ino);
6694 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6695         __put_user(host_st->st_ino, &target_st->__st_ino);
6696 #endif
6697         __put_user(host_st->st_mode, &target_st->st_mode);
6698         __put_user(host_st->st_nlink, &target_st->st_nlink);
6699         __put_user(host_st->st_uid, &target_st->st_uid);
6700         __put_user(host_st->st_gid, &target_st->st_gid);
6701         __put_user(host_st->st_rdev, &target_st->st_rdev);
6702         /* XXX: better use of kernel struct */
6703         __put_user(host_st->st_size, &target_st->st_size);
6704         __put_user(host_st->st_blksize, &target_st->st_blksize);
6705         __put_user(host_st->st_blocks, &target_st->st_blocks);
6706         __put_user(host_st->st_atime, &target_st->target_st_atime);
6707         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6708         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6709 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6710         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6711         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6712         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6713 #endif
6714         unlock_user_struct(target_st, target_addr, 1);
6715     }
6716 
6717     return 0;
6718 }
6719 #endif
6720 
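/* Copy a statx result out to guest memory; the host-side data is already
   held in target_statx layout, so each field only needs to be swapped into
   place. */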
6721 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6722 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6723                                             abi_ulong target_addr)
6724 {
6725     struct target_statx *target_stx;
6726 
6727     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6728         return -TARGET_EFAULT;
6729     }
6730     memset(target_stx, 0, sizeof(*target_stx));
6731 
6732     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6733     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6734     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6735     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6736     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6737     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6738     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6739     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6740     __put_user(host_stx->stx_size, &target_stx->stx_size);
6741     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6742     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6743     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6744     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6745     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6746     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6747     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6748     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6749     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6750     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6751     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6752     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6753     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6754     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6755 
6756     unlock_user_struct(target_stx, target_addr, 1);
6757 
6758     return 0;
6759 }
6760 #endif
6761 
6762 
6763 /* ??? Using host futex calls even when target atomic operations
6764    are not really atomic probably breaks things.  However, implementing
6765    futexes locally would make futexes shared between multiple processes
6766    tricky.  In any case they're probably useless, because guest atomic
6767    operations won't work either.  */
6768 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6769                     target_ulong uaddr2, int val3)
6770 {
6771     struct timespec ts, *pts;
6772     int base_op;
6773 
6774     /* ??? We assume FUTEX_* constants are the same on both host
6775        and target.  */
6776 #ifdef FUTEX_CMD_MASK
6777     base_op = op & FUTEX_CMD_MASK;
6778 #else
6779     base_op = op;
6780 #endif
6781     switch (base_op) {
6782     case FUTEX_WAIT:
6783     case FUTEX_WAIT_BITSET:
6784         if (timeout) {
6785             pts = &ts;
6786             target_to_host_timespec(pts, timeout);
6787         } else {
6788             pts = NULL;
6789         }
6790         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6791                          pts, NULL, val3));
6792     case FUTEX_WAKE:
6793         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6794     case FUTEX_FD:
6795         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6796     case FUTEX_REQUEUE:
6797     case FUTEX_CMP_REQUEUE:
6798     case FUTEX_WAKE_OP:
6799         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6800            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6801            But the prototype takes a `struct timespec *'; insert casts
6802            to satisfy the compiler.  We do not need to tswap TIMEOUT
6803            since it's not compared to guest memory.  */
6804         pts = (struct timespec *)(uintptr_t) timeout;
6805         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6806                                     g2h(uaddr2),
6807                                     (base_op == FUTEX_CMP_REQUEUE
6808                                      ? tswap32(val3)
6809                                      : val3)));
6810     default:
6811         return -TARGET_ENOSYS;
6812     }
6813 }
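
/* name_to_handle_at()/open_by_handle_at() emulation: the variable-sized
   struct file_handle is copied between guest and host with only the
   handle_bytes and handle_type fields byte-swapped; the rest is opaque. */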
6814 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6815 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6816                                      abi_long handle, abi_long mount_id,
6817                                      abi_long flags)
6818 {
6819     struct file_handle *target_fh;
6820     struct file_handle *fh;
6821     int mid = 0;
6822     abi_long ret;
6823     char *name;
6824     unsigned int size, total_size;
6825 
6826     if (get_user_s32(size, handle)) {
6827         return -TARGET_EFAULT;
6828     }
6829 
6830     name = lock_user_string(pathname);
6831     if (!name) {
6832         return -TARGET_EFAULT;
6833     }
6834 
6835     total_size = sizeof(struct file_handle) + size;
6836     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6837     if (!target_fh) {
6838         unlock_user(name, pathname, 0);
6839         return -TARGET_EFAULT;
6840     }
6841 
6842     fh = g_malloc0(total_size);
6843     fh->handle_bytes = size;
6844 
6845     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6846     unlock_user(name, pathname, 0);
6847 
6848     /* man name_to_handle_at(2):
6849      * Other than the use of the handle_bytes field, the caller should treat
6850      * the file_handle structure as an opaque data type
6851      */
6852 
6853     memcpy(target_fh, fh, total_size);
6854     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6855     target_fh->handle_type = tswap32(fh->handle_type);
6856     g_free(fh);
6857     unlock_user(target_fh, handle, total_size);
6858 
6859     if (put_user_s32(mid, mount_id)) {
6860         return -TARGET_EFAULT;
6861     }
6862 
6863     return ret;
6864 
6865 }
6866 #endif
6867 
6868 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6869 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6870                                      abi_long flags)
6871 {
6872     struct file_handle *target_fh;
6873     struct file_handle *fh;
6874     unsigned int size, total_size;
6875     abi_long ret;
6876 
6877     if (get_user_s32(size, handle)) {
6878         return -TARGET_EFAULT;
6879     }
6880 
6881     total_size = sizeof(struct file_handle) + size;
6882     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6883     if (!target_fh) {
6884         return -TARGET_EFAULT;
6885     }
6886 
6887     fh = g_memdup(target_fh, total_size);
6888     fh->handle_bytes = size;
6889     fh->handle_type = tswap32(target_fh->handle_type);
6890 
6891     ret = get_errno(open_by_handle_at(mount_fd, fh,
6892                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6893 
6894     g_free(fh);
6895 
6896     unlock_user(target_fh, handle, total_size);
6897 
6898     return ret;
6899 }
6900 #endif
6901 
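/* Create a signalfd for the guest: convert the signal mask and flags to
   host values and register a translator for the new fd so the data read
   from it can be converted back for the guest. */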
6902 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6903 
6904 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6905 {
6906     int host_flags;
6907     target_sigset_t *target_mask;
6908     sigset_t host_mask;
6909     abi_long ret;
6910 
6911     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6912         return -TARGET_EINVAL;
6913     }
6914     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6915         return -TARGET_EFAULT;
6916     }
6917 
6918     target_to_host_sigset(&host_mask, target_mask);
6919 
6920     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6921 
6922     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6923     if (ret >= 0) {
6924         fd_trans_register(ret, &target_signalfd_trans);
6925     }
6926 
6927     unlock_user_struct(target_mask, mask, 0);
6928 
6929     return ret;
6930 }
6931 #endif
6932 
6933 /* Map host to target signal numbers for the wait family of syscalls.
6934    Assume all other status bits are the same.  */
6935 int host_to_target_waitstatus(int status)
6936 {
6937     if (WIFSIGNALED(status)) {
6938         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6939     }
6940     if (WIFSTOPPED(status)) {
6941         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6942                | (status & 0xff);
6943     }
6944     return status;
6945 }
6946 
6947 static int open_self_cmdline(void *cpu_env, int fd)
6948 {
6949     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6950     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6951     int i;
6952 
6953     for (i = 0; i < bprm->argc; i++) {
6954         size_t len = strlen(bprm->argv[i]) + 1;
6955 
6956         if (write(fd, bprm->argv[i], len) != len) {
6957             return -1;
6958         }
6959     }
6960 
6961     return 0;
6962 }
6963 
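/* Synthesize /proc/self/maps for the guest: read the host's own maps file
   and rewrite each line whose range maps back into guest address space,
   converting host addresses with h2g() and tagging the guest stack. */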
6964 static int open_self_maps(void *cpu_env, int fd)
6965 {
6966     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6967     TaskState *ts = cpu->opaque;
6968     FILE *fp;
6969     char *line = NULL;
6970     size_t len = 0;
6971     ssize_t read;
6972 
6973     fp = fopen("/proc/self/maps", "r");
6974     if (fp == NULL) {
6975         return -1;
6976     }
6977 
6978     while ((read = getline(&line, &len, fp)) != -1) {
6979         int fields, dev_maj, dev_min, inode;
6980         uint64_t min, max, offset;
6981         char flag_r, flag_w, flag_x, flag_p;
6982         char path[512] = "";
6983         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6984                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6985                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6986 
6987         if ((fields < 10) || (fields > 11)) {
6988             continue;
6989         }
6990         if (h2g_valid(min)) {
6991             int flags = page_get_flags(h2g(min));
6992             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6993             if (page_check_range(h2g(min), max - min, flags) == -1) {
6994                 continue;
6995             }
6996             if (h2g(min) == ts->info->stack_limit) {
6997                 pstrcpy(path, sizeof(path), "      [stack]");
6998             }
6999             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7000                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7001                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7002                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7003                     path[0] ? "         " : "", path);
7004         }
7005     }
7006 
7007     free(line);
7008     fclose(fp);
7009 
7010     return 0;
7011 }
7012 
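/* Synthesize a minimal /proc/self/stat: only the pid, the command name and
   the start-of-stack field are filled in; all other fields read as 0. */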
7013 static int open_self_stat(void *cpu_env, int fd)
7014 {
7015     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7016     TaskState *ts = cpu->opaque;
7017     abi_ulong start_stack = ts->info->start_stack;
7018     int i;
7019 
7020     for (i = 0; i < 44; i++) {
7021       char buf[128];
7022       int len;
7023       uint64_t val = 0;
7024 
7025       if (i == 0) {
7026         /* pid */
7027         val = getpid();
7028         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7029       } else if (i == 1) {
7030         /* app name */
7031         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7032       } else if (i == 27) {
7033         /* stack bottom */
7034         val = start_stack;
7035         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7036       } else {
7037         /* for the rest, there is MasterCard */
7038         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7039       }
7040 
7041       len = strlen(buf);
7042       if (write(fd, buf, len) != len) {
7043           return -1;
7044       }
7045     }
7046 
7047     return 0;
7048 }
7049 
7050 static int open_self_auxv(void *cpu_env, int fd)
7051 {
7052     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7053     TaskState *ts = cpu->opaque;
7054     abi_ulong auxv = ts->info->saved_auxv;
7055     abi_ulong len = ts->info->auxv_len;
7056     char *ptr;
7057 
7058     /*
7059      * The auxiliary vector is stored on the target process stack.
7060      * Read the whole auxv vector and copy it to the file.
7061      */
7062     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7063     if (ptr != NULL) {
7064         while (len > 0) {
7065             ssize_t r;
7066             r = write(fd, ptr, len);
7067             if (r <= 0) {
7068                 break;
7069             }
7070             len -= r;
7071             ptr += r;
7072         }
7073         lseek(fd, 0, SEEK_SET);
7074         unlock_user(ptr, auxv, len);
7075     }
7076 
7077     return 0;
7078 }
7079 
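/* Return non-zero if filename names /proc/self/<entry> or
   /proc/<our pid>/<entry>. */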
7080 static int is_proc_myself(const char *filename, const char *entry)
7081 {
7082     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7083         filename += strlen("/proc/");
7084         if (!strncmp(filename, "self/", strlen("self/"))) {
7085             filename += strlen("self/");
7086         } else if (*filename >= '1' && *filename <= '9') {
7087             char myself[80];
7088             snprintf(myself, sizeof(myself), "%d/", getpid());
7089             if (!strncmp(filename, myself, strlen(myself))) {
7090                 filename += strlen(myself);
7091             } else {
7092                 return 0;
7093             }
7094         } else {
7095             return 0;
7096         }
7097         if (!strcmp(filename, entry)) {
7098             return 1;
7099         }
7100     }
7101     return 0;
7102 }
7103 
7104 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7105     defined(TARGET_SPARC) || defined(TARGET_M68K)
7106 static int is_proc(const char *filename, const char *entry)
7107 {
7108     return strcmp(filename, entry) == 0;
7109 }
7110 #endif
7111 
7112 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7113 static int open_net_route(void *cpu_env, int fd)
7114 {
7115     FILE *fp;
7116     char *line = NULL;
7117     size_t len = 0;
7118     ssize_t read;
7119 
7120     fp = fopen("/proc/net/route", "r");
7121     if (fp == NULL) {
7122         return -1;
7123     }
7124 
7125     /* read header */
7126 
7127     read = getline(&line, &len, fp);
7128     dprintf(fd, "%s", line);
7129 
7130     /* read routes */
7131 
7132     while ((read = getline(&line, &len, fp)) != -1) {
7133         char iface[16];
7134         uint32_t dest, gw, mask;
7135         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7136         int fields;
7137 
7138         fields = sscanf(line,
7139                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7140                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7141                         &mask, &mtu, &window, &irtt);
7142         if (fields != 11) {
7143             continue;
7144         }
7145         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7146                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7147                 metric, tswap32(mask), mtu, window, irtt);
7148     }
7149 
7150     free(line);
7151     fclose(fp);
7152 
7153     return 0;
7154 }
7155 #endif
7156 
7157 #if defined(TARGET_SPARC)
7158 static int open_cpuinfo(void *cpu_env, int fd)
7159 {
7160     dprintf(fd, "type\t\t: sun4u\n");
7161     return 0;
7162 }
7163 #endif
7164 
7165 #if defined(TARGET_M68K)
7166 static int open_hardware(void *cpu_env, int fd)
7167 {
7168     dprintf(fd, "Model:\t\tqemu-m68k\n");
7169     return 0;
7170 }
7171 #endif
7172 
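/* openat(2) emulation: certain /proc paths that would otherwise expose host
   details are intercepted and served from an unlinked temporary file filled
   with synthesized contents; everything else is passed through to the
   host. */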
7173 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7174 {
7175     struct fake_open {
7176         const char *filename;
7177         int (*fill)(void *cpu_env, int fd);
7178         int (*cmp)(const char *s1, const char *s2);
7179     };
7180     const struct fake_open *fake_open;
7181     static const struct fake_open fakes[] = {
7182         { "maps", open_self_maps, is_proc_myself },
7183         { "stat", open_self_stat, is_proc_myself },
7184         { "auxv", open_self_auxv, is_proc_myself },
7185         { "cmdline", open_self_cmdline, is_proc_myself },
7186 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7187         { "/proc/net/route", open_net_route, is_proc },
7188 #endif
7189 #if defined(TARGET_SPARC)
7190         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7191 #endif
7192 #if defined(TARGET_M68K)
7193         { "/proc/hardware", open_hardware, is_proc },
7194 #endif
7195         { NULL, NULL, NULL }
7196     };
7197 
7198     if (is_proc_myself(pathname, "exe")) {
7199         int execfd = qemu_getauxval(AT_EXECFD);
7200         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7201     }
7202 
7203     for (fake_open = fakes; fake_open->filename; fake_open++) {
7204         if (fake_open->cmp(pathname, fake_open->filename)) {
7205             break;
7206         }
7207     }
7208 
7209     if (fake_open->filename) {
7210         const char *tmpdir;
7211         char filename[PATH_MAX];
7212         int fd, r;
7213 
7214         /* create a temporary file to hold the synthesized contents */
7215         tmpdir = getenv("TMPDIR");
7216         if (!tmpdir)
7217             tmpdir = "/tmp";
7218         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7219         fd = mkstemp(filename);
7220         if (fd < 0) {
7221             return fd;
7222         }
7223         unlink(filename);
7224 
7225         if ((r = fake_open->fill(cpu_env, fd))) {
7226             int e = errno;
7227             close(fd);
7228             errno = e;
7229             return r;
7230         }
7231         lseek(fd, 0, SEEK_SET);
7232 
7233         return fd;
7234     }
7235 
7236     return safe_openat(dirfd, path(pathname), flags, mode);
7237 }
7238 
7239 #define TIMER_MAGIC 0x0caf0000
7240 #define TIMER_MAGIC_MASK 0xffff0000
7241 
7242 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7243 static target_timer_t get_timer_id(abi_long arg)
7244 {
7245     target_timer_t timerid = arg;
7246 
7247     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7248         return -TARGET_EINVAL;
7249     }
7250 
7251     timerid &= 0xffff;
7252 
7253     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7254         return -TARGET_EINVAL;
7255     }
7256 
7257     return timerid;
7258 }
7259 
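/* Convert a CPU affinity mask between the guest's abi_ulong array and the
   host's unsigned long array, copying bit by bit so that differences in
   word size and byte order are handled. */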
7260 static int target_to_host_cpu_mask(unsigned long *host_mask,
7261                                    size_t host_size,
7262                                    abi_ulong target_addr,
7263                                    size_t target_size)
7264 {
7265     unsigned target_bits = sizeof(abi_ulong) * 8;
7266     unsigned host_bits = sizeof(*host_mask) * 8;
7267     abi_ulong *target_mask;
7268     unsigned i, j;
7269 
7270     assert(host_size >= target_size);
7271 
7272     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7273     if (!target_mask) {
7274         return -TARGET_EFAULT;
7275     }
7276     memset(host_mask, 0, host_size);
7277 
7278     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7279         unsigned bit = i * target_bits;
7280         abi_ulong val;
7281 
7282         __get_user(val, &target_mask[i]);
7283         for (j = 0; j < target_bits; j++, bit++) {
7284             if (val & (1UL << j)) {
7285                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7286             }
7287         }
7288     }
7289 
7290     unlock_user(target_mask, target_addr, 0);
7291     return 0;
7292 }
7293 
7294 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7295                                    size_t host_size,
7296                                    abi_ulong target_addr,
7297                                    size_t target_size)
7298 {
7299     unsigned target_bits = sizeof(abi_ulong) * 8;
7300     unsigned host_bits = sizeof(*host_mask) * 8;
7301     abi_ulong *target_mask;
7302     unsigned i, j;
7303 
7304     assert(host_size >= target_size);
7305 
7306     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7307     if (!target_mask) {
7308         return -TARGET_EFAULT;
7309     }
7310 
7311     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7312         unsigned bit = i * target_bits;
7313         abi_ulong val = 0;
7314 
7315         for (j = 0; j < target_bits; j++, bit++) {
7316             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7317                 val |= 1UL << j;
7318             }
7319         }
7320         __put_user(val, &target_mask[i]);
7321     }
7322 
7323     unlock_user(target_mask, target_addr, target_size);
7324     return 0;
7325 }
7326 
7327 /* This is an internal helper for do_syscall so that it is easier
7328  * to have a single return point, so that actions, such as logging
7329  * of syscall results, can be performed.
7330  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7331  */
7332 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7333                             abi_long arg2, abi_long arg3, abi_long arg4,
7334                             abi_long arg5, abi_long arg6, abi_long arg7,
7335                             abi_long arg8)
7336 {
7337     CPUState *cpu = env_cpu(cpu_env);
7338     abi_long ret;
7339 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7340     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7341     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7342     || defined(TARGET_NR_statx)
7343     struct stat st;
7344 #endif
7345 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7346     || defined(TARGET_NR_fstatfs)
7347     struct statfs stfs;
7348 #endif
7349     void *p;
7350 
7351     switch(num) {
7352     case TARGET_NR_exit:
7353         /* In old applications this may be used to implement _exit(2).
7354            However in threaded applications it is used for thread termination,
7355            and _exit_group is used for application termination.
7356            Do thread termination if we have more than one thread.  */
7357 
7358         if (block_signals()) {
7359             return -TARGET_ERESTARTSYS;
7360         }
7361 
7362         cpu_list_lock();
7363 
7364         if (CPU_NEXT(first_cpu)) {
7365             TaskState *ts;
7366 
7367             /* Remove the CPU from the list.  */
7368             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7369 
7370             cpu_list_unlock();
7371 
7372             ts = cpu->opaque;
7373             if (ts->child_tidptr) {
7374                 put_user_u32(0, ts->child_tidptr);
7375                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7376                           NULL, NULL, 0);
7377             }
7378             thread_cpu = NULL;
7379             object_unref(OBJECT(cpu));
7380             g_free(ts);
7381             rcu_unregister_thread();
7382             pthread_exit(NULL);
7383         }
7384 
7385         cpu_list_unlock();
7386         preexit_cleanup(cpu_env, arg1);
7387         _exit(arg1);
7388         return 0; /* avoid warning */
7389     case TARGET_NR_read:
7390         if (arg2 == 0 && arg3 == 0) {
7391             return get_errno(safe_read(arg1, 0, 0));
7392         } else {
7393             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7394                 return -TARGET_EFAULT;
7395             ret = get_errno(safe_read(arg1, p, arg3));
7396             if (ret >= 0 &&
7397                 fd_trans_host_to_target_data(arg1)) {
7398                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7399             }
7400             unlock_user(p, arg2, ret);
7401         }
7402         return ret;
7403     case TARGET_NR_write:
7404         if (arg2 == 0 && arg3 == 0) {
7405             return get_errno(safe_write(arg1, 0, 0));
7406         }
7407         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7408             return -TARGET_EFAULT;
7409         if (fd_trans_target_to_host_data(arg1)) {
7410             void *copy = g_malloc(arg3);
7411             memcpy(copy, p, arg3);
7412             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7413             if (ret >= 0) {
7414                 ret = get_errno(safe_write(arg1, copy, ret));
7415             }
7416             g_free(copy);
7417         } else {
7418             ret = get_errno(safe_write(arg1, p, arg3));
7419         }
7420         unlock_user(p, arg2, 0);
7421         return ret;
7422 
7423 #ifdef TARGET_NR_open
7424     case TARGET_NR_open:
7425         if (!(p = lock_user_string(arg1)))
7426             return -TARGET_EFAULT;
7427         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7428                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7429                                   arg3));
7430         fd_trans_unregister(ret);
7431         unlock_user(p, arg1, 0);
7432         return ret;
7433 #endif
7434     case TARGET_NR_openat:
7435         if (!(p = lock_user_string(arg2)))
7436             return -TARGET_EFAULT;
7437         ret = get_errno(do_openat(cpu_env, arg1, p,
7438                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7439                                   arg4));
7440         fd_trans_unregister(ret);
7441         unlock_user(p, arg2, 0);
7442         return ret;
7443 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7444     case TARGET_NR_name_to_handle_at:
7445         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7446         return ret;
7447 #endif
7448 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7449     case TARGET_NR_open_by_handle_at:
7450         ret = do_open_by_handle_at(arg1, arg2, arg3);
7451         fd_trans_unregister(ret);
7452         return ret;
7453 #endif
7454     case TARGET_NR_close:
7455         fd_trans_unregister(arg1);
7456         return get_errno(close(arg1));
7457 
7458     case TARGET_NR_brk:
7459         return do_brk(arg1);
7460 #ifdef TARGET_NR_fork
7461     case TARGET_NR_fork:
7462         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7463 #endif
7464 #ifdef TARGET_NR_waitpid
7465     case TARGET_NR_waitpid:
7466         {
7467             int status;
7468             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7469             if (!is_error(ret) && arg2 && ret
7470                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7471                 return -TARGET_EFAULT;
7472         }
7473         return ret;
7474 #endif
7475 #ifdef TARGET_NR_waitid
7476     case TARGET_NR_waitid:
7477         {
7478             siginfo_t info;
7479             info.si_pid = 0;
7480             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7481             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7482                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7483                     return -TARGET_EFAULT;
7484                 host_to_target_siginfo(p, &info);
7485                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7486             }
7487         }
7488         return ret;
7489 #endif
7490 #ifdef TARGET_NR_creat /* not on alpha */
7491     case TARGET_NR_creat:
7492         if (!(p = lock_user_string(arg1)))
7493             return -TARGET_EFAULT;
7494         ret = get_errno(creat(p, arg2));
7495         fd_trans_unregister(ret);
7496         unlock_user(p, arg1, 0);
7497         return ret;
7498 #endif
7499 #ifdef TARGET_NR_link
7500     case TARGET_NR_link:
7501         {
7502             void * p2;
7503             p = lock_user_string(arg1);
7504             p2 = lock_user_string(arg2);
7505             if (!p || !p2)
7506                 ret = -TARGET_EFAULT;
7507             else
7508                 ret = get_errno(link(p, p2));
7509             unlock_user(p2, arg2, 0);
7510             unlock_user(p, arg1, 0);
7511         }
7512         return ret;
7513 #endif
7514 #if defined(TARGET_NR_linkat)
7515     case TARGET_NR_linkat:
7516         {
7517             void * p2 = NULL;
7518             if (!arg2 || !arg4)
7519                 return -TARGET_EFAULT;
7520             p  = lock_user_string(arg2);
7521             p2 = lock_user_string(arg4);
7522             if (!p || !p2)
7523                 ret = -TARGET_EFAULT;
7524             else
7525                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7526             unlock_user(p, arg2, 0);
7527             unlock_user(p2, arg4, 0);
7528         }
7529         return ret;
7530 #endif
7531 #ifdef TARGET_NR_unlink
7532     case TARGET_NR_unlink:
7533         if (!(p = lock_user_string(arg1)))
7534             return -TARGET_EFAULT;
7535         ret = get_errno(unlink(p));
7536         unlock_user(p, arg1, 0);
7537         return ret;
7538 #endif
7539 #if defined(TARGET_NR_unlinkat)
7540     case TARGET_NR_unlinkat:
7541         if (!(p = lock_user_string(arg2)))
7542             return -TARGET_EFAULT;
7543         ret = get_errno(unlinkat(arg1, p, arg3));
7544         unlock_user(p, arg2, 0);
7545         return ret;
7546 #endif
7547     case TARGET_NR_execve:
7548         {
7549             char **argp, **envp;
7550             int argc, envc;
7551             abi_ulong gp;
7552             abi_ulong guest_argp;
7553             abi_ulong guest_envp;
7554             abi_ulong addr;
7555             char **q;
7556             int total_size = 0;
7557 
7558             argc = 0;
7559             guest_argp = arg2;
7560             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7561                 if (get_user_ual(addr, gp))
7562                     return -TARGET_EFAULT;
7563                 if (!addr)
7564                     break;
7565                 argc++;
7566             }
7567             envc = 0;
7568             guest_envp = arg3;
7569             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7570                 if (get_user_ual(addr, gp))
7571                     return -TARGET_EFAULT;
7572                 if (!addr)
7573                     break;
7574                 envc++;
7575             }
7576 
7577             argp = g_new0(char *, argc + 1);
7578             envp = g_new0(char *, envc + 1);
7579 
7580             for (gp = guest_argp, q = argp; gp;
7581                   gp += sizeof(abi_ulong), q++) {
7582                 if (get_user_ual(addr, gp))
7583                     goto execve_efault;
7584                 if (!addr)
7585                     break;
7586                 if (!(*q = lock_user_string(addr)))
7587                     goto execve_efault;
7588                 total_size += strlen(*q) + 1;
7589             }
7590             *q = NULL;
7591 
7592             for (gp = guest_envp, q = envp; gp;
7593                   gp += sizeof(abi_ulong), q++) {
7594                 if (get_user_ual(addr, gp))
7595                     goto execve_efault;
7596                 if (!addr)
7597                     break;
7598                 if (!(*q = lock_user_string(addr)))
7599                     goto execve_efault;
7600                 total_size += strlen(*q) + 1;
7601             }
7602             *q = NULL;
7603 
7604             if (!(p = lock_user_string(arg1)))
7605                 goto execve_efault;
7606             /* Although execve() is not an interruptible syscall it is
7607              * a special case where we must use the safe_syscall wrapper:
7608              * if we allow a signal to happen before we make the host
7609              * syscall then we will 'lose' it, because at the point of
7610              * execve the process leaves QEMU's control. So we use the
7611              * safe syscall wrapper to ensure that we either take the
7612              * signal as a guest signal, or else it does not happen
7613              * before the execve completes and makes it the other
7614              * program's problem.
7615              */
7616             ret = get_errno(safe_execve(p, argp, envp));
7617             unlock_user(p, arg1, 0);
7618 
7619             goto execve_end;
7620 
7621         execve_efault:
7622             ret = -TARGET_EFAULT;
7623 
7624         execve_end:
7625             for (gp = guest_argp, q = argp; *q;
7626                   gp += sizeof(abi_ulong), q++) {
7627                 if (get_user_ual(addr, gp)
7628                     || !addr)
7629                     break;
7630                 unlock_user(*q, addr, 0);
7631             }
7632             for (gp = guest_envp, q = envp; *q;
7633                   gp += sizeof(abi_ulong), q++) {
7634                 if (get_user_ual(addr, gp)
7635                     || !addr)
7636                     break;
7637                 unlock_user(*q, addr, 0);
7638             }
7639 
7640             g_free(argp);
7641             g_free(envp);
7642         }
7643         return ret;
7644     case TARGET_NR_chdir:
7645         if (!(p = lock_user_string(arg1)))
7646             return -TARGET_EFAULT;
7647         ret = get_errno(chdir(p));
7648         unlock_user(p, arg1, 0);
7649         return ret;
7650 #ifdef TARGET_NR_time
7651     case TARGET_NR_time:
7652         {
7653             time_t host_time;
7654             ret = get_errno(time(&host_time));
7655             if (!is_error(ret)
7656                 && arg1
7657                 && put_user_sal(host_time, arg1))
7658                 return -TARGET_EFAULT;
7659         }
7660         return ret;
7661 #endif
7662 #ifdef TARGET_NR_mknod
7663     case TARGET_NR_mknod:
7664         if (!(p = lock_user_string(arg1)))
7665             return -TARGET_EFAULT;
7666         ret = get_errno(mknod(p, arg2, arg3));
7667         unlock_user(p, arg1, 0);
7668         return ret;
7669 #endif
7670 #if defined(TARGET_NR_mknodat)
7671     case TARGET_NR_mknodat:
7672         if (!(p = lock_user_string(arg2)))
7673             return -TARGET_EFAULT;
7674         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7675         unlock_user(p, arg2, 0);
7676         return ret;
7677 #endif
7678 #ifdef TARGET_NR_chmod
7679     case TARGET_NR_chmod:
7680         if (!(p = lock_user_string(arg1)))
7681             return -TARGET_EFAULT;
7682         ret = get_errno(chmod(p, arg2));
7683         unlock_user(p, arg1, 0);
7684         return ret;
7685 #endif
7686 #ifdef TARGET_NR_lseek
7687     case TARGET_NR_lseek:
7688         return get_errno(lseek(arg1, arg2, arg3));
7689 #endif
7690 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7691     /* Alpha specific */
7692     case TARGET_NR_getxpid:
7693         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7694         return get_errno(getpid());
7695 #endif
7696 #ifdef TARGET_NR_getpid
7697     case TARGET_NR_getpid:
7698         return get_errno(getpid());
7699 #endif
7700     case TARGET_NR_mount:
7701         {
7702             /* need to look at the data field */
7703             void *p2, *p3;
7704 
7705             if (arg1) {
7706                 p = lock_user_string(arg1);
7707                 if (!p) {
7708                     return -TARGET_EFAULT;
7709                 }
7710             } else {
7711                 p = NULL;
7712             }
7713 
7714             p2 = lock_user_string(arg2);
7715             if (!p2) {
7716                 if (arg1) {
7717                     unlock_user(p, arg1, 0);
7718                 }
7719                 return -TARGET_EFAULT;
7720             }
7721 
7722             if (arg3) {
7723                 p3 = lock_user_string(arg3);
7724                 if (!p3) {
7725                     if (arg1) {
7726                         unlock_user(p, arg1, 0);
7727                     }
7728                     unlock_user(p2, arg2, 0);
7729                     return -TARGET_EFAULT;
7730                 }
7731             } else {
7732                 p3 = NULL;
7733             }
7734 
7735             /* FIXME - arg5 should be locked, but it isn't clear how to
7736              * do that since it's not guaranteed to be a NULL-terminated
7737              * string.
7738              */
7739             if (!arg5) {
7740                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7741             } else {
7742                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7743             }
7744             ret = get_errno(ret);
7745 
7746             if (arg1) {
7747                 unlock_user(p, arg1, 0);
7748             }
7749             unlock_user(p2, arg2, 0);
7750             if (arg3) {
7751                 unlock_user(p3, arg3, 0);
7752             }
7753         }
7754         return ret;
7755 #ifdef TARGET_NR_umount
7756     case TARGET_NR_umount:
7757         if (!(p = lock_user_string(arg1)))
7758             return -TARGET_EFAULT;
7759         ret = get_errno(umount(p));
7760         unlock_user(p, arg1, 0);
7761         return ret;
7762 #endif
7763 #ifdef TARGET_NR_stime /* not on alpha */
7764     case TARGET_NR_stime:
7765         {
7766             time_t host_time;
7767             if (get_user_sal(host_time, arg1))
7768                 return -TARGET_EFAULT;
7769             return get_errno(stime(&host_time));
7770         }
7771 #endif
7772 #ifdef TARGET_NR_alarm /* not on alpha */
7773     case TARGET_NR_alarm:
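        /* alarm() cannot fail; it returns the number of seconds remaining
         * on any previously scheduled alarm, so no errno conversion is done.
         */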
7774         return alarm(arg1);
7775 #endif
7776 #ifdef TARGET_NR_pause /* not on alpha */
7777     case TARGET_NR_pause:
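        /* Emulate pause() with sigsuspend() on the task's current signal
         * mask.  If block_signals() reports a guest signal already pending,
         * the suspend is skipped; either way pause() completes with -EINTR.
         */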
7778         if (!block_signals()) {
7779             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7780         }
7781         return -TARGET_EINTR;
7782 #endif
7783 #ifdef TARGET_NR_utime
7784     case TARGET_NR_utime:
7785         {
7786             struct utimbuf tbuf, *host_tbuf;
7787             struct target_utimbuf *target_tbuf;
7788             if (arg2) {
7789                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7790                     return -TARGET_EFAULT;
7791                 tbuf.actime = tswapal(target_tbuf->actime);
7792                 tbuf.modtime = tswapal(target_tbuf->modtime);
7793                 unlock_user_struct(target_tbuf, arg2, 0);
7794                 host_tbuf = &tbuf;
7795             } else {
7796                 host_tbuf = NULL;
7797             }
7798             if (!(p = lock_user_string(arg1)))
7799                 return -TARGET_EFAULT;
7800             ret = get_errno(utime(p, host_tbuf));
7801             unlock_user(p, arg1, 0);
7802         }
7803         return ret;
7804 #endif
7805 #ifdef TARGET_NR_utimes
7806     case TARGET_NR_utimes:
7807         {
7808             struct timeval *tvp, tv[2];
7809             if (arg2) {
7810                 if (copy_from_user_timeval(&tv[0], arg2)
7811                     || copy_from_user_timeval(&tv[1],
7812                                               arg2 + sizeof(struct target_timeval)))
7813                     return -TARGET_EFAULT;
7814                 tvp = tv;
7815             } else {
7816                 tvp = NULL;
7817             }
7818             if (!(p = lock_user_string(arg1)))
7819                 return -TARGET_EFAULT;
7820             ret = get_errno(utimes(p, tvp));
7821             unlock_user(p, arg1, 0);
7822         }
7823         return ret;
7824 #endif
7825 #if defined(TARGET_NR_futimesat)
7826     case TARGET_NR_futimesat:
7827         {
7828             struct timeval *tvp, tv[2];
7829             if (arg3) {
7830                 if (copy_from_user_timeval(&tv[0], arg3)
7831                     || copy_from_user_timeval(&tv[1],
7832                                               arg3 + sizeof(struct target_timeval)))
7833                     return -TARGET_EFAULT;
7834                 tvp = tv;
7835             } else {
7836                 tvp = NULL;
7837             }
7838             if (!(p = lock_user_string(arg2))) {
7839                 return -TARGET_EFAULT;
7840             }
7841             ret = get_errno(futimesat(arg1, path(p), tvp));
7842             unlock_user(p, arg2, 0);
7843         }
7844         return ret;
7845 #endif
7846 #ifdef TARGET_NR_access
7847     case TARGET_NR_access:
7848         if (!(p = lock_user_string(arg1))) {
7849             return -TARGET_EFAULT;
7850         }
7851         ret = get_errno(access(path(p), arg2));
7852         unlock_user(p, arg1, 0);
7853         return ret;
7854 #endif
7855 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7856     case TARGET_NR_faccessat:
7857         if (!(p = lock_user_string(arg2))) {
7858             return -TARGET_EFAULT;
7859         }
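        /* The faccessat() syscall itself takes no flags argument (AT_EACCESS
         * handling lives in libc), so always pass 0 to the host call.
         */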
7860         ret = get_errno(faccessat(arg1, p, arg3, 0));
7861         unlock_user(p, arg2, 0);
7862         return ret;
7863 #endif
7864 #ifdef TARGET_NR_nice /* not on alpha */
7865     case TARGET_NR_nice:
7866         return get_errno(nice(arg1));
7867 #endif
7868     case TARGET_NR_sync:
7869         sync();
7870         return 0;
7871 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7872     case TARGET_NR_syncfs:
7873         return get_errno(syncfs(arg1));
7874 #endif
7875     case TARGET_NR_kill:
7876         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7877 #ifdef TARGET_NR_rename
7878     case TARGET_NR_rename:
7879         {
7880             void *p2;
7881             p = lock_user_string(arg1);
7882             p2 = lock_user_string(arg2);
7883             if (!p || !p2)
7884                 ret = -TARGET_EFAULT;
7885             else
7886                 ret = get_errno(rename(p, p2));
7887             unlock_user(p2, arg2, 0);
7888             unlock_user(p, arg1, 0);
7889         }
7890         return ret;
7891 #endif
7892 #if defined(TARGET_NR_renameat)
7893     case TARGET_NR_renameat:
7894         {
7895             void *p2;
7896             p  = lock_user_string(arg2);
7897             p2 = lock_user_string(arg4);
7898             if (!p || !p2)
7899                 ret = -TARGET_EFAULT;
7900             else
7901                 ret = get_errno(renameat(arg1, p, arg3, p2));
7902             unlock_user(p2, arg4, 0);
7903             unlock_user(p, arg2, 0);
7904         }
7905         return ret;
7906 #endif
7907 #if defined(TARGET_NR_renameat2)
7908     case TARGET_NR_renameat2:
7909         {
7910             void *p2;
7911             p  = lock_user_string(arg2);
7912             p2 = lock_user_string(arg4);
7913             if (!p || !p2) {
7914                 ret = -TARGET_EFAULT;
7915             } else {
7916                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7917             }
7918             unlock_user(p2, arg4, 0);
7919             unlock_user(p, arg2, 0);
7920         }
7921         return ret;
7922 #endif
7923 #ifdef TARGET_NR_mkdir
7924     case TARGET_NR_mkdir:
7925         if (!(p = lock_user_string(arg1)))
7926             return -TARGET_EFAULT;
7927         ret = get_errno(mkdir(p, arg2));
7928         unlock_user(p, arg1, 0);
7929         return ret;
7930 #endif
7931 #if defined(TARGET_NR_mkdirat)
7932     case TARGET_NR_mkdirat:
7933         if (!(p = lock_user_string(arg2)))
7934             return -TARGET_EFAULT;
7935         ret = get_errno(mkdirat(arg1, p, arg3));
7936         unlock_user(p, arg2, 0);
7937         return ret;
7938 #endif
7939 #ifdef TARGET_NR_rmdir
7940     case TARGET_NR_rmdir:
7941         if (!(p = lock_user_string(arg1)))
7942             return -TARGET_EFAULT;
7943         ret = get_errno(rmdir(p));
7944         unlock_user(p, arg1, 0);
7945         return ret;
7946 #endif
7947     case TARGET_NR_dup:
7948         ret = get_errno(dup(arg1));
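        /* Carry any fd translator state (e.g. for netlink sockets or
         * signalfd) over to the newly allocated descriptor.
         */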
7949         if (ret >= 0) {
7950             fd_trans_dup(arg1, ret);
7951         }
7952         return ret;
7953 #ifdef TARGET_NR_pipe
7954     case TARGET_NR_pipe:
7955         return do_pipe(cpu_env, arg1, 0, 0);
7956 #endif
7957 #ifdef TARGET_NR_pipe2
7958     case TARGET_NR_pipe2:
7959         return do_pipe(cpu_env, arg1,
7960                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7961 #endif
7962     case TARGET_NR_times:
7963         {
7964             struct target_tms *tmsp;
7965             struct tms tms;
7966             ret = get_errno(times(&tms));
7967             if (arg1) {
7968                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7969                 if (!tmsp)
7970                     return -TARGET_EFAULT;
7971                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7972                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7973                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7974                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7975             }
7976             if (!is_error(ret))
7977                 ret = host_to_target_clock_t(ret);
7978         }
7979         return ret;
7980     case TARGET_NR_acct:
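        /* A zero (NULL) filename disables process accounting. */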
7981         if (arg1 == 0) {
7982             ret = get_errno(acct(NULL));
7983         } else {
7984             if (!(p = lock_user_string(arg1))) {
7985                 return -TARGET_EFAULT;
7986             }
7987             ret = get_errno(acct(path(p)));
7988             unlock_user(p, arg1, 0);
7989         }
7990         return ret;
7991 #ifdef TARGET_NR_umount2
7992     case TARGET_NR_umount2:
7993         if (!(p = lock_user_string(arg1)))
7994             return -TARGET_EFAULT;
7995         ret = get_errno(umount2(p, arg2));
7996         unlock_user(p, arg1, 0);
7997         return ret;
7998 #endif
7999     case TARGET_NR_ioctl:
8000         return do_ioctl(arg1, arg2, arg3);
8001 #ifdef TARGET_NR_fcntl
8002     case TARGET_NR_fcntl:
8003         return do_fcntl(arg1, arg2, arg3);
8004 #endif
8005     case TARGET_NR_setpgid:
8006         return get_errno(setpgid(arg1, arg2));
8007     case TARGET_NR_umask:
8008         return get_errno(umask(arg1));
8009     case TARGET_NR_chroot:
8010         if (!(p = lock_user_string(arg1)))
8011             return -TARGET_EFAULT;
8012         ret = get_errno(chroot(p));
8013         unlock_user(p, arg1, 0);
8014         return ret;
8015 #ifdef TARGET_NR_dup2
8016     case TARGET_NR_dup2:
8017         ret = get_errno(dup2(arg1, arg2));
8018         if (ret >= 0) {
8019             fd_trans_dup(arg1, arg2);
8020         }
8021         return ret;
8022 #endif
8023 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8024     case TARGET_NR_dup3:
8025     {
8026         int host_flags;
8027 
8028         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8029             return -TARGET_EINVAL;
8030         }
8031         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8032         ret = get_errno(dup3(arg1, arg2, host_flags));
8033         if (ret >= 0) {
8034             fd_trans_dup(arg1, arg2);
8035         }
8036         return ret;
8037     }
8038 #endif
8039 #ifdef TARGET_NR_getppid /* not on alpha */
8040     case TARGET_NR_getppid:
8041         return get_errno(getppid());
8042 #endif
8043 #ifdef TARGET_NR_getpgrp
8044     case TARGET_NR_getpgrp:
8045         return get_errno(getpgrp());
8046 #endif
8047     case TARGET_NR_setsid:
8048         return get_errno(setsid());
8049 #ifdef TARGET_NR_sigaction
8050     case TARGET_NR_sigaction:
8051         {
8052 #if defined(TARGET_ALPHA)
8053             struct target_sigaction act, oact, *pact = 0;
8054             struct target_old_sigaction *old_act;
8055             if (arg2) {
8056                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8057                     return -TARGET_EFAULT;
8058                 act._sa_handler = old_act->_sa_handler;
8059                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8060                 act.sa_flags = old_act->sa_flags;
8061                 act.sa_restorer = 0;
8062                 unlock_user_struct(old_act, arg2, 0);
8063                 pact = &act;
8064             }
8065             ret = get_errno(do_sigaction(arg1, pact, &oact));
8066             if (!is_error(ret) && arg3) {
8067                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8068                     return -TARGET_EFAULT;
8069                 old_act->_sa_handler = oact._sa_handler;
8070                 old_act->sa_mask = oact.sa_mask.sig[0];
8071                 old_act->sa_flags = oact.sa_flags;
8072                 unlock_user_struct(old_act, arg3, 1);
8073             }
8074 #elif defined(TARGET_MIPS)
8075             struct target_sigaction act, oact, *pact, *old_act;
8076 
8077             if (arg2) {
8078                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8079                     return -TARGET_EFAULT;
8080                 act._sa_handler = old_act->_sa_handler;
8081                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8082                 act.sa_flags = old_act->sa_flags;
8083                 unlock_user_struct(old_act, arg2, 0);
8084                 pact = &act;
8085             } else {
8086                 pact = NULL;
8087             }
8088 
8089             ret = get_errno(do_sigaction(arg1, pact, &oact));
8090 
8091             if (!is_error(ret) && arg3) {
8092                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8093                     return -TARGET_EFAULT;
8094                 old_act->_sa_handler = oact._sa_handler;
8095                 old_act->sa_flags = oact.sa_flags;
8096                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8097                 old_act->sa_mask.sig[1] = 0;
8098                 old_act->sa_mask.sig[2] = 0;
8099                 old_act->sa_mask.sig[3] = 0;
8100                 unlock_user_struct(old_act, arg3, 1);
8101             }
8102 #else
8103             struct target_old_sigaction *old_act;
8104             struct target_sigaction act, oact, *pact;
8105             if (arg2) {
8106                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8107                     return -TARGET_EFAULT;
8108                 act._sa_handler = old_act->_sa_handler;
8109                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8110                 act.sa_flags = old_act->sa_flags;
8111                 act.sa_restorer = old_act->sa_restorer;
8112 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8113                 act.ka_restorer = 0;
8114 #endif
8115                 unlock_user_struct(old_act, arg2, 0);
8116                 pact = &act;
8117             } else {
8118                 pact = NULL;
8119             }
8120             ret = get_errno(do_sigaction(arg1, pact, &oact));
8121             if (!is_error(ret) && arg3) {
8122                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8123                     return -TARGET_EFAULT;
8124                 old_act->_sa_handler = oact._sa_handler;
8125                 old_act->sa_mask = oact.sa_mask.sig[0];
8126                 old_act->sa_flags = oact.sa_flags;
8127                 old_act->sa_restorer = oact.sa_restorer;
8128                 unlock_user_struct(old_act, arg3, 1);
8129             }
8130 #endif
8131         }
8132         return ret;
8133 #endif
8134     case TARGET_NR_rt_sigaction:
8135         {
8136 #if defined(TARGET_ALPHA)
8137             /* For Alpha and SPARC this is a 5 argument syscall, with
8138              * a 'restorer' parameter which must be copied into the
8139              * sa_restorer field of the sigaction struct.
8140              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8141              * and arg5 is the sigsetsize.
8142              * Alpha also has a separate rt_sigaction struct that it uses
8143              * here; SPARC uses the usual sigaction struct.
8144              */
8145             struct target_rt_sigaction *rt_act;
8146             struct target_sigaction act, oact, *pact = 0;
8147 
8148             if (arg4 != sizeof(target_sigset_t)) {
8149                 return -TARGET_EINVAL;
8150             }
8151             if (arg2) {
8152                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8153                     return -TARGET_EFAULT;
8154                 act._sa_handler = rt_act->_sa_handler;
8155                 act.sa_mask = rt_act->sa_mask;
8156                 act.sa_flags = rt_act->sa_flags;
8157                 act.sa_restorer = arg5;
8158                 unlock_user_struct(rt_act, arg2, 0);
8159                 pact = &act;
8160             }
8161             ret = get_errno(do_sigaction(arg1, pact, &oact));
8162             if (!is_error(ret) && arg3) {
8163                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8164                     return -TARGET_EFAULT;
8165                 rt_act->_sa_handler = oact._sa_handler;
8166                 rt_act->sa_mask = oact.sa_mask;
8167                 rt_act->sa_flags = oact.sa_flags;
8168                 unlock_user_struct(rt_act, arg3, 1);
8169             }
8170 #else
8171 #ifdef TARGET_SPARC
8172             target_ulong restorer = arg4;
8173             target_ulong sigsetsize = arg5;
8174 #else
8175             target_ulong sigsetsize = arg4;
8176 #endif
8177             struct target_sigaction *act;
8178             struct target_sigaction *oact;
8179 
8180             if (sigsetsize != sizeof(target_sigset_t)) {
8181                 return -TARGET_EINVAL;
8182             }
8183             if (arg2) {
8184                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8185                     return -TARGET_EFAULT;
8186                 }
8187 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8188                 act->ka_restorer = restorer;
8189 #endif
8190             } else {
8191                 act = NULL;
8192             }
8193             if (arg3) {
8194                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8195                     ret = -TARGET_EFAULT;
8196                     goto rt_sigaction_fail;
8197                 }
8198             } else
8199                 oact = NULL;
8200             ret = get_errno(do_sigaction(arg1, act, oact));
8201         rt_sigaction_fail:
8202             if (act)
8203                 unlock_user_struct(act, arg2, 0);
8204             if (oact)
8205                 unlock_user_struct(oact, arg3, 1);
8206 #endif
8207         }
8208         return ret;
8209 #ifdef TARGET_NR_sgetmask /* not on alpha */
8210     case TARGET_NR_sgetmask:
8211         {
8212             sigset_t cur_set;
8213             abi_ulong target_set;
8214             ret = do_sigprocmask(0, NULL, &cur_set);
8215             if (!ret) {
8216                 host_to_target_old_sigset(&target_set, &cur_set);
8217                 ret = target_set;
8218             }
8219         }
8220         return ret;
8221 #endif
8222 #ifdef TARGET_NR_ssetmask /* not on alpha */
8223     case TARGET_NR_ssetmask:
8224         {
8225             sigset_t set, oset;
8226             abi_ulong target_set = arg1;
8227             target_to_host_old_sigset(&set, &target_set);
8228             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8229             if (!ret) {
8230                 host_to_target_old_sigset(&target_set, &oset);
8231                 ret = target_set;
8232             }
8233         }
8234         return ret;
8235 #endif
8236 #ifdef TARGET_NR_sigprocmask
8237     case TARGET_NR_sigprocmask:
8238         {
8239 #if defined(TARGET_ALPHA)
8240             sigset_t set, oldset;
8241             abi_ulong mask;
8242             int how;
8243 
8244             switch (arg1) {
8245             case TARGET_SIG_BLOCK:
8246                 how = SIG_BLOCK;
8247                 break;
8248             case TARGET_SIG_UNBLOCK:
8249                 how = SIG_UNBLOCK;
8250                 break;
8251             case TARGET_SIG_SETMASK:
8252                 how = SIG_SETMASK;
8253                 break;
8254             default:
8255                 return -TARGET_EINVAL;
8256             }
8257             mask = arg2;
8258             target_to_host_old_sigset(&set, &mask);
8259 
8260             ret = do_sigprocmask(how, &set, &oldset);
8261             if (!is_error(ret)) {
8262                 host_to_target_old_sigset(&mask, &oldset);
8263                 ret = mask;
8264                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8265             }
8266 #else
8267             sigset_t set, oldset, *set_ptr;
8268             int how;
8269 
8270             if (arg2) {
8271                 switch (arg1) {
8272                 case TARGET_SIG_BLOCK:
8273                     how = SIG_BLOCK;
8274                     break;
8275                 case TARGET_SIG_UNBLOCK:
8276                     how = SIG_UNBLOCK;
8277                     break;
8278                 case TARGET_SIG_SETMASK:
8279                     how = SIG_SETMASK;
8280                     break;
8281                 default:
8282                     return -TARGET_EINVAL;
8283                 }
8284                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8285                     return -TARGET_EFAULT;
8286                 target_to_host_old_sigset(&set, p);
8287                 unlock_user(p, arg2, 0);
8288                 set_ptr = &set;
8289             } else {
8290                 how = 0;
8291                 set_ptr = NULL;
8292             }
8293             ret = do_sigprocmask(how, set_ptr, &oldset);
8294             if (!is_error(ret) && arg3) {
8295                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8296                     return -TARGET_EFAULT;
8297                 host_to_target_old_sigset(p, &oldset);
8298                 unlock_user(p, arg3, sizeof(target_sigset_t));
8299             }
8300 #endif
8301         }
8302         return ret;
8303 #endif
8304     case TARGET_NR_rt_sigprocmask:
8305         {
8306             int how = arg1;
8307             sigset_t set, oldset, *set_ptr;
8308 
8309             if (arg4 != sizeof(target_sigset_t)) {
8310                 return -TARGET_EINVAL;
8311             }
8312 
8313             if (arg2) {
8314                 switch(how) {
8315                 case TARGET_SIG_BLOCK:
8316                     how = SIG_BLOCK;
8317                     break;
8318                 case TARGET_SIG_UNBLOCK:
8319                     how = SIG_UNBLOCK;
8320                     break;
8321                 case TARGET_SIG_SETMASK:
8322                     how = SIG_SETMASK;
8323                     break;
8324                 default:
8325                     return -TARGET_EINVAL;
8326                 }
8327                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8328                     return -TARGET_EFAULT;
8329                 target_to_host_sigset(&set, p);
8330                 unlock_user(p, arg2, 0);
8331                 set_ptr = &set;
8332             } else {
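                /* When no new set is supplied the kernel never looks at
                 * 'how', so any value is accepted; pass a harmless 0.
                 */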
8333                 how = 0;
8334                 set_ptr = NULL;
8335             }
8336             ret = do_sigprocmask(how, set_ptr, &oldset);
8337             if (!is_error(ret) && arg3) {
8338                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8339                     return -TARGET_EFAULT;
8340                 host_to_target_sigset(p, &oldset);
8341                 unlock_user(p, arg3, sizeof(target_sigset_t));
8342             }
8343         }
8344         return ret;
8345 #ifdef TARGET_NR_sigpending
8346     case TARGET_NR_sigpending:
8347         {
8348             sigset_t set;
8349             ret = get_errno(sigpending(&set));
8350             if (!is_error(ret)) {
8351                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8352                     return -TARGET_EFAULT;
8353                 host_to_target_old_sigset(p, &set);
8354                 unlock_user(p, arg1, sizeof(target_sigset_t));
8355             }
8356         }
8357         return ret;
8358 #endif
8359     case TARGET_NR_rt_sigpending:
8360         {
8361             sigset_t set;
8362 
8363             /* Yes, this check is >, not != like most. We follow the
8364              * kernel's logic here: it implements NR_sigpending through
8365              * the same code path, and in that case the old_sigset_t
8366              * is smaller in size.
8367              */
8368             if (arg2 > sizeof(target_sigset_t)) {
8369                 return -TARGET_EINVAL;
8370             }
8371 
8372             ret = get_errno(sigpending(&set));
8373             if (!is_error(ret)) {
8374                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8375                     return -TARGET_EFAULT;
8376                 host_to_target_sigset(p, &set);
8377                 unlock_user(p, arg1, sizeof(target_sigset_t));
8378             }
8379         }
8380         return ret;
8381 #ifdef TARGET_NR_sigsuspend
8382     case TARGET_NR_sigsuspend:
8383         {
8384             TaskState *ts = cpu->opaque;
8385 #if defined(TARGET_ALPHA)
8386             abi_ulong mask = arg1;
8387             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8388 #else
8389             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8390                 return -TARGET_EFAULT;
8391             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8392             unlock_user(p, arg1, 0);
8393 #endif
8394             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8395                                                SIGSET_T_SIZE));
8396             if (ret != -TARGET_ERESTARTSYS) {
8397                 ts->in_sigsuspend = 1;
8398             }
8399         }
8400         return ret;
8401 #endif
8402     case TARGET_NR_rt_sigsuspend:
8403         {
8404             TaskState *ts = cpu->opaque;
8405 
8406             if (arg2 != sizeof(target_sigset_t)) {
8407                 return -TARGET_EINVAL;
8408             }
8409             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8410                 return -TARGET_EFAULT;
8411             target_to_host_sigset(&ts->sigsuspend_mask, p);
8412             unlock_user(p, arg1, 0);
8413             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8414                                                SIGSET_T_SIZE));
8415             if (ret != -TARGET_ERESTARTSYS) {
8416                 ts->in_sigsuspend = 1;
8417             }
8418         }
8419         return ret;
8420     case TARGET_NR_rt_sigtimedwait:
8421         {
8422             sigset_t set;
8423             struct timespec uts, *puts;
8424             siginfo_t uinfo;
8425 
8426             if (arg4 != sizeof(target_sigset_t)) {
8427                 return -TARGET_EINVAL;
8428             }
8429 
8430             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8431                 return -TARGET_EFAULT;
8432             target_to_host_sigset(&set, p);
8433             unlock_user(p, arg1, 0);
8434             if (arg3) {
8435                 puts = &uts;
8436                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8437             } else {
8438                 puts = NULL;
8439             }
8440             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8441                                                  SIGSET_T_SIZE));
8442             if (!is_error(ret)) {
8443                 if (arg2) {
8444                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8445                                   0);
8446                     if (!p) {
8447                         return -TARGET_EFAULT;
8448                     }
8449                     host_to_target_siginfo(p, &uinfo);
8450                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8451                 }
8452                 ret = host_to_target_signal(ret);
8453             }
8454         }
8455         return ret;
8456     case TARGET_NR_rt_sigqueueinfo:
8457         {
8458             siginfo_t uinfo;
8459 
8460             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8461             if (!p) {
8462                 return -TARGET_EFAULT;
8463             }
8464             target_to_host_siginfo(&uinfo, p);
8465             unlock_user(p, arg3, 0);
8466             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8467         }
8468         return ret;
8469     case TARGET_NR_rt_tgsigqueueinfo:
8470         {
8471             siginfo_t uinfo;
8472 
8473             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8474             if (!p) {
8475                 return -TARGET_EFAULT;
8476             }
8477             target_to_host_siginfo(&uinfo, p);
8478             unlock_user(p, arg4, 0);
8479             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8480         }
8481         return ret;
8482 #ifdef TARGET_NR_sigreturn
8483     case TARGET_NR_sigreturn:
8484         if (block_signals()) {
8485             return -TARGET_ERESTARTSYS;
8486         }
8487         return do_sigreturn(cpu_env);
8488 #endif
8489     case TARGET_NR_rt_sigreturn:
8490         if (block_signals()) {
8491             return -TARGET_ERESTARTSYS;
8492         }
8493         return do_rt_sigreturn(cpu_env);
8494     case TARGET_NR_sethostname:
8495         if (!(p = lock_user_string(arg1)))
8496             return -TARGET_EFAULT;
8497         ret = get_errno(sethostname(p, arg2));
8498         unlock_user(p, arg1, 0);
8499         return ret;
8500 #ifdef TARGET_NR_setrlimit
8501     case TARGET_NR_setrlimit:
8502         {
8503             int resource = target_to_host_resource(arg1);
8504             struct target_rlimit *target_rlim;
8505             struct rlimit rlim;
8506             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8507                 return -TARGET_EFAULT;
8508             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8509             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8510             unlock_user_struct(target_rlim, arg2, 0);
8511             /*
8512              * If we just passed through resource limit settings for memory then
8513              * they would also apply to QEMU's own allocations, and QEMU will
8514              * crash or hang or die if its allocations fail. Ideally we would
8515              * track the guest allocations in QEMU and apply the limits ourselves.
8516              * For now, just tell the guest the call succeeded but don't actually
8517              * limit anything.
8518              */
8519             if (resource != RLIMIT_AS &&
8520                 resource != RLIMIT_DATA &&
8521                 resource != RLIMIT_STACK) {
8522                 return get_errno(setrlimit(resource, &rlim));
8523             } else {
8524                 return 0;
8525             }
8526         }
8527 #endif
8528 #ifdef TARGET_NR_getrlimit
8529     case TARGET_NR_getrlimit:
8530         {
8531             int resource = target_to_host_resource(arg1);
8532             struct target_rlimit *target_rlim;
8533             struct rlimit rlim;
8534 
8535             ret = get_errno(getrlimit(resource, &rlim));
8536             if (!is_error(ret)) {
8537                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8538                     return -TARGET_EFAULT;
8539                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8540                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8541                 unlock_user_struct(target_rlim, arg2, 1);
8542             }
8543         }
8544         return ret;
8545 #endif
8546     case TARGET_NR_getrusage:
8547         {
8548             struct rusage rusage;
8549             ret = get_errno(getrusage(arg1, &rusage));
8550             if (!is_error(ret)) {
8551                 ret = host_to_target_rusage(arg2, &rusage);
8552             }
8553         }
8554         return ret;
8555     case TARGET_NR_gettimeofday:
8556         {
8557             struct timeval tv;
8558             ret = get_errno(gettimeofday(&tv, NULL));
8559             if (!is_error(ret)) {
8560                 if (copy_to_user_timeval(arg1, &tv))
8561                     return -TARGET_EFAULT;
8562             }
8563         }
8564         return ret;
8565     case TARGET_NR_settimeofday:
8566         {
8567             struct timeval tv, *ptv = NULL;
8568             struct timezone tz, *ptz = NULL;
8569 
8570             if (arg1) {
8571                 if (copy_from_user_timeval(&tv, arg1)) {
8572                     return -TARGET_EFAULT;
8573                 }
8574                 ptv = &tv;
8575             }
8576 
8577             if (arg2) {
8578                 if (copy_from_user_timezone(&tz, arg2)) {
8579                     return -TARGET_EFAULT;
8580                 }
8581                 ptz = &tz;
8582             }
8583 
8584             return get_errno(settimeofday(ptv, ptz));
8585         }
8586 #if defined(TARGET_NR_select)
8587     case TARGET_NR_select:
8588 #if defined(TARGET_WANT_NI_OLD_SELECT)
8589         /* some architectures used to have old_select here
8590          * but now return ENOSYS for it.
8591          */
8592         ret = -TARGET_ENOSYS;
8593 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8594         ret = do_old_select(arg1);
8595 #else
8596         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8597 #endif
8598         return ret;
8599 #endif
8600 #ifdef TARGET_NR_pselect6
8601     case TARGET_NR_pselect6:
8602         {
8603             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8604             fd_set rfds, wfds, efds;
8605             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8606             struct timespec ts, *ts_ptr;
8607 
8608             /*
8609              * The 6th arg is actually two args smashed together,
8610              * so we cannot use the C library.
8611              */
8612             sigset_t set;
8613             struct {
8614                 sigset_t *set;
8615                 size_t size;
8616             } sig, *sig_ptr;
8617 
8618             abi_ulong arg_sigset, arg_sigsize, *arg7;
8619             target_sigset_t *target_sigset;
8620 
8621             n = arg1;
8622             rfd_addr = arg2;
8623             wfd_addr = arg3;
8624             efd_addr = arg4;
8625             ts_addr = arg5;
8626 
8627             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8628             if (ret) {
8629                 return ret;
8630             }
8631             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8632             if (ret) {
8633                 return ret;
8634             }
8635             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8636             if (ret) {
8637                 return ret;
8638             }
8639 
8640             /*
8641              * This takes a timespec, and not a timeval, so we cannot
8642              * use the do_select() helper ...
8643              */
8644             if (ts_addr) {
8645                 if (target_to_host_timespec(&ts, ts_addr)) {
8646                     return -TARGET_EFAULT;
8647                 }
8648                 ts_ptr = &ts;
8649             } else {
8650                 ts_ptr = NULL;
8651             }
8652 
8653             /* Extract the two packed args for the sigset */
8654             if (arg6) {
8655                 sig_ptr = &sig;
8656                 sig.size = SIGSET_T_SIZE;
8657 
8658                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8659                 if (!arg7) {
8660                     return -TARGET_EFAULT;
8661                 }
8662                 arg_sigset = tswapal(arg7[0]);
8663                 arg_sigsize = tswapal(arg7[1]);
8664                 unlock_user(arg7, arg6, 0);
8665 
8666                 if (arg_sigset) {
8667                     sig.set = &set;
8668                     if (arg_sigsize != sizeof(*target_sigset)) {
8669                         /* Like the kernel, we enforce correct size sigsets */
8670                         return -TARGET_EINVAL;
8671                     }
8672                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8673                                               sizeof(*target_sigset), 1);
8674                     if (!target_sigset) {
8675                         return -TARGET_EFAULT;
8676                     }
8677                     target_to_host_sigset(&set, target_sigset);
8678                     unlock_user(target_sigset, arg_sigset, 0);
8679                 } else {
8680                     sig.set = NULL;
8681                 }
8682             } else {
8683                 sig_ptr = NULL;
8684             }
8685 
8686             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8687                                           ts_ptr, sig_ptr));
8688 
8689             if (!is_error(ret)) {
8690                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8691                     return -TARGET_EFAULT;
8692                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8693                     return -TARGET_EFAULT;
8694                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8695                     return -TARGET_EFAULT;
8696 
8697                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8698                     return -TARGET_EFAULT;
8699             }
8700         }
8701         return ret;
8702 #endif
8703 #ifdef TARGET_NR_symlink
8704     case TARGET_NR_symlink:
8705         {
8706             void *p2;
8707             p = lock_user_string(arg1);
8708             p2 = lock_user_string(arg2);
8709             if (!p || !p2)
8710                 ret = -TARGET_EFAULT;
8711             else
8712                 ret = get_errno(symlink(p, p2));
8713             unlock_user(p2, arg2, 0);
8714             unlock_user(p, arg1, 0);
8715         }
8716         return ret;
8717 #endif
8718 #if defined(TARGET_NR_symlinkat)
8719     case TARGET_NR_symlinkat:
8720         {
8721             void *p2;
8722             p  = lock_user_string(arg1);
8723             p2 = lock_user_string(arg3);
8724             if (!p || !p2)
8725                 ret = -TARGET_EFAULT;
8726             else
8727                 ret = get_errno(symlinkat(p, arg2, p2));
8728             unlock_user(p2, arg3, 0);
8729             unlock_user(p, arg1, 0);
8730         }
8731         return ret;
8732 #endif
8733 #ifdef TARGET_NR_readlink
8734     case TARGET_NR_readlink:
8735         {
8736             void *p2;
8737             p = lock_user_string(arg1);
8738             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8739             if (!p || !p2) {
8740                 ret = -TARGET_EFAULT;
8741             } else if (!arg3) {
8742                 /* Short circuit this for the magic exe check. */
8743                 ret = -TARGET_EINVAL;
8744             } else if (is_proc_myself((const char *)p, "exe")) {
8745                 char real[PATH_MAX], *temp;
8746                 temp = realpath(exec_path, real);
8747                 /* Return value is # of bytes that we wrote to the buffer. */
8748                 if (temp == NULL) {
8749                     ret = get_errno(-1);
8750                 } else {
8751                     /* Don't worry about sign mismatch as earlier mapping
8752                      * logic would have thrown a bad address error. */
8753                     ret = MIN(strlen(real), arg3);
8754                     /* We cannot NUL terminate the string. */
8755                     memcpy(p2, real, ret);
8756                 }
8757             } else {
8758                 ret = get_errno(readlink(path(p), p2, arg3));
8759             }
8760             unlock_user(p2, arg2, ret);
8761             unlock_user(p, arg1, 0);
8762         }
8763         return ret;
8764 #endif
8765 #if defined(TARGET_NR_readlinkat)
8766     case TARGET_NR_readlinkat:
8767         {
8768             void *p2;
8769             p  = lock_user_string(arg2);
8770             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8771             if (!p || !p2) {
8772                 ret = -TARGET_EFAULT;
8773             } else if (is_proc_myself((const char *)p, "exe")) {
8774                 char real[PATH_MAX], *temp;
8775                 temp = realpath(exec_path, real);
8776                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8777                 snprintf((char *)p2, arg4, "%s", real);
8778             } else {
8779                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8780             }
8781             unlock_user(p2, arg3, ret);
8782             unlock_user(p, arg2, 0);
8783         }
8784         return ret;
8785 #endif
8786 #ifdef TARGET_NR_swapon
8787     case TARGET_NR_swapon:
8788         if (!(p = lock_user_string(arg1)))
8789             return -TARGET_EFAULT;
8790         ret = get_errno(swapon(p, arg2));
8791         unlock_user(p, arg1, 0);
8792         return ret;
8793 #endif
8794     case TARGET_NR_reboot:
8795         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8796            /* arg4 is only used with RESTART2; it must be ignored in all other cases */
8797            p = lock_user_string(arg4);
8798            if (!p) {
8799                return -TARGET_EFAULT;
8800            }
8801            ret = get_errno(reboot(arg1, arg2, arg3, p));
8802            unlock_user(p, arg4, 0);
8803         } else {
8804            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8805         }
8806         return ret;
8807 #ifdef TARGET_NR_mmap
8808     case TARGET_NR_mmap:
8809 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8810     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8811     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8812     || defined(TARGET_S390X)
8813         {
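            /* On these targets the legacy mmap() passes a single pointer to
             * a block of six arguments in guest memory instead of passing
             * the arguments in registers.
             */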
8814             abi_ulong *v;
8815             abi_ulong v1, v2, v3, v4, v5, v6;
8816             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8817                 return -TARGET_EFAULT;
8818             v1 = tswapal(v[0]);
8819             v2 = tswapal(v[1]);
8820             v3 = tswapal(v[2]);
8821             v4 = tswapal(v[3]);
8822             v5 = tswapal(v[4]);
8823             v6 = tswapal(v[5]);
8824             unlock_user(v, arg1, 0);
8825             ret = get_errno(target_mmap(v1, v2, v3,
8826                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8827                                         v5, v6));
8828         }
8829 #else
8830         ret = get_errno(target_mmap(arg1, arg2, arg3,
8831                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8832                                     arg5,
8833                                     arg6));
8834 #endif
8835         return ret;
8836 #endif
8837 #ifdef TARGET_NR_mmap2
8838     case TARGET_NR_mmap2:
8839 #ifndef MMAP_SHIFT
8840 #define MMAP_SHIFT 12
8841 #endif
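        /* mmap2 expresses the file offset in units of (1 << MMAP_SHIFT)
         * bytes (4096 by default), so scale it back to a byte offset for
         * target_mmap().
         */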
8842         ret = target_mmap(arg1, arg2, arg3,
8843                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8844                           arg5, arg6 << MMAP_SHIFT);
8845         return get_errno(ret);
8846 #endif
8847     case TARGET_NR_munmap:
8848         return get_errno(target_munmap(arg1, arg2));
8849     case TARGET_NR_mprotect:
8850         {
8851             TaskState *ts = cpu->opaque;
8852             /* Special hack to detect libc making the stack executable.  */
8853             if ((arg3 & PROT_GROWSDOWN)
8854                 && arg1 >= ts->info->stack_limit
8855                 && arg1 <= ts->info->start_stack) {
8856                 arg3 &= ~PROT_GROWSDOWN;
8857                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8858                 arg1 = ts->info->stack_limit;
8859             }
8860         }
8861         return get_errno(target_mprotect(arg1, arg2, arg3));
8862 #ifdef TARGET_NR_mremap
8863     case TARGET_NR_mremap:
8864         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8865 #endif
8866         /* ??? msync/mlock/munlock are broken for softmmu.  */
8867 #ifdef TARGET_NR_msync
8868     case TARGET_NR_msync:
8869         return get_errno(msync(g2h(arg1), arg2, arg3));
8870 #endif
8871 #ifdef TARGET_NR_mlock
8872     case TARGET_NR_mlock:
8873         return get_errno(mlock(g2h(arg1), arg2));
8874 #endif
8875 #ifdef TARGET_NR_munlock
8876     case TARGET_NR_munlock:
8877         return get_errno(munlock(g2h(arg1), arg2));
8878 #endif
8879 #ifdef TARGET_NR_mlockall
8880     case TARGET_NR_mlockall:
8881         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8882 #endif
8883 #ifdef TARGET_NR_munlockall
8884     case TARGET_NR_munlockall:
8885         return get_errno(munlockall());
8886 #endif
8887 #ifdef TARGET_NR_truncate
8888     case TARGET_NR_truncate:
8889         if (!(p = lock_user_string(arg1)))
8890             return -TARGET_EFAULT;
8891         ret = get_errno(truncate(p, arg2));
8892         unlock_user(p, arg1, 0);
8893         return ret;
8894 #endif
8895 #ifdef TARGET_NR_ftruncate
8896     case TARGET_NR_ftruncate:
8897         return get_errno(ftruncate(arg1, arg2));
8898 #endif
8899     case TARGET_NR_fchmod:
8900         return get_errno(fchmod(arg1, arg2));
8901 #if defined(TARGET_NR_fchmodat)
8902     case TARGET_NR_fchmodat:
8903         if (!(p = lock_user_string(arg2)))
8904             return -TARGET_EFAULT;
8905         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8906         unlock_user(p, arg2, 0);
8907         return ret;
8908 #endif
8909     case TARGET_NR_getpriority:
8910         /* Note that negative values are valid for getpriority, so we must
8911            differentiate based on errno settings.  */
8912         errno = 0;
8913         ret = getpriority(arg1, arg2);
8914         if (ret == -1 && errno != 0) {
8915             return -host_to_target_errno(errno);
8916         }
8917 #ifdef TARGET_ALPHA
8918         /* Return value is the unbiased priority.  Signal no error.  */
8919         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8920 #else
8921         /* Return value is a biased priority to avoid negative numbers.  */
8922         ret = 20 - ret;
8923 #endif
8924         return ret;
8925     case TARGET_NR_setpriority:
8926         return get_errno(setpriority(arg1, arg2, arg3));
8927 #ifdef TARGET_NR_statfs
8928     case TARGET_NR_statfs:
8929         if (!(p = lock_user_string(arg1))) {
8930             return -TARGET_EFAULT;
8931         }
8932         ret = get_errno(statfs(path(p), &stfs));
8933         unlock_user(p, arg1, 0);
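    /* fstatfs (below) jumps here to reuse the conversion of the host
     * statfs result into the target layout.
     */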
8934     convert_statfs:
8935         if (!is_error(ret)) {
8936             struct target_statfs *target_stfs;
8937 
8938             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8939                 return -TARGET_EFAULT;
8940             __put_user(stfs.f_type, &target_stfs->f_type);
8941             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8942             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8943             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8944             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8945             __put_user(stfs.f_files, &target_stfs->f_files);
8946             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8947             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8948             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8949             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8950             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8951 #ifdef _STATFS_F_FLAGS
8952             __put_user(stfs.f_flags, &target_stfs->f_flags);
8953 #else
8954             __put_user(0, &target_stfs->f_flags);
8955 #endif
8956             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8957             unlock_user_struct(target_stfs, arg2, 1);
8958         }
8959         return ret;
8960 #endif
8961 #ifdef TARGET_NR_fstatfs
8962     case TARGET_NR_fstatfs:
8963         ret = get_errno(fstatfs(arg1, &stfs));
8964         goto convert_statfs;
8965 #endif
8966 #ifdef TARGET_NR_statfs64
8967     case TARGET_NR_statfs64:
8968         if (!(p = lock_user_string(arg1))) {
8969             return -TARGET_EFAULT;
8970         }
8971         ret = get_errno(statfs(path(p), &stfs));
8972         unlock_user(p, arg1, 0);
8973     convert_statfs64:
8974         if (!is_error(ret)) {
8975             struct target_statfs64 *target_stfs;
8976 
8977             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8978                 return -TARGET_EFAULT;
8979             __put_user(stfs.f_type, &target_stfs->f_type);
8980             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8981             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8982             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8983             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8984             __put_user(stfs.f_files, &target_stfs->f_files);
8985             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8986             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8987             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8988             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8989             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8990             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8991             unlock_user_struct(target_stfs, arg3, 1);
8992         }
8993         return ret;
8994     case TARGET_NR_fstatfs64:
8995         ret = get_errno(fstatfs(arg1, &stfs));
8996         goto convert_statfs64;
8997 #endif
8998 #ifdef TARGET_NR_socketcall
8999     case TARGET_NR_socketcall:
9000         return do_socketcall(arg1, arg2);
9001 #endif
9002 #ifdef TARGET_NR_accept
9003     case TARGET_NR_accept:
9004         return do_accept4(arg1, arg2, arg3, 0);
9005 #endif
9006 #ifdef TARGET_NR_accept4
9007     case TARGET_NR_accept4:
9008         return do_accept4(arg1, arg2, arg3, arg4);
9009 #endif
9010 #ifdef TARGET_NR_bind
9011     case TARGET_NR_bind:
9012         return do_bind(arg1, arg2, arg3);
9013 #endif
9014 #ifdef TARGET_NR_connect
9015     case TARGET_NR_connect:
9016         return do_connect(arg1, arg2, arg3);
9017 #endif
9018 #ifdef TARGET_NR_getpeername
9019     case TARGET_NR_getpeername:
9020         return do_getpeername(arg1, arg2, arg3);
9021 #endif
9022 #ifdef TARGET_NR_getsockname
9023     case TARGET_NR_getsockname:
9024         return do_getsockname(arg1, arg2, arg3);
9025 #endif
9026 #ifdef TARGET_NR_getsockopt
9027     case TARGET_NR_getsockopt:
9028         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9029 #endif
9030 #ifdef TARGET_NR_listen
9031     case TARGET_NR_listen:
9032         return get_errno(listen(arg1, arg2));
9033 #endif
9034 #ifdef TARGET_NR_recv
9035     case TARGET_NR_recv:
9036         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9037 #endif
9038 #ifdef TARGET_NR_recvfrom
9039     case TARGET_NR_recvfrom:
9040         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9041 #endif
9042 #ifdef TARGET_NR_recvmsg
9043     case TARGET_NR_recvmsg:
9044         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9045 #endif
9046 #ifdef TARGET_NR_send
9047     case TARGET_NR_send:
9048         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9049 #endif
9050 #ifdef TARGET_NR_sendmsg
9051     case TARGET_NR_sendmsg:
9052         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9053 #endif
9054 #ifdef TARGET_NR_sendmmsg
9055     case TARGET_NR_sendmmsg:
9056         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9057     case TARGET_NR_recvmmsg:
9058         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9059 #endif
9060 #ifdef TARGET_NR_sendto
9061     case TARGET_NR_sendto:
9062         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9063 #endif
9064 #ifdef TARGET_NR_shutdown
9065     case TARGET_NR_shutdown:
9066         return get_errno(shutdown(arg1, arg2));
9067 #endif
9068 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9069     case TARGET_NR_getrandom:
9070         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9071         if (!p) {
9072             return -TARGET_EFAULT;
9073         }
9074         ret = get_errno(getrandom(p, arg2, arg3));
9075         unlock_user(p, arg1, ret);
9076         return ret;
9077 #endif
9078 #ifdef TARGET_NR_socket
9079     case TARGET_NR_socket:
9080         return do_socket(arg1, arg2, arg3);
9081 #endif
9082 #ifdef TARGET_NR_socketpair
9083     case TARGET_NR_socketpair:
9084         return do_socketpair(arg1, arg2, arg3, arg4);
9085 #endif
9086 #ifdef TARGET_NR_setsockopt
9087     case TARGET_NR_setsockopt:
9088         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9089 #endif
9090 #if defined(TARGET_NR_syslog)
9091     case TARGET_NR_syslog:
9092         {
9093             int len = arg3;
9094 
9095             switch (arg1) {
9096             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9097             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9098             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9099             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9100             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9101             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9102             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9103             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9104                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9105             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9106             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9107             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9108                 {
9109                     if (len < 0) {
9110                         return -TARGET_EINVAL;
9111                     }
9112                     if (len == 0) {
9113                         return 0;
9114                     }
9115                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9116                     if (!p) {
9117                         return -TARGET_EFAULT;
9118                     }
9119                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9120                     unlock_user(p, arg2, arg3);
9121                 }
9122                 return ret;
9123             default:
9124                 return -TARGET_EINVAL;
9125             }
9126         }
9127         break;
9128 #endif
9129     case TARGET_NR_setitimer:
9130         {
9131             struct itimerval value, ovalue, *pvalue;
9132 
9133             if (arg2) {
9134                 pvalue = &value;
9135                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9136                     || copy_from_user_timeval(&pvalue->it_value,
9137                                               arg2 + sizeof(struct target_timeval)))
9138                     return -TARGET_EFAULT;
9139             } else {
9140                 pvalue = NULL;
9141             }
9142             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9143             if (!is_error(ret) && arg3) {
9144                 if (copy_to_user_timeval(arg3,
9145                                          &ovalue.it_interval)
9146                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9147                                             &ovalue.it_value))
9148                     return -TARGET_EFAULT;
9149             }
9150         }
9151         return ret;
9152     case TARGET_NR_getitimer:
9153         {
9154             struct itimerval value;
9155 
9156             ret = get_errno(getitimer(arg1, &value));
9157             if (!is_error(ret) && arg2) {
9158                 if (copy_to_user_timeval(arg2,
9159                                          &value.it_interval)
9160                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9161                                             &value.it_value))
9162                     return -TARGET_EFAULT;
9163             }
9164         }
9165         return ret;
9166 #ifdef TARGET_NR_stat
9167     case TARGET_NR_stat:
9168         if (!(p = lock_user_string(arg1))) {
9169             return -TARGET_EFAULT;
9170         }
9171         ret = get_errno(stat(path(p), &st));
9172         unlock_user(p, arg1, 0);
9173         goto do_stat;
9174 #endif
9175 #ifdef TARGET_NR_lstat
9176     case TARGET_NR_lstat:
9177         if (!(p = lock_user_string(arg1))) {
9178             return -TARGET_EFAULT;
9179         }
9180         ret = get_errno(lstat(path(p), &st));
9181         unlock_user(p, arg1, 0);
9182         goto do_stat;
9183 #endif
9184 #ifdef TARGET_NR_fstat
9185     case TARGET_NR_fstat:
9186         {
9187             ret = get_errno(fstat(arg1, &st));
9188 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9189         do_stat:
9190 #endif
9191             if (!is_error(ret)) {
9192                 struct target_stat *target_st;
9193 
9194                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9195                     return -TARGET_EFAULT;
9196                 memset(target_st, 0, sizeof(*target_st));
9197                 __put_user(st.st_dev, &target_st->st_dev);
9198                 __put_user(st.st_ino, &target_st->st_ino);
9199                 __put_user(st.st_mode, &target_st->st_mode);
9200                 __put_user(st.st_uid, &target_st->st_uid);
9201                 __put_user(st.st_gid, &target_st->st_gid);
9202                 __put_user(st.st_nlink, &target_st->st_nlink);
9203                 __put_user(st.st_rdev, &target_st->st_rdev);
9204                 __put_user(st.st_size, &target_st->st_size);
9205                 __put_user(st.st_blksize, &target_st->st_blksize);
9206                 __put_user(st.st_blocks, &target_st->st_blocks);
9207                 __put_user(st.st_atime, &target_st->target_st_atime);
9208                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9209                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9210 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9211     defined(TARGET_STAT_HAVE_NSEC)
9212                 __put_user(st.st_atim.tv_nsec,
9213                            &target_st->target_st_atime_nsec);
9214                 __put_user(st.st_mtim.tv_nsec,
9215                            &target_st->target_st_mtime_nsec);
9216                 __put_user(st.st_ctim.tv_nsec,
9217                            &target_st->target_st_ctime_nsec);
9218 #endif
9219                 unlock_user_struct(target_st, arg2, 1);
9220             }
9221         }
9222         return ret;
9223 #endif
9224     case TARGET_NR_vhangup:
9225         return get_errno(vhangup());
9226 #ifdef TARGET_NR_syscall
9227     case TARGET_NR_syscall:
9228         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9229                           arg6, arg7, arg8, 0);
9230 #endif
9231     case TARGET_NR_wait4:
9232         {
9233             int status;
9234             abi_long status_ptr = arg2;
9235             struct rusage rusage, *rusage_ptr;
9236             abi_ulong target_rusage = arg4;
9237             abi_long rusage_err;
9238             if (target_rusage)
9239                 rusage_ptr = &rusage;
9240             else
9241                 rusage_ptr = NULL;
9242             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9243             if (!is_error(ret)) {
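                     /*
                      * Only copy the status back when a child was actually
                      * reaped: with WNOHANG, wait4() can legitimately return 0.
                      */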
9244                 if (status_ptr && ret) {
9245                     status = host_to_target_waitstatus(status);
9246                     if (put_user_s32(status, status_ptr))
9247                         return -TARGET_EFAULT;
9248                 }
9249                 if (target_rusage) {
9250                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9251                     if (rusage_err) {
9252                         ret = rusage_err;
9253                     }
9254                 }
9255             }
9256         }
9257         return ret;
9258 #ifdef TARGET_NR_swapoff
9259     case TARGET_NR_swapoff:
9260         if (!(p = lock_user_string(arg1)))
9261             return -TARGET_EFAULT;
9262         ret = get_errno(swapoff(p));
9263         unlock_user(p, arg1, 0);
9264         return ret;
9265 #endif
9266     case TARGET_NR_sysinfo:
9267         {
9268             struct target_sysinfo *target_value;
9269             struct sysinfo value;
9270             ret = get_errno(sysinfo(&value));
9271             if (!is_error(ret) && arg1)
9272             {
9273                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9274                     return -TARGET_EFAULT;
9275                 __put_user(value.uptime, &target_value->uptime);
9276                 __put_user(value.loads[0], &target_value->loads[0]);
9277                 __put_user(value.loads[1], &target_value->loads[1]);
9278                 __put_user(value.loads[2], &target_value->loads[2]);
9279                 __put_user(value.totalram, &target_value->totalram);
9280                 __put_user(value.freeram, &target_value->freeram);
9281                 __put_user(value.sharedram, &target_value->sharedram);
9282                 __put_user(value.bufferram, &target_value->bufferram);
9283                 __put_user(value.totalswap, &target_value->totalswap);
9284                 __put_user(value.freeswap, &target_value->freeswap);
9285                 __put_user(value.procs, &target_value->procs);
9286                 __put_user(value.totalhigh, &target_value->totalhigh);
9287                 __put_user(value.freehigh, &target_value->freehigh);
9288                 __put_user(value.mem_unit, &target_value->mem_unit);
9289                 unlock_user_struct(target_value, arg1, 1);
9290             }
9291         }
9292         return ret;
9293 #ifdef TARGET_NR_ipc
9294     case TARGET_NR_ipc:
9295         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9296 #endif
9297 #ifdef TARGET_NR_semget
9298     case TARGET_NR_semget:
9299         return get_errno(semget(arg1, arg2, arg3));
9300 #endif
9301 #ifdef TARGET_NR_semop
9302     case TARGET_NR_semop:
9303         return do_semop(arg1, arg2, arg3);
9304 #endif
9305 #ifdef TARGET_NR_semctl
9306     case TARGET_NR_semctl:
9307         return do_semctl(arg1, arg2, arg3, arg4);
9308 #endif
9309 #ifdef TARGET_NR_msgctl
9310     case TARGET_NR_msgctl:
9311         return do_msgctl(arg1, arg2, arg3);
9312 #endif
9313 #ifdef TARGET_NR_msgget
9314     case TARGET_NR_msgget:
9315         return get_errno(msgget(arg1, arg2));
9316 #endif
9317 #ifdef TARGET_NR_msgrcv
9318     case TARGET_NR_msgrcv:
9319         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9320 #endif
9321 #ifdef TARGET_NR_msgsnd
9322     case TARGET_NR_msgsnd:
9323         return do_msgsnd(arg1, arg2, arg3, arg4);
9324 #endif
9325 #ifdef TARGET_NR_shmget
9326     case TARGET_NR_shmget:
9327         return get_errno(shmget(arg1, arg2, arg3));
9328 #endif
9329 #ifdef TARGET_NR_shmctl
9330     case TARGET_NR_shmctl:
9331         return do_shmctl(arg1, arg2, arg3);
9332 #endif
9333 #ifdef TARGET_NR_shmat
9334     case TARGET_NR_shmat:
9335         return do_shmat(cpu_env, arg1, arg2, arg3);
9336 #endif
9337 #ifdef TARGET_NR_shmdt
9338     case TARGET_NR_shmdt:
9339         return do_shmdt(arg1);
9340 #endif
9341     case TARGET_NR_fsync:
9342         return get_errno(fsync(arg1));
9343     case TARGET_NR_clone:
9344         /* Linux manages to have three different orderings for its
9345          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9346          * match the kernel's CONFIG_CLONE_* settings.
9347          * Microblaze is further special in that it uses a sixth
9348          * implicit argument to clone for the TLS pointer.
9349          */
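             /*
              * For reference, a sketch of the per-variant argument layouts
              * (names follow the kernel's CONFIG_CLONE_BACKWARDS* conventions;
              * do_fork() is assumed to take
              * (env, flags, newsp, parent_tidptr, tls, child_tidptr)):
              *   default:    clone(flags, newsp, parent_tidptr, child_tidptr, tls)
              *   BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
              *   BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
              */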
9350 #if defined(TARGET_MICROBLAZE)
9351         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9352 #elif defined(TARGET_CLONE_BACKWARDS)
9353         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9354 #elif defined(TARGET_CLONE_BACKWARDS2)
9355         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9356 #else
9357         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9358 #endif
9359         return ret;
9360 #ifdef __NR_exit_group
9361         /* new thread calls */
9362     case TARGET_NR_exit_group:
9363         preexit_cleanup(cpu_env, arg1);
9364         return get_errno(exit_group(arg1));
9365 #endif
9366     case TARGET_NR_setdomainname:
9367         if (!(p = lock_user_string(arg1)))
9368             return -TARGET_EFAULT;
9369         ret = get_errno(setdomainname(p, arg2));
9370         unlock_user(p, arg1, 0);
9371         return ret;
9372     case TARGET_NR_uname:
9373         /* no need to transcode because we use the linux syscall */
9374         {
9375             struct new_utsname * buf;
9376 
9377             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9378                 return -TARGET_EFAULT;
9379             ret = get_errno(sys_uname(buf));
9380             if (!is_error(ret)) {
9381                 /* Overwrite the native machine name with whatever is being
9382                    emulated. */
9383                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9384                           sizeof(buf->machine));
9385                 /* Allow the user to override the reported release.  */
9386                 if (qemu_uname_release && *qemu_uname_release) {
9387                     g_strlcpy(buf->release, qemu_uname_release,
9388                               sizeof(buf->release));
9389                 }
9390             }
9391             unlock_user_struct(buf, arg1, 1);
9392         }
9393         return ret;
9394 #ifdef TARGET_I386
9395     case TARGET_NR_modify_ldt:
9396         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9397 #if !defined(TARGET_X86_64)
9398     case TARGET_NR_vm86:
9399         return do_vm86(cpu_env, arg1, arg2);
9400 #endif
9401 #endif
9402     case TARGET_NR_adjtimex:
9403         {
9404             struct timex host_buf;
9405 
9406             if (target_to_host_timex(&host_buf, arg1) != 0) {
9407                 return -TARGET_EFAULT;
9408             }
9409             ret = get_errno(adjtimex(&host_buf));
9410             if (!is_error(ret)) {
9411                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9412                     return -TARGET_EFAULT;
9413                 }
9414             }
9415         }
9416         return ret;
9417 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9418     case TARGET_NR_clock_adjtime:
9419         {
9420             struct timex htx, *phtx = &htx;
9421 
9422             if (target_to_host_timex(phtx, arg2) != 0) {
9423                 return -TARGET_EFAULT;
9424             }
9425             ret = get_errno(clock_adjtime(arg1, phtx));
9426             if (!is_error(ret) && phtx) {
9427                 if (host_to_target_timex(arg2, phtx) != 0) {
9428                     return -TARGET_EFAULT;
9429                 }
9430             }
9431         }
9432         return ret;
9433 #endif
9434     case TARGET_NR_getpgid:
9435         return get_errno(getpgid(arg1));
9436     case TARGET_NR_fchdir:
9437         return get_errno(fchdir(arg1));
9438     case TARGET_NR_personality:
9439         return get_errno(personality(arg1));
9440 #ifdef TARGET_NR__llseek /* Not on alpha */
9441     case TARGET_NR__llseek:
9442         {
9443             int64_t res;
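             /*
              * Hosts without a separate llseek syscall (typically 64-bit hosts)
              * emulate it with a plain lseek() on the combined 64-bit offset;
              * either way the result is written back through arg4 below.
              */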
9444 #if !defined(__NR_llseek)
9445             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9446             if (res == -1) {
9447                 ret = get_errno(res);
9448             } else {
9449                 ret = 0;
9450             }
9451 #else
9452             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9453 #endif
9454             if ((ret == 0) && put_user_s64(res, arg4)) {
9455                 return -TARGET_EFAULT;
9456             }
9457         }
9458         return ret;
9459 #endif
9460 #ifdef TARGET_NR_getdents
9461     case TARGET_NR_getdents:
9462 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9463 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9464         {
9465             struct target_dirent *target_dirp;
9466             struct linux_dirent *dirp;
9467             abi_long count = arg3;
9468 
9469             dirp = g_try_malloc(count);
9470             if (!dirp) {
9471                 return -TARGET_ENOMEM;
9472             }
9473 
9474             ret = get_errno(sys_getdents(arg1, dirp, count));
9475             if (!is_error(ret)) {
9476                 struct linux_dirent *de;
9477                 struct target_dirent *tde;
9478                 int len = ret;
9479                 int reclen, treclen;
9480                 int count1, tnamelen;
9481 
9482                 count1 = 0;
9483                 de = dirp;
9484                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9485                     return -TARGET_EFAULT;
9486                 tde = target_dirp;
9487                 while (len > 0) {
9488                     reclen = de->d_reclen;
9489                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9490                     assert(tnamelen >= 0);
9491                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9492                     assert(count1 + treclen <= count);
9493                     tde->d_reclen = tswap16(treclen);
9494                     tde->d_ino = tswapal(de->d_ino);
9495                     tde->d_off = tswapal(de->d_off);
9496                     memcpy(tde->d_name, de->d_name, tnamelen);
9497                     de = (struct linux_dirent *)((char *)de + reclen);
9498                     len -= reclen;
9499                     tde = (struct target_dirent *)((char *)tde + treclen);
9500                     count1 += treclen;
9501                 }
9502                 ret = count1;
9503                 unlock_user(target_dirp, arg2, ret);
9504             }
9505             g_free(dirp);
9506         }
9507 #else
9508         {
9509             struct linux_dirent *dirp;
9510             abi_long count = arg3;
9511 
9512             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9513                 return -TARGET_EFAULT;
9514             ret = get_errno(sys_getdents(arg1, dirp, count));
9515             if (!is_error(ret)) {
9516                 struct linux_dirent *de;
9517                 int len = ret;
9518                 int reclen;
9519                 de = dirp;
9520                 while (len > 0) {
9521                     reclen = de->d_reclen;
9522                     if (reclen > len)
9523                         break;
9524                     de->d_reclen = tswap16(reclen);
9525                     tswapls(&de->d_ino);
9526                     tswapls(&de->d_off);
9527                     de = (struct linux_dirent *)((char *)de + reclen);
9528                     len -= reclen;
9529                 }
9530             }
9531             unlock_user(dirp, arg2, ret);
9532         }
9533 #endif
9534 #else
9535         /* Implement getdents in terms of getdents64 */
9536         {
9537             struct linux_dirent64 *dirp;
9538             abi_long count = arg3;
9539 
9540             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9541             if (!dirp) {
9542                 return -TARGET_EFAULT;
9543             }
9544             ret = get_errno(sys_getdents64(arg1, dirp, count));
9545             if (!is_error(ret)) {
9546                 /* Convert the dirent64 structs to target dirent.  We do this
9547                  * in-place, since we can guarantee that a target_dirent is no
9548                  * larger than a dirent64; however this means we have to be
9549                  * careful to read everything before writing in the new format.
9550                  */
9551                 struct linux_dirent64 *de;
9552                 struct target_dirent *tde;
9553                 int len = ret;
9554                 int tlen = 0;
9555 
9556                 de = dirp;
9557                 tde = (struct target_dirent *)dirp;
9558                 while (len > 0) {
9559                     int namelen, treclen;
9560                     int reclen = de->d_reclen;
9561                     uint64_t ino = de->d_ino;
9562                     int64_t off = de->d_off;
9563                     uint8_t type = de->d_type;
9564 
9565                     namelen = strlen(de->d_name);
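                         /*
                          * Record length: header + name + one byte for the
                          * trailing NUL and one for the d_type byte stored at
                          * the end of the record (see below), rounded up to
                          * abi_long alignment.
                          */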
9566                     treclen = offsetof(struct target_dirent, d_name)
9567                         + namelen + 2;
9568                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9569 
9570                     memmove(tde->d_name, de->d_name, namelen + 1);
9571                     tde->d_ino = tswapal(ino);
9572                     tde->d_off = tswapal(off);
9573                     tde->d_reclen = tswap16(treclen);
9574                     /* The target_dirent type is in what was formerly a padding
9575                      * byte at the end of the structure:
9576                      */
9577                     *(((char *)tde) + treclen - 1) = type;
9578 
9579                     de = (struct linux_dirent64 *)((char *)de + reclen);
9580                     tde = (struct target_dirent *)((char *)tde + treclen);
9581                     len -= reclen;
9582                     tlen += treclen;
9583                 }
9584                 ret = tlen;
9585             }
9586             unlock_user(dirp, arg2, ret);
9587         }
9588 #endif
9589         return ret;
9590 #endif /* TARGET_NR_getdents */
9591 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9592     case TARGET_NR_getdents64:
9593         {
9594             struct linux_dirent64 *dirp;
9595             abi_long count = arg3;
9596             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9597                 return -TARGET_EFAULT;
9598             ret = get_errno(sys_getdents64(arg1, dirp, count));
9599             if (!is_error(ret)) {
9600                 struct linux_dirent64 *de;
9601                 int len = ret;
9602                 int reclen;
9603                 de = dirp;
9604                 while (len > 0) {
9605                     reclen = de->d_reclen;
9606                     if (reclen > len)
9607                         break;
9608                     de->d_reclen = tswap16(reclen);
9609                     tswap64s((uint64_t *)&de->d_ino);
9610                     tswap64s((uint64_t *)&de->d_off);
9611                     de = (struct linux_dirent64 *)((char *)de + reclen);
9612                     len -= reclen;
9613                 }
9614             }
9615             unlock_user(dirp, arg2, ret);
9616         }
9617         return ret;
9618 #endif /* TARGET_NR_getdents64 */
9619 #if defined(TARGET_NR__newselect)
9620     case TARGET_NR__newselect:
9621         return do_select(arg1, arg2, arg3, arg4, arg5);
9622 #endif
9623 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9624 # ifdef TARGET_NR_poll
9625     case TARGET_NR_poll:
9626 # endif
9627 # ifdef TARGET_NR_ppoll
9628     case TARGET_NR_ppoll:
9629 # endif
9630         {
9631             struct target_pollfd *target_pfd;
9632             unsigned int nfds = arg2;
9633             struct pollfd *pfd;
9634             unsigned int i;
9635 
9636             pfd = NULL;
9637             target_pfd = NULL;
9638             if (nfds) {
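                 /*
                  * Reject nfds values large enough to overflow the pollfd
                  * array size computations below before touching guest memory.
                  */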
9639                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9640                     return -TARGET_EINVAL;
9641                 }
9642 
9643                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9644                                        sizeof(struct target_pollfd) * nfds, 1);
9645                 if (!target_pfd) {
9646                     return -TARGET_EFAULT;
9647                 }
9648 
9649                 pfd = alloca(sizeof(struct pollfd) * nfds);
9650                 for (i = 0; i < nfds; i++) {
9651                     pfd[i].fd = tswap32(target_pfd[i].fd);
9652                     pfd[i].events = tswap16(target_pfd[i].events);
9653                 }
9654             }
9655 
9656             switch (num) {
9657 # ifdef TARGET_NR_ppoll
9658             case TARGET_NR_ppoll:
9659             {
9660                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9661                 target_sigset_t *target_set;
9662                 sigset_t _set, *set = &_set;
9663 
9664                 if (arg3) {
9665                     if (target_to_host_timespec(timeout_ts, arg3)) {
9666                         unlock_user(target_pfd, arg1, 0);
9667                         return -TARGET_EFAULT;
9668                     }
9669                 } else {
9670                     timeout_ts = NULL;
9671                 }
9672 
9673                 if (arg4) {
9674                     if (arg5 != sizeof(target_sigset_t)) {
9675                         unlock_user(target_pfd, arg1, 0);
9676                         return -TARGET_EINVAL;
9677                     }
9678 
9679                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9680                     if (!target_set) {
9681                         unlock_user(target_pfd, arg1, 0);
9682                         return -TARGET_EFAULT;
9683                     }
9684                     target_to_host_sigset(set, target_set);
9685                 } else {
9686                     set = NULL;
9687                 }
9688 
9689                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9690                                            set, SIGSET_T_SIZE));
9691 
9692                 if (!is_error(ret) && arg3) {
9693                     host_to_target_timespec(arg3, timeout_ts);
9694                 }
9695                 if (arg4) {
9696                     unlock_user(target_set, arg4, 0);
9697                 }
9698                 break;
9699             }
9700 # endif
9701 # ifdef TARGET_NR_poll
9702             case TARGET_NR_poll:
9703             {
9704                 struct timespec ts, *pts;
9705 
9706                 if (arg3 >= 0) {
9707                     /* Convert ms to secs, ns */
9708                     ts.tv_sec = arg3 / 1000;
9709                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9710                     pts = &ts;
9711                 } else {
9712                     /* A negative poll() timeout means "infinite" */
9713                     pts = NULL;
9714                 }
9715                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9716                 break;
9717             }
9718 # endif
9719             default:
9720                 g_assert_not_reached();
9721             }
9722 
9723             if (!is_error(ret)) {
9724                 for(i = 0; i < nfds; i++) {
9725                     target_pfd[i].revents = tswap16(pfd[i].revents);
9726                 }
9727             }
9728             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9729         }
9730         return ret;
9731 #endif
9732     case TARGET_NR_flock:
9733         /* NOTE: the flock constant seems to be the same for every
9734            Linux platform */
9735         return get_errno(safe_flock(arg1, arg2));
9736     case TARGET_NR_readv:
9737         {
9738             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9739             if (vec != NULL) {
9740                 ret = get_errno(safe_readv(arg1, vec, arg3));
9741                 unlock_iovec(vec, arg2, arg3, 1);
9742             } else {
9743                 ret = -host_to_target_errno(errno);
9744             }
9745         }
9746         return ret;
9747     case TARGET_NR_writev:
9748         {
9749             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9750             if (vec != NULL) {
9751                 ret = get_errno(safe_writev(arg1, vec, arg3));
9752                 unlock_iovec(vec, arg2, arg3, 0);
9753             } else {
9754                 ret = -host_to_target_errno(errno);
9755             }
9756         }
9757         return ret;
9758 #if defined(TARGET_NR_preadv)
9759     case TARGET_NR_preadv:
9760         {
9761             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9762             if (vec != NULL) {
9763                 unsigned long low, high;
9764 
9765                 target_to_host_low_high(arg4, arg5, &low, &high);
9766                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9767                 unlock_iovec(vec, arg2, arg3, 1);
9768             } else {
9769                 ret = -host_to_target_errno(errno);
9770             }
9771         }
9772         return ret;
9773 #endif
9774 #if defined(TARGET_NR_pwritev)
9775     case TARGET_NR_pwritev:
9776         {
9777             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9778             if (vec != NULL) {
9779                 unsigned long low, high;
9780 
9781                 target_to_host_low_high(arg4, arg5, &low, &high);
9782                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9783                 unlock_iovec(vec, arg2, arg3, 0);
9784             } else {
9785                 ret = -host_to_target_errno(errno);
9786             }
9787         }
9788         return ret;
9789 #endif
9790     case TARGET_NR_getsid:
9791         return get_errno(getsid(arg1));
9792 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9793     case TARGET_NR_fdatasync:
9794         return get_errno(fdatasync(arg1));
9795 #endif
9796 #ifdef TARGET_NR__sysctl
9797     case TARGET_NR__sysctl:
9798         /* We don't implement this, but ENOTDIR is always a safe
9799            return value. */
9800         return -TARGET_ENOTDIR;
9801 #endif
9802     case TARGET_NR_sched_getaffinity:
9803         {
9804             unsigned int mask_size;
9805             unsigned long *mask;
9806 
9807             /*
9808              * sched_getaffinity needs multiples of ulong, so need to take
9809              * care of mismatches between target ulong and host ulong sizes.
9810              */
9811             if (arg2 & (sizeof(abi_ulong) - 1)) {
9812                 return -TARGET_EINVAL;
9813             }
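             /*
              * Round the guest-supplied size up to a whole number of host
              * unsigned longs so the host kernel sees a properly sized mask.
              */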
9814             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9815 
9816             mask = alloca(mask_size);
9817             memset(mask, 0, mask_size);
9818             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9819 
9820             if (!is_error(ret)) {
9821                 if (ret > arg2) {
9822                     /* More data returned than the caller's buffer will fit.
9823                      * This only happens if sizeof(abi_long) < sizeof(long)
9824                      * and the caller passed us a buffer holding an odd number
9825                      * of abi_longs. If the host kernel is actually using the
9826                      * extra 4 bytes then fail EINVAL; otherwise we can just
9827                      * ignore them and only copy the interesting part.
9828                      */
9829                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9830                     if (numcpus > arg2 * 8) {
9831                         return -TARGET_EINVAL;
9832                     }
9833                     ret = arg2;
9834                 }
9835 
9836                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9837                     return -TARGET_EFAULT;
9838                 }
9839             }
9840         }
9841         return ret;
9842     case TARGET_NR_sched_setaffinity:
9843         {
9844             unsigned int mask_size;
9845             unsigned long *mask;
9846 
9847             /*
9848              * sched_setaffinity needs multiples of ulong, so need to take
9849              * care of mismatches between target ulong and host ulong sizes.
9850              */
9851             if (arg2 & (sizeof(abi_ulong) - 1)) {
9852                 return -TARGET_EINVAL;
9853             }
9854             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9855             mask = alloca(mask_size);
9856 
9857             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9858             if (ret) {
9859                 return ret;
9860             }
9861 
9862             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9863         }
9864     case TARGET_NR_getcpu:
9865         {
9866             unsigned cpu, node;
9867             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9868                                        arg2 ? &node : NULL,
9869                                        NULL));
9870             if (is_error(ret)) {
9871                 return ret;
9872             }
9873             if (arg1 && put_user_u32(cpu, arg1)) {
9874                 return -TARGET_EFAULT;
9875             }
9876             if (arg2 && put_user_u32(node, arg2)) {
9877                 return -TARGET_EFAULT;
9878             }
9879         }
9880         return ret;
9881     case TARGET_NR_sched_setparam:
9882         {
9883             struct sched_param *target_schp;
9884             struct sched_param schp;
9885 
9886             if (arg2 == 0) {
9887                 return -TARGET_EINVAL;
9888             }
9889             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9890                 return -TARGET_EFAULT;
9891             schp.sched_priority = tswap32(target_schp->sched_priority);
9892             unlock_user_struct(target_schp, arg2, 0);
9893             return get_errno(sched_setparam(arg1, &schp));
9894         }
9895     case TARGET_NR_sched_getparam:
9896         {
9897             struct sched_param *target_schp;
9898             struct sched_param schp;
9899 
9900             if (arg2 == 0) {
9901                 return -TARGET_EINVAL;
9902             }
9903             ret = get_errno(sched_getparam(arg1, &schp));
9904             if (!is_error(ret)) {
9905                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9906                     return -TARGET_EFAULT;
9907                 target_schp->sched_priority = tswap32(schp.sched_priority);
9908                 unlock_user_struct(target_schp, arg2, 1);
9909             }
9910         }
9911         return ret;
9912     case TARGET_NR_sched_setscheduler:
9913         {
9914             struct sched_param *target_schp;
9915             struct sched_param schp;
9916             if (arg3 == 0) {
9917                 return -TARGET_EINVAL;
9918             }
9919             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9920                 return -TARGET_EFAULT;
9921             schp.sched_priority = tswap32(target_schp->sched_priority);
9922             unlock_user_struct(target_schp, arg3, 0);
9923             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9924         }
9925     case TARGET_NR_sched_getscheduler:
9926         return get_errno(sched_getscheduler(arg1));
9927     case TARGET_NR_sched_yield:
9928         return get_errno(sched_yield());
9929     case TARGET_NR_sched_get_priority_max:
9930         return get_errno(sched_get_priority_max(arg1));
9931     case TARGET_NR_sched_get_priority_min:
9932         return get_errno(sched_get_priority_min(arg1));
9933     case TARGET_NR_sched_rr_get_interval:
9934         {
9935             struct timespec ts;
9936             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9937             if (!is_error(ret)) {
9938                 ret = host_to_target_timespec(arg2, &ts);
9939             }
9940         }
9941         return ret;
9942     case TARGET_NR_nanosleep:
9943         {
9944             struct timespec req, rem;
9945             target_to_host_timespec(&req, arg1);
9946             ret = get_errno(safe_nanosleep(&req, &rem));
9947             if (is_error(ret) && arg2) {
9948                 host_to_target_timespec(arg2, &rem);
9949             }
9950         }
9951         return ret;
9952     case TARGET_NR_prctl:
9953         switch (arg1) {
9954         case PR_GET_PDEATHSIG:
9955         {
9956             int deathsig;
9957             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9958             if (!is_error(ret) && arg2
9959                 && put_user_ual(deathsig, arg2)) {
9960                 return -TARGET_EFAULT;
9961             }
9962             return ret;
9963         }
9964 #ifdef PR_GET_NAME
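         /*
          * The fixed size of 16 below matches the kernel's task comm buffer
          * (TASK_COMM_LEN, 16 bytes including the terminating NUL).
          */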
9965         case PR_GET_NAME:
9966         {
9967             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9968             if (!name) {
9969                 return -TARGET_EFAULT;
9970             }
9971             ret = get_errno(prctl(arg1, (unsigned long)name,
9972                                   arg3, arg4, arg5));
9973             unlock_user(name, arg2, 16);
9974             return ret;
9975         }
9976         case PR_SET_NAME:
9977         {
9978             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9979             if (!name) {
9980                 return -TARGET_EFAULT;
9981             }
9982             ret = get_errno(prctl(arg1, (unsigned long)name,
9983                                   arg3, arg4, arg5));
9984             unlock_user(name, arg2, 0);
9985             return ret;
9986         }
9987 #endif
9988 #ifdef TARGET_MIPS
9989         case TARGET_PR_GET_FP_MODE:
9990         {
9991             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9992             ret = 0;
9993             if (env->CP0_Status & (1 << CP0St_FR)) {
9994                 ret |= TARGET_PR_FP_MODE_FR;
9995             }
9996             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9997                 ret |= TARGET_PR_FP_MODE_FRE;
9998             }
9999             return ret;
10000         }
10001         case TARGET_PR_SET_FP_MODE:
10002         {
10003             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10004             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10005             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10006             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10007             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10008 
10009             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10010                                             TARGET_PR_FP_MODE_FRE;
10011 
10012             /* If nothing to change, return right away, successfully.  */
10013             if (old_fr == new_fr && old_fre == new_fre) {
10014                 return 0;
10015             }
10016             /* Check the value is valid */
10017             if (arg2 & ~known_bits) {
10018                 return -TARGET_EOPNOTSUPP;
10019             }
10020             /* Setting FRE without FR is not supported.  */
10021             if (new_fre && !new_fr) {
10022                 return -TARGET_EOPNOTSUPP;
10023             }
10024             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10025                 /* FR1 is not supported */
10026                 return -TARGET_EOPNOTSUPP;
10027             }
10028             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10029                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10030                 /* cannot set FR=0 */
10031                 return -TARGET_EOPNOTSUPP;
10032             }
10033             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10034                 /* Cannot set FRE=1 */
10035                 return -TARGET_EOPNOTSUPP;
10036             }
10037 
10038             int i;
10039             fpr_t *fpr = env->active_fpu.fpr;
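              /*
               * Switching between FR=0 and FR=1 changes how odd-numbered
               * single-precision values map onto the 64-bit FPR file, so
               * repack the affected halves of each even/odd register pair.
               */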
10040             for (i = 0; i < 32 ; i += 2) {
10041                 if (!old_fr && new_fr) {
10042                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10043                 } else if (old_fr && !new_fr) {
10044                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10045                 }
10046             }
10047 
10048             if (new_fr) {
10049                 env->CP0_Status |= (1 << CP0St_FR);
10050                 env->hflags |= MIPS_HFLAG_F64;
10051             } else {
10052                 env->CP0_Status &= ~(1 << CP0St_FR);
10053                 env->hflags &= ~MIPS_HFLAG_F64;
10054             }
10055             if (new_fre) {
10056                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10057                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10058                     env->hflags |= MIPS_HFLAG_FRE;
10059                 }
10060             } else {
10061                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10062                 env->hflags &= ~MIPS_HFLAG_FRE;
10063             }
10064 
10065             return 0;
10066         }
10067 #endif /* MIPS */
10068 #ifdef TARGET_AARCH64
10069         case TARGET_PR_SVE_SET_VL:
10070             /*
10071              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10072              * PR_SVE_VL_INHERIT.  Note the kernel definition
10073              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10074              * even though the current architectural maximum is VQ=16.
10075              */
10076             ret = -TARGET_EINVAL;
10077             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10078                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10079                 CPUARMState *env = cpu_env;
10080                 ARMCPU *cpu = env_archcpu(env);
10081                 uint32_t vq, old_vq;
10082 
10083                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
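                  /*
                   * arg2 is the requested vector length in bytes; ZCR_EL1
                   * stores VQ - 1, where VQ is the length in 128-bit
                   * quadwords (hence the divide by 16 and the "& 0xf" below).
                   */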
10084                 vq = MAX(arg2 / 16, 1);
10085                 vq = MIN(vq, cpu->sve_max_vq);
10086 
10087                 if (vq < old_vq) {
10088                     aarch64_sve_narrow_vq(env, vq);
10089                 }
10090                 env->vfp.zcr_el[1] = vq - 1;
10091                 arm_rebuild_hflags(env);
10092                 ret = vq * 16;
10093             }
10094             return ret;
10095         case TARGET_PR_SVE_GET_VL:
10096             ret = -TARGET_EINVAL;
10097             {
10098                 ARMCPU *cpu = env_archcpu(cpu_env);
10099                 if (cpu_isar_feature(aa64_sve, cpu)) {
10100                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10101                 }
10102             }
10103             return ret;
10104         case TARGET_PR_PAC_RESET_KEYS:
10105             {
10106                 CPUARMState *env = cpu_env;
10107                 ARMCPU *cpu = env_archcpu(env);
10108 
10109                 if (arg3 || arg4 || arg5) {
10110                     return -TARGET_EINVAL;
10111                 }
10112                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10113                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10114                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10115                                TARGET_PR_PAC_APGAKEY);
10116                     int ret = 0;
10117                     Error *err = NULL;
10118 
10119                     if (arg2 == 0) {
10120                         arg2 = all;
10121                     } else if (arg2 & ~all) {
10122                         return -TARGET_EINVAL;
10123                     }
10124                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10125                         ret |= qemu_guest_getrandom(&env->keys.apia,
10126                                                     sizeof(ARMPACKey), &err);
10127                     }
10128                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10129                         ret |= qemu_guest_getrandom(&env->keys.apib,
10130                                                     sizeof(ARMPACKey), &err);
10131                     }
10132                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10133                         ret |= qemu_guest_getrandom(&env->keys.apda,
10134                                                     sizeof(ARMPACKey), &err);
10135                     }
10136                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10137                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10138                                                     sizeof(ARMPACKey), &err);
10139                     }
10140                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10141                         ret |= qemu_guest_getrandom(&env->keys.apga,
10142                                                     sizeof(ARMPACKey), &err);
10143                     }
10144                     if (ret != 0) {
10145                         /*
10146                          * Some unknown failure in the crypto.  The best
10147                          * we can do is log it and fail the syscall.
10148                          * The real syscall cannot fail this way.
10149                          */
10150                         qemu_log_mask(LOG_UNIMP,
10151                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10152                                       error_get_pretty(err));
10153                         error_free(err);
10154                         return -TARGET_EIO;
10155                     }
10156                     return 0;
10157                 }
10158             }
10159             return -TARGET_EINVAL;
10160 #endif /* AARCH64 */
10161         case PR_GET_SECCOMP:
10162         case PR_SET_SECCOMP:
10163             /* Disable seccomp to prevent the target from disabling
10164              * syscalls we need. */
10165             return -TARGET_EINVAL;
10166         default:
10167             /* Most prctl options have no pointer arguments */
10168             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10169         }
10170         break;
10171 #ifdef TARGET_NR_arch_prctl
10172     case TARGET_NR_arch_prctl:
10173 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10174         return do_arch_prctl(cpu_env, arg1, arg2);
10175 #else
10176 #error unreachable
10177 #endif
10178 #endif
10179 #ifdef TARGET_NR_pread64
10180     case TARGET_NR_pread64:
10181         if (regpairs_aligned(cpu_env, num)) {
10182             arg4 = arg5;
10183             arg5 = arg6;
10184         }
10185         if (arg2 == 0 && arg3 == 0) {
10186             /* Special-case NULL buffer and zero length, which should succeed */
10187             p = 0;
10188         } else {
10189             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10190             if (!p) {
10191                 return -TARGET_EFAULT;
10192             }
10193         }
10194         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10195         unlock_user(p, arg2, ret);
10196         return ret;
10197     case TARGET_NR_pwrite64:
10198         if (regpairs_aligned(cpu_env, num)) {
10199             arg4 = arg5;
10200             arg5 = arg6;
10201         }
10202         if (arg2 == 0 && arg3 == 0) {
10203             /* Special-case NULL buffer and zero length, which should succeed */
10204             p = 0;
10205         } else {
10206             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10207             if (!p) {
10208                 return -TARGET_EFAULT;
10209             }
10210         }
10211         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10212         unlock_user(p, arg2, 0);
10213         return ret;
10214 #endif
10215     case TARGET_NR_getcwd:
10216         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10217             return -TARGET_EFAULT;
10218         ret = get_errno(sys_getcwd1(p, arg2));
10219         unlock_user(p, arg1, ret);
10220         return ret;
10221     case TARGET_NR_capget:
10222     case TARGET_NR_capset:
10223     {
10224         struct target_user_cap_header *target_header;
10225         struct target_user_cap_data *target_data = NULL;
10226         struct __user_cap_header_struct header;
10227         struct __user_cap_data_struct data[2];
10228         struct __user_cap_data_struct *dataptr = NULL;
10229         int i, target_datalen;
10230         int data_items = 1;
10231 
10232         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10233             return -TARGET_EFAULT;
10234         }
10235         header.version = tswap32(target_header->version);
10236         header.pid = tswap32(target_header->pid);
10237 
10238         if (header.version != _LINUX_CAPABILITY_VERSION) {
10239             /* Versions 2 and up take a pointer to two user_data structs */
10240             data_items = 2;
10241         }
10242 
10243         target_datalen = sizeof(*target_data) * data_items;
10244 
10245         if (arg2) {
10246             if (num == TARGET_NR_capget) {
10247                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10248             } else {
10249                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10250             }
10251             if (!target_data) {
10252                 unlock_user_struct(target_header, arg1, 0);
10253                 return -TARGET_EFAULT;
10254             }
10255 
10256             if (num == TARGET_NR_capset) {
10257                 for (i = 0; i < data_items; i++) {
10258                     data[i].effective = tswap32(target_data[i].effective);
10259                     data[i].permitted = tswap32(target_data[i].permitted);
10260                     data[i].inheritable = tswap32(target_data[i].inheritable);
10261                 }
10262             }
10263 
10264             dataptr = data;
10265         }
10266 
10267         if (num == TARGET_NR_capget) {
10268             ret = get_errno(capget(&header, dataptr));
10269         } else {
10270             ret = get_errno(capset(&header, dataptr));
10271         }
10272 
10273         /* The kernel always updates version for both capget and capset */
10274         target_header->version = tswap32(header.version);
10275         unlock_user_struct(target_header, arg1, 1);
10276 
10277         if (arg2) {
10278             if (num == TARGET_NR_capget) {
10279                 for (i = 0; i < data_items; i++) {
10280                     target_data[i].effective = tswap32(data[i].effective);
10281                     target_data[i].permitted = tswap32(data[i].permitted);
10282                     target_data[i].inheritable = tswap32(data[i].inheritable);
10283                 }
10284                 unlock_user(target_data, arg2, target_datalen);
10285             } else {
10286                 unlock_user(target_data, arg2, 0);
10287             }
10288         }
10289         return ret;
10290     }
10291     case TARGET_NR_sigaltstack:
10292         return do_sigaltstack(arg1, arg2,
10293                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10294 
10295 #ifdef CONFIG_SENDFILE
10296 #ifdef TARGET_NR_sendfile
10297     case TARGET_NR_sendfile:
10298     {
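          /*
           * Plain sendfile() passes the offset as an abi_long in guest memory
           * (get_user_sal / put_user_sal); sendfile64 below reads an explicit
           * 64-bit offset instead.
           */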
10299         off_t *offp = NULL;
10300         off_t off;
10301         if (arg3) {
10302             ret = get_user_sal(off, arg3);
10303             if (is_error(ret)) {
10304                 return ret;
10305             }
10306             offp = &off;
10307         }
10308         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10309         if (!is_error(ret) && arg3) {
10310             abi_long ret2 = put_user_sal(off, arg3);
10311             if (is_error(ret2)) {
10312                 ret = ret2;
10313             }
10314         }
10315         return ret;
10316     }
10317 #endif
10318 #ifdef TARGET_NR_sendfile64
10319     case TARGET_NR_sendfile64:
10320     {
10321         off_t *offp = NULL;
10322         off_t off;
10323         if (arg3) {
10324             ret = get_user_s64(off, arg3);
10325             if (is_error(ret)) {
10326                 return ret;
10327             }
10328             offp = &off;
10329         }
10330         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10331         if (!is_error(ret) && arg3) {
10332             abi_long ret2 = put_user_s64(off, arg3);
10333             if (is_error(ret2)) {
10334                 ret = ret2;
10335             }
10336         }
10337         return ret;
10338     }
10339 #endif
10340 #endif
10341 #ifdef TARGET_NR_vfork
10342     case TARGET_NR_vfork:
10343         return get_errno(do_fork(cpu_env,
10344                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10345                          0, 0, 0, 0));
10346 #endif
10347 #ifdef TARGET_NR_ugetrlimit
10348     case TARGET_NR_ugetrlimit:
10349     {
10350         struct rlimit rlim;
10351         int resource = target_to_host_resource(arg1);
10352         ret = get_errno(getrlimit(resource, &rlim));
10353         if (!is_error(ret)) {
10354             struct target_rlimit *target_rlim;
10355             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10356                 return -TARGET_EFAULT;
10357             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10358             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10359             unlock_user_struct(target_rlim, arg2, 1);
10360         }
10361         return ret;
10362     }
10363 #endif
10364 #ifdef TARGET_NR_truncate64
10365     case TARGET_NR_truncate64:
10366         if (!(p = lock_user_string(arg1)))
10367             return -TARGET_EFAULT;
10368         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10369         unlock_user(p, arg1, 0);
10370         return ret;
10371 #endif
10372 #ifdef TARGET_NR_ftruncate64
10373     case TARGET_NR_ftruncate64:
10374         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10375 #endif
10376 #ifdef TARGET_NR_stat64
10377     case TARGET_NR_stat64:
10378         if (!(p = lock_user_string(arg1))) {
10379             return -TARGET_EFAULT;
10380         }
10381         ret = get_errno(stat(path(p), &st));
10382         unlock_user(p, arg1, 0);
10383         if (!is_error(ret))
10384             ret = host_to_target_stat64(cpu_env, arg2, &st);
10385         return ret;
10386 #endif
10387 #ifdef TARGET_NR_lstat64
10388     case TARGET_NR_lstat64:
10389         if (!(p = lock_user_string(arg1))) {
10390             return -TARGET_EFAULT;
10391         }
10392         ret = get_errno(lstat(path(p), &st));
10393         unlock_user(p, arg1, 0);
10394         if (!is_error(ret))
10395             ret = host_to_target_stat64(cpu_env, arg2, &st);
10396         return ret;
10397 #endif
10398 #ifdef TARGET_NR_fstat64
10399     case TARGET_NR_fstat64:
10400         ret = get_errno(fstat(arg1, &st));
10401         if (!is_error(ret))
10402             ret = host_to_target_stat64(cpu_env, arg2, &st);
10403         return ret;
10404 #endif
10405 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10406 #ifdef TARGET_NR_fstatat64
10407     case TARGET_NR_fstatat64:
10408 #endif
10409 #ifdef TARGET_NR_newfstatat
10410     case TARGET_NR_newfstatat:
10411 #endif
10412         if (!(p = lock_user_string(arg2))) {
10413             return -TARGET_EFAULT;
10414         }
10415         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10416         unlock_user(p, arg2, 0);
10417         if (!is_error(ret))
10418             ret = host_to_target_stat64(cpu_env, arg3, &st);
10419         return ret;
10420 #endif
10421 #if defined(TARGET_NR_statx)
10422     case TARGET_NR_statx:
10423         {
10424             struct target_statx *target_stx;
10425             int dirfd = arg1;
10426             int flags = arg3;
10427 
10428             p = lock_user_string(arg2);
10429             if (p == NULL) {
10430                 return -TARGET_EFAULT;
10431             }
10432 #if defined(__NR_statx)
10433             {
10434                 /*
10435                  * It is assumed that struct statx is architecture independent.
10436                  */
10437                 struct target_statx host_stx;
10438                 int mask = arg4;
10439 
10440                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10441                 if (!is_error(ret)) {
10442                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10443                         unlock_user(p, arg2, 0);
10444                         return -TARGET_EFAULT;
10445                     }
10446                 }
10447 
10448                 if (ret != -TARGET_ENOSYS) {
10449                     unlock_user(p, arg2, 0);
10450                     return ret;
10451                 }
10452             }
10453 #endif
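              /*
               * Reached when the host has no statx() or it returned ENOSYS:
               * fall back to fstatat() and fill in the target statx fields
               * from the ordinary struct stat below.
               */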
10454             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10455             unlock_user(p, arg2, 0);
10456 
10457             if (!is_error(ret)) {
10458                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10459                     return -TARGET_EFAULT;
10460                 }
10461                 memset(target_stx, 0, sizeof(*target_stx));
10462                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10463                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10464                 __put_user(st.st_ino, &target_stx->stx_ino);
10465                 __put_user(st.st_mode, &target_stx->stx_mode);
10466                 __put_user(st.st_uid, &target_stx->stx_uid);
10467                 __put_user(st.st_gid, &target_stx->stx_gid);
10468                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10469                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10470                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10471                 __put_user(st.st_size, &target_stx->stx_size);
10472                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10473                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10474                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10475                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10476                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10477                 unlock_user_struct(target_stx, arg5, 1);
10478             }
10479         }
10480         return ret;
10481 #endif
10482 #ifdef TARGET_NR_lchown
10483     case TARGET_NR_lchown:
10484         if (!(p = lock_user_string(arg1)))
10485             return -TARGET_EFAULT;
10486         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10487         unlock_user(p, arg1, 0);
10488         return ret;
10489 #endif
10490 #ifdef TARGET_NR_getuid
10491     case TARGET_NR_getuid:
10492         return get_errno(high2lowuid(getuid()));
10493 #endif
10494 #ifdef TARGET_NR_getgid
10495     case TARGET_NR_getgid:
10496         return get_errno(high2lowgid(getgid()));
10497 #endif
10498 #ifdef TARGET_NR_geteuid
10499     case TARGET_NR_geteuid:
10500         return get_errno(high2lowuid(geteuid()));
10501 #endif
10502 #ifdef TARGET_NR_getegid
10503     case TARGET_NR_getegid:
10504         return get_errno(high2lowgid(getegid()));
10505 #endif
10506     case TARGET_NR_setreuid:
10507         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10508     case TARGET_NR_setregid:
10509         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10510     case TARGET_NR_getgroups:
10511         {
10512             int gidsetsize = arg1;
10513             target_id *target_grouplist;
10514             gid_t *grouplist;
10515             int i;
10516 
10517             grouplist = alloca(gidsetsize * sizeof(gid_t));
10518             ret = get_errno(getgroups(gidsetsize, grouplist));
10519             if (gidsetsize == 0)
10520                 return ret;
10521             if (!is_error(ret)) {
10522                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10523                 if (!target_grouplist)
10524                     return -TARGET_EFAULT;
10525                 for(i = 0;i < ret; i++)
10526                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10527                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10528             }
10529         }
10530         return ret;
10531     case TARGET_NR_setgroups:
10532         {
10533             int gidsetsize = arg1;
10534             target_id *target_grouplist;
10535             gid_t *grouplist = NULL;
10536             int i;
10537             if (gidsetsize) {
10538                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10539                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10540                 if (!target_grouplist) {
10541                     return -TARGET_EFAULT;
10542                 }
10543                 for (i = 0; i < gidsetsize; i++) {
10544                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10545                 }
10546                 unlock_user(target_grouplist, arg2, 0);
10547             }
10548             return get_errno(setgroups(gidsetsize, grouplist));
10549         }
10550     case TARGET_NR_fchown:
10551         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10552 #if defined(TARGET_NR_fchownat)
10553     case TARGET_NR_fchownat:
10554         if (!(p = lock_user_string(arg2)))
10555             return -TARGET_EFAULT;
10556         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10557                                  low2highgid(arg4), arg5));
10558         unlock_user(p, arg2, 0);
10559         return ret;
10560 #endif
10561 #ifdef TARGET_NR_setresuid
10562     case TARGET_NR_setresuid:
10563         return get_errno(sys_setresuid(low2highuid(arg1),
10564                                        low2highuid(arg2),
10565                                        low2highuid(arg3)));
10566 #endif
10567 #ifdef TARGET_NR_getresuid
10568     case TARGET_NR_getresuid:
10569         {
10570             uid_t ruid, euid, suid;
10571             ret = get_errno(getresuid(&ruid, &euid, &suid));
10572             if (!is_error(ret)) {
10573                 if (put_user_id(high2lowuid(ruid), arg1)
10574                     || put_user_id(high2lowuid(euid), arg2)
10575                     || put_user_id(high2lowuid(suid), arg3))
10576                     return -TARGET_EFAULT;
10577             }
10578         }
10579         return ret;
10580 #endif
10581 #ifdef TARGET_NR_setresgid
10582     case TARGET_NR_setresgid:
10583         return get_errno(sys_setresgid(low2highgid(arg1),
10584                                        low2highgid(arg2),
10585                                        low2highgid(arg3)));
10586 #endif
10587 #ifdef TARGET_NR_getresgid
10588     case TARGET_NR_getresgid:
10589         {
10590             gid_t rgid, egid, sgid;
10591             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10592             if (!is_error(ret)) {
10593                 if (put_user_id(high2lowgid(rgid), arg1)
10594                     || put_user_id(high2lowgid(egid), arg2)
10595                     || put_user_id(high2lowgid(sgid), arg3))
10596                     return -TARGET_EFAULT;
10597             }
10598         }
10599         return ret;
10600 #endif
10601 #ifdef TARGET_NR_chown
10602     case TARGET_NR_chown:
10603         if (!(p = lock_user_string(arg1)))
10604             return -TARGET_EFAULT;
10605         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10606         unlock_user(p, arg1, 0);
10607         return ret;
10608 #endif
10609     case TARGET_NR_setuid:
10610         return get_errno(sys_setuid(low2highuid(arg1)));
10611     case TARGET_NR_setgid:
10612         return get_errno(sys_setgid(low2highgid(arg1)));
10613     case TARGET_NR_setfsuid:
10614         return get_errno(setfsuid(arg1));
10615     case TARGET_NR_setfsgid:
10616         return get_errno(setfsgid(arg1));
10617 
10618 #ifdef TARGET_NR_lchown32
10619     case TARGET_NR_lchown32:
10620         if (!(p = lock_user_string(arg1)))
10621             return -TARGET_EFAULT;
10622         ret = get_errno(lchown(p, arg2, arg3));
10623         unlock_user(p, arg1, 0);
10624         return ret;
10625 #endif
10626 #ifdef TARGET_NR_getuid32
10627     case TARGET_NR_getuid32:
10628         return get_errno(getuid());
10629 #endif
10630 
10631 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10632    /* Alpha specific */
10633     case TARGET_NR_getxuid:
10634          {
10635             uid_t euid;
10636             euid = geteuid();
10637             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10638          }
10639         return get_errno(getuid());
10640 #endif
10641 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10642    /* Alpha specific */
10643     case TARGET_NR_getxgid:
10644          {
10645             gid_t egid;
10646             egid = getegid();
10647             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10648          }
10649         return get_errno(getgid());
10650 #endif
10651 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10652     /* Alpha specific */
10653     case TARGET_NR_osf_getsysinfo:
10654         ret = -TARGET_EOPNOTSUPP;
10655         switch (arg1) {
10656           case TARGET_GSI_IEEE_FP_CONTROL:
10657             {
10658                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10659                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10660 
10661                 swcr &= ~SWCR_STATUS_MASK;
10662                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10663 
10664                 if (put_user_u64(swcr, arg2))
10665                     return -TARGET_EFAULT;
10666                 ret = 0;
10667             }
10668             break;
10669 
10670           /* case GSI_IEEE_STATE_AT_SIGNAL:
10671              -- Not implemented in linux kernel.
10672              case GSI_UACPROC:
10673              -- Retrieves current unaligned access state; not much used.
10674              case GSI_PROC_TYPE:
10675              -- Retrieves implver information; surely not used.
10676              case GSI_GET_HWRPB:
10677              -- Grabs a copy of the HWRPB; surely not used.
10678           */
10679         }
10680         return ret;
10681 #endif
10682 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10683     /* Alpha specific */
10684     case TARGET_NR_osf_setsysinfo:
10685         ret = -TARGET_EOPNOTSUPP;
10686         switch (arg1) {
10687           case TARGET_SSI_IEEE_FP_CONTROL:
10688             {
10689                 uint64_t swcr, fpcr;
10690 
10691                 if (get_user_u64 (swcr, arg2)) {
10692                     return -TARGET_EFAULT;
10693                 }
10694 
10695                 /*
10696                  * The kernel calls swcr_update_status to update the
10697                  * status bits from the fpcr at every point that it
10698                  * could be queried.  Therefore, we store the status
10699                  * bits only in FPCR.
10700                  */
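                /*
                 * For illustration (restating the read path above, not new
                 * behaviour): TARGET_GSI_IEEE_FP_CONTROL rebuilds the
                 * guest-visible swcr from the FPCR-held status bits with
                 *
                 *     swcr &= ~SWCR_STATUS_MASK;
                 *     swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
                 *
                 * so only the trap-enable and mapping bits need to live in
                 * the CPU state's swcr field here.
                 */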
10701                 ((CPUAlphaState *)cpu_env)->swcr
10702                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10703 
10704                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10705                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10706                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10707                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10708                 ret = 0;
10709             }
10710             break;
10711 
10712           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10713             {
10714                 uint64_t exc, fpcr, fex;
10715 
10716                 if (get_user_u64(exc, arg2)) {
10717                     return -TARGET_EFAULT;
10718                 }
10719                 exc &= SWCR_STATUS_MASK;
10720                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10721 
10722                 /* Old exceptions are not signaled.  */
10723                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10724                 fex = exc & ~fex;
10725                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10726                 fex &= ((CPUArchState *)cpu_env)->swcr;
10727 
10728                 /* Update the hardware fpcr.  */
10729                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10730                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10731 
10732                 if (fex) {
10733                     int si_code = TARGET_FPE_FLTUNK;
10734                     target_siginfo_t info;
10735 
10736                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10737                         si_code = TARGET_FPE_FLTUND;
10738                     }
10739                     if (fex & SWCR_TRAP_ENABLE_INE) {
10740                         si_code = TARGET_FPE_FLTRES;
10741                     }
10742                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10743                         si_code = TARGET_FPE_FLTUND;
10744                     }
10745                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10746                         si_code = TARGET_FPE_FLTOVF;
10747                     }
10748                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10749                         si_code = TARGET_FPE_FLTDIV;
10750                     }
10751                     if (fex & SWCR_TRAP_ENABLE_INV) {
10752                         si_code = TARGET_FPE_FLTINV;
10753                     }
10754 
10755                     info.si_signo = SIGFPE;
10756                     info.si_errno = 0;
10757                     info.si_code = si_code;
10758                     info._sifields._sigfault._addr
10759                         = ((CPUArchState *)cpu_env)->pc;
10760                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10761                                  QEMU_SI_FAULT, &info);
10762                 }
10763                 ret = 0;
10764             }
10765             break;
10766 
10767           /* case SSI_NVPAIRS:
10768              -- Used with SSIN_UACPROC to enable unaligned accesses.
10769              case SSI_IEEE_STATE_AT_SIGNAL:
10770              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10771              -- Not implemented in linux kernel
10772           */
10773         }
10774         return ret;
10775 #endif
10776 #ifdef TARGET_NR_osf_sigprocmask
10777     /* Alpha specific.  */
10778     case TARGET_NR_osf_sigprocmask:
10779         {
10780             abi_ulong mask;
10781             int how;
10782             sigset_t set, oldset;
10783 
10784             switch(arg1) {
10785             case TARGET_SIG_BLOCK:
10786                 how = SIG_BLOCK;
10787                 break;
10788             case TARGET_SIG_UNBLOCK:
10789                 how = SIG_UNBLOCK;
10790                 break;
10791             case TARGET_SIG_SETMASK:
10792                 how = SIG_SETMASK;
10793                 break;
10794             default:
10795                 return -TARGET_EINVAL;
10796             }
10797             mask = arg2;
10798             target_to_host_old_sigset(&set, &mask);
10799             ret = do_sigprocmask(how, &set, &oldset);
10800             if (!ret) {
10801                 host_to_target_old_sigset(&mask, &oldset);
10802                 ret = mask;
10803             }
10804         }
10805         return ret;
10806 #endif
10807 
10808 #ifdef TARGET_NR_getgid32
10809     case TARGET_NR_getgid32:
10810         return get_errno(getgid());
10811 #endif
10812 #ifdef TARGET_NR_geteuid32
10813     case TARGET_NR_geteuid32:
10814         return get_errno(geteuid());
10815 #endif
10816 #ifdef TARGET_NR_getegid32
10817     case TARGET_NR_getegid32:
10818         return get_errno(getegid());
10819 #endif
10820 #ifdef TARGET_NR_setreuid32
10821     case TARGET_NR_setreuid32:
10822         return get_errno(setreuid(arg1, arg2));
10823 #endif
10824 #ifdef TARGET_NR_setregid32
10825     case TARGET_NR_setregid32:
10826         return get_errno(setregid(arg1, arg2));
10827 #endif
10828 #ifdef TARGET_NR_getgroups32
10829     case TARGET_NR_getgroups32:
10830         {
10831             int gidsetsize = arg1;
10832             uint32_t *target_grouplist;
10833             gid_t *grouplist;
10834             int i;
10835 
10836             grouplist = alloca(gidsetsize * sizeof(gid_t));
10837             ret = get_errno(getgroups(gidsetsize, grouplist));
10838             if (gidsetsize == 0)
10839                 return ret;
10840             if (!is_error(ret)) {
10841                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10842                 if (!target_grouplist) {
10843                     return -TARGET_EFAULT;
10844                 }
10845                 for (i = 0; i < ret; i++)
10846                     target_grouplist[i] = tswap32(grouplist[i]);
10847                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10848             }
10849         }
10850         return ret;
10851 #endif
10852 #ifdef TARGET_NR_setgroups32
10853     case TARGET_NR_setgroups32:
10854         {
10855             int gidsetsize = arg1;
10856             uint32_t *target_grouplist;
10857             gid_t *grouplist;
10858             int i;
10859 
10860             grouplist = alloca(gidsetsize * sizeof(gid_t));
10861             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10862             if (!target_grouplist) {
10863                 return -TARGET_EFAULT;
10864             }
10865             for (i = 0; i < gidsetsize; i++)
10866                 grouplist[i] = tswap32(target_grouplist[i]);
10867             unlock_user(target_grouplist, arg2, 0);
10868             return get_errno(setgroups(gidsetsize, grouplist));
10869         }
10870 #endif
10871 #ifdef TARGET_NR_fchown32
10872     case TARGET_NR_fchown32:
10873         return get_errno(fchown(arg1, arg2, arg3));
10874 #endif
10875 #ifdef TARGET_NR_setresuid32
10876     case TARGET_NR_setresuid32:
10877         return get_errno(sys_setresuid(arg1, arg2, arg3));
10878 #endif
10879 #ifdef TARGET_NR_getresuid32
10880     case TARGET_NR_getresuid32:
10881         {
10882             uid_t ruid, euid, suid;
10883             ret = get_errno(getresuid(&ruid, &euid, &suid));
10884             if (!is_error(ret)) {
10885                 if (put_user_u32(ruid, arg1)
10886                     || put_user_u32(euid, arg2)
10887                     || put_user_u32(suid, arg3))
10888                     return -TARGET_EFAULT;
10889             }
10890         }
10891         return ret;
10892 #endif
10893 #ifdef TARGET_NR_setresgid32
10894     case TARGET_NR_setresgid32:
10895         return get_errno(sys_setresgid(arg1, arg2, arg3));
10896 #endif
10897 #ifdef TARGET_NR_getresgid32
10898     case TARGET_NR_getresgid32:
10899         {
10900             gid_t rgid, egid, sgid;
10901             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10902             if (!is_error(ret)) {
10903                 if (put_user_u32(rgid, arg1)
10904                     || put_user_u32(egid, arg2)
10905                     || put_user_u32(sgid, arg3))
10906                     return -TARGET_EFAULT;
10907             }
10908         }
10909         return ret;
10910 #endif
10911 #ifdef TARGET_NR_chown32
10912     case TARGET_NR_chown32:
10913         if (!(p = lock_user_string(arg1)))
10914             return -TARGET_EFAULT;
10915         ret = get_errno(chown(p, arg2, arg3));
10916         unlock_user(p, arg1, 0);
10917         return ret;
10918 #endif
10919 #ifdef TARGET_NR_setuid32
10920     case TARGET_NR_setuid32:
10921         return get_errno(sys_setuid(arg1));
10922 #endif
10923 #ifdef TARGET_NR_setgid32
10924     case TARGET_NR_setgid32:
10925         return get_errno(sys_setgid(arg1));
10926 #endif
10927 #ifdef TARGET_NR_setfsuid32
10928     case TARGET_NR_setfsuid32:
10929         return get_errno(setfsuid(arg1));
10930 #endif
10931 #ifdef TARGET_NR_setfsgid32
10932     case TARGET_NR_setfsgid32:
10933         return get_errno(setfsgid(arg1));
10934 #endif
10935 #ifdef TARGET_NR_mincore
10936     case TARGET_NR_mincore:
10937         {
10938             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10939             if (!a) {
10940                 return -TARGET_ENOMEM;
10941             }
10942             p = lock_user_string(arg3);
10943             if (!p) {
10944                 ret = -TARGET_EFAULT;
10945             } else {
10946                 ret = get_errno(mincore(a, arg2, p));
10947                 unlock_user(p, arg3, ret);
10948             }
10949             unlock_user(a, arg1, 0);
10950         }
10951         return ret;
10952 #endif
10953 #ifdef TARGET_NR_arm_fadvise64_64
10954     case TARGET_NR_arm_fadvise64_64:
10955         /* arm_fadvise64_64 looks like fadvise64_64 but
10956          * with different argument order: fd, advice, offset, len
10957          * rather than the usual fd, offset, len, advice.
10958          * Note that offset and len are both 64-bit so appear as
10959          * pairs of 32-bit registers.
10960          */
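        /*
         * Illustrative mapping, derived from the call below: a guest
         * fadvise64_64(fd, advice, offset, len) call arrives with
         *     arg1 = fd, arg2 = advice,
         *     arg3/arg4 = the two 32-bit halves of offset,
         *     arg5/arg6 = the two 32-bit halves of len,
         * and target_offset64() reassembles each 64-bit value before the
         * host posix_fadvise() call.
         */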
10961         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10962                             target_offset64(arg5, arg6), arg2);
10963         return -host_to_target_errno(ret);
10964 #endif
10965 
10966 #if TARGET_ABI_BITS == 32
10967 
10968 #ifdef TARGET_NR_fadvise64_64
10969     case TARGET_NR_fadvise64_64:
10970 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10971         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10972         ret = arg2;
10973         arg2 = arg3;
10974         arg3 = arg4;
10975         arg4 = arg5;
10976         arg5 = arg6;
10977         arg6 = ret;
10978 #else
10979         /* 6 args: fd, offset (high, low), len (high, low), advice */
10980         if (regpairs_aligned(cpu_env, num)) {
10981             /* offset is in (3,4), len in (5,6) and advice in 7 */
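            /*
             * Sketch of the underlying ABI rule (an assumption about the
             * targets for which regpairs_aligned() returns true): 64-bit
             * syscall arguments must start in an even-numbered register,
             * so the kernel ABI inserts a padding slot after fd, roughly
             *     fadvise64_64(fd, <pad>, offset (2 regs), len (2 regs),
             *                  advice)
             * which is why every argument after arg1 is shifted down by
             * one before the common code below runs.
             */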
10982             arg2 = arg3;
10983             arg3 = arg4;
10984             arg4 = arg5;
10985             arg5 = arg6;
10986             arg6 = arg7;
10987         }
10988 #endif
10989         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10990                             target_offset64(arg4, arg5), arg6);
10991         return -host_to_target_errno(ret);
10992 #endif
10993 
10994 #ifdef TARGET_NR_fadvise64
10995     case TARGET_NR_fadvise64:
10996         /* 5 args: fd, offset (high, low), len, advice */
10997         if (regpairs_aligned(cpu_env, num)) {
10998             /* offset is in (3,4), len in 5 and advice in 6 */
10999             arg2 = arg3;
11000             arg3 = arg4;
11001             arg4 = arg5;
11002             arg5 = arg6;
11003         }
11004         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11005         return -host_to_target_errno(ret);
11006 #endif
11007 
11008 #else /* not a 32-bit ABI */
11009 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11010 #ifdef TARGET_NR_fadvise64_64
11011     case TARGET_NR_fadvise64_64:
11012 #endif
11013 #ifdef TARGET_NR_fadvise64
11014     case TARGET_NR_fadvise64:
11015 #endif
11016 #ifdef TARGET_S390X
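        /*
         * The numeric remapping below is presumably needed because s390
         * defines some POSIX_FADV_* advice values differently from the
         * asm-generic ABI (stated here as an assumption): guest 6/7 are
         * translated to the host's DONTNEED/NOREUSE, while guest 4/5 have
         * no host equivalent and are mapped to deliberately invalid values
         * so the host call fails cleanly.
         */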
11017         switch (arg4) {
11018         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11019         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11020         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11021         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11022         default: break;
11023         }
11024 #endif
11025         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11026 #endif
11027 #endif /* end of 64-bit ABI fadvise handling */
11028 
11029 #ifdef TARGET_NR_madvise
11030     case TARGET_NR_madvise:
11031         /* A straight passthrough may not be safe because qemu sometimes
11032            turns private file-backed mappings into anonymous mappings.
11033            This will break MADV_DONTNEED.
11034            This is a hint, so ignoring and returning success is ok.  */
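        /* Concrete example of the failure mode (hypothetical scenario, not
           taken from a bug report): if the guest mmap()ed a file MAP_PRIVATE
           but qemu backed it with an anonymous mapping, a host
           madvise(MADV_DONTNEED) would hand back zero-filled pages instead
           of repopulating them from the file, silently corrupting
           guest-visible data.  Since madvise() is only advisory, doing
           nothing here is a conforming implementation.  */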
11035         return 0;
11036 #endif
11037 #if TARGET_ABI_BITS == 32
11038     case TARGET_NR_fcntl64:
11039     {
11040         int cmd;
11041         struct flock64 fl;
11042         from_flock64_fn *copyfrom = copy_from_user_flock64;
11043         to_flock64_fn *copyto = copy_to_user_flock64;
11044 
11045 #ifdef TARGET_ARM
11046         if (!((CPUARMState *)cpu_env)->eabi) {
11047             copyfrom = copy_from_user_oabi_flock64;
11048             copyto = copy_to_user_oabi_flock64;
11049         }
11050 #endif
11051 
11052         cmd = target_to_host_fcntl_cmd(arg2);
11053         if (cmd == -TARGET_EINVAL) {
11054             return cmd;
11055         }
11056 
11057         switch(arg2) {
11058         case TARGET_F_GETLK64:
11059             ret = copyfrom(&fl, arg3);
11060             if (ret) {
11061                 break;
11062             }
11063             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11064             if (ret == 0) {
11065                 ret = copyto(arg3, &fl);
11066             }
11067             break;
11068 
11069         case TARGET_F_SETLK64:
11070         case TARGET_F_SETLKW64:
11071             ret = copyfrom(&fl, arg3);
11072             if (ret) {
11073                 break;
11074             }
11075             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11076             break;
11077         default:
11078             ret = do_fcntl(arg1, arg2, arg3);
11079             break;
11080         }
11081         return ret;
11082     }
11083 #endif
11084 #ifdef TARGET_NR_cacheflush
11085     case TARGET_NR_cacheflush:
11086         /* self-modifying code is handled automatically, so nothing needed */
11087         return 0;
11088 #endif
11089 #ifdef TARGET_NR_getpagesize
11090     case TARGET_NR_getpagesize:
11091         return TARGET_PAGE_SIZE;
11092 #endif
11093     case TARGET_NR_gettid:
11094         return get_errno(sys_gettid());
11095 #ifdef TARGET_NR_readahead
11096     case TARGET_NR_readahead:
11097 #if TARGET_ABI_BITS == 32
11098         if (regpairs_aligned(cpu_env, num)) {
11099             arg2 = arg3;
11100             arg3 = arg4;
11101             arg4 = arg5;
11102         }
11103         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11104 #else
11105         ret = get_errno(readahead(arg1, arg2, arg3));
11106 #endif
11107         return ret;
11108 #endif
11109 #ifdef CONFIG_ATTR
11110 #ifdef TARGET_NR_setxattr
11111     case TARGET_NR_listxattr:
11112     case TARGET_NR_llistxattr:
11113     {
11114         void *p, *b = 0;
11115         if (arg2) {
11116             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11117             if (!b) {
11118                 return -TARGET_EFAULT;
11119             }
11120         }
11121         p = lock_user_string(arg1);
11122         if (p) {
11123             if (num == TARGET_NR_listxattr) {
11124                 ret = get_errno(listxattr(p, b, arg3));
11125             } else {
11126                 ret = get_errno(llistxattr(p, b, arg3));
11127             }
11128         } else {
11129             ret = -TARGET_EFAULT;
11130         }
11131         unlock_user(p, arg1, 0);
11132         unlock_user(b, arg2, arg3);
11133         return ret;
11134     }
11135     case TARGET_NR_flistxattr:
11136     {
11137         void *b = 0;
11138         if (arg2) {
11139             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11140             if (!b) {
11141                 return -TARGET_EFAULT;
11142             }
11143         }
11144         ret = get_errno(flistxattr(arg1, b, arg3));
11145         unlock_user(b, arg2, arg3);
11146         return ret;
11147     }
11148     case TARGET_NR_setxattr:
11149     case TARGET_NR_lsetxattr:
11150         {
11151             void *p, *n, *v = 0;
11152             if (arg3) {
11153                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11154                 if (!v) {
11155                     return -TARGET_EFAULT;
11156                 }
11157             }
11158             p = lock_user_string(arg1);
11159             n = lock_user_string(arg2);
11160             if (p && n) {
11161                 if (num == TARGET_NR_setxattr) {
11162                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11163                 } else {
11164                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11165                 }
11166             } else {
11167                 ret = -TARGET_EFAULT;
11168             }
11169             unlock_user(p, arg1, 0);
11170             unlock_user(n, arg2, 0);
11171             unlock_user(v, arg3, 0);
11172         }
11173         return ret;
11174     case TARGET_NR_fsetxattr:
11175         {
11176             void *n, *v = 0;
11177             if (arg3) {
11178                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11179                 if (!v) {
11180                     return -TARGET_EFAULT;
11181                 }
11182             }
11183             n = lock_user_string(arg2);
11184             if (n) {
11185                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11186             } else {
11187                 ret = -TARGET_EFAULT;
11188             }
11189             unlock_user(n, arg2, 0);
11190             unlock_user(v, arg3, 0);
11191         }
11192         return ret;
11193     case TARGET_NR_getxattr:
11194     case TARGET_NR_lgetxattr:
11195         {
11196             void *p, *n, *v = 0;
11197             if (arg3) {
11198                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11199                 if (!v) {
11200                     return -TARGET_EFAULT;
11201                 }
11202             }
11203             p = lock_user_string(arg1);
11204             n = lock_user_string(arg2);
11205             if (p && n) {
11206                 if (num == TARGET_NR_getxattr) {
11207                     ret = get_errno(getxattr(p, n, v, arg4));
11208                 } else {
11209                     ret = get_errno(lgetxattr(p, n, v, arg4));
11210                 }
11211             } else {
11212                 ret = -TARGET_EFAULT;
11213             }
11214             unlock_user(p, arg1, 0);
11215             unlock_user(n, arg2, 0);
11216             unlock_user(v, arg3, arg4);
11217         }
11218         return ret;
11219     case TARGET_NR_fgetxattr:
11220         {
11221             void *n, *v = 0;
11222             if (arg3) {
11223                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11224                 if (!v) {
11225                     return -TARGET_EFAULT;
11226                 }
11227             }
11228             n = lock_user_string(arg2);
11229             if (n) {
11230                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11231             } else {
11232                 ret = -TARGET_EFAULT;
11233             }
11234             unlock_user(n, arg2, 0);
11235             unlock_user(v, arg3, arg4);
11236         }
11237         return ret;
11238     case TARGET_NR_removexattr:
11239     case TARGET_NR_lremovexattr:
11240         {
11241             void *p, *n;
11242             p = lock_user_string(arg1);
11243             n = lock_user_string(arg2);
11244             if (p && n) {
11245                 if (num == TARGET_NR_removexattr) {
11246                     ret = get_errno(removexattr(p, n));
11247                 } else {
11248                     ret = get_errno(lremovexattr(p, n));
11249                 }
11250             } else {
11251                 ret = -TARGET_EFAULT;
11252             }
11253             unlock_user(p, arg1, 0);
11254             unlock_user(n, arg2, 0);
11255         }
11256         return ret;
11257     case TARGET_NR_fremovexattr:
11258         {
11259             void *n;
11260             n = lock_user_string(arg2);
11261             if (n) {
11262                 ret = get_errno(fremovexattr(arg1, n));
11263             } else {
11264                 ret = -TARGET_EFAULT;
11265             }
11266             unlock_user(n, arg2, 0);
11267         }
11268         return ret;
11269 #endif
11270 #endif /* CONFIG_ATTR */
11271 #ifdef TARGET_NR_set_thread_area
11272     case TARGET_NR_set_thread_area:
11273 #if defined(TARGET_MIPS)
11274       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11275       return 0;
11276 #elif defined(TARGET_CRIS)
11277       if (arg1 & 0xff)
11278           ret = -TARGET_EINVAL;
11279       else {
11280           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11281           ret = 0;
11282       }
11283       return ret;
11284 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11285       return do_set_thread_area(cpu_env, arg1);
11286 #elif defined(TARGET_M68K)
11287       {
11288           TaskState *ts = cpu->opaque;
11289           ts->tp_value = arg1;
11290           return 0;
11291       }
11292 #else
11293       return -TARGET_ENOSYS;
11294 #endif
11295 #endif
11296 #ifdef TARGET_NR_get_thread_area
11297     case TARGET_NR_get_thread_area:
11298 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11299         return do_get_thread_area(cpu_env, arg1);
11300 #elif defined(TARGET_M68K)
11301         {
11302             TaskState *ts = cpu->opaque;
11303             return ts->tp_value;
11304         }
11305 #else
11306         return -TARGET_ENOSYS;
11307 #endif
11308 #endif
11309 #ifdef TARGET_NR_getdomainname
11310     case TARGET_NR_getdomainname:
11311         return -TARGET_ENOSYS;
11312 #endif
11313 
11314 #ifdef TARGET_NR_clock_settime
11315     case TARGET_NR_clock_settime:
11316     {
11317         struct timespec ts;
11318 
11319         ret = target_to_host_timespec(&ts, arg2);
11320         if (!is_error(ret)) {
11321             ret = get_errno(clock_settime(arg1, &ts));
11322         }
11323         return ret;
11324     }
11325 #endif
11326 #ifdef TARGET_NR_clock_gettime
11327     case TARGET_NR_clock_gettime:
11328     {
11329         struct timespec ts;
11330         ret = get_errno(clock_gettime(arg1, &ts));
11331         if (!is_error(ret)) {
11332             ret = host_to_target_timespec(arg2, &ts);
11333         }
11334         return ret;
11335     }
11336 #endif
11337 #ifdef TARGET_NR_clock_getres
11338     case TARGET_NR_clock_getres:
11339     {
11340         struct timespec ts;
11341         ret = get_errno(clock_getres(arg1, &ts));
11342         if (!is_error(ret)) {
11343             host_to_target_timespec(arg2, &ts);
11344         }
11345         return ret;
11346     }
11347 #endif
11348 #ifdef TARGET_NR_clock_nanosleep
11349     case TARGET_NR_clock_nanosleep:
11350     {
11351         struct timespec ts;
11352         target_to_host_timespec(&ts, arg3);
11353         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11354                                              &ts, arg4 ? &ts : NULL));
11355         if (arg4)
11356             host_to_target_timespec(arg4, &ts);
11357 
11358 #if defined(TARGET_PPC)
11359         /* clock_nanosleep is odd in that it returns positive errno values.
11360          * On PPC, CR0 bit 3 should be set in such a situation. */
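        /* Background, stated as an assumption about the PPC Linux syscall
         * convention rather than taken from this file: the kernel flags a
         * failed syscall by setting the summary-overflow (SO) bit of CR
         * field 0 and leaving the positive errno in r3; qemu appears to
         * keep each 4-bit CR field in crf[i] with SO as the least
         * significant bit, hence the "|= 1" below.
         */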
11361         if (ret && ret != -TARGET_ERESTARTSYS) {
11362             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11363         }
11364 #endif
11365         return ret;
11366     }
11367 #endif
11368 
11369 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11370     case TARGET_NR_set_tid_address:
11371         return get_errno(set_tid_address((int *)g2h(arg1)));
11372 #endif
11373 
11374     case TARGET_NR_tkill:
11375         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11376 
11377     case TARGET_NR_tgkill:
11378         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11379                          target_to_host_signal(arg3)));
11380 
11381 #ifdef TARGET_NR_set_robust_list
11382     case TARGET_NR_set_robust_list:
11383     case TARGET_NR_get_robust_list:
11384         /* The ABI for supporting robust futexes has userspace pass
11385          * the kernel a pointer to a linked list which is updated by
11386          * userspace after the syscall; the list is walked by the kernel
11387          * when the thread exits. Since the linked list in QEMU guest
11388          * memory isn't a valid linked list for the host and we have
11389          * no way to reliably intercept the thread-death event, we can't
11390          * support these. Silently return ENOSYS so that guest userspace
11391          * falls back to a non-robust futex implementation (which should
11392          * be OK except in the corner case of the guest crashing while
11393          * holding a mutex that is shared with another process via
11394          * shared memory).
11395          */
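        /* For reference, a sketch of the per-thread structure the guest
         * registers here (layout per the Linux robust-futex ABI in
         * linux/futex.h; nothing in this file uses it):
         *
         *     struct robust_list { struct robust_list *next; };
         *     struct robust_list_head {
         *         struct robust_list list;
         *         long futex_offset;
         *         struct robust_list *list_op_pending;
         *     };
         *
         * It is the kernel's walk of this guest-pointer list at thread exit
         * that cannot be emulated reliably, hence the ENOSYS below.
         */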
11396         return -TARGET_ENOSYS;
11397 #endif
11398 
11399 #if defined(TARGET_NR_utimensat)
11400     case TARGET_NR_utimensat:
11401         {
11402             struct timespec *tsp, ts[2];
11403             if (!arg3) {
11404                 tsp = NULL;
11405             } else {
11406                 target_to_host_timespec(ts, arg3);
11407                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11408                 tsp = ts;
11409             }
11410             if (!arg2)
11411                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11412             else {
11413                 if (!(p = lock_user_string(arg2))) {
11414                     return -TARGET_EFAULT;
11415                 }
11416                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11417                 unlock_user(p, arg2, 0);
11418             }
11419         }
11420         return ret;
11421 #endif
11422     case TARGET_NR_futex:
11423         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11424 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11425     case TARGET_NR_inotify_init:
11426         ret = get_errno(sys_inotify_init());
11427         if (ret >= 0) {
11428             fd_trans_register(ret, &target_inotify_trans);
11429         }
11430         return ret;
11431 #endif
11432 #ifdef CONFIG_INOTIFY1
11433 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11434     case TARGET_NR_inotify_init1:
11435         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11436                                           fcntl_flags_tbl)));
11437         if (ret >= 0) {
11438             fd_trans_register(ret, &target_inotify_trans);
11439         }
11440         return ret;
11441 #endif
11442 #endif
11443 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11444     case TARGET_NR_inotify_add_watch:
11445         p = lock_user_string(arg2);
11446         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11447         unlock_user(p, arg2, 0);
11448         return ret;
11449 #endif
11450 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11451     case TARGET_NR_inotify_rm_watch:
11452         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11453 #endif
11454 
11455 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11456     case TARGET_NR_mq_open:
11457         {
11458             struct mq_attr posix_mq_attr;
11459             struct mq_attr *pposix_mq_attr;
11460             int host_flags;
11461 
11462             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11463             pposix_mq_attr = NULL;
11464             if (arg4) {
11465                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11466                     return -TARGET_EFAULT;
11467                 }
11468                 pposix_mq_attr = &posix_mq_attr;
11469             }
11470             p = lock_user_string(arg1 - 1);
11471             if (!p) {
11472                 return -TARGET_EFAULT;
11473             }
11474             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11475             unlock_user (p, arg1, 0);
11476         }
11477         return ret;
11478 
11479     case TARGET_NR_mq_unlink:
11480         p = lock_user_string(arg1 - 1);
11481         if (!p) {
11482             return -TARGET_EFAULT;
11483         }
11484         ret = get_errno(mq_unlink(p));
11485         unlock_user (p, arg1, 0);
11486         return ret;
11487 
11488     case TARGET_NR_mq_timedsend:
11489         {
11490             struct timespec ts;
11491 
11492             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11493             if (arg5 != 0) {
11494                 target_to_host_timespec(&ts, arg5);
11495                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11496                 host_to_target_timespec(arg5, &ts);
11497             } else {
11498                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11499             }
11500             unlock_user (p, arg2, arg3);
11501         }
11502         return ret;
11503 
11504     case TARGET_NR_mq_timedreceive:
11505         {
11506             struct timespec ts;
11507             unsigned int prio;
11508 
11509             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11510             if (arg5 != 0) {
11511                 target_to_host_timespec(&ts, arg5);
11512                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11513                                                      &prio, &ts));
11514                 host_to_target_timespec(arg5, &ts);
11515             } else {
11516                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11517                                                      &prio, NULL));
11518             }
11519             unlock_user (p, arg2, arg3);
11520             if (arg4 != 0)
11521                 put_user_u32(prio, arg4);
11522         }
11523         return ret;
11524 
11525     /* Not implemented for now... */
11526 /*     case TARGET_NR_mq_notify: */
11527 /*         break; */
11528 
11529     case TARGET_NR_mq_getsetattr:
11530         {
11531             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11532             ret = 0;
11533             if (arg2 != 0) {
11534                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11535                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11536                                            &posix_mq_attr_out));
11537             } else if (arg3 != 0) {
11538                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11539             }
11540             if (ret == 0 && arg3 != 0) {
11541                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11542             }
11543         }
11544         return ret;
11545 #endif
11546 
11547 #ifdef CONFIG_SPLICE
11548 #ifdef TARGET_NR_tee
11549     case TARGET_NR_tee:
11550         {
11551             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11552         }
11553         return ret;
11554 #endif
11555 #ifdef TARGET_NR_splice
11556     case TARGET_NR_splice:
11557         {
11558             loff_t loff_in, loff_out;
11559             loff_t *ploff_in = NULL, *ploff_out = NULL;
11560             if (arg2) {
11561                 if (get_user_u64(loff_in, arg2)) {
11562                     return -TARGET_EFAULT;
11563                 }
11564                 ploff_in = &loff_in;
11565             }
11566             if (arg4) {
11567                 if (get_user_u64(loff_out, arg4)) {
11568                     return -TARGET_EFAULT;
11569                 }
11570                 ploff_out = &loff_out;
11571             }
11572             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11573             if (arg2) {
11574                 if (put_user_u64(loff_in, arg2)) {
11575                     return -TARGET_EFAULT;
11576                 }
11577             }
11578             if (arg4) {
11579                 if (put_user_u64(loff_out, arg4)) {
11580                     return -TARGET_EFAULT;
11581                 }
11582             }
11583         }
11584         return ret;
11585 #endif
11586 #ifdef TARGET_NR_vmsplice
11587     case TARGET_NR_vmsplice:
11588         {
11589             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11590             if (vec != NULL) {
11591                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11592                 unlock_iovec(vec, arg2, arg3, 0);
11593             } else {
11594                 ret = -host_to_target_errno(errno);
11595             }
11596         }
11597         return ret;
11598 #endif
11599 #endif /* CONFIG_SPLICE */
11600 #ifdef CONFIG_EVENTFD
11601 #if defined(TARGET_NR_eventfd)
11602     case TARGET_NR_eventfd:
11603         ret = get_errno(eventfd(arg1, 0));
11604         if (ret >= 0) {
11605             fd_trans_register(ret, &target_eventfd_trans);
11606         }
11607         return ret;
11608 #endif
11609 #if defined(TARGET_NR_eventfd2)
11610     case TARGET_NR_eventfd2:
11611     {
11612         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11613         if (arg2 & TARGET_O_NONBLOCK) {
11614             host_flags |= O_NONBLOCK;
11615         }
11616         if (arg2 & TARGET_O_CLOEXEC) {
11617             host_flags |= O_CLOEXEC;
11618         }
11619         ret = get_errno(eventfd(arg1, host_flags));
11620         if (ret >= 0) {
11621             fd_trans_register(ret, &target_eventfd_trans);
11622         }
11623         return ret;
11624     }
11625 #endif
11626 #endif /* CONFIG_EVENTFD  */
11627 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11628     case TARGET_NR_fallocate:
11629 #if TARGET_ABI_BITS == 32
11630         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11631                                   target_offset64(arg5, arg6)));
11632 #else
11633         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11634 #endif
11635         return ret;
11636 #endif
11637 #if defined(CONFIG_SYNC_FILE_RANGE)
11638 #if defined(TARGET_NR_sync_file_range)
11639     case TARGET_NR_sync_file_range:
11640 #if TARGET_ABI_BITS == 32
11641 #if defined(TARGET_MIPS)
11642         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11643                                         target_offset64(arg5, arg6), arg7));
11644 #else
11645         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11646                                         target_offset64(arg4, arg5), arg6));
11647 #endif /* !TARGET_MIPS */
11648 #else
11649         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11650 #endif
11651         return ret;
11652 #endif
11653 #if defined(TARGET_NR_sync_file_range2)
11654     case TARGET_NR_sync_file_range2:
11655         /* This is like sync_file_range but the arguments are reordered */
11656 #if TARGET_ABI_BITS == 32
11657         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11658                                         target_offset64(arg5, arg6), arg2));
11659 #else
11660         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11661 #endif
11662         return ret;
11663 #endif
11664 #endif
11665 #if defined(TARGET_NR_signalfd4)
11666     case TARGET_NR_signalfd4:
11667         return do_signalfd4(arg1, arg2, arg4);
11668 #endif
11669 #if defined(TARGET_NR_signalfd)
11670     case TARGET_NR_signalfd:
11671         return do_signalfd4(arg1, arg2, 0);
11672 #endif
11673 #if defined(CONFIG_EPOLL)
11674 #if defined(TARGET_NR_epoll_create)
11675     case TARGET_NR_epoll_create:
11676         return get_errno(epoll_create(arg1));
11677 #endif
11678 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11679     case TARGET_NR_epoll_create1:
11680         return get_errno(epoll_create1(arg1));
11681 #endif
11682 #if defined(TARGET_NR_epoll_ctl)
11683     case TARGET_NR_epoll_ctl:
11684     {
11685         struct epoll_event ep;
11686         struct epoll_event *epp = 0;
11687         if (arg4) {
11688             struct target_epoll_event *target_ep;
11689             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11690                 return -TARGET_EFAULT;
11691             }
11692             ep.events = tswap32(target_ep->events);
11693             /* The epoll_data_t union is just opaque data to the kernel,
11694              * so we transfer all 64 bits across and need not worry what
11695              * actual data type it is.
11696              */
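            /* For reference, the standard sys/epoll.h definition (not used
             * directly here):
             *
             *     typedef union epoll_data {
             *         void *ptr;
             *         int fd;
             *         uint32_t u32;
             *         uint64_t u64;
             *     } epoll_data_t;
             *
             * Copying the u64 member therefore preserves whichever member
             * the guest actually stored.
             */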
11697             ep.data.u64 = tswap64(target_ep->data.u64);
11698             unlock_user_struct(target_ep, arg4, 0);
11699             epp = &ep;
11700         }
11701         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11702     }
11703 #endif
11704 
11705 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11706 #if defined(TARGET_NR_epoll_wait)
11707     case TARGET_NR_epoll_wait:
11708 #endif
11709 #if defined(TARGET_NR_epoll_pwait)
11710     case TARGET_NR_epoll_pwait:
11711 #endif
11712     {
11713         struct target_epoll_event *target_ep;
11714         struct epoll_event *ep;
11715         int epfd = arg1;
11716         int maxevents = arg3;
11717         int timeout = arg4;
11718 
11719         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11720             return -TARGET_EINVAL;
11721         }
11722 
11723         target_ep = lock_user(VERIFY_WRITE, arg2,
11724                               maxevents * sizeof(struct target_epoll_event), 1);
11725         if (!target_ep) {
11726             return -TARGET_EFAULT;
11727         }
11728 
11729         ep = g_try_new(struct epoll_event, maxevents);
11730         if (!ep) {
11731             unlock_user(target_ep, arg2, 0);
11732             return -TARGET_ENOMEM;
11733         }
11734 
11735         switch (num) {
11736 #if defined(TARGET_NR_epoll_pwait)
11737         case TARGET_NR_epoll_pwait:
11738         {
11739             target_sigset_t *target_set;
11740             sigset_t _set, *set = &_set;
11741 
11742             if (arg5) {
11743                 if (arg6 != sizeof(target_sigset_t)) {
11744                     ret = -TARGET_EINVAL;
11745                     break;
11746                 }
11747 
11748                 target_set = lock_user(VERIFY_READ, arg5,
11749                                        sizeof(target_sigset_t), 1);
11750                 if (!target_set) {
11751                     ret = -TARGET_EFAULT;
11752                     break;
11753                 }
11754                 target_to_host_sigset(set, target_set);
11755                 unlock_user(target_set, arg5, 0);
11756             } else {
11757                 set = NULL;
11758             }
11759 
11760             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11761                                              set, SIGSET_T_SIZE));
11762             break;
11763         }
11764 #endif
11765 #if defined(TARGET_NR_epoll_wait)
11766         case TARGET_NR_epoll_wait:
11767             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11768                                              NULL, 0));
11769             break;
11770 #endif
11771         default:
11772             ret = -TARGET_ENOSYS;
11773         }
11774         if (!is_error(ret)) {
11775             int i;
11776             for (i = 0; i < ret; i++) {
11777                 target_ep[i].events = tswap32(ep[i].events);
11778                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11779             }
11780             unlock_user(target_ep, arg2,
11781                         ret * sizeof(struct target_epoll_event));
11782         } else {
11783             unlock_user(target_ep, arg2, 0);
11784         }
11785         g_free(ep);
11786         return ret;
11787     }
11788 #endif
11789 #endif
11790 #ifdef TARGET_NR_prlimit64
11791     case TARGET_NR_prlimit64:
11792     {
11793         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11794         struct target_rlimit64 *target_rnew, *target_rold;
11795         struct host_rlimit64 rnew, rold, *rnewp = 0;
11796         int resource = target_to_host_resource(arg2);
11797         if (arg3) {
11798             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11799                 return -TARGET_EFAULT;
11800             }
11801             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11802             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11803             unlock_user_struct(target_rnew, arg3, 0);
11804             rnewp = &rnew;
11805         }
11806 
11807         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11808         if (!is_error(ret) && arg4) {
11809             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11810                 return -TARGET_EFAULT;
11811             }
11812             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11813             target_rold->rlim_max = tswap64(rold.rlim_max);
11814             unlock_user_struct(target_rold, arg4, 1);
11815         }
11816         return ret;
11817     }
11818 #endif
11819 #ifdef TARGET_NR_gethostname
11820     case TARGET_NR_gethostname:
11821     {
11822         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11823         if (name) {
11824             ret = get_errno(gethostname(name, arg2));
11825             unlock_user(name, arg1, arg2);
11826         } else {
11827             ret = -TARGET_EFAULT;
11828         }
11829         return ret;
11830     }
11831 #endif
11832 #ifdef TARGET_NR_atomic_cmpxchg_32
11833     case TARGET_NR_atomic_cmpxchg_32:
11834     {
11835         /* should use start_exclusive from main.c */
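        /* Elaborating on the note above: the read-compare-write sequence
         * below uses ordinary loads and stores, so another guest thread
         * running on a different host thread could modify the word at arg6
         * between the get_user_u32() and the put_user_u32(), breaking the
         * compare-and-swap atomicity this syscall is meant to provide;
         * start_exclusive() would serialise the CPUs around it.
         */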
11836         abi_ulong mem_value;
11837         if (get_user_u32(mem_value, arg6)) {
11838             target_siginfo_t info;
11839             info.si_signo = SIGSEGV;
11840             info.si_errno = 0;
11841             info.si_code = TARGET_SEGV_MAPERR;
11842             info._sifields._sigfault._addr = arg6;
11843             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11844                          QEMU_SI_FAULT, &info);
11845             ret = 0xdeadbeef;
11846 
11847         }
11848         if (mem_value == arg2)
11849             put_user_u32(arg1, arg6);
11850         return mem_value;
11851     }
11852 #endif
11853 #ifdef TARGET_NR_atomic_barrier
11854     case TARGET_NR_atomic_barrier:
11855         /* Like the kernel implementation and the
11856            qemu arm barrier, no-op this? */
11857         return 0;
11858 #endif
11859 
11860 #ifdef TARGET_NR_timer_create
11861     case TARGET_NR_timer_create:
11862     {
11863         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
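        /* Note on the returned id, derived from the code below: the value
         * written to *timerid is the index into the host g_posix_timers[]
         * table tagged with TIMER_MAGIC; get_timer_id() (used by the other
         * timer_* cases) is assumed to check the tag and recover the index.
         */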
11864 
11865         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11866 
11867         int clkid = arg1;
11868         int timer_index = next_free_host_timer();
11869 
11870         if (timer_index < 0) {
11871             ret = -TARGET_EAGAIN;
11872         } else {
11873             timer_t *phtimer = g_posix_timers  + timer_index;
11874 
11875             if (arg2) {
11876                 phost_sevp = &host_sevp;
11877                 ret = target_to_host_sigevent(phost_sevp, arg2);
11878                 if (ret != 0) {
11879                     return ret;
11880                 }
11881             }
11882 
11883             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11884             if (ret) {
11885                 phtimer = NULL;
11886             } else {
11887                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11888                     return -TARGET_EFAULT;
11889                 }
11890             }
11891         }
11892         return ret;
11893     }
11894 #endif
11895 
11896 #ifdef TARGET_NR_timer_settime
11897     case TARGET_NR_timer_settime:
11898     {
11899         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11900          * struct itimerspec * old_value */
11901         target_timer_t timerid = get_timer_id(arg1);
11902 
11903         if (timerid < 0) {
11904             ret = timerid;
11905         } else if (arg3 == 0) {
11906             ret = -TARGET_EINVAL;
11907         } else {
11908             timer_t htimer = g_posix_timers[timerid];
11909             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11910 
11911             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11912                 return -TARGET_EFAULT;
11913             }
11914             ret = get_errno(
11915                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11916             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11917                 return -TARGET_EFAULT;
11918             }
11919         }
11920         return ret;
11921     }
11922 #endif
11923 
11924 #ifdef TARGET_NR_timer_gettime
11925     case TARGET_NR_timer_gettime:
11926     {
11927         /* args: timer_t timerid, struct itimerspec *curr_value */
11928         target_timer_t timerid = get_timer_id(arg1);
11929 
11930         if (timerid < 0) {
11931             ret = timerid;
11932         } else if (!arg2) {
11933             ret = -TARGET_EFAULT;
11934         } else {
11935             timer_t htimer = g_posix_timers[timerid];
11936             struct itimerspec hspec;
11937             ret = get_errno(timer_gettime(htimer, &hspec));
11938 
11939             if (host_to_target_itimerspec(arg2, &hspec)) {
11940                 ret = -TARGET_EFAULT;
11941             }
11942         }
11943         return ret;
11944     }
11945 #endif
11946 
11947 #ifdef TARGET_NR_timer_getoverrun
11948     case TARGET_NR_timer_getoverrun:
11949     {
11950         /* args: timer_t timerid */
11951         target_timer_t timerid = get_timer_id(arg1);
11952 
11953         if (timerid < 0) {
11954             ret = timerid;
11955         } else {
11956             timer_t htimer = g_posix_timers[timerid];
11957             ret = get_errno(timer_getoverrun(htimer));
11958         }
11959         return ret;
11960     }
11961 #endif
11962 
11963 #ifdef TARGET_NR_timer_delete
11964     case TARGET_NR_timer_delete:
11965     {
11966         /* args: timer_t timerid */
11967         target_timer_t timerid = get_timer_id(arg1);
11968 
11969         if (timerid < 0) {
11970             ret = timerid;
11971         } else {
11972             timer_t htimer = g_posix_timers[timerid];
11973             ret = get_errno(timer_delete(htimer));
11974             g_posix_timers[timerid] = 0;
11975         }
11976         return ret;
11977     }
11978 #endif
11979 
11980 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11981     case TARGET_NR_timerfd_create:
11982         return get_errno(timerfd_create(arg1,
11983                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11984 #endif
11985 
11986 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11987     case TARGET_NR_timerfd_gettime:
11988         {
11989             struct itimerspec its_curr;
11990 
11991             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11992 
11993             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11994                 return -TARGET_EFAULT;
11995             }
11996         }
11997         return ret;
11998 #endif
11999 
12000 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12001     case TARGET_NR_timerfd_settime:
12002         {
12003             struct itimerspec its_new, its_old, *p_new;
12004 
12005             if (arg3) {
12006                 if (target_to_host_itimerspec(&its_new, arg3)) {
12007                     return -TARGET_EFAULT;
12008                 }
12009                 p_new = &its_new;
12010             } else {
12011                 p_new = NULL;
12012             }
12013 
12014             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12015 
12016             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12017                 return -TARGET_EFAULT;
12018             }
12019         }
12020         return ret;
12021 #endif
12022 
12023 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12024     case TARGET_NR_ioprio_get:
12025         return get_errno(ioprio_get(arg1, arg2));
12026 #endif
12027 
12028 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12029     case TARGET_NR_ioprio_set:
12030         return get_errno(ioprio_set(arg1, arg2, arg3));
12031 #endif
12032 
12033 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12034     case TARGET_NR_setns:
12035         return get_errno(setns(arg1, arg2));
12036 #endif
12037 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12038     case TARGET_NR_unshare:
12039         return get_errno(unshare(arg1));
12040 #endif
12041 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12042     case TARGET_NR_kcmp:
12043         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12044 #endif
12045 #ifdef TARGET_NR_swapcontext
12046     case TARGET_NR_swapcontext:
12047         /* PowerPC specific.  */
12048         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12049 #endif
12050 #ifdef TARGET_NR_memfd_create
12051     case TARGET_NR_memfd_create:
12052         p = lock_user_string(arg1);
12053         if (!p) {
12054             return -TARGET_EFAULT;
12055         }
12056         ret = get_errno(memfd_create(p, arg2));
12057         fd_trans_unregister(ret);
12058         unlock_user(p, arg1, 0);
12059         return ret;
12060 #endif
12061 
12062     default:
12063         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12064         return -TARGET_ENOSYS;
12065     }
12066     return ret;
12067 }
12068 
12069 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12070                     abi_long arg2, abi_long arg3, abi_long arg4,
12071                     abi_long arg5, abi_long arg6, abi_long arg7,
12072                     abi_long arg8)
12073 {
12074     CPUState *cpu = env_cpu(cpu_env);
12075     abi_long ret;
12076 
12077 #ifdef DEBUG_ERESTARTSYS
12078     /* Debug-only code for exercising the syscall-restart code paths
12079      * in the per-architecture cpu main loops: restart every syscall
12080      * the guest makes once before letting it through.
12081      */
12082     {
12083         static bool flag;
12084         flag = !flag;
12085         if (flag) {
12086             return -TARGET_ERESTARTSYS;
12087         }
12088     }
12089 #endif
12090 
12091     record_syscall_start(cpu, num, arg1,
12092                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12093 
12094     if (unlikely(do_strace)) {
12095         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12096         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12097                           arg5, arg6, arg7, arg8);
12098         print_syscall_ret(num, ret);
12099     } else {
12100         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12101                           arg5, arg6, arg7, arg8);
12102     }
12103 
12104     record_syscall_return(cpu, num, ret);
12105     return ret;
12106 }
12107