xref: /qemu/linux-user/syscall.c (revision a8d25326)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #ifdef CONFIG_TIMERFD
59 #include <sys/timerfd.h>
60 #endif
61 #ifdef CONFIG_EVENTFD
62 #include <sys/eventfd.h>
63 #endif
64 #ifdef CONFIG_EPOLL
65 #include <sys/epoll.h>
66 #endif
67 #ifdef CONFIG_ATTR
68 #include "qemu/xattr.h"
69 #endif
70 #ifdef CONFIG_SENDFILE
71 #include <sys/sendfile.h>
72 #endif
73 
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
80 
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/cdrom.h>
84 #include <linux/hdreg.h>
85 #include <linux/soundcard.h>
86 #include <linux/kd.h>
87 #include <linux/mtio.h>
88 #include <linux/fs.h>
89 #if defined(CONFIG_FIEMAP)
90 #include <linux/fiemap.h>
91 #endif
92 #include <linux/fb.h>
93 #if defined(CONFIG_USBFS)
94 #include <linux/usbdevice_fs.h>
95 #include <linux/usb/ch9.h>
96 #endif
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include "linux_loop.h"
106 #include "uname.h"
107 
108 #include "qemu.h"
109 #include "qemu/guest-random.h"
110 #include "qapi/error.h"
111 #include "fd-trans.h"
112 
113 #ifndef CLONE_IO
114 #define CLONE_IO                0x80000000      /* Clone io context */
115 #endif
116 
117 /* We can't directly call the host clone syscall, because this will
118  * badly confuse libc (breaking mutexes, for example). So we must
119  * divide clone flags into:
120  *  * flag combinations that look like pthread_create()
121  *  * flag combinations that look like fork()
122  *  * flags we can implement within QEMU itself
123  *  * flags we can't support and will return an error for
124  */
125 /* For thread creation, all these flags must be present; for
126  * fork, none must be present.
127  */
128 #define CLONE_THREAD_FLAGS                              \
129     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
130      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
131 
132 /* These flags are ignored:
133  * CLONE_DETACHED is now ignored by the kernel;
134  * CLONE_IO is just an optimisation hint to the I/O scheduler
135  */
136 #define CLONE_IGNORED_FLAGS                     \
137     (CLONE_DETACHED | CLONE_IO)
138 
139 /* Flags for fork which we can implement within QEMU itself */
140 #define CLONE_OPTIONAL_FORK_FLAGS               \
141     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
142      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
143 
144 /* Flags for thread creation which we can implement within QEMU itself */
145 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
146     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
147      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
148 
149 #define CLONE_INVALID_FORK_FLAGS                                        \
150     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
151 
152 #define CLONE_INVALID_THREAD_FLAGS                                      \
153     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
154        CLONE_IGNORED_FLAGS))
155 
156 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
157  * have almost all been allocated. We cannot support any of
158  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
159  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
160  * The checks against the invalid thread masks above will catch these.
161  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
162  */
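/* For illustration: glibc's NPTL pthread_create() typically calls clone()
 * with
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * which contains every bit of CLONE_THREAD_FLAGS and nothing in
 * CLONE_INVALID_THREAD_FLAGS, so the checks above classify it as thread
 * creation.  A fork()-style clone() passes only SIGCHLD in the CSIGNAL
 * bits, so none of CLONE_THREAD_FLAGS is present and it is treated as a
 * fork.
 */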
163 
164 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
165  * once. This exercises the codepaths for restart.
166  */
167 //#define DEBUG_ERESTARTSYS
168 
169 //#include <linux/msdos_fs.h>
170 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
171 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
172 
173 #undef _syscall0
174 #undef _syscall1
175 #undef _syscall2
176 #undef _syscall3
177 #undef _syscall4
178 #undef _syscall5
179 #undef _syscall6
180 
181 #define _syscall0(type,name)		\
182 static type name (void)			\
183 {					\
184 	return syscall(__NR_##name);	\
185 }
186 
187 #define _syscall1(type,name,type1,arg1)		\
188 static type name (type1 arg1)			\
189 {						\
190 	return syscall(__NR_##name, arg1);	\
191 }
192 
193 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
194 static type name (type1 arg1,type2 arg2)		\
195 {							\
196 	return syscall(__NR_##name, arg1, arg2);	\
197 }
198 
199 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
200 static type name (type1 arg1,type2 arg2,type3 arg3)		\
201 {								\
202 	return syscall(__NR_##name, arg1, arg2, arg3);		\
203 }
204 
205 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
206 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
207 {										\
208 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
209 }
210 
211 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
212 		  type5,arg5)							\
213 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
214 {										\
215 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
216 }
217 
218 
219 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
220 		  type5,arg5,type6,arg6)					\
221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
222                   type6 arg6)							\
223 {										\
224 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
225 }
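/* As a concrete example, the later instantiation
 *
 *     _syscall1(int,exit_group,int,error_code)
 *
 * expands to
 *
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 *
 * i.e. a local wrapper that issues the raw host syscall directly rather
 * than going through any libc wrapper of the same name.
 */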
226 
227 
228 #define __NR_sys_uname __NR_uname
229 #define __NR_sys_getcwd1 __NR_getcwd
230 #define __NR_sys_getdents __NR_getdents
231 #define __NR_sys_getdents64 __NR_getdents64
232 #define __NR_sys_getpriority __NR_getpriority
233 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
234 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
235 #define __NR_sys_syslog __NR_syslog
236 #define __NR_sys_futex __NR_futex
237 #define __NR_sys_inotify_init __NR_inotify_init
238 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
239 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
240 
241 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
242 #define __NR__llseek __NR_lseek
243 #endif
244 
245 /* Newer kernel ports have llseek() instead of _llseek() */
246 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
247 #define TARGET_NR__llseek TARGET_NR_llseek
248 #endif
249 
250 #define __NR_sys_gettid __NR_gettid
251 _syscall0(int, sys_gettid)
252 
253 /* For the 64-bit guest on 32-bit host case we must emulate
254  * getdents using getdents64, because otherwise the host
255  * might hand us back more dirent records than we can fit
256  * into the guest buffer after structure format conversion.
257  * Otherwise we implement the guest getdents using the host getdents, if available.
258  */
259 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
260 #define EMULATE_GETDENTS_WITH_GETDENTS
261 #endif
262 
263 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
264 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
265 #endif
266 #if (defined(TARGET_NR_getdents) && \
267       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
268     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
270 #endif
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
273           loff_t *, res, uint, wh);
274 #endif
275 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
276 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
277           siginfo_t *, uinfo)
278 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
279 #ifdef __NR_exit_group
280 _syscall1(int,exit_group,int,error_code)
281 #endif
282 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
283 _syscall1(int,set_tid_address,int *,tidptr)
284 #endif
285 #if defined(TARGET_NR_futex) && defined(__NR_futex)
286 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
287           const struct timespec *,timeout,int *,uaddr2,int,val3)
288 #endif
289 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
290 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
291           unsigned long *, user_mask_ptr);
292 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
293 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
294           unsigned long *, user_mask_ptr);
295 #define __NR_sys_getcpu __NR_getcpu
296 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
297 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
298           void *, arg);
299 _syscall2(int, capget, struct __user_cap_header_struct *, header,
300           struct __user_cap_data_struct *, data);
301 _syscall2(int, capset, struct __user_cap_header_struct *, header,
302           struct __user_cap_data_struct *, data);
303 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
304 _syscall2(int, ioprio_get, int, which, int, who)
305 #endif
306 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
307 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
308 #endif
309 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
310 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
311 #endif
312 
313 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
314 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
315           unsigned long, idx1, unsigned long, idx2)
316 #endif
317 
318 static bitmask_transtbl fcntl_flags_tbl[] = {
319   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
320   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
321   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
322   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
323   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
324   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
325   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
326   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
327   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
328   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
329   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
330   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
331   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
332 #if defined(O_DIRECT)
333   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
334 #endif
335 #if defined(O_NOATIME)
336   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
337 #endif
338 #if defined(O_CLOEXEC)
339   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
340 #endif
341 #if defined(O_PATH)
342   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
343 #endif
344 #if defined(O_TMPFILE)
345   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
346 #endif
347   /* Don't terminate the list prematurely on 64-bit host+guest.  */
348 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
349   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
350 #endif
351   { 0, 0, 0, 0 }
352 };
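/* Each row above pairs a (target mask, target bits) match with the
 * corresponding (host mask, host bits) to set, ending in an all-zero
 * sentinel.  A minimal sketch of the target-to-host direction (the field
 * names below are placeholders for the four columns, not necessarily the
 * real struct member names):
 *
 *     unsigned int host_flags = 0;
 *     for (const bitmask_transtbl *e = fcntl_flags_tbl;
 *          e->t_mask || e->h_mask; e++) {
 *         if ((target_flags & e->t_mask) == e->t_bits) {
 *             host_flags |= e->h_bits;
 *         }
 *     }
 *
 * so a guest TARGET_O_RDWR | TARGET_O_CREAT becomes the host's
 * O_RDWR | O_CREAT even when the numeric flag values differ.
 */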
353 
354 static int sys_getcwd1(char *buf, size_t size)
355 {
356   if (getcwd(buf, size) == NULL) {
357       /* getcwd() sets errno */
358       return (-1);
359   }
360   return strlen(buf)+1;
361 }
362 
363 #ifdef TARGET_NR_utimensat
364 #if defined(__NR_utimensat)
365 #define __NR_sys_utimensat __NR_utimensat
366 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
367           const struct timespec *,tsp,int,flags)
368 #else
369 static int sys_utimensat(int dirfd, const char *pathname,
370                          const struct timespec times[2], int flags)
371 {
372     errno = ENOSYS;
373     return -1;
374 }
375 #endif
376 #endif /* TARGET_NR_utimensat */
377 
378 #ifdef TARGET_NR_renameat2
379 #if defined(__NR_renameat2)
380 #define __NR_sys_renameat2 __NR_renameat2
381 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
382           const char *, new, unsigned int, flags)
383 #else
384 static int sys_renameat2(int oldfd, const char *old,
385                          int newfd, const char *new, int flags)
386 {
387     if (flags == 0) {
388         return renameat(oldfd, old, newfd, new);
389     }
390     errno = ENOSYS;
391     return -1;
392 }
393 #endif
394 #endif /* TARGET_NR_renameat2 */
395 
396 #ifdef CONFIG_INOTIFY
397 #include <sys/inotify.h>
398 
399 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
400 static int sys_inotify_init(void)
401 {
402   return (inotify_init());
403 }
404 #endif
405 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
406 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
407 {
408   return (inotify_add_watch(fd, pathname, mask));
409 }
410 #endif
411 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
412 static int sys_inotify_rm_watch(int fd, int32_t wd)
413 {
414   return (inotify_rm_watch(fd, wd));
415 }
416 #endif
417 #ifdef CONFIG_INOTIFY1
418 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
419 static int sys_inotify_init1(int flags)
420 {
421   return (inotify_init1(flags));
422 }
423 #endif
424 #endif
425 #else
426 /* Userspace can usually survive at runtime without inotify */
427 #undef TARGET_NR_inotify_init
428 #undef TARGET_NR_inotify_init1
429 #undef TARGET_NR_inotify_add_watch
430 #undef TARGET_NR_inotify_rm_watch
431 #endif /* CONFIG_INOTIFY  */
432 
433 #if defined(TARGET_NR_prlimit64)
434 #ifndef __NR_prlimit64
435 # define __NR_prlimit64 -1
436 #endif
437 #define __NR_sys_prlimit64 __NR_prlimit64
438 /* The glibc rlimit structure may not be that used by the underlying syscall */
439 struct host_rlimit64 {
440     uint64_t rlim_cur;
441     uint64_t rlim_max;
442 };
443 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
444           const struct host_rlimit64 *, new_limit,
445           struct host_rlimit64 *, old_limit)
446 #endif
447 
448 
449 #if defined(TARGET_NR_timer_create)
450 /* Maximum of 32 active POSIX timers allowed at any one time. */
451 static timer_t g_posix_timers[32] = { 0, } ;
452 
453 static inline int next_free_host_timer(void)
454 {
455     int k ;
456     /* FIXME: Does finding the next free slot require a lock? */
457     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
458         if (g_posix_timers[k] == 0) {
459             g_posix_timers[k] = (timer_t) 1;
460             return k;
461         }
462     }
463     return -1;
464 }
465 #endif
466 
467 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
468 #ifdef TARGET_ARM
469 static inline int regpairs_aligned(void *cpu_env, int num)
470 {
471     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
472 }
473 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
474 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
475 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
476 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
477  * register pairs, which translates to the same as ARM/MIPS, because we start
478  * with r3 as arg1 */
479 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
480 #elif defined(TARGET_SH4)
481 /* SH4 doesn't align register pairs, except for p{read,write}64 */
482 static inline int regpairs_aligned(void *cpu_env, int num)
483 {
484     switch (num) {
485     case TARGET_NR_pread64:
486     case TARGET_NR_pwrite64:
487         return 1;
488 
489     default:
490         return 0;
491     }
492 }
493 #elif defined(TARGET_XTENSA)
494 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
495 #else
496 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
497 #endif
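/* When regpairs_aligned() returns 1, the guest ABI passes 64-bit syscall
 * arguments in an aligned register pair, inserting a padding register
 * first when necessary.  A sketch of what the 32-bit pread64/pwrite64
 * handlers consequently have to do with the incoming argument registers
 * (illustrative, not the literal code):
 *
 *     if (regpairs_aligned(cpu_env, num)) {
 *         arg4 = arg5;        // skip the padding register
 *         arg5 = arg6;
 *     }
 *     // the 64-bit offset is then reassembled from (arg4, arg5)
 *     // in guest word order
 */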
498 
499 #define ERRNO_TABLE_SIZE 1200
500 
501 /* target_to_host_errno_table[] is initialized from
502  * host_to_target_errno_table[] in syscall_init(). */
503 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
504 };
505 
506 /*
507  * This list is the union of errno values overridden in asm-<arch>/errno.h
508  * minus the errnos that are not actually generic to all archs.
509  */
510 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
511     [EAGAIN]		= TARGET_EAGAIN,
512     [EIDRM]		= TARGET_EIDRM,
513     [ECHRNG]		= TARGET_ECHRNG,
514     [EL2NSYNC]		= TARGET_EL2NSYNC,
515     [EL3HLT]		= TARGET_EL3HLT,
516     [EL3RST]		= TARGET_EL3RST,
517     [ELNRNG]		= TARGET_ELNRNG,
518     [EUNATCH]		= TARGET_EUNATCH,
519     [ENOCSI]		= TARGET_ENOCSI,
520     [EL2HLT]		= TARGET_EL2HLT,
521     [EDEADLK]		= TARGET_EDEADLK,
522     [ENOLCK]		= TARGET_ENOLCK,
523     [EBADE]		= TARGET_EBADE,
524     [EBADR]		= TARGET_EBADR,
525     [EXFULL]		= TARGET_EXFULL,
526     [ENOANO]		= TARGET_ENOANO,
527     [EBADRQC]		= TARGET_EBADRQC,
528     [EBADSLT]		= TARGET_EBADSLT,
529     [EBFONT]		= TARGET_EBFONT,
530     [ENOSTR]		= TARGET_ENOSTR,
531     [ENODATA]		= TARGET_ENODATA,
532     [ETIME]		= TARGET_ETIME,
533     [ENOSR]		= TARGET_ENOSR,
534     [ENONET]		= TARGET_ENONET,
535     [ENOPKG]		= TARGET_ENOPKG,
536     [EREMOTE]		= TARGET_EREMOTE,
537     [ENOLINK]		= TARGET_ENOLINK,
538     [EADV]		= TARGET_EADV,
539     [ESRMNT]		= TARGET_ESRMNT,
540     [ECOMM]		= TARGET_ECOMM,
541     [EPROTO]		= TARGET_EPROTO,
542     [EDOTDOT]		= TARGET_EDOTDOT,
543     [EMULTIHOP]		= TARGET_EMULTIHOP,
544     [EBADMSG]		= TARGET_EBADMSG,
545     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
546     [EOVERFLOW]		= TARGET_EOVERFLOW,
547     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
548     [EBADFD]		= TARGET_EBADFD,
549     [EREMCHG]		= TARGET_EREMCHG,
550     [ELIBACC]		= TARGET_ELIBACC,
551     [ELIBBAD]		= TARGET_ELIBBAD,
552     [ELIBSCN]		= TARGET_ELIBSCN,
553     [ELIBMAX]		= TARGET_ELIBMAX,
554     [ELIBEXEC]		= TARGET_ELIBEXEC,
555     [EILSEQ]		= TARGET_EILSEQ,
556     [ENOSYS]		= TARGET_ENOSYS,
557     [ELOOP]		= TARGET_ELOOP,
558     [ERESTART]		= TARGET_ERESTART,
559     [ESTRPIPE]		= TARGET_ESTRPIPE,
560     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
561     [EUSERS]		= TARGET_EUSERS,
562     [ENOTSOCK]		= TARGET_ENOTSOCK,
563     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
564     [EMSGSIZE]		= TARGET_EMSGSIZE,
565     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
566     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
567     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
568     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
569     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
570     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
571     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
572     [EADDRINUSE]	= TARGET_EADDRINUSE,
573     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
574     [ENETDOWN]		= TARGET_ENETDOWN,
575     [ENETUNREACH]	= TARGET_ENETUNREACH,
576     [ENETRESET]		= TARGET_ENETRESET,
577     [ECONNABORTED]	= TARGET_ECONNABORTED,
578     [ECONNRESET]	= TARGET_ECONNRESET,
579     [ENOBUFS]		= TARGET_ENOBUFS,
580     [EISCONN]		= TARGET_EISCONN,
581     [ENOTCONN]		= TARGET_ENOTCONN,
582     [EUCLEAN]		= TARGET_EUCLEAN,
583     [ENOTNAM]		= TARGET_ENOTNAM,
584     [ENAVAIL]		= TARGET_ENAVAIL,
585     [EISNAM]		= TARGET_EISNAM,
586     [EREMOTEIO]		= TARGET_EREMOTEIO,
587     [EDQUOT]            = TARGET_EDQUOT,
588     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
589     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
590     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
591     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
592     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
593     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
594     [EALREADY]		= TARGET_EALREADY,
595     [EINPROGRESS]	= TARGET_EINPROGRESS,
596     [ESTALE]		= TARGET_ESTALE,
597     [ECANCELED]		= TARGET_ECANCELED,
598     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
599     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
600 #ifdef ENOKEY
601     [ENOKEY]		= TARGET_ENOKEY,
602 #endif
603 #ifdef EKEYEXPIRED
604     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
605 #endif
606 #ifdef EKEYREVOKED
607     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
608 #endif
609 #ifdef EKEYREJECTED
610     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
611 #endif
612 #ifdef EOWNERDEAD
613     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
614 #endif
615 #ifdef ENOTRECOVERABLE
616     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
617 #endif
618 #ifdef ENOMSG
619     [ENOMSG]            = TARGET_ENOMSG,
620 #endif
621 #ifdef ERFKILL
622     [ERFKILL]           = TARGET_ERFKILL,
623 #endif
624 #ifdef EHWPOISON
625     [EHWPOISON]         = TARGET_EHWPOISON,
626 #endif
627 };
628 
629 static inline int host_to_target_errno(int err)
630 {
631     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
632         host_to_target_errno_table[err]) {
633         return host_to_target_errno_table[err];
634     }
635     return err;
636 }
637 
638 static inline int target_to_host_errno(int err)
639 {
640     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641         target_to_host_errno_table[err]) {
642         return target_to_host_errno_table[err];
643     }
644     return err;
645 }
646 
647 static inline abi_long get_errno(abi_long ret)
648 {
649     if (ret == -1)
650         return -host_to_target_errno(errno);
651     else
652         return ret;
653 }
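/* Typical use, as seen throughout this file: wrap the host call so that
 * failure is turned into a negated target errno, e.g.
 *
 *     abi_long ret = get_errno(chdir(p));
 *
 * On failure chdir() returns -1 with errno set, so ret becomes
 * -TARGET_Exxx; on success ret is just the host return value.
 */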
654 
655 const char *target_strerror(int err)
656 {
657     if (err == TARGET_ERESTARTSYS) {
658         return "To be restarted";
659     }
660     if (err == TARGET_QEMU_ESIGRETURN) {
661         return "Successful exit from sigreturn";
662     }
663 
664     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
665         return NULL;
666     }
667     return strerror(target_to_host_errno(err));
668 }
669 
670 #define safe_syscall0(type, name) \
671 static type safe_##name(void) \
672 { \
673     return safe_syscall(__NR_##name); \
674 }
675 
676 #define safe_syscall1(type, name, type1, arg1) \
677 static type safe_##name(type1 arg1) \
678 { \
679     return safe_syscall(__NR_##name, arg1); \
680 }
681 
682 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
683 static type safe_##name(type1 arg1, type2 arg2) \
684 { \
685     return safe_syscall(__NR_##name, arg1, arg2); \
686 }
687 
688 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
689 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
690 { \
691     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
692 }
693 
694 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
695     type4, arg4) \
696 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
697 { \
698     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
699 }
700 
701 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
702     type4, arg4, type5, arg5) \
703 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
704     type5 arg5) \
705 { \
706     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
707 }
708 
709 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
710     type4, arg4, type5, arg5, type6, arg6) \
711 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
712     type5 arg5, type6 arg6) \
713 { \
714     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
715 }
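/* For example, the instantiation below,
 *
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * expands to
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * Callers typically combine the wrapper with get_errno(), e.g.
 * ret = get_errno(safe_read(arg1, p, arg3)), so blocking host calls can
 * be interrupted or restarted safely and errors become target errnos.
 */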
716 
717 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
718 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
719 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
720               int, flags, mode_t, mode)
721 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
722               struct rusage *, rusage)
723 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
724               int, options, struct rusage *, rusage)
725 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
726 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
727               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
728 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
729               struct timespec *, tsp, const sigset_t *, sigmask,
730               size_t, sigsetsize)
731 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
732               int, maxevents, int, timeout, const sigset_t *, sigmask,
733               size_t, sigsetsize)
734 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
735               const struct timespec *,timeout,int *,uaddr2,int,val3)
736 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
737 safe_syscall2(int, kill, pid_t, pid, int, sig)
738 safe_syscall2(int, tkill, int, tid, int, sig)
739 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
740 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
741 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
742 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
743               unsigned long, pos_l, unsigned long, pos_h)
744 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
745               unsigned long, pos_l, unsigned long, pos_h)
746 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
747               socklen_t, addrlen)
748 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
749               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
750 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
751               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
752 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
753 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
754 safe_syscall2(int, flock, int, fd, int, operation)
755 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
756               const struct timespec *, uts, size_t, sigsetsize)
757 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
758               int, flags)
759 safe_syscall2(int, nanosleep, const struct timespec *, req,
760               struct timespec *, rem)
761 #ifdef TARGET_NR_clock_nanosleep
762 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
763               const struct timespec *, req, struct timespec *, rem)
764 #endif
765 #if !defined(__NR_msgsnd) || !defined(__NR_msgrcv) || !defined(__NR_semtimedop)
766 /* This host kernel architecture uses a single ipc syscall; fake up
767  * wrappers for the sub-operations to hide this implementation detail.
768  * Annoyingly we can't include linux/ipc.h to get the constant definitions
769  * for the call parameter because some structs in there conflict with the
770  * sys/ipc.h ones. So we just define them here, and rely on them being
771  * the same for all host architectures.
772  */
773 #define Q_SEMTIMEDOP 4
774 #define Q_MSGSND 11
775 #define Q_MSGRCV 12
776 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
777 
778 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
779               void *, ptr, long, fifth)
780 #endif
781 #ifdef __NR_msgsnd
782 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
783               int, flags)
784 #else
785 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
786 {
787     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
788 }
789 #endif
790 #ifdef __NR_msgrcv
791 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
792               long, msgtype, int, flags)
793 #else
794 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
795 {
796     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
797 }
798 #endif
799 #ifdef __NR_semtimedop
800 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
801               unsigned, nsops, const struct timespec *, timeout)
802 #else
803 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
804                            const struct timespec *timeout)
805 {
806     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
807                     (long)timeout);
808 }
809 #endif
810 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
811 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
812               size_t, len, unsigned, prio, const struct timespec *, timeout)
813 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
814               size_t, len, unsigned *, prio, const struct timespec *, timeout)
815 #endif
816 /* We do ioctl like this rather than via safe_syscall3 to preserve the
817  * "third argument might be integer or pointer or not present" behaviour of
818  * the libc function.
819  */
820 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
821 /* Similarly for fcntl. Note that callers must always:
822  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
823  *  use the flock64 struct rather than unsuffixed flock
824  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
825  */
826 #ifdef __NR_fcntl64
827 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
828 #else
829 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
830 #endif
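/* Illustration of the rule above: lock queries always go through the
 * 64-bit command and structure, whatever the host word size, e.g.
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * rather than F_GETLK with a plain struct flock.
 */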
831 
832 static inline int host_to_target_sock_type(int host_type)
833 {
834     int target_type;
835 
836     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
837     case SOCK_DGRAM:
838         target_type = TARGET_SOCK_DGRAM;
839         break;
840     case SOCK_STREAM:
841         target_type = TARGET_SOCK_STREAM;
842         break;
843     default:
844         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
845         break;
846     }
847 
848 #if defined(SOCK_CLOEXEC)
849     if (host_type & SOCK_CLOEXEC) {
850         target_type |= TARGET_SOCK_CLOEXEC;
851     }
852 #endif
853 
854 #if defined(SOCK_NONBLOCK)
855     if (host_type & SOCK_NONBLOCK) {
856         target_type |= TARGET_SOCK_NONBLOCK;
857     }
858 #endif
859 
860     return target_type;
861 }
862 
863 static abi_ulong target_brk;
864 static abi_ulong target_original_brk;
865 static abi_ulong brk_page;
866 
867 void target_set_brk(abi_ulong new_brk)
868 {
869     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
870     brk_page = HOST_PAGE_ALIGN(target_brk);
871 }
872 
873 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
874 #define DEBUGF_BRK(message, args...)
875 
876 /* do_brk() must return target values and target errnos. */
877 abi_long do_brk(abi_ulong new_brk)
878 {
879     abi_long mapped_addr;
880     abi_ulong new_alloc_size;
881 
882     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
883 
884     if (!new_brk) {
885         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
886         return target_brk;
887     }
888     if (new_brk < target_original_brk) {
889         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
890                    target_brk);
891         return target_brk;
892     }
893 
894     /* If the new brk is less than the highest page reserved to the
895      * target heap allocation, set it and we're almost done...  */
896     if (new_brk <= brk_page) {
897         /* Heap contents are initialized to zero, as for anonymous
898          * mapped pages.  */
899         if (new_brk > target_brk) {
900             memset(g2h(target_brk), 0, new_brk - target_brk);
901         }
902 	target_brk = new_brk;
903         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
904 	return target_brk;
905     }
906 
907     /* We need to allocate more memory after the brk... Note that
908      * we don't use MAP_FIXED because that will map over the top of
909      * any existing mapping (like the one with the host libc or qemu
910      * itself); instead we treat "mapped but at wrong address" as
911      * a failure and unmap again.
912      */
913     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
914     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
915                                         PROT_READ|PROT_WRITE,
916                                         MAP_ANON|MAP_PRIVATE, 0, 0));
917 
918     if (mapped_addr == brk_page) {
919         /* Heap contents are initialized to zero, as for anonymous
920          * mapped pages.  Technically the new pages are already
921          * initialized to zero since they *are* anonymous mapped
922          * pages, however we have to take care with the contents that
923          * come from the remaining part of the previous page: it may
924          * contain garbage data due to previous heap usage (grown
925          * then shrunk).  */
926         memset(g2h(target_brk), 0, brk_page - target_brk);
927 
928         target_brk = new_brk;
929         brk_page = HOST_PAGE_ALIGN(target_brk);
930         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
931             target_brk);
932         return target_brk;
933     } else if (mapped_addr != -1) {
934         /* Mapped but at wrong address, meaning there wasn't actually
935          * enough space for this brk.
936          */
937         target_munmap(mapped_addr, new_alloc_size);
938         mapped_addr = -1;
939         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
940     }
941     else {
942         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
943     }
944 
945 #if defined(TARGET_ALPHA)
946     /* We (partially) emulate OSF/1 on Alpha, which requires we
947        return a proper errno, not an unchanged brk value.  */
948     return -TARGET_ENOMEM;
949 #endif
950     /* For everything else, return the previous break. */
951     return target_brk;
952 }
953 
954 static inline abi_long copy_from_user_fdset(fd_set *fds,
955                                             abi_ulong target_fds_addr,
956                                             int n)
957 {
958     int i, nw, j, k;
959     abi_ulong b, *target_fds;
960 
961     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
962     if (!(target_fds = lock_user(VERIFY_READ,
963                                  target_fds_addr,
964                                  sizeof(abi_ulong) * nw,
965                                  1)))
966         return -TARGET_EFAULT;
967 
968     FD_ZERO(fds);
969     k = 0;
970     for (i = 0; i < nw; i++) {
971         /* grab the abi_ulong */
972         __get_user(b, &target_fds[i]);
973         for (j = 0; j < TARGET_ABI_BITS; j++) {
974             /* check the bit inside the abi_ulong */
975             if ((b >> j) & 1)
976                 FD_SET(k, fds);
977             k++;
978         }
979     }
980 
981     unlock_user(target_fds, target_fds_addr, 0);
982 
983     return 0;
984 }
985 
986 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
987                                                  abi_ulong target_fds_addr,
988                                                  int n)
989 {
990     if (target_fds_addr) {
991         if (copy_from_user_fdset(fds, target_fds_addr, n))
992             return -TARGET_EFAULT;
993         *fds_ptr = fds;
994     } else {
995         *fds_ptr = NULL;
996     }
997     return 0;
998 }
999 
1000 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1001                                           const fd_set *fds,
1002                                           int n)
1003 {
1004     int i, nw, j, k;
1005     abi_long v;
1006     abi_ulong *target_fds;
1007 
1008     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1009     if (!(target_fds = lock_user(VERIFY_WRITE,
1010                                  target_fds_addr,
1011                                  sizeof(abi_ulong) * nw,
1012                                  0)))
1013         return -TARGET_EFAULT;
1014 
1015     k = 0;
1016     for (i = 0; i < nw; i++) {
1017         v = 0;
1018         for (j = 0; j < TARGET_ABI_BITS; j++) {
1019             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1020             k++;
1021         }
1022         __put_user(v, &target_fds[i]);
1023     }
1024 
1025     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1026 
1027     return 0;
1028 }
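/* Worked example for the two helpers above: with TARGET_ABI_BITS == 32
 * and n == 40, nw = DIV_ROUND_UP(40, 32) = 2 abi_ulongs are copied.
 * Guest descriptor 35 lives at word i = 1, bit j = 3 (k = 35), so
 * copy_from_user_fdset() does FD_SET(35, fds) when (b >> 3) & 1 is set
 * in the second word, and copy_to_user_fdset() writes it back the same
 * way.
 */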
1029 
1030 #if defined(__alpha__)
1031 #define HOST_HZ 1024
1032 #else
1033 #define HOST_HZ 100
1034 #endif
1035 
1036 static inline abi_long host_to_target_clock_t(long ticks)
1037 {
1038 #if HOST_HZ == TARGET_HZ
1039     return ticks;
1040 #else
1041     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1042 #endif
1043 }
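/* Example of the scaling above: on an Alpha host (HOST_HZ == 1024) with
 * a 100 Hz target, 2048 host ticks convert to 2048 * 100 / 1024 = 200
 * target clock ticks; when HOST_HZ == TARGET_HZ the value passes through
 * unchanged.
 */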
1044 
1045 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1046                                              const struct rusage *rusage)
1047 {
1048     struct target_rusage *target_rusage;
1049 
1050     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1051         return -TARGET_EFAULT;
1052     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1053     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1054     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1055     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1056     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1057     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1058     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1059     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1060     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1061     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1062     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1063     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1064     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1065     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1066     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1067     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1068     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1069     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1070     unlock_user_struct(target_rusage, target_addr, 1);
1071 
1072     return 0;
1073 }
1074 
1075 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1076 {
1077     abi_ulong target_rlim_swap;
1078     rlim_t result;
1079 
1080     target_rlim_swap = tswapal(target_rlim);
1081     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1082         return RLIM_INFINITY;
1083 
1084     result = target_rlim_swap;
1085     if (target_rlim_swap != (rlim_t)result)
1086         return RLIM_INFINITY;
1087 
1088     return result;
1089 }
1090 
1091 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1092 {
1093     abi_ulong target_rlim_swap;
1094     abi_ulong result;
1095 
1096     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1097         target_rlim_swap = TARGET_RLIM_INFINITY;
1098     else
1099         target_rlim_swap = rlim;
1100     result = tswapal(target_rlim_swap);
1101 
1102     return result;
1103 }
1104 
1105 static inline int target_to_host_resource(int code)
1106 {
1107     switch (code) {
1108     case TARGET_RLIMIT_AS:
1109         return RLIMIT_AS;
1110     case TARGET_RLIMIT_CORE:
1111         return RLIMIT_CORE;
1112     case TARGET_RLIMIT_CPU:
1113         return RLIMIT_CPU;
1114     case TARGET_RLIMIT_DATA:
1115         return RLIMIT_DATA;
1116     case TARGET_RLIMIT_FSIZE:
1117         return RLIMIT_FSIZE;
1118     case TARGET_RLIMIT_LOCKS:
1119         return RLIMIT_LOCKS;
1120     case TARGET_RLIMIT_MEMLOCK:
1121         return RLIMIT_MEMLOCK;
1122     case TARGET_RLIMIT_MSGQUEUE:
1123         return RLIMIT_MSGQUEUE;
1124     case TARGET_RLIMIT_NICE:
1125         return RLIMIT_NICE;
1126     case TARGET_RLIMIT_NOFILE:
1127         return RLIMIT_NOFILE;
1128     case TARGET_RLIMIT_NPROC:
1129         return RLIMIT_NPROC;
1130     case TARGET_RLIMIT_RSS:
1131         return RLIMIT_RSS;
1132     case TARGET_RLIMIT_RTPRIO:
1133         return RLIMIT_RTPRIO;
1134     case TARGET_RLIMIT_SIGPENDING:
1135         return RLIMIT_SIGPENDING;
1136     case TARGET_RLIMIT_STACK:
1137         return RLIMIT_STACK;
1138     default:
1139         return code;
1140     }
1141 }
1142 
1143 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1144                                               abi_ulong target_tv_addr)
1145 {
1146     struct target_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1149         return -TARGET_EFAULT;
1150 
1151     __get_user(tv->tv_sec, &target_tv->tv_sec);
1152     __get_user(tv->tv_usec, &target_tv->tv_usec);
1153 
1154     unlock_user_struct(target_tv, target_tv_addr, 0);
1155 
1156     return 0;
1157 }
1158 
1159 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1160                                             const struct timeval *tv)
1161 {
1162     struct target_timeval *target_tv;
1163 
1164     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1165         return -TARGET_EFAULT;
1166 
1167     __put_user(tv->tv_sec, &target_tv->tv_sec);
1168     __put_user(tv->tv_usec, &target_tv->tv_usec);
1169 
1170     unlock_user_struct(target_tv, target_tv_addr, 1);
1171 
1172     return 0;
1173 }
1174 
1175 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1176                                                abi_ulong target_tz_addr)
1177 {
1178     struct target_timezone *target_tz;
1179 
1180     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1181         return -TARGET_EFAULT;
1182     }
1183 
1184     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1185     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1186 
1187     unlock_user_struct(target_tz, target_tz_addr, 0);
1188 
1189     return 0;
1190 }
1191 
1192 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1193 #include <mqueue.h>
1194 
1195 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1196                                               abi_ulong target_mq_attr_addr)
1197 {
1198     struct target_mq_attr *target_mq_attr;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1201                           target_mq_attr_addr, 1))
1202         return -TARGET_EFAULT;
1203 
1204     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1205     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1206     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1207     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1208 
1209     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1210 
1211     return 0;
1212 }
1213 
1214 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1215                                             const struct mq_attr *attr)
1216 {
1217     struct target_mq_attr *target_mq_attr;
1218 
1219     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1220                           target_mq_attr_addr, 0))
1221         return -TARGET_EFAULT;
1222 
1223     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1224     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1225     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1226     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1227 
1228     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1229 
1230     return 0;
1231 }
1232 #endif
1233 
1234 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1235 /* do_select() must return target values and target errnos. */
1236 static abi_long do_select(int n,
1237                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1238                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1239 {
1240     fd_set rfds, wfds, efds;
1241     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1242     struct timeval tv;
1243     struct timespec ts, *ts_ptr;
1244     abi_long ret;
1245 
1246     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1247     if (ret) {
1248         return ret;
1249     }
1250     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1251     if (ret) {
1252         return ret;
1253     }
1254     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1255     if (ret) {
1256         return ret;
1257     }
1258 
1259     if (target_tv_addr) {
1260         if (copy_from_user_timeval(&tv, target_tv_addr))
1261             return -TARGET_EFAULT;
1262         ts.tv_sec = tv.tv_sec;
1263         ts.tv_nsec = tv.tv_usec * 1000;
1264         ts_ptr = &ts;
1265     } else {
1266         ts_ptr = NULL;
1267     }
1268 
1269     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1270                                   ts_ptr, NULL));
1271 
1272     if (!is_error(ret)) {
1273         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1274             return -TARGET_EFAULT;
1275         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1276             return -TARGET_EFAULT;
1277         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1278             return -TARGET_EFAULT;
1279 
1280         if (target_tv_addr) {
1281             tv.tv_sec = ts.tv_sec;
1282             tv.tv_usec = ts.tv_nsec / 1000;
1283             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1284                 return -TARGET_EFAULT;
1285             }
1286         }
1287     }
1288 
1289     return ret;
1290 }
1291 
1292 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1293 static abi_long do_old_select(abi_ulong arg1)
1294 {
1295     struct target_sel_arg_struct *sel;
1296     abi_ulong inp, outp, exp, tvp;
1297     long nsel;
1298 
1299     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1300         return -TARGET_EFAULT;
1301     }
1302 
1303     nsel = tswapal(sel->n);
1304     inp = tswapal(sel->inp);
1305     outp = tswapal(sel->outp);
1306     exp = tswapal(sel->exp);
1307     tvp = tswapal(sel->tvp);
1308 
1309     unlock_user_struct(sel, arg1, 0);
1310 
1311     return do_select(nsel, inp, outp, exp, tvp);
1312 }
1313 #endif
1314 #endif
1315 
1316 static abi_long do_pipe2(int host_pipe[], int flags)
1317 {
1318 #ifdef CONFIG_PIPE2
1319     return pipe2(host_pipe, flags);
1320 #else
1321     return -ENOSYS;
1322 #endif
1323 }
1324 
1325 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1326                         int flags, int is_pipe2)
1327 {
1328     int host_pipe[2];
1329     abi_long ret;
1330     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1331 
1332     if (is_error(ret))
1333         return get_errno(ret);
1334 
1335     /* Several targets have special calling conventions for the original
1336        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1337     if (!is_pipe2) {
1338 #if defined(TARGET_ALPHA)
1339         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1340         return host_pipe[0];
1341 #elif defined(TARGET_MIPS)
1342         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1343         return host_pipe[0];
1344 #elif defined(TARGET_SH4)
1345         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1346         return host_pipe[0];
1347 #elif defined(TARGET_SPARC)
1348         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1349         return host_pipe[0];
1350 #endif
1351     }
1352 
1353     if (put_user_s32(host_pipe[0], pipedes)
1354         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1355         return -TARGET_EFAULT;
1356     return get_errno(ret);
1357 }
1358 
1359 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1360                                               abi_ulong target_addr,
1361                                               socklen_t len)
1362 {
1363     struct target_ip_mreqn *target_smreqn;
1364 
1365     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1366     if (!target_smreqn)
1367         return -TARGET_EFAULT;
1368     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1369     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1370     if (len == sizeof(struct target_ip_mreqn))
1371         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1372     unlock_user(target_smreqn, target_addr, 0);
1373 
1374     return 0;
1375 }
1376 
1377 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1378                                                abi_ulong target_addr,
1379                                                socklen_t len)
1380 {
1381     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1382     sa_family_t sa_family;
1383     struct target_sockaddr *target_saddr;
1384 
1385     if (fd_trans_target_to_host_addr(fd)) {
1386         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1387     }
1388 
1389     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1390     if (!target_saddr)
1391         return -TARGET_EFAULT;
1392 
1393     sa_family = tswap16(target_saddr->sa_family);
1394 
1395     /* Oops. The caller might send an incomplete sun_path; sun_path
1396      * must be terminated by \0 (see the manual page), but
1397      * unfortunately it is quite common to specify sockaddr_un
1398      * length as "strlen(x->sun_path)" while it should be
1399      * "strlen(...) + 1". We'll fix that here if needed.
1400      * The Linux kernel has a similar feature.
1401      */
1402 
1403     if (sa_family == AF_UNIX) {
1404         if (len < unix_maxlen && len > 0) {
1405             char *cp = (char*)target_saddr;
1406 
1407             if ( cp[len-1] && !cp[len] )
1408                 len++;
1409         }
1410         if (len > unix_maxlen)
1411             len = unix_maxlen;
1412     }
1413 
1414     memcpy(addr, target_saddr, len);
1415     addr->sa_family = sa_family;
1416     if (sa_family == AF_NETLINK) {
1417         struct sockaddr_nl *nladdr;
1418 
1419         nladdr = (struct sockaddr_nl *)addr;
1420         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1421         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1422     } else if (sa_family == AF_PACKET) {
1423 	struct target_sockaddr_ll *lladdr;
1424 
1425 	lladdr = (struct target_sockaddr_ll *)addr;
1426 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1427 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1428     }
1429     unlock_user(target_saddr, target_addr, 0);
1430 
1431     return 0;
1432 }
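/* Example of the AF_UNIX length fix-up above: a guest connecting to
 * "/tmp/sock" that passes len = offsetof(struct sockaddr_un, sun_path) +
 * strlen("/tmp/sock") (forgetting the "+ 1") typically still has the
 * string's terminating NUL in the byte just past len, so cp[len-1] is
 * 'k', cp[len] is '\0', and len is bumped by one before the address is
 * handed to the host.
 */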
1433 
1434 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1435                                                struct sockaddr *addr,
1436                                                socklen_t len)
1437 {
1438     struct target_sockaddr *target_saddr;
1439 
1440     if (len == 0) {
1441         return 0;
1442     }
1443     assert(addr);
1444 
1445     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1446     if (!target_saddr)
1447         return -TARGET_EFAULT;
1448     memcpy(target_saddr, addr, len);
1449     if (len >= offsetof(struct target_sockaddr, sa_family) +
1450         sizeof(target_saddr->sa_family)) {
1451         target_saddr->sa_family = tswap16(addr->sa_family);
1452     }
1453     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1454         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1455         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1456         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1457     } else if (addr->sa_family == AF_PACKET) {
1458         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1459         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1460         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1461     } else if (addr->sa_family == AF_INET6 &&
1462                len >= sizeof(struct target_sockaddr_in6)) {
1463         struct target_sockaddr_in6 *target_in6 =
1464                (struct target_sockaddr_in6 *)target_saddr;
1465         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1466     }
1467     unlock_user(target_saddr, target_addr, len);
1468 
1469     return 0;
1470 }
1471 
1472 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1473                                            struct target_msghdr *target_msgh)
1474 {
1475     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1476     abi_long msg_controllen;
1477     abi_ulong target_cmsg_addr;
1478     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1479     socklen_t space = 0;
1480 
1481     msg_controllen = tswapal(target_msgh->msg_controllen);
1482     if (msg_controllen < sizeof (struct target_cmsghdr))
1483         goto the_end;
1484     target_cmsg_addr = tswapal(target_msgh->msg_control);
1485     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1486     target_cmsg_start = target_cmsg;
1487     if (!target_cmsg)
1488         return -TARGET_EFAULT;
1489 
1490     while (cmsg && target_cmsg) {
1491         void *data = CMSG_DATA(cmsg);
1492         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1493 
1494         int len = tswapal(target_cmsg->cmsg_len)
1495             - sizeof(struct target_cmsghdr);
1496 
1497         space += CMSG_SPACE(len);
1498         if (space > msgh->msg_controllen) {
1499             space -= CMSG_SPACE(len);
1500             /* This is a QEMU bug, since we allocated the payload
1501              * area ourselves (unlike overflow in host-to-target
1502              * conversion, which is just the guest giving us a buffer
1503              * that's too small). It can't happen for the payload types
1504              * we currently support; if it becomes an issue in future
1505              * we would need to improve our allocation strategy to
1506              * something more intelligent than "twice the size of the
1507              * target buffer we're reading from".
1508              */
1509             gemu_log("Host cmsg overflow\n");
1510             break;
1511         }
1512 
1513         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1514             cmsg->cmsg_level = SOL_SOCKET;
1515         } else {
1516             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1517         }
1518         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1519         cmsg->cmsg_len = CMSG_LEN(len);
1520 
1521         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1522             int *fd = (int *)data;
1523             int *target_fd = (int *)target_data;
1524             int i, numfds = len / sizeof(int);
1525 
1526             for (i = 0; i < numfds; i++) {
1527                 __get_user(fd[i], target_fd + i);
1528             }
1529         } else if (cmsg->cmsg_level == SOL_SOCKET
1530                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1531             struct ucred *cred = (struct ucred *)data;
1532             struct target_ucred *target_cred =
1533                 (struct target_ucred *)target_data;
1534 
1535             __get_user(cred->pid, &target_cred->pid);
1536             __get_user(cred->uid, &target_cred->uid);
1537             __get_user(cred->gid, &target_cred->gid);
1538         } else {
1539             gemu_log("Unsupported ancillary data: %d/%d\n",
1540                                         cmsg->cmsg_level, cmsg->cmsg_type);
1541             memcpy(data, target_data, len);
1542         }
1543 
1544         cmsg = CMSG_NXTHDR(msgh, cmsg);
1545         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1546                                          target_cmsg_start);
1547     }
1548     unlock_user(target_cmsg, target_cmsg_addr, 0);
1549  the_end:
1550     msgh->msg_controllen = space;
1551     return 0;
1552 }
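/*
 * Example: for an SCM_RIGHTS message the guest's cmsg_len was computed
 * against the target's cmsghdr layout (e.g. a 4-byte cmsg_len on a
 * 32-bit guest), so the header is rebuilt above with the host's
 * CMSG_LEN() and each file descriptor is read with __get_user() so that
 * its byte order is corrected as well.
 */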
1553 
1554 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1555                                            struct msghdr *msgh)
1556 {
1557     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1558     abi_long msg_controllen;
1559     abi_ulong target_cmsg_addr;
1560     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1561     socklen_t space = 0;
1562 
1563     msg_controllen = tswapal(target_msgh->msg_controllen);
1564     if (msg_controllen < sizeof (struct target_cmsghdr))
1565         goto the_end;
1566     target_cmsg_addr = tswapal(target_msgh->msg_control);
1567     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1568     target_cmsg_start = target_cmsg;
1569     if (!target_cmsg)
1570         return -TARGET_EFAULT;
1571 
1572     while (cmsg && target_cmsg) {
1573         void *data = CMSG_DATA(cmsg);
1574         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1575 
1576         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1577         int tgt_len, tgt_space;
1578 
1579         /* We never copy a half-header but may copy half-data;
1580          * this is Linux's behaviour in put_cmsg(). Note that
1581          * truncation here is a guest problem (which we report
1582          * to the guest via the CTRUNC bit), unlike truncation
1583          * in target_to_host_cmsg, which is a QEMU bug.
1584          */
1585         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1586             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1587             break;
1588         }
1589 
1590         if (cmsg->cmsg_level == SOL_SOCKET) {
1591             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1592         } else {
1593             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1594         }
1595         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1596 
1597         /* Payload types which need a different size of payload on
1598          * the target must adjust tgt_len here.
1599          */
1600         tgt_len = len;
1601         switch (cmsg->cmsg_level) {
1602         case SOL_SOCKET:
1603             switch (cmsg->cmsg_type) {
1604             case SO_TIMESTAMP:
1605                 tgt_len = sizeof(struct target_timeval);
1606                 break;
1607             default:
1608                 break;
1609             }
1610             break;
1611         default:
1612             break;
1613         }
1614 
1615         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1616             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1617             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1618         }
1619 
1620         /* We must now copy-and-convert len bytes of payload
1621          * into tgt_len bytes of destination space. Bear in mind
1622          * that in both source and destination we may be dealing
1623          * with a truncated value!
1624          */
1625         switch (cmsg->cmsg_level) {
1626         case SOL_SOCKET:
1627             switch (cmsg->cmsg_type) {
1628             case SCM_RIGHTS:
1629             {
1630                 int *fd = (int *)data;
1631                 int *target_fd = (int *)target_data;
1632                 int i, numfds = tgt_len / sizeof(int);
1633 
1634                 for (i = 0; i < numfds; i++) {
1635                     __put_user(fd[i], target_fd + i);
1636                 }
1637                 break;
1638             }
1639             case SO_TIMESTAMP:
1640             {
1641                 struct timeval *tv = (struct timeval *)data;
1642                 struct target_timeval *target_tv =
1643                     (struct target_timeval *)target_data;
1644 
1645                 if (len != sizeof(struct timeval) ||
1646                     tgt_len != sizeof(struct target_timeval)) {
1647                     goto unimplemented;
1648                 }
1649 
1650                 /* copy struct timeval to target */
1651                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1652                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1653                 break;
1654             }
1655             case SCM_CREDENTIALS:
1656             {
1657                 struct ucred *cred = (struct ucred *)data;
1658                 struct target_ucred *target_cred =
1659                     (struct target_ucred *)target_data;
1660 
1661                 __put_user(cred->pid, &target_cred->pid);
1662                 __put_user(cred->uid, &target_cred->uid);
1663                 __put_user(cred->gid, &target_cred->gid);
1664                 break;
1665             }
1666             default:
1667                 goto unimplemented;
1668             }
1669             break;
1670 
1671         case SOL_IP:
1672             switch (cmsg->cmsg_type) {
1673             case IP_TTL:
1674             {
1675                 uint32_t *v = (uint32_t *)data;
1676                 uint32_t *t_int = (uint32_t *)target_data;
1677 
1678                 if (len != sizeof(uint32_t) ||
1679                     tgt_len != sizeof(uint32_t)) {
1680                     goto unimplemented;
1681                 }
1682                 __put_user(*v, t_int);
1683                 break;
1684             }
1685             case IP_RECVERR:
1686             {
1687                 struct errhdr_t {
1688                    struct sock_extended_err ee;
1689                    struct sockaddr_in offender;
1690                 };
1691                 struct errhdr_t *errh = (struct errhdr_t *)data;
1692                 struct errhdr_t *target_errh =
1693                     (struct errhdr_t *)target_data;
1694 
1695                 if (len != sizeof(struct errhdr_t) ||
1696                     tgt_len != sizeof(struct errhdr_t)) {
1697                     goto unimplemented;
1698                 }
1699                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1700                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1701                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1702                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1703                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1704                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1705                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1706                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1707                     (void *) &errh->offender, sizeof(errh->offender));
1708                 break;
1709             }
1710             default:
1711                 goto unimplemented;
1712             }
1713             break;
1714 
1715         case SOL_IPV6:
1716             switch (cmsg->cmsg_type) {
1717             case IPV6_HOPLIMIT:
1718             {
1719                 uint32_t *v = (uint32_t *)data;
1720                 uint32_t *t_int = (uint32_t *)target_data;
1721 
1722                 if (len != sizeof(uint32_t) ||
1723                     tgt_len != sizeof(uint32_t)) {
1724                     goto unimplemented;
1725                 }
1726                 __put_user(*v, t_int);
1727                 break;
1728             }
1729             case IPV6_RECVERR:
1730             {
1731                 struct errhdr6_t {
1732                    struct sock_extended_err ee;
1733                    struct sockaddr_in6 offender;
1734                 };
1735                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1736                 struct errhdr6_t *target_errh =
1737                     (struct errhdr6_t *)target_data;
1738 
1739                 if (len != sizeof(struct errhdr6_t) ||
1740                     tgt_len != sizeof(struct errhdr6_t)) {
1741                     goto unimplemented;
1742                 }
1743                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1744                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1745                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1746                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1747                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1748                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1749                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1750                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1751                     (void *) &errh->offender, sizeof(errh->offender));
1752                 break;
1753             }
1754             default:
1755                 goto unimplemented;
1756             }
1757             break;
1758 
1759         default:
1760         unimplemented:
1761             gemu_log("Unsupported ancillary data: %d/%d\n",
1762                                         cmsg->cmsg_level, cmsg->cmsg_type);
1763             memcpy(target_data, data, MIN(len, tgt_len));
1764             if (tgt_len > len) {
1765                 memset(target_data + len, 0, tgt_len - len);
1766             }
1767         }
1768 
1769         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1770         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1771         if (msg_controllen < tgt_space) {
1772             tgt_space = msg_controllen;
1773         }
1774         msg_controllen -= tgt_space;
1775         space += tgt_space;
1776         cmsg = CMSG_NXTHDR(msgh, cmsg);
1777         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1778                                          target_cmsg_start);
1779     }
1780     unlock_user(target_cmsg, target_cmsg_addr, space);
1781  the_end:
1782     target_msgh->msg_controllen = tswapal(space);
1783     return 0;
1784 }
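/*
 * Example of the len/tgt_len distinction above: an SO_TIMESTAMP payload
 * is a host struct timeval (16 bytes on a 64-bit host) while the guest
 * expects a struct target_timeval (8 bytes on a 32-bit guest), so
 * tgt_len is forced to sizeof(struct target_timeval) and tv_sec/tv_usec
 * are converted individually instead of being memcpy'd.
 */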
1785 
1786 /* do_setsockopt() must return target values and target errnos. */
1787 static abi_long do_setsockopt(int sockfd, int level, int optname,
1788                               abi_ulong optval_addr, socklen_t optlen)
1789 {
1790     abi_long ret;
1791     int val;
1792     struct ip_mreqn *ip_mreq;
1793     struct ip_mreq_source *ip_mreq_source;
1794 
1795     switch(level) {
1796     case SOL_TCP:
1797         /* TCP options all take an 'int' value.  */
1798         if (optlen < sizeof(uint32_t))
1799             return -TARGET_EINVAL;
1800 
1801         if (get_user_u32(val, optval_addr))
1802             return -TARGET_EFAULT;
1803         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1804         break;
1805     case SOL_IP:
1806         switch(optname) {
1807         case IP_TOS:
1808         case IP_TTL:
1809         case IP_HDRINCL:
1810         case IP_ROUTER_ALERT:
1811         case IP_RECVOPTS:
1812         case IP_RETOPTS:
1813         case IP_PKTINFO:
1814         case IP_MTU_DISCOVER:
1815         case IP_RECVERR:
1816         case IP_RECVTTL:
1817         case IP_RECVTOS:
1818 #ifdef IP_FREEBIND
1819         case IP_FREEBIND:
1820 #endif
1821         case IP_MULTICAST_TTL:
1822         case IP_MULTICAST_LOOP:
1823             val = 0;
1824             if (optlen >= sizeof(uint32_t)) {
1825                 if (get_user_u32(val, optval_addr))
1826                     return -TARGET_EFAULT;
1827             } else if (optlen >= 1) {
1828                 if (get_user_u8(val, optval_addr))
1829                     return -TARGET_EFAULT;
1830             }
1831             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1832             break;
1833         case IP_ADD_MEMBERSHIP:
1834         case IP_DROP_MEMBERSHIP:
1835             if (optlen < sizeof (struct target_ip_mreq) ||
1836                 optlen > sizeof (struct target_ip_mreqn))
1837                 return -TARGET_EINVAL;
1838 
1839             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1840             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1841             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1842             break;
1843 
1844         case IP_BLOCK_SOURCE:
1845         case IP_UNBLOCK_SOURCE:
1846         case IP_ADD_SOURCE_MEMBERSHIP:
1847         case IP_DROP_SOURCE_MEMBERSHIP:
1848             if (optlen != sizeof (struct target_ip_mreq_source))
1849                 return -TARGET_EINVAL;
1850 
1851             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1852             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1853             unlock_user (ip_mreq_source, optval_addr, 0);
1854             break;
1855 
1856         default:
1857             goto unimplemented;
1858         }
1859         break;
1860     case SOL_IPV6:
1861         switch (optname) {
1862         case IPV6_MTU_DISCOVER:
1863         case IPV6_MTU:
1864         case IPV6_V6ONLY:
1865         case IPV6_RECVPKTINFO:
1866         case IPV6_UNICAST_HOPS:
1867         case IPV6_MULTICAST_HOPS:
1868         case IPV6_MULTICAST_LOOP:
1869         case IPV6_RECVERR:
1870         case IPV6_RECVHOPLIMIT:
1871         case IPV6_2292HOPLIMIT:
1872         case IPV6_CHECKSUM:
1873         case IPV6_ADDRFORM:
1874         case IPV6_2292PKTINFO:
1875         case IPV6_RECVTCLASS:
1876         case IPV6_RECVRTHDR:
1877         case IPV6_2292RTHDR:
1878         case IPV6_RECVHOPOPTS:
1879         case IPV6_2292HOPOPTS:
1880         case IPV6_RECVDSTOPTS:
1881         case IPV6_2292DSTOPTS:
1882         case IPV6_TCLASS:
1883 #ifdef IPV6_RECVPATHMTU
1884         case IPV6_RECVPATHMTU:
1885 #endif
1886 #ifdef IPV6_TRANSPARENT
1887         case IPV6_TRANSPARENT:
1888 #endif
1889 #ifdef IPV6_FREEBIND
1890         case IPV6_FREEBIND:
1891 #endif
1892 #ifdef IPV6_RECVORIGDSTADDR
1893         case IPV6_RECVORIGDSTADDR:
1894 #endif
1895             val = 0;
1896             if (optlen < sizeof(uint32_t)) {
1897                 return -TARGET_EINVAL;
1898             }
1899             if (get_user_u32(val, optval_addr)) {
1900                 return -TARGET_EFAULT;
1901             }
1902             ret = get_errno(setsockopt(sockfd, level, optname,
1903                                        &val, sizeof(val)));
1904             break;
1905         case IPV6_PKTINFO:
1906         {
1907             struct in6_pktinfo pki;
1908 
1909             if (optlen < sizeof(pki)) {
1910                 return -TARGET_EINVAL;
1911             }
1912 
1913             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1914                 return -TARGET_EFAULT;
1915             }
1916 
1917             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1918 
1919             ret = get_errno(setsockopt(sockfd, level, optname,
1920                                        &pki, sizeof(pki)));
1921             break;
1922         }
1923         default:
1924             goto unimplemented;
1925         }
1926         break;
1927     case SOL_ICMPV6:
1928         switch (optname) {
1929         case ICMPV6_FILTER:
1930         {
1931             struct icmp6_filter icmp6f;
1932 
1933             if (optlen > sizeof(icmp6f)) {
1934                 optlen = sizeof(icmp6f);
1935             }
1936 
1937             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1938                 return -TARGET_EFAULT;
1939             }
1940 
1941             for (val = 0; val < 8; val++) {
1942                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1943             }
1944 
1945             ret = get_errno(setsockopt(sockfd, level, optname,
1946                                        &icmp6f, optlen));
1947             break;
1948         }
1949         default:
1950             goto unimplemented;
1951         }
1952         break;
1953     case SOL_RAW:
1954         switch (optname) {
1955         case ICMP_FILTER:
1956         case IPV6_CHECKSUM:
1957             /* these options take a u32 value */
1958             if (optlen < sizeof(uint32_t)) {
1959                 return -TARGET_EINVAL;
1960             }
1961 
1962             if (get_user_u32(val, optval_addr)) {
1963                 return -TARGET_EFAULT;
1964             }
1965             ret = get_errno(setsockopt(sockfd, level, optname,
1966                                        &val, sizeof(val)));
1967             break;
1968 
1969         default:
1970             goto unimplemented;
1971         }
1972         break;
1973     case TARGET_SOL_SOCKET:
1974         switch (optname) {
1975         case TARGET_SO_RCVTIMEO:
1976         {
1977                 struct timeval tv;
1978 
1979                 optname = SO_RCVTIMEO;
1980 
1981 set_timeout:
1982                 if (optlen != sizeof(struct target_timeval)) {
1983                     return -TARGET_EINVAL;
1984                 }
1985 
1986                 if (copy_from_user_timeval(&tv, optval_addr)) {
1987                     return -TARGET_EFAULT;
1988                 }
1989 
1990                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1991                                 &tv, sizeof(tv)));
1992                 return ret;
1993         }
1994         case TARGET_SO_SNDTIMEO:
1995                 optname = SO_SNDTIMEO;
1996                 goto set_timeout;
1997         case TARGET_SO_ATTACH_FILTER:
1998         {
1999                 struct target_sock_fprog *tfprog;
2000                 struct target_sock_filter *tfilter;
2001                 struct sock_fprog fprog;
2002                 struct sock_filter *filter;
2003                 int i;
2004 
2005                 if (optlen != sizeof(*tfprog)) {
2006                     return -TARGET_EINVAL;
2007                 }
2008                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2009                     return -TARGET_EFAULT;
2010                 }
2011                 if (!lock_user_struct(VERIFY_READ, tfilter,
2012                                       tswapal(tfprog->filter), 0)) {
2013                     unlock_user_struct(tfprog, optval_addr, 1);
2014                     return -TARGET_EFAULT;
2015                 }
2016 
2017                 fprog.len = tswap16(tfprog->len);
2018                 filter = g_try_new(struct sock_filter, fprog.len);
2019                 if (filter == NULL) {
2020                     unlock_user_struct(tfilter, tfprog->filter, 1);
2021                     unlock_user_struct(tfprog, optval_addr, 1);
2022                     return -TARGET_ENOMEM;
2023                 }
2024                 for (i = 0; i < fprog.len; i++) {
2025                     filter[i].code = tswap16(tfilter[i].code);
2026                     filter[i].jt = tfilter[i].jt;
2027                     filter[i].jf = tfilter[i].jf;
2028                     filter[i].k = tswap32(tfilter[i].k);
2029                 }
2030                 fprog.filter = filter;
2031 
2032                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2033                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2034                 g_free(filter);
2035 
2036                 unlock_user_struct(tfilter, tfprog->filter, 1);
2037                 unlock_user_struct(tfprog, optval_addr, 1);
2038                 return ret;
2039         }
2040         case TARGET_SO_BINDTODEVICE:
2041         {
2042                 char *dev_ifname, *addr_ifname;
2043 
2044                 if (optlen > IFNAMSIZ - 1) {
2045                     optlen = IFNAMSIZ - 1;
2046                 }
2047                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2048                 if (!dev_ifname) {
2049                     return -TARGET_EFAULT;
2050                 }
2051                 optname = SO_BINDTODEVICE;
2052                 addr_ifname = alloca(IFNAMSIZ);
2053                 memcpy(addr_ifname, dev_ifname, optlen);
2054                 addr_ifname[optlen] = 0;
2055                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2056                                            addr_ifname, optlen));
2057                 unlock_user (dev_ifname, optval_addr, 0);
2058                 return ret;
2059         }
2060         case TARGET_SO_LINGER:
2061         {
2062                 struct linger lg;
2063                 struct target_linger *tlg;
2064 
2065                 if (optlen != sizeof(struct target_linger)) {
2066                     return -TARGET_EINVAL;
2067                 }
2068                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2069                     return -TARGET_EFAULT;
2070                 }
2071                 __get_user(lg.l_onoff, &tlg->l_onoff);
2072                 __get_user(lg.l_linger, &tlg->l_linger);
2073                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2074                                 &lg, sizeof(lg)));
2075                 unlock_user_struct(tlg, optval_addr, 0);
2076                 return ret;
2077         }
2078         /* Options with 'int' argument.  */
2079         case TARGET_SO_DEBUG:
2080                 optname = SO_DEBUG;
2081                 break;
2082         case TARGET_SO_REUSEADDR:
2083                 optname = SO_REUSEADDR;
2084                 break;
2085 #ifdef SO_REUSEPORT
2086         case TARGET_SO_REUSEPORT:
2087                 optname = SO_REUSEPORT;
2088                 break;
2089 #endif
2090         case TARGET_SO_TYPE:
2091                 optname = SO_TYPE;
2092                 break;
2093         case TARGET_SO_ERROR:
2094                 optname = SO_ERROR;
2095                 break;
2096         case TARGET_SO_DONTROUTE:
2097                 optname = SO_DONTROUTE;
2098                 break;
2099         case TARGET_SO_BROADCAST:
2100                 optname = SO_BROADCAST;
2101                 break;
2102         case TARGET_SO_SNDBUF:
2103                 optname = SO_SNDBUF;
2104                 break;
2105         case TARGET_SO_SNDBUFFORCE:
2106                 optname = SO_SNDBUFFORCE;
2107                 break;
2108         case TARGET_SO_RCVBUF:
2109                 optname = SO_RCVBUF;
2110                 break;
2111         case TARGET_SO_RCVBUFFORCE:
2112                 optname = SO_RCVBUFFORCE;
2113                 break;
2114         case TARGET_SO_KEEPALIVE:
2115                 optname = SO_KEEPALIVE;
2116                 break;
2117         case TARGET_SO_OOBINLINE:
2118                 optname = SO_OOBINLINE;
2119                 break;
2120         case TARGET_SO_NO_CHECK:
2121                 optname = SO_NO_CHECK;
2122                 break;
2123         case TARGET_SO_PRIORITY:
2124                 optname = SO_PRIORITY;
2125                 break;
2126 #ifdef SO_BSDCOMPAT
2127         case TARGET_SO_BSDCOMPAT:
2128                 optname = SO_BSDCOMPAT;
2129                 break;
2130 #endif
2131         case TARGET_SO_PASSCRED:
2132                 optname = SO_PASSCRED;
2133                 break;
2134         case TARGET_SO_PASSSEC:
2135                 optname = SO_PASSSEC;
2136                 break;
2137         case TARGET_SO_TIMESTAMP:
2138                 optname = SO_TIMESTAMP;
2139                 break;
2140         case TARGET_SO_RCVLOWAT:
2141                 optname = SO_RCVLOWAT;
2142                 break;
2143         default:
2144             goto unimplemented;
2145         }
2146         if (optlen < sizeof(uint32_t))
2147             return -TARGET_EINVAL;
2148 
2149         if (get_user_u32(val, optval_addr))
2150             return -TARGET_EFAULT;
2151         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2152         break;
2153     default:
2154     unimplemented:
2155         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2156         ret = -TARGET_ENOPROTOOPT;
2157     }
2158     return ret;
2159 }
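/*
 * The cases above fall into three conversion patterns: plain integer
 * options are read with get_user_u32()/get_user_u8() and passed through
 * unchanged; structured options (e.g. a guest
 * setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))) are rebuilt
 * field by field in host format; and opaque buffers such as
 * IP_ADD_SOURCE_MEMBERSHIP are locked and handed to the host as-is.
 */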
2160 
2161 /* do_getsockopt() must return target values and target errnos. */
2162 static abi_long do_getsockopt(int sockfd, int level, int optname,
2163                               abi_ulong optval_addr, abi_ulong optlen)
2164 {
2165     abi_long ret;
2166     int len, val;
2167     socklen_t lv;
2168 
2169     switch(level) {
2170     case TARGET_SOL_SOCKET:
2171         level = SOL_SOCKET;
2172         switch (optname) {
2173         /* These don't just return a single integer */
2174         case TARGET_SO_RCVTIMEO:
2175         case TARGET_SO_SNDTIMEO:
2176         case TARGET_SO_PEERNAME:
2177             goto unimplemented;
2178         case TARGET_SO_PEERCRED: {
2179             struct ucred cr;
2180             socklen_t crlen;
2181             struct target_ucred *tcr;
2182 
2183             if (get_user_u32(len, optlen)) {
2184                 return -TARGET_EFAULT;
2185             }
2186             if (len < 0) {
2187                 return -TARGET_EINVAL;
2188             }
2189 
2190             crlen = sizeof(cr);
2191             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2192                                        &cr, &crlen));
2193             if (ret < 0) {
2194                 return ret;
2195             }
2196             if (len > crlen) {
2197                 len = crlen;
2198             }
2199             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2200                 return -TARGET_EFAULT;
2201             }
2202             __put_user(cr.pid, &tcr->pid);
2203             __put_user(cr.uid, &tcr->uid);
2204             __put_user(cr.gid, &tcr->gid);
2205             unlock_user_struct(tcr, optval_addr, 1);
2206             if (put_user_u32(len, optlen)) {
2207                 return -TARGET_EFAULT;
2208             }
2209             break;
2210         }
2211         case TARGET_SO_LINGER:
2212         {
2213             struct linger lg;
2214             socklen_t lglen;
2215             struct target_linger *tlg;
2216 
2217             if (get_user_u32(len, optlen)) {
2218                 return -TARGET_EFAULT;
2219             }
2220             if (len < 0) {
2221                 return -TARGET_EINVAL;
2222             }
2223 
2224             lglen = sizeof(lg);
2225             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2226                                        &lg, &lglen));
2227             if (ret < 0) {
2228                 return ret;
2229             }
2230             if (len > lglen) {
2231                 len = lglen;
2232             }
2233             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2234                 return -TARGET_EFAULT;
2235             }
2236             __put_user(lg.l_onoff, &tlg->l_onoff);
2237             __put_user(lg.l_linger, &tlg->l_linger);
2238             unlock_user_struct(tlg, optval_addr, 1);
2239             if (put_user_u32(len, optlen)) {
2240                 return -TARGET_EFAULT;
2241             }
2242             break;
2243         }
2244         /* Options with 'int' argument.  */
2245         case TARGET_SO_DEBUG:
2246             optname = SO_DEBUG;
2247             goto int_case;
2248         case TARGET_SO_REUSEADDR:
2249             optname = SO_REUSEADDR;
2250             goto int_case;
2251 #ifdef SO_REUSEPORT
2252         case TARGET_SO_REUSEPORT:
2253             optname = SO_REUSEPORT;
2254             goto int_case;
2255 #endif
2256         case TARGET_SO_TYPE:
2257             optname = SO_TYPE;
2258             goto int_case;
2259         case TARGET_SO_ERROR:
2260             optname = SO_ERROR;
2261             goto int_case;
2262         case TARGET_SO_DONTROUTE:
2263             optname = SO_DONTROUTE;
2264             goto int_case;
2265         case TARGET_SO_BROADCAST:
2266             optname = SO_BROADCAST;
2267             goto int_case;
2268         case TARGET_SO_SNDBUF:
2269             optname = SO_SNDBUF;
2270             goto int_case;
2271         case TARGET_SO_RCVBUF:
2272             optname = SO_RCVBUF;
2273             goto int_case;
2274         case TARGET_SO_KEEPALIVE:
2275             optname = SO_KEEPALIVE;
2276             goto int_case;
2277         case TARGET_SO_OOBINLINE:
2278             optname = SO_OOBINLINE;
2279             goto int_case;
2280         case TARGET_SO_NO_CHECK:
2281             optname = SO_NO_CHECK;
2282             goto int_case;
2283         case TARGET_SO_PRIORITY:
2284             optname = SO_PRIORITY;
2285             goto int_case;
2286 #ifdef SO_BSDCOMPAT
2287         case TARGET_SO_BSDCOMPAT:
2288             optname = SO_BSDCOMPAT;
2289             goto int_case;
2290 #endif
2291         case TARGET_SO_PASSCRED:
2292             optname = SO_PASSCRED;
2293             goto int_case;
2294         case TARGET_SO_TIMESTAMP:
2295             optname = SO_TIMESTAMP;
2296             goto int_case;
2297         case TARGET_SO_RCVLOWAT:
2298             optname = SO_RCVLOWAT;
2299             goto int_case;
2300         case TARGET_SO_ACCEPTCONN:
2301             optname = SO_ACCEPTCONN;
2302             goto int_case;
2303         default:
2304             goto int_case;
2305         }
2306         break;
2307     case SOL_TCP:
2308         /* TCP options all take an 'int' value.  */
2309     int_case:
2310         if (get_user_u32(len, optlen))
2311             return -TARGET_EFAULT;
2312         if (len < 0)
2313             return -TARGET_EINVAL;
2314         lv = sizeof(lv);
2315         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2316         if (ret < 0)
2317             return ret;
2318         if (optname == SO_TYPE) {
2319             val = host_to_target_sock_type(val);
2320         }
2321         if (len > lv)
2322             len = lv;
2323         if (len == 4) {
2324             if (put_user_u32(val, optval_addr))
2325                 return -TARGET_EFAULT;
2326         } else {
2327             if (put_user_u8(val, optval_addr))
2328                 return -TARGET_EFAULT;
2329         }
2330         if (put_user_u32(len, optlen))
2331             return -TARGET_EFAULT;
2332         break;
2333     case SOL_IP:
2334         switch(optname) {
2335         case IP_TOS:
2336         case IP_TTL:
2337         case IP_HDRINCL:
2338         case IP_ROUTER_ALERT:
2339         case IP_RECVOPTS:
2340         case IP_RETOPTS:
2341         case IP_PKTINFO:
2342         case IP_MTU_DISCOVER:
2343         case IP_RECVERR:
2344         case IP_RECVTOS:
2345 #ifdef IP_FREEBIND
2346         case IP_FREEBIND:
2347 #endif
2348         case IP_MULTICAST_TTL:
2349         case IP_MULTICAST_LOOP:
2350             if (get_user_u32(len, optlen))
2351                 return -TARGET_EFAULT;
2352             if (len < 0)
2353                 return -TARGET_EINVAL;
2354             lv = sizeof(lv);
2355             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2356             if (ret < 0)
2357                 return ret;
2358             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2359                 len = 1;
2360                 if (put_user_u32(len, optlen)
2361                     || put_user_u8(val, optval_addr))
2362                     return -TARGET_EFAULT;
2363             } else {
2364                 if (len > sizeof(int))
2365                     len = sizeof(int);
2366                 if (put_user_u32(len, optlen)
2367                     || put_user_u32(val, optval_addr))
2368                     return -TARGET_EFAULT;
2369             }
2370             break;
2371         default:
2372             ret = -TARGET_ENOPROTOOPT;
2373             break;
2374         }
2375         break;
2376     case SOL_IPV6:
2377         switch (optname) {
2378         case IPV6_MTU_DISCOVER:
2379         case IPV6_MTU:
2380         case IPV6_V6ONLY:
2381         case IPV6_RECVPKTINFO:
2382         case IPV6_UNICAST_HOPS:
2383         case IPV6_MULTICAST_HOPS:
2384         case IPV6_MULTICAST_LOOP:
2385         case IPV6_RECVERR:
2386         case IPV6_RECVHOPLIMIT:
2387         case IPV6_2292HOPLIMIT:
2388         case IPV6_CHECKSUM:
2389         case IPV6_ADDRFORM:
2390         case IPV6_2292PKTINFO:
2391         case IPV6_RECVTCLASS:
2392         case IPV6_RECVRTHDR:
2393         case IPV6_2292RTHDR:
2394         case IPV6_RECVHOPOPTS:
2395         case IPV6_2292HOPOPTS:
2396         case IPV6_RECVDSTOPTS:
2397         case IPV6_2292DSTOPTS:
2398         case IPV6_TCLASS:
2399 #ifdef IPV6_RECVPATHMTU
2400         case IPV6_RECVPATHMTU:
2401 #endif
2402 #ifdef IPV6_TRANSPARENT
2403         case IPV6_TRANSPARENT:
2404 #endif
2405 #ifdef IPV6_FREEBIND
2406         case IPV6_FREEBIND:
2407 #endif
2408 #ifdef IPV6_RECVORIGDSTADDR
2409         case IPV6_RECVORIGDSTADDR:
2410 #endif
2411             if (get_user_u32(len, optlen))
2412                 return -TARGET_EFAULT;
2413             if (len < 0)
2414                 return -TARGET_EINVAL;
2415             lv = sizeof(lv);
2416             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2417             if (ret < 0)
2418                 return ret;
2419             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2420                 len = 1;
2421                 if (put_user_u32(len, optlen)
2422                     || put_user_u8(val, optval_addr))
2423                     return -TARGET_EFAULT;
2424             } else {
2425                 if (len > sizeof(int))
2426                     len = sizeof(int);
2427                 if (put_user_u32(len, optlen)
2428                     || put_user_u32(val, optval_addr))
2429                     return -TARGET_EFAULT;
2430             }
2431             break;
2432         default:
2433             ret = -TARGET_ENOPROTOOPT;
2434             break;
2435         }
2436         break;
2437     default:
2438     unimplemented:
2439         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2440                  level, optname);
2441         ret = -TARGET_EOPNOTSUPP;
2442         break;
2443     }
2444     return ret;
2445 }
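/*
 * Example of the variable-width write-back above: if the guest queries
 * IP_TTL with *optlen == 1, the host getsockopt() still returns a full
 * int, but only a single byte is stored back (and *optlen set to 1) when
 * the value fits, which mirrors what the kernel does for these IP
 * options.
 */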
2446 
2447 /* Convert target low/high pair representing file offset into the host
2448  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2449  * as the kernel doesn't handle them either.
2450  */
2451 static void target_to_host_low_high(abi_ulong tlow,
2452                                     abi_ulong thigh,
2453                                     unsigned long *hlow,
2454                                     unsigned long *hhigh)
2455 {
2456     uint64_t off = tlow |
2457         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2458         TARGET_LONG_BITS / 2;
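    /*
     * The shift is split in two so the code never shifts by a full word
     * width, which would be undefined behaviour in C: when
     * TARGET_LONG_BITS is 64 the two half-shifts simply discard thigh.
     * Worked example for a 32-bit guest: tlow = 0x89abcdef,
     * thigh = 0x01234567 gives off = 0x0123456789abcdef; on a 64-bit
     * host *hlow then receives the whole value and *hhigh becomes 0.
     */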
2459 
2460     *hlow = off;
2461     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2462 }
2463 
2464 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2465                                 abi_ulong count, int copy)
2466 {
2467     struct target_iovec *target_vec;
2468     struct iovec *vec;
2469     abi_ulong total_len, max_len;
2470     int i;
2471     int err = 0;
2472     bool bad_address = false;
2473 
2474     if (count == 0) {
2475         errno = 0;
2476         return NULL;
2477     }
2478     if (count > IOV_MAX) {
2479         errno = EINVAL;
2480         return NULL;
2481     }
2482 
2483     vec = g_try_new0(struct iovec, count);
2484     if (vec == NULL) {
2485         errno = ENOMEM;
2486         return NULL;
2487     }
2488 
2489     target_vec = lock_user(VERIFY_READ, target_addr,
2490                            count * sizeof(struct target_iovec), 1);
2491     if (target_vec == NULL) {
2492         err = EFAULT;
2493         goto fail2;
2494     }
2495 
2496     /* ??? If host page size > target page size, this will result in a
2497        value larger than what we can actually support.  */
2498     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2499     total_len = 0;
2500 
2501     for (i = 0; i < count; i++) {
2502         abi_ulong base = tswapal(target_vec[i].iov_base);
2503         abi_long len = tswapal(target_vec[i].iov_len);
2504 
2505         if (len < 0) {
2506             err = EINVAL;
2507             goto fail;
2508         } else if (len == 0) {
2509             /* Zero length pointer is ignored.  */
2510             vec[i].iov_base = 0;
2511         } else {
2512             vec[i].iov_base = lock_user(type, base, len, copy);
2513             /* If the first buffer pointer is bad, this is a fault.  But
2514              * subsequent bad buffers will result in a partial write; this
2515              * is realized by filling the vector with null pointers and
2516              * zero lengths. */
2517             if (!vec[i].iov_base) {
2518                 if (i == 0) {
2519                     err = EFAULT;
2520                     goto fail;
2521                 } else {
2522                     bad_address = true;
2523                 }
2524             }
2525             if (bad_address) {
2526                 len = 0;
2527             }
2528             if (len > max_len - total_len) {
2529                 len = max_len - total_len;
2530             }
2531         }
2532         vec[i].iov_len = len;
2533         total_len += len;
2534     }
2535 
2536     unlock_user(target_vec, target_addr, 0);
2537     return vec;
2538 
2539  fail:
2540     while (--i >= 0) {
2541         if (tswapal(target_vec[i].iov_len) > 0) {
2542             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2543         }
2544     }
2545     unlock_user(target_vec, target_addr, 0);
2546  fail2:
2547     g_free(vec);
2548     errno = err;
2549     return NULL;
2550 }
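/*
 * Example of the partial-write handling above: if a guest writev() passes
 * three entries and the second one points at unmapped memory, entry 0 is
 * locked normally while the remaining entries are recorded with zero
 * length, so the host call performs a short write instead of failing the
 * whole request, as described in the comment in the loop.
 */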
2551 
2552 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2553                          abi_ulong count, int copy)
2554 {
2555     struct target_iovec *target_vec;
2556     int i;
2557 
2558     target_vec = lock_user(VERIFY_READ, target_addr,
2559                            count * sizeof(struct target_iovec), 1);
2560     if (target_vec) {
2561         for (i = 0; i < count; i++) {
2562             abi_ulong base = tswapal(target_vec[i].iov_base);
2563             abi_long len = tswapal(target_vec[i].iov_len);
2564             if (len < 0) {
2565                 break;
2566             }
2567             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2568         }
2569         unlock_user(target_vec, target_addr, 0);
2570     }
2571 
2572     g_free(vec);
2573 }
2574 
2575 static inline int target_to_host_sock_type(int *type)
2576 {
2577     int host_type = 0;
2578     int target_type = *type;
2579 
2580     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2581     case TARGET_SOCK_DGRAM:
2582         host_type = SOCK_DGRAM;
2583         break;
2584     case TARGET_SOCK_STREAM:
2585         host_type = SOCK_STREAM;
2586         break;
2587     default:
2588         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2589         break;
2590     }
2591     if (target_type & TARGET_SOCK_CLOEXEC) {
2592 #if defined(SOCK_CLOEXEC)
2593         host_type |= SOCK_CLOEXEC;
2594 #else
2595         return -TARGET_EINVAL;
2596 #endif
2597     }
2598     if (target_type & TARGET_SOCK_NONBLOCK) {
2599 #if defined(SOCK_NONBLOCK)
2600         host_type |= SOCK_NONBLOCK;
2601 #elif !defined(O_NONBLOCK)
2602         return -TARGET_EINVAL;
2603 #endif
2604     }
2605     *type = host_type;
2606     return 0;
2607 }
2608 
2609 /* Try to emulate socket type flags after socket creation.  */
2610 static int sock_flags_fixup(int fd, int target_type)
2611 {
2612 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2613     if (target_type & TARGET_SOCK_NONBLOCK) {
2614         int flags = fcntl(fd, F_GETFL);
2615         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2616             close(fd);
2617             return -TARGET_EINVAL;
2618         }
2619     }
2620 #endif
2621     return fd;
2622 }
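/*
 * Illustrative guest call (not from this file):
 *
 *     int s = socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
 *
 * On hosts without SOCK_NONBLOCK, target_to_host_sock_type() drops the
 * flag and the descriptor is switched to non-blocking here via
 * fcntl(F_SETFL, O_NONBLOCK).  There is no equivalent fallback for
 * TARGET_SOCK_CLOEXEC, which is rejected with -TARGET_EINVAL instead.
 */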
2623 
2624 /* do_socket() must return target values and target errnos. */
2625 static abi_long do_socket(int domain, int type, int protocol)
2626 {
2627     int target_type = type;
2628     int ret;
2629 
2630     ret = target_to_host_sock_type(&type);
2631     if (ret) {
2632         return ret;
2633     }
2634 
2635     if (domain == PF_NETLINK && !(
2636 #ifdef CONFIG_RTNETLINK
2637          protocol == NETLINK_ROUTE ||
2638 #endif
2639          protocol == NETLINK_KOBJECT_UEVENT ||
2640          protocol == NETLINK_AUDIT)) {
2641         return -EPFNOSUPPORT;
2642     }
2643 
2644     if (domain == AF_PACKET ||
2645         (domain == AF_INET && type == SOCK_PACKET)) {
2646         protocol = tswap16(protocol);
2647     }
2648 
2649     ret = get_errno(socket(domain, type, protocol));
2650     if (ret >= 0) {
2651         ret = sock_flags_fixup(ret, target_type);
2652         if (type == SOCK_PACKET) {
2653             /* Handle an obsolete case:
2654              * SOCK_PACKET sockets are bound by device name
2655              */
2656             fd_trans_register(ret, &target_packet_trans);
2657         } else if (domain == PF_NETLINK) {
2658             switch (protocol) {
2659 #ifdef CONFIG_RTNETLINK
2660             case NETLINK_ROUTE:
2661                 fd_trans_register(ret, &target_netlink_route_trans);
2662                 break;
2663 #endif
2664             case NETLINK_KOBJECT_UEVENT:
2665                 /* nothing to do: messages are strings */
2666                 break;
2667             case NETLINK_AUDIT:
2668                 fd_trans_register(ret, &target_netlink_audit_trans);
2669                 break;
2670             default:
2671                 g_assert_not_reached();
2672             }
2673         }
2674     }
2675     return ret;
2676 }
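/*
 * The tswap16() of the protocol above exists because packet sockets take
 * a 16-bit protocol in network byte order as the guest computed it, e.g.
 * htons(ETH_P_ALL); when guest and host endianness differ that numeric
 * value has to be byte-swapped before the host socket() call sees it.
 */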
2677 
2678 /* do_bind() must return target values and target errnos. */
2679 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2680                         socklen_t addrlen)
2681 {
2682     void *addr;
2683     abi_long ret;
2684 
2685     if ((int)addrlen < 0) {
2686         return -TARGET_EINVAL;
2687     }
2688 
2689     addr = alloca(addrlen+1);
2690 
2691     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2692     if (ret)
2693         return ret;
2694 
2695     return get_errno(bind(sockfd, addr, addrlen));
2696 }
2697 
2698 /* do_connect() must return target values and target errnos. */
2699 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2700                            socklen_t addrlen)
2701 {
2702     void *addr;
2703     abi_long ret;
2704 
2705     if ((int)addrlen < 0) {
2706         return -TARGET_EINVAL;
2707     }
2708 
2709     addr = alloca(addrlen+1);
2710 
2711     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2712     if (ret)
2713         return ret;
2714 
2715     return get_errno(safe_connect(sockfd, addr, addrlen));
2716 }
2717 
2718 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2719 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2720                                       int flags, int send)
2721 {
2722     abi_long ret, len;
2723     struct msghdr msg;
2724     abi_ulong count;
2725     struct iovec *vec;
2726     abi_ulong target_vec;
2727 
2728     if (msgp->msg_name) {
2729         msg.msg_namelen = tswap32(msgp->msg_namelen);
2730         msg.msg_name = alloca(msg.msg_namelen+1);
2731         ret = target_to_host_sockaddr(fd, msg.msg_name,
2732                                       tswapal(msgp->msg_name),
2733                                       msg.msg_namelen);
2734         if (ret == -TARGET_EFAULT) {
2735             /* For connected sockets msg_name and msg_namelen must
2736              * be ignored, so returning EFAULT immediately is wrong.
2737              * Instead, pass a bad msg_name to the host kernel, and
2738              * let it decide whether to return EFAULT or not.
2739              */
2740             msg.msg_name = (void *)-1;
2741         } else if (ret) {
2742             goto out2;
2743         }
2744     } else {
2745         msg.msg_name = NULL;
2746         msg.msg_namelen = 0;
2747     }
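    /*
     * The host control buffer is sized at twice the guest's
     * msg_controllen because host cmsg headers and alignment can be
     * larger than the guest's (e.g. an 8-byte cmsg_len on a 64-bit host
     * versus 4 bytes on a 32-bit guest); see the "twice the size" note
     * in target_to_host_cmsg().
     */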
2748     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2749     msg.msg_control = alloca(msg.msg_controllen);
2750     memset(msg.msg_control, 0, msg.msg_controllen);
2751 
2752     msg.msg_flags = tswap32(msgp->msg_flags);
2753 
2754     count = tswapal(msgp->msg_iovlen);
2755     target_vec = tswapal(msgp->msg_iov);
2756 
2757     if (count > IOV_MAX) {
2758         /* sendmsg/recvmsg return a different errno for this condition than
2759          * readv/writev, so we must catch it here before lock_iovec() does.
2760          */
2761         ret = -TARGET_EMSGSIZE;
2762         goto out2;
2763     }
2764 
2765     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2766                      target_vec, count, send);
2767     if (vec == NULL) {
2768         ret = -host_to_target_errno(errno);
2769         goto out2;
2770     }
2771     msg.msg_iovlen = count;
2772     msg.msg_iov = vec;
2773 
2774     if (send) {
2775         if (fd_trans_target_to_host_data(fd)) {
2776             void *host_msg;
2777 
2778             host_msg = g_malloc(msg.msg_iov->iov_len);
2779             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2780             ret = fd_trans_target_to_host_data(fd)(host_msg,
2781                                                    msg.msg_iov->iov_len);
2782             if (ret >= 0) {
2783                 msg.msg_iov->iov_base = host_msg;
2784                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2785             }
2786             g_free(host_msg);
2787         } else {
2788             ret = target_to_host_cmsg(&msg, msgp);
2789             if (ret == 0) {
2790                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2791             }
2792         }
2793     } else {
2794         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2795         if (!is_error(ret)) {
2796             len = ret;
2797             if (fd_trans_host_to_target_data(fd)) {
2798                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2799                                                MIN(msg.msg_iov->iov_len, len));
2800             } else {
2801                 ret = host_to_target_cmsg(msgp, &msg);
2802             }
2803             if (!is_error(ret)) {
2804                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2805                 msgp->msg_flags = tswap32(msg.msg_flags);
2806                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2807                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2808                                     msg.msg_name, msg.msg_namelen);
2809                     if (ret) {
2810                         goto out;
2811                     }
2812                 }
2813 
2814                 ret = len;
2815             }
2816         }
2817     }
2818 
2819 out:
2820     unlock_iovec(vec, target_vec, count, !send);
2821 out2:
2822     return ret;
2823 }
2824 
2825 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2826                                int flags, int send)
2827 {
2828     abi_long ret;
2829     struct target_msghdr *msgp;
2830 
2831     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2832                           msgp,
2833                           target_msg,
2834                           send ? 1 : 0)) {
2835         return -TARGET_EFAULT;
2836     }
2837     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2838     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2839     return ret;
2840 }
2841 
2842 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2843  * so it might not have this *mmsg-specific flag either.
2844  */
2845 #ifndef MSG_WAITFORONE
2846 #define MSG_WAITFORONE 0x10000
2847 #endif
2848 
2849 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2850                                 unsigned int vlen, unsigned int flags,
2851                                 int send)
2852 {
2853     struct target_mmsghdr *mmsgp;
2854     abi_long ret = 0;
2855     int i;
2856 
2857     if (vlen > UIO_MAXIOV) {
2858         vlen = UIO_MAXIOV;
2859     }
2860 
2861     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2862     if (!mmsgp) {
2863         return -TARGET_EFAULT;
2864     }
2865 
2866     for (i = 0; i < vlen; i++) {
2867         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2868         if (is_error(ret)) {
2869             break;
2870         }
2871         mmsgp[i].msg_len = tswap32(ret);
2872         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2873         if (flags & MSG_WAITFORONE) {
2874             flags |= MSG_DONTWAIT;
2875         }
2876     }
2877 
2878     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2879 
2880     /* Return number of datagrams sent if we sent any at all;
2881      * otherwise return the error.
2882      */
2883     if (i) {
2884         return i;
2885     }
2886     return ret;
2887 }
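/*
 * Illustrative guest-side usage (not from this file):
 *
 *     struct mmsghdr v[4];
 *     int n = recvmmsg(s, v, 4, MSG_WAITFORONE, NULL);
 *
 * Only the first datagram may block (the loop above ORs in MSG_DONTWAIT
 * afterwards), and if an error occurs after some datagrams have already
 * been transferred the count is returned rather than the error, matching
 * the kernel's recvmmsg()/sendmmsg() semantics.
 */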
2888 
2889 /* do_accept4() must return target values and target errnos. */
2890 static abi_long do_accept4(int fd, abi_ulong target_addr,
2891                            abi_ulong target_addrlen_addr, int flags)
2892 {
2893     socklen_t addrlen, ret_addrlen;
2894     void *addr;
2895     abi_long ret;
2896     int host_flags;
2897 
2898     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2899 
2900     if (target_addr == 0) {
2901         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2902     }
2903 
2904     /* Linux returns EINVAL if the addrlen pointer is invalid */
2905     if (get_user_u32(addrlen, target_addrlen_addr))
2906         return -TARGET_EINVAL;
2907 
2908     if ((int)addrlen < 0) {
2909         return -TARGET_EINVAL;
2910     }
2911 
2912     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2913         return -TARGET_EINVAL;
2914 
2915     addr = alloca(addrlen);
2916 
2917     ret_addrlen = addrlen;
2918     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2919     if (!is_error(ret)) {
2920         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2921         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2922             ret = -TARGET_EFAULT;
2923         }
2924     }
2925     return ret;
2926 }
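/*
 * Example of the length handling above: if the guest accepts on an
 * AF_INET6 listener but only supplies addrlen == sizeof(struct
 * sockaddr_in), just those bytes of the peer address are converted, yet
 * ret_addrlen is written back with the full size the kernel reported so
 * the guest can detect the truncation, exactly as the native syscall
 * behaves.
 */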
2927 
2928 /* do_getpeername() must return target values and target errnos. */
2929 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2930                                abi_ulong target_addrlen_addr)
2931 {
2932     socklen_t addrlen, ret_addrlen;
2933     void *addr;
2934     abi_long ret;
2935 
2936     if (get_user_u32(addrlen, target_addrlen_addr))
2937         return -TARGET_EFAULT;
2938 
2939     if ((int)addrlen < 0) {
2940         return -TARGET_EINVAL;
2941     }
2942 
2943     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2944         return -TARGET_EFAULT;
2945 
2946     addr = alloca(addrlen);
2947 
2948     ret_addrlen = addrlen;
2949     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2950     if (!is_error(ret)) {
2951         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2952         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2953             ret = -TARGET_EFAULT;
2954         }
2955     }
2956     return ret;
2957 }
2958 
2959 /* do_getsockname() must return target values and target errnos. */
2960 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2961                                abi_ulong target_addrlen_addr)
2962 {
2963     socklen_t addrlen, ret_addrlen;
2964     void *addr;
2965     abi_long ret;
2966 
2967     if (get_user_u32(addrlen, target_addrlen_addr))
2968         return -TARGET_EFAULT;
2969 
2970     if ((int)addrlen < 0) {
2971         return -TARGET_EINVAL;
2972     }
2973 
2974     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2975         return -TARGET_EFAULT;
2976 
2977     addr = alloca(addrlen);
2978 
2979     ret_addrlen = addrlen;
2980     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2981     if (!is_error(ret)) {
2982         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2983         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2984             ret = -TARGET_EFAULT;
2985         }
2986     }
2987     return ret;
2988 }
2989 
2990 /* do_socketpair() must return target values and target errnos. */
2991 static abi_long do_socketpair(int domain, int type, int protocol,
2992                               abi_ulong target_tab_addr)
2993 {
2994     int tab[2];
2995     abi_long ret;
2996 
2997     target_to_host_sock_type(&type);
2998 
2999     ret = get_errno(socketpair(domain, type, protocol, tab));
3000     if (!is_error(ret)) {
3001         if (put_user_s32(tab[0], target_tab_addr)
3002             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3003             ret = -TARGET_EFAULT;
3004     }
3005     return ret;
3006 }
3007 
3008 /* do_sendto() must return target values and target errnos. */
3009 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3010                           abi_ulong target_addr, socklen_t addrlen)
3011 {
3012     void *addr;
3013     void *host_msg;
3014     void *copy_msg = NULL;
3015     abi_long ret;
3016 
3017     if ((int)addrlen < 0) {
3018         return -TARGET_EINVAL;
3019     }
3020 
3021     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3022     if (!host_msg)
3023         return -TARGET_EFAULT;
3024     if (fd_trans_target_to_host_data(fd)) {
3025         copy_msg = host_msg;
3026         host_msg = g_malloc(len);
3027         memcpy(host_msg, copy_msg, len);
3028         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3029         if (ret < 0) {
3030             goto fail;
3031         }
3032     }
3033     if (target_addr) {
3034         addr = alloca(addrlen+1);
3035         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3036         if (ret) {
3037             goto fail;
3038         }
3039         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3040     } else {
3041         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3042     }
3043 fail:
3044     if (copy_msg) {
3045         g_free(host_msg);
3046         host_msg = copy_msg;
3047     }
3048     unlock_user(host_msg, msg, 0);
3049     return ret;
3050 }
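/*
 * The fd_trans_target_to_host_data() hook applies to descriptors
 * registered in do_socket(); for instance a NETLINK_ROUTE socket has its
 * outgoing buffer duplicated so the translator can convert the netlink
 * message headers to host byte order before safe_sendto() runs.
 */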
3051 
3052 /* do_recvfrom() must return target values and target errnos. */
3053 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3054                             abi_ulong target_addr,
3055                             abi_ulong target_addrlen)
3056 {
3057     socklen_t addrlen, ret_addrlen;
3058     void *addr;
3059     void *host_msg;
3060     abi_long ret;
3061 
3062     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3063     if (!host_msg)
3064         return -TARGET_EFAULT;
3065     if (target_addr) {
3066         if (get_user_u32(addrlen, target_addrlen)) {
3067             ret = -TARGET_EFAULT;
3068             goto fail;
3069         }
3070         if ((int)addrlen < 0) {
3071             ret = -TARGET_EINVAL;
3072             goto fail;
3073         }
3074         addr = alloca(addrlen);
3075         ret_addrlen = addrlen;
3076         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3077                                       addr, &ret_addrlen));
3078     } else {
3079         addr = NULL; /* To keep compiler quiet.  */
3080         addrlen = 0; /* To keep compiler quiet.  */
3081         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3082     }
3083     if (!is_error(ret)) {
3084         if (fd_trans_host_to_target_data(fd)) {
3085             abi_long trans;
3086             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3087             if (is_error(trans)) {
3088                 ret = trans;
3089                 goto fail;
3090             }
3091         }
3092         if (target_addr) {
3093             host_to_target_sockaddr(target_addr, addr,
3094                                     MIN(addrlen, ret_addrlen));
3095             if (put_user_u32(ret_addrlen, target_addrlen)) {
3096                 ret = -TARGET_EFAULT;
3097                 goto fail;
3098             }
3099         }
3100         unlock_user(host_msg, msg, len);
3101     } else {
3102 fail:
3103         unlock_user(host_msg, msg, 0);
3104     }
3105     return ret;
3106 }
3107 
3108 #ifdef TARGET_NR_socketcall
3109 /* do_socketcall() must return target values and target errnos. */
3110 static abi_long do_socketcall(int num, abi_ulong vptr)
3111 {
3112     static const unsigned nargs[] = { /* number of arguments per operation */
3113         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3114         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3115         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3116         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3117         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3118         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3119         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3120         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3121         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3122         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3123         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3124         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3125         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3126         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3127         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3128         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3129         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3130         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3131         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3132         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3133     };
3134     abi_long a[6]; /* max 6 args */
3135     unsigned i;
3136 
3137     /* check the range of the first argument num */
3138     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3139     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3140         return -TARGET_EINVAL;
3141     }
3142     /* ensure we have space for args */
3143     if (nargs[num] > ARRAY_SIZE(a)) {
3144         return -TARGET_EINVAL;
3145     }
3146     /* collect the arguments in a[] according to nargs[] */
3147     for (i = 0; i < nargs[num]; ++i) {
3148         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3149             return -TARGET_EFAULT;
3150         }
3151     }
3152     /* now when we have the args, invoke the appropriate underlying function */
3153     switch (num) {
3154     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3155         return do_socket(a[0], a[1], a[2]);
3156     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3157         return do_bind(a[0], a[1], a[2]);
3158     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3159         return do_connect(a[0], a[1], a[2]);
3160     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3161         return get_errno(listen(a[0], a[1]));
3162     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3163         return do_accept4(a[0], a[1], a[2], 0);
3164     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3165         return do_getsockname(a[0], a[1], a[2]);
3166     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3167         return do_getpeername(a[0], a[1], a[2]);
3168     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3169         return do_socketpair(a[0], a[1], a[2], a[3]);
3170     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3171         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3172     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3173         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3174     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3175         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3176     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3177         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3178     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3179         return get_errno(shutdown(a[0], a[1]));
3180     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3181         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3182     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3183         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3184     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3185         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3186     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3187         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3188     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3189         return do_accept4(a[0], a[1], a[2], a[3]);
3190     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3191         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3192     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3193         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3194     default:
3195         gemu_log("Unsupported socketcall: %d\n", num);
3196         return -TARGET_EINVAL;
3197     }
3198 }
3199 #endif
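/*
 * Worked example of the dispatch above: a guest libc that implements
 * socketpair(2) on top of socketcall(2) issues
 *
 *     socketcall(TARGET_SYS_SOCKETPAIR, args);
 *
 * where args points at four abi_long words {domain, type, protocol, tab}.
 * nargs[TARGET_SYS_SOCKETPAIR] is 4, so exactly those four words are
 * fetched from guest memory at vptr and forwarded to do_socketpair().
 */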
3200 
3201 #define N_SHM_REGIONS	32
3202 
3203 static struct shm_region {
3204     abi_ulong start;
3205     abi_ulong size;
3206     bool in_use;
3207 } shm_regions[N_SHM_REGIONS];
3208 
3209 #ifndef TARGET_SEMID64_DS
3210 /* asm-generic version of this struct */
3211 struct target_semid64_ds
3212 {
3213   struct target_ipc_perm sem_perm;
3214   abi_ulong sem_otime;
3215 #if TARGET_ABI_BITS == 32
3216   abi_ulong __unused1;
3217 #endif
3218   abi_ulong sem_ctime;
3219 #if TARGET_ABI_BITS == 32
3220   abi_ulong __unused2;
3221 #endif
3222   abi_ulong sem_nsems;
3223   abi_ulong __unused3;
3224   abi_ulong __unused4;
3225 };
3226 #endif
3227 
3228 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3229                                                abi_ulong target_addr)
3230 {
3231     struct target_ipc_perm *target_ip;
3232     struct target_semid64_ds *target_sd;
3233 
3234     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3235         return -TARGET_EFAULT;
3236     target_ip = &(target_sd->sem_perm);
3237     host_ip->__key = tswap32(target_ip->__key);
3238     host_ip->uid = tswap32(target_ip->uid);
3239     host_ip->gid = tswap32(target_ip->gid);
3240     host_ip->cuid = tswap32(target_ip->cuid);
3241     host_ip->cgid = tswap32(target_ip->cgid);
3242 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3243     host_ip->mode = tswap32(target_ip->mode);
3244 #else
3245     host_ip->mode = tswap16(target_ip->mode);
3246 #endif
3247 #if defined(TARGET_PPC)
3248     host_ip->__seq = tswap32(target_ip->__seq);
3249 #else
3250     host_ip->__seq = tswap16(target_ip->__seq);
3251 #endif
3252     unlock_user_struct(target_sd, target_addr, 0);
3253     return 0;
3254 }
3255 
3256 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3257                                                struct ipc_perm *host_ip)
3258 {
3259     struct target_ipc_perm *target_ip;
3260     struct target_semid64_ds *target_sd;
3261 
3262     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3263         return -TARGET_EFAULT;
3264     target_ip = &(target_sd->sem_perm);
3265     target_ip->__key = tswap32(host_ip->__key);
3266     target_ip->uid = tswap32(host_ip->uid);
3267     target_ip->gid = tswap32(host_ip->gid);
3268     target_ip->cuid = tswap32(host_ip->cuid);
3269     target_ip->cgid = tswap32(host_ip->cgid);
3270 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3271     target_ip->mode = tswap32(host_ip->mode);
3272 #else
3273     target_ip->mode = tswap16(host_ip->mode);
3274 #endif
3275 #if defined(TARGET_PPC)
3276     target_ip->__seq = tswap32(host_ip->__seq);
3277 #else
3278     target_ip->__seq = tswap16(host_ip->__seq);
3279 #endif
3280     unlock_user_struct(target_sd, target_addr, 1);
3281     return 0;
3282 }
3283 
3284 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3285                                                abi_ulong target_addr)
3286 {
3287     struct target_semid64_ds *target_sd;
3288 
3289     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3290         return -TARGET_EFAULT;
3291     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3292         return -TARGET_EFAULT;
3293     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3294     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3295     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3296     unlock_user_struct(target_sd, target_addr, 0);
3297     return 0;
3298 }
3299 
3300 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3301                                                struct semid_ds *host_sd)
3302 {
3303     struct target_semid64_ds *target_sd;
3304 
3305     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3306         return -TARGET_EFAULT;
3307     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3308         return -TARGET_EFAULT;
3309     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3310     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3311     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3312     unlock_user_struct(target_sd, target_addr, 1);
3313     return 0;
3314 }
3315 
3316 struct target_seminfo {
3317     int semmap;
3318     int semmni;
3319     int semmns;
3320     int semmnu;
3321     int semmsl;
3322     int semopm;
3323     int semume;
3324     int semusz;
3325     int semvmx;
3326     int semaem;
3327 };
3328 
3329 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3330                                               struct seminfo *host_seminfo)
3331 {
3332     struct target_seminfo *target_seminfo;
3333     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3334         return -TARGET_EFAULT;
3335     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3336     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3337     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3338     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3339     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3340     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3341     __put_user(host_seminfo->semume, &target_seminfo->semume);
3342     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3343     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3344     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3345     unlock_user_struct(target_seminfo, target_addr, 1);
3346     return 0;
3347 }
3348 
3349 union semun {
3350 	int val;
3351 	struct semid_ds *buf;
3352 	unsigned short *array;
3353 	struct seminfo *__buf;
3354 };
3355 
3356 union target_semun {
3357 	int val;
3358 	abi_ulong buf;
3359 	abi_ulong array;
3360 	abi_ulong __buf;
3361 };
3362 
3363 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3364                                                abi_ulong target_addr)
3365 {
3366     int nsems;
3367     unsigned short *array;
3368     union semun semun;
3369     struct semid_ds semid_ds;
3370     int i, ret;
3371 
3372     semun.buf = &semid_ds;
3373 
3374     ret = semctl(semid, 0, IPC_STAT, semun);
3375     if (ret == -1)
3376         return get_errno(ret);
3377 
3378     nsems = semid_ds.sem_nsems;
3379 
3380     *host_array = g_try_new(unsigned short, nsems);
3381     if (!*host_array) {
3382         return -TARGET_ENOMEM;
3383     }
3384     array = lock_user(VERIFY_READ, target_addr,
3385                       nsems*sizeof(unsigned short), 1);
3386     if (!array) {
3387         g_free(*host_array);
3388         return -TARGET_EFAULT;
3389     }
3390 
3391     for(i=0; i<nsems; i++) {
3392         __get_user((*host_array)[i], &array[i]);
3393     }
3394     unlock_user(array, target_addr, 0);
3395 
3396     return 0;
3397 }
3398 
3399 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3400                                                unsigned short **host_array)
3401 {
3402     int nsems;
3403     unsigned short *array;
3404     union semun semun;
3405     struct semid_ds semid_ds;
3406     int i, ret;
3407 
3408     semun.buf = &semid_ds;
3409 
3410     ret = semctl(semid, 0, IPC_STAT, semun);
3411     if (ret == -1)
3412         return get_errno(ret);
3413 
3414     nsems = semid_ds.sem_nsems;
3415 
3416     array = lock_user(VERIFY_WRITE, target_addr,
3417                       nsems*sizeof(unsigned short), 0);
3418     if (!array)
3419         return -TARGET_EFAULT;
3420 
3421     for(i=0; i<nsems; i++) {
3422         __put_user((*host_array)[i], &array[i]);
3423     }
3424     g_free(*host_array);
3425     unlock_user(array, target_addr, 1);
3426 
3427     return 0;
3428 }
3429 
3430 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3431                                  abi_ulong target_arg)
3432 {
3433     union target_semun target_su = { .buf = target_arg };
3434     union semun arg;
3435     struct semid_ds dsarg;
3436     unsigned short *array = NULL;
3437     struct seminfo seminfo;
3438     abi_long ret = -TARGET_EINVAL;
3439     abi_long err;
3440     cmd &= 0xff;
3441 
3442     switch( cmd ) {
3443 	case GETVAL:
3444 	case SETVAL:
3445             /* In 64-bit cross-endian situations, we would erroneously pick
3446              * up the wrong half of the union for the "val" element.  To
3447              * rectify this, the entire 8-byte structure is byteswapped,
3448              * followed by a swap of the 4-byte val field. In other cases,
3449              * the data is already in proper host byte order. */
3450 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3451 		target_su.buf = tswapal(target_su.buf);
3452 		arg.val = tswap32(target_su.val);
3453 	    } else {
3454 		arg.val = target_su.val;
3455 	    }
3456             ret = get_errno(semctl(semid, semnum, cmd, arg));
3457             break;
3458 	case GETALL:
3459 	case SETALL:
3460             err = target_to_host_semarray(semid, &array, target_su.array);
3461             if (err)
3462                 return err;
3463             arg.array = array;
3464             ret = get_errno(semctl(semid, semnum, cmd, arg));
3465             err = host_to_target_semarray(semid, target_su.array, &array);
3466             if (err)
3467                 return err;
3468             break;
3469 	case IPC_STAT:
3470 	case IPC_SET:
3471 	case SEM_STAT:
3472             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3473             if (err)
3474                 return err;
3475             arg.buf = &dsarg;
3476             ret = get_errno(semctl(semid, semnum, cmd, arg));
3477             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3478             if (err)
3479                 return err;
3480             break;
3481 	case IPC_INFO:
3482 	case SEM_INFO:
3483             arg.__buf = &seminfo;
3484             ret = get_errno(semctl(semid, semnum, cmd, arg));
3485             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3486             if (err)
3487                 return err;
3488             break;
3489 	case IPC_RMID:
3490 	case GETPID:
3491 	case GETNCNT:
3492 	case GETZCNT:
3493             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3494             break;
3495     }
3496 
3497     return ret;
3498 }
3499 
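/*
 * Guest-side view of the SETVAL path handled above (a minimal sketch,
 * assuming the usual SysV semaphore API in the guest's libc):
 *
 *     union semun arg;
 *     arg.val = 1;
 *     semctl(semid, 0, SETVAL, arg);
 *
 * The union is passed by value, which is why do_ipc() dereferences the
 * pointer it receives before calling do_semctl(), and why the GETVAL/SETVAL
 * case needs the byte-swapping dance on 64-bit cross-endian configurations.
 */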
3500 struct target_sembuf {
3501     unsigned short sem_num;
3502     short sem_op;
3503     short sem_flg;
3504 };
3505 
3506 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3507                                              abi_ulong target_addr,
3508                                              unsigned nsops)
3509 {
3510     struct target_sembuf *target_sembuf;
3511     int i;
3512 
3513     target_sembuf = lock_user(VERIFY_READ, target_addr,
3514                               nsops*sizeof(struct target_sembuf), 1);
3515     if (!target_sembuf)
3516         return -TARGET_EFAULT;
3517 
3518     for(i=0; i<nsops; i++) {
3519         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3520         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3521         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3522     }
3523 
3524     unlock_user(target_sembuf, target_addr, 0);
3525 
3526     return 0;
3527 }
3528 
3529 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3530 {
3531     struct sembuf sops[nsops];
3532 
3533     if (target_to_host_sembuf(sops, ptr, nsops))
3534         return -TARGET_EFAULT;
3535 
3536     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3537 }
3538 
3539 struct target_msqid_ds
3540 {
3541     struct target_ipc_perm msg_perm;
3542     abi_ulong msg_stime;
3543 #if TARGET_ABI_BITS == 32
3544     abi_ulong __unused1;
3545 #endif
3546     abi_ulong msg_rtime;
3547 #if TARGET_ABI_BITS == 32
3548     abi_ulong __unused2;
3549 #endif
3550     abi_ulong msg_ctime;
3551 #if TARGET_ABI_BITS == 32
3552     abi_ulong __unused3;
3553 #endif
3554     abi_ulong __msg_cbytes;
3555     abi_ulong msg_qnum;
3556     abi_ulong msg_qbytes;
3557     abi_ulong msg_lspid;
3558     abi_ulong msg_lrpid;
3559     abi_ulong __unused4;
3560     abi_ulong __unused5;
3561 };
3562 
3563 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3564                                                abi_ulong target_addr)
3565 {
3566     struct target_msqid_ds *target_md;
3567 
3568     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3569         return -TARGET_EFAULT;
3570     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3571         return -TARGET_EFAULT;
3572     host_md->msg_stime = tswapal(target_md->msg_stime);
3573     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3574     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3575     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3576     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3577     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3578     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3579     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3580     unlock_user_struct(target_md, target_addr, 0);
3581     return 0;
3582 }
3583 
3584 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3585                                                struct msqid_ds *host_md)
3586 {
3587     struct target_msqid_ds *target_md;
3588 
3589     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3590         return -TARGET_EFAULT;
3591     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3592         return -TARGET_EFAULT;
3593     target_md->msg_stime = tswapal(host_md->msg_stime);
3594     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3595     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3596     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3597     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3598     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3599     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3600     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3601     unlock_user_struct(target_md, target_addr, 1);
3602     return 0;
3603 }
3604 
3605 struct target_msginfo {
3606     int msgpool;
3607     int msgmap;
3608     int msgmax;
3609     int msgmnb;
3610     int msgmni;
3611     int msgssz;
3612     int msgtql;
3613     unsigned short int msgseg;
3614 };
3615 
3616 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3617                                               struct msginfo *host_msginfo)
3618 {
3619     struct target_msginfo *target_msginfo;
3620     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3621         return -TARGET_EFAULT;
3622     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3623     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3624     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3625     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3626     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3627     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3628     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3629     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3630     unlock_user_struct(target_msginfo, target_addr, 1);
3631     return 0;
3632 }
3633 
3634 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3635 {
3636     struct msqid_ds dsarg;
3637     struct msginfo msginfo;
3638     abi_long ret = -TARGET_EINVAL;
3639 
3640     cmd &= 0xff;
3641 
3642     switch (cmd) {
3643     case IPC_STAT:
3644     case IPC_SET:
3645     case MSG_STAT:
3646         if (target_to_host_msqid_ds(&dsarg,ptr))
3647             return -TARGET_EFAULT;
3648         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3649         if (host_to_target_msqid_ds(ptr,&dsarg))
3650             return -TARGET_EFAULT;
3651         break;
3652     case IPC_RMID:
3653         ret = get_errno(msgctl(msgid, cmd, NULL));
3654         break;
3655     case IPC_INFO:
3656     case MSG_INFO:
3657         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3658         if (host_to_target_msginfo(ptr, &msginfo))
3659             return -TARGET_EFAULT;
3660         break;
3661     }
3662 
3663     return ret;
3664 }
3665 
3666 struct target_msgbuf {
3667     abi_long mtype;
3668     char	mtext[1];
3669 };
3670 
3671 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3672                                  ssize_t msgsz, int msgflg)
3673 {
3674     struct target_msgbuf *target_mb;
3675     struct msgbuf *host_mb;
3676     abi_long ret = 0;
3677 
3678     if (msgsz < 0) {
3679         return -TARGET_EINVAL;
3680     }
3681 
3682     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3683         return -TARGET_EFAULT;
3684     host_mb = g_try_malloc(msgsz + sizeof(long));
3685     if (!host_mb) {
3686         unlock_user_struct(target_mb, msgp, 0);
3687         return -TARGET_ENOMEM;
3688     }
3689     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3690     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3691     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3692     g_free(host_mb);
3693     unlock_user_struct(target_mb, msgp, 0);
3694 
3695     return ret;
3696 }
3697 
3698 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3699                                  ssize_t msgsz, abi_long msgtyp,
3700                                  int msgflg)
3701 {
3702     struct target_msgbuf *target_mb;
3703     char *target_mtext;
3704     struct msgbuf *host_mb;
3705     abi_long ret = 0;
3706 
3707     if (msgsz < 0) {
3708         return -TARGET_EINVAL;
3709     }
3710 
3711     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3712         return -TARGET_EFAULT;
3713 
3714     host_mb = g_try_malloc(msgsz + sizeof(long));
3715     if (!host_mb) {
3716         ret = -TARGET_ENOMEM;
3717         goto end;
3718     }
3719     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3720 
3721     if (ret > 0) {
3722         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3723         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3724         if (!target_mtext) {
3725             ret = -TARGET_EFAULT;
3726             goto end;
3727         }
3728         memcpy(target_mb->mtext, host_mb->mtext, ret);
3729         unlock_user(target_mtext, target_mtext_addr, ret);
3730     }
3731 
3732     target_mb->mtype = tswapal(host_mb->mtype);
3733 
3734 end:
3735     if (target_mb)
3736         unlock_user_struct(target_mb, msgp, 1);
3737     g_free(host_mb);
3738     return ret;
3739 }
3740 
3741 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3742                                                abi_ulong target_addr)
3743 {
3744     struct target_shmid_ds *target_sd;
3745 
3746     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3747         return -TARGET_EFAULT;
3748     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3749         return -TARGET_EFAULT;
3750     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3751     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3752     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3753     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3754     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3755     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3756     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3757     unlock_user_struct(target_sd, target_addr, 0);
3758     return 0;
3759 }
3760 
3761 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3762                                                struct shmid_ds *host_sd)
3763 {
3764     struct target_shmid_ds *target_sd;
3765 
3766     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3767         return -TARGET_EFAULT;
3768     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3769         return -TARGET_EFAULT;
3770     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3771     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3772     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3773     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3774     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3775     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3776     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3777     unlock_user_struct(target_sd, target_addr, 1);
3778     return 0;
3779 }
3780 
3781 struct  target_shminfo {
3782     abi_ulong shmmax;
3783     abi_ulong shmmin;
3784     abi_ulong shmmni;
3785     abi_ulong shmseg;
3786     abi_ulong shmall;
3787 };
3788 
3789 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3790                                               struct shminfo *host_shminfo)
3791 {
3792     struct target_shminfo *target_shminfo;
3793     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3794         return -TARGET_EFAULT;
3795     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3796     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3797     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3798     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3799     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3800     unlock_user_struct(target_shminfo, target_addr, 1);
3801     return 0;
3802 }
3803 
3804 struct target_shm_info {
3805     int used_ids;
3806     abi_ulong shm_tot;
3807     abi_ulong shm_rss;
3808     abi_ulong shm_swp;
3809     abi_ulong swap_attempts;
3810     abi_ulong swap_successes;
3811 };
3812 
3813 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3814                                                struct shm_info *host_shm_info)
3815 {
3816     struct target_shm_info *target_shm_info;
3817     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3818         return -TARGET_EFAULT;
3819     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3820     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3821     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3822     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3823     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3824     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3825     unlock_user_struct(target_shm_info, target_addr, 1);
3826     return 0;
3827 }
3828 
3829 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3830 {
3831     struct shmid_ds dsarg;
3832     struct shminfo shminfo;
3833     struct shm_info shm_info;
3834     abi_long ret = -TARGET_EINVAL;
3835 
3836     cmd &= 0xff;
3837 
3838     switch(cmd) {
3839     case IPC_STAT:
3840     case IPC_SET:
3841     case SHM_STAT:
3842         if (target_to_host_shmid_ds(&dsarg, buf))
3843             return -TARGET_EFAULT;
3844         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3845         if (host_to_target_shmid_ds(buf, &dsarg))
3846             return -TARGET_EFAULT;
3847         break;
3848     case IPC_INFO:
3849         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3850         if (host_to_target_shminfo(buf, &shminfo))
3851             return -TARGET_EFAULT;
3852         break;
3853     case SHM_INFO:
3854         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3855         if (host_to_target_shm_info(buf, &shm_info))
3856             return -TARGET_EFAULT;
3857         break;
3858     case IPC_RMID:
3859     case SHM_LOCK:
3860     case SHM_UNLOCK:
3861         ret = get_errno(shmctl(shmid, cmd, NULL));
3862         break;
3863     }
3864 
3865     return ret;
3866 }
3867 
3868 #ifndef TARGET_FORCE_SHMLBA
3869 /* For most architectures, SHMLBA is the same as the page size;
3870  * some architectures have larger values, in which case they should
3871  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3872  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3873  * and defining its own value for SHMLBA.
3874  *
3875  * The kernel also permits SHMLBA to be set by the architecture to a
3876  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3877  * this means that addresses are rounded to the large size if
3878  * SHM_RND is set but addresses not aligned to that size are not rejected
3879  * as long as they are at least page-aligned. Since the only architecture
3880  * which uses this is ia64 this code doesn't provide for that oddity.
3881  */
3882 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3883 {
3884     return TARGET_PAGE_SIZE;
3885 }
3886 #endif
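/*
 * Hypothetical example (no real target is implied): an architecture whose
 * kernel defines __ARCH_FORCE_SHMLBA with a 16 KiB SHMLBA would instead
 * define TARGET_FORCE_SHMLBA in its target headers and provide something
 * like:
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 0x4000;    /* shmat() addresses rounded/checked to 16 KiB */
 *     }
 */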
3887 
3888 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3889                                  int shmid, abi_ulong shmaddr, int shmflg)
3890 {
3891     abi_long raddr;
3892     void *host_raddr;
3893     struct shmid_ds shm_info;
3894     int i, ret;
3895     abi_ulong shmlba;
3896 
3897     /* find out the length of the shared memory segment */
3898     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3899     if (is_error(ret)) {
3900         /* can't get length, bail out */
3901         return ret;
3902     }
3903 
3904     shmlba = target_shmlba(cpu_env);
3905 
3906     if (shmaddr & (shmlba - 1)) {
3907         if (shmflg & SHM_RND) {
3908             shmaddr &= ~(shmlba - 1);
3909         } else {
3910             return -TARGET_EINVAL;
3911         }
3912     }
3913     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3914         return -TARGET_EINVAL;
3915     }
3916 
3917     mmap_lock();
3918 
3919     if (shmaddr)
3920         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3921     else {
3922         abi_ulong mmap_start;
3923 
3924         /* In order to use the host shmat, we need to honor host SHMLBA.  */
3925         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
3926 
3927         if (mmap_start == -1) {
3928             errno = ENOMEM;
3929             host_raddr = (void *)-1;
3930         } else
3931             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3932     }
3933 
3934     if (host_raddr == (void *)-1) {
3935         mmap_unlock();
3936         return get_errno((long)host_raddr);
3937     }
3938     raddr = h2g((unsigned long)host_raddr);
3939 
3940     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3941                    PAGE_VALID | PAGE_READ |
3942                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3943 
3944     for (i = 0; i < N_SHM_REGIONS; i++) {
3945         if (!shm_regions[i].in_use) {
3946             shm_regions[i].in_use = true;
3947             shm_regions[i].start = raddr;
3948             shm_regions[i].size = shm_info.shm_segsz;
3949             break;
3950         }
3951     }
3952 
3953     mmap_unlock();
3954     return raddr;
3955 
3956 }
3957 
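/*
 * do_shmat() records each successful attach in shm_regions[] (start address
 * and segment size) so that do_shmdt() below can clear the corresponding
 * guest page flags again; the host shmdt() itself only needs the address.
 */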
3958 static inline abi_long do_shmdt(abi_ulong shmaddr)
3959 {
3960     int i;
3961     abi_long rv;
3962 
3963     mmap_lock();
3964 
3965     for (i = 0; i < N_SHM_REGIONS; ++i) {
3966         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3967             shm_regions[i].in_use = false;
3968             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3969             break;
3970         }
3971     }
3972     rv = get_errno(shmdt(g2h(shmaddr)));
3973 
3974     mmap_unlock();
3975 
3976     return rv;
3977 }
3978 
3979 #ifdef TARGET_NR_ipc
3980 /* ??? This only works with linear mappings.  */
3981 /* do_ipc() must return target values and target errnos. */
3982 static abi_long do_ipc(CPUArchState *cpu_env,
3983                        unsigned int call, abi_long first,
3984                        abi_long second, abi_long third,
3985                        abi_long ptr, abi_long fifth)
3986 {
3987     int version;
3988     abi_long ret = 0;
3989 
3990     version = call >> 16;
3991     call &= 0xffff;
3992 
3993     switch (call) {
3994     case IPCOP_semop:
3995         ret = do_semop(first, ptr, second);
3996         break;
3997 
3998     case IPCOP_semget:
3999         ret = get_errno(semget(first, second, third));
4000         break;
4001 
4002     case IPCOP_semctl: {
4003         /* The semun argument to semctl is passed by value, so dereference the
4004          * ptr argument. */
4005         abi_ulong atptr;
4006         get_user_ual(atptr, ptr);
4007         ret = do_semctl(first, second, third, atptr);
4008         break;
4009     }
4010 
4011     case IPCOP_msgget:
4012         ret = get_errno(msgget(first, second));
4013         break;
4014 
4015     case IPCOP_msgsnd:
4016         ret = do_msgsnd(first, ptr, second, third);
4017         break;
4018 
4019     case IPCOP_msgctl:
4020         ret = do_msgctl(first, second, ptr);
4021         break;
4022 
4023     case IPCOP_msgrcv:
4024         switch (version) {
4025         case 0:
4026             {
4027                 struct target_ipc_kludge {
4028                     abi_long msgp;
4029                     abi_long msgtyp;
4030                 } *tmp;
4031 
4032                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4033                     ret = -TARGET_EFAULT;
4034                     break;
4035                 }
4036 
4037                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4038 
4039                 unlock_user_struct(tmp, ptr, 0);
4040                 break;
4041             }
4042         default:
4043             ret = do_msgrcv(first, ptr, second, fifth, third);
4044         }
4045         break;
4046 
4047     case IPCOP_shmat:
4048         switch (version) {
4049         default:
4050         {
4051             abi_ulong raddr;
4052             raddr = do_shmat(cpu_env, first, ptr, second);
4053             if (is_error(raddr))
4054                 return get_errno(raddr);
4055             if (put_user_ual(raddr, third))
4056                 return -TARGET_EFAULT;
4057             break;
4058         }
4059         case 1:
4060             ret = -TARGET_EINVAL;
4061             break;
4062         }
4063         break;
4064     case IPCOP_shmdt:
4065         ret = do_shmdt(ptr);
4066         break;
4067 
4068     case IPCOP_shmget:
4069         /* IPC_* flag values are the same on all Linux platforms */
4070         ret = get_errno(shmget(first, second, third));
4071         break;
4072 
4073         /* IPC_* and SHM_* command values are the same on all Linux platforms */
4074     case IPCOP_shmctl:
4075         ret = do_shmctl(first, second, ptr);
4076         break;
4077     default:
4078         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4079         ret = -TARGET_ENOSYS;
4080         break;
4081     }
4082     return ret;
4083 }
4084 #endif
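/*
 * Note on the multiplexer above: the guest packs an ABI version into the
 * top 16 bits of the call number (version = call >> 16). Version 0 of
 * IPCOP_msgrcv passes msgp and msgtyp indirectly through the small
 * target_ipc_kludge structure, and for IPCOP_shmat the attach address is
 * written back to the guest through the "third" argument with put_user_ual().
 */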
4085 
4086 /* kernel structure types definitions */
4087 
4088 #define STRUCT(name, ...) STRUCT_ ## name,
4089 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4090 enum {
4091 #include "syscall_types.h"
4092 STRUCT_MAX
4093 };
4094 #undef STRUCT
4095 #undef STRUCT_SPECIAL
4096 
4097 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4098 #define STRUCT_SPECIAL(name)
4099 #include "syscall_types.h"
4100 #undef STRUCT
4101 #undef STRUCT_SPECIAL
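/*
 * The two expansions of syscall_types.h above generate, for every STRUCT()
 * entry, both a STRUCT_<name> enumerator and a matching struct_<name>_def[]
 * argtype description. A hypothetical entry such as
 *
 *     STRUCT(foo, TYPE_INT, TYPE_LONG)
 *
 * would therefore yield STRUCT_foo and
 * struct_foo_def[] = { TYPE_INT, TYPE_LONG, TYPE_NULL }, which the ioctl
 * thunking code below uses to convert guest structures field by field.
 */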
4102 
4103 typedef struct IOCTLEntry IOCTLEntry;
4104 
4105 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4106                              int fd, int cmd, abi_long arg);
4107 
4108 struct IOCTLEntry {
4109     int target_cmd;
4110     unsigned int host_cmd;
4111     const char *name;
4112     int access;
4113     do_ioctl_fn *do_ioctl;
4114     const argtype arg_type[5];
4115 };
4116 
4117 #define IOC_R 0x0001
4118 #define IOC_W 0x0002
4119 #define IOC_RW (IOC_R | IOC_W)
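/*
 * The IOCTLEntry table itself is generated further down in this file from
 * ioctls.h; plain entries go through generic thunk-based conversion driven
 * by arg_type[], while commands that need custom handling (such as the
 * fiemap, ifconf and device-mapper handlers below) are wired in through the
 * do_ioctl callback.
 */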
4120 
4121 #define MAX_STRUCT_SIZE 4096
4122 
4123 #ifdef CONFIG_FIEMAP
4124 /* So fiemap access checks don't overflow on 32 bit systems.
4125  * This is very slightly smaller than the limit imposed by
4126  * the underlying kernel.
4127  */
4128 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4129                             / sizeof(struct fiemap_extent))
4130 
4131 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4132                                        int fd, int cmd, abi_long arg)
4133 {
4134     /* The parameter for this ioctl is a struct fiemap followed
4135      * by an array of struct fiemap_extent whose size is set
4136      * in fiemap->fm_extent_count. The array is filled in by the
4137      * ioctl.
4138      */
4139     int target_size_in, target_size_out;
4140     struct fiemap *fm;
4141     const argtype *arg_type = ie->arg_type;
4142     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4143     void *argptr, *p;
4144     abi_long ret;
4145     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4146     uint32_t outbufsz;
4147     int free_fm = 0;
4148 
4149     assert(arg_type[0] == TYPE_PTR);
4150     assert(ie->access == IOC_RW);
4151     arg_type++;
4152     target_size_in = thunk_type_size(arg_type, 0);
4153     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4154     if (!argptr) {
4155         return -TARGET_EFAULT;
4156     }
4157     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4158     unlock_user(argptr, arg, 0);
4159     fm = (struct fiemap *)buf_temp;
4160     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4161         return -TARGET_EINVAL;
4162     }
4163 
4164     outbufsz = sizeof (*fm) +
4165         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4166 
4167     if (outbufsz > MAX_STRUCT_SIZE) {
4168         /* We can't fit all the extents into the fixed size buffer.
4169          * Allocate one that is large enough and use it instead.
4170          */
4171         fm = g_try_malloc(outbufsz);
4172         if (!fm) {
4173             return -TARGET_ENOMEM;
4174         }
4175         memcpy(fm, buf_temp, sizeof(struct fiemap));
4176         free_fm = 1;
4177     }
4178     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4179     if (!is_error(ret)) {
4180         target_size_out = target_size_in;
4181         /* An extent_count of 0 means we were only counting the extents
4182          * so there are no structs to copy
4183          */
4184         if (fm->fm_extent_count != 0) {
4185             target_size_out += fm->fm_mapped_extents * extent_size;
4186         }
4187         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4188         if (!argptr) {
4189             ret = -TARGET_EFAULT;
4190         } else {
4191             /* Convert the struct fiemap */
4192             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4193             if (fm->fm_extent_count != 0) {
4194                 p = argptr + target_size_in;
4195                 /* ...and then all the struct fiemap_extents */
4196                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4197                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4198                                   THUNK_TARGET);
4199                     p += extent_size;
4200                 }
4201             }
4202             unlock_user(argptr, arg, target_size_out);
4203         }
4204     }
4205     if (free_fm) {
4206         g_free(fm);
4207     }
4208     return ret;
4209 }
4210 #endif
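/*
 * The pattern used by do_ioctl_fs_ioc_fiemap() above -- convert the fixed
 * header with the thunk machinery, size the variable-length tail from a
 * count in that header, and fall back to a heap allocation when buf_temp
 * (MAX_STRUCT_SIZE bytes) is too small -- recurs in the ifconf and
 * device-mapper handlers that follow.
 */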
4211 
4212 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4213                                 int fd, int cmd, abi_long arg)
4214 {
4215     const argtype *arg_type = ie->arg_type;
4216     int target_size;
4217     void *argptr;
4218     int ret;
4219     struct ifconf *host_ifconf;
4220     uint32_t outbufsz;
4221     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4222     int target_ifreq_size;
4223     int nb_ifreq;
4224     int free_buf = 0;
4225     int i;
4226     int target_ifc_len;
4227     abi_long target_ifc_buf;
4228     int host_ifc_len;
4229     char *host_ifc_buf;
4230 
4231     assert(arg_type[0] == TYPE_PTR);
4232     assert(ie->access == IOC_RW);
4233 
4234     arg_type++;
4235     target_size = thunk_type_size(arg_type, 0);
4236 
4237     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4238     if (!argptr)
4239         return -TARGET_EFAULT;
4240     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4241     unlock_user(argptr, arg, 0);
4242 
4243     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4244     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4245     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4246 
4247     if (target_ifc_buf != 0) {
4248         target_ifc_len = host_ifconf->ifc_len;
4249         nb_ifreq = target_ifc_len / target_ifreq_size;
4250         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4251 
4252         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4253         if (outbufsz > MAX_STRUCT_SIZE) {
4254             /*
4255              * We can't fit all the ifreq entries into the fixed size buffer.
4256              * Allocate one that is large enough and use it instead.
4257              */
4258             host_ifconf = malloc(outbufsz);
4259             if (!host_ifconf) {
4260                 return -TARGET_ENOMEM;
4261             }
4262             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4263             free_buf = 1;
4264         }
4265         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4266 
4267         host_ifconf->ifc_len = host_ifc_len;
4268     } else {
4269       host_ifc_buf = NULL;
4270     }
4271     host_ifconf->ifc_buf = host_ifc_buf;
4272 
4273     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4274     if (!is_error(ret)) {
4275         /* convert host ifc_len to target ifc_len */
4276 
4277         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4278         target_ifc_len = nb_ifreq * target_ifreq_size;
4279         host_ifconf->ifc_len = target_ifc_len;
4280 
4281         /* restore target ifc_buf */
4282 
4283         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4284 
4285         /* copy struct ifconf to target user */
4286 
4287         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4288         if (!argptr)
4289             return -TARGET_EFAULT;
4290         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4291         unlock_user(argptr, arg, target_size);
4292 
4293         if (target_ifc_buf != 0) {
4294             /* copy ifreq[] to target user */
4295             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4296             for (i = 0; i < nb_ifreq ; i++) {
4297                 thunk_convert(argptr + i * target_ifreq_size,
4298                               host_ifc_buf + i * sizeof(struct ifreq),
4299                               ifreq_arg_type, THUNK_TARGET);
4300             }
4301             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4302         }
4303     }
4304 
4305     if (free_buf) {
4306         free(host_ifconf);
4307     }
4308 
4309     return ret;
4310 }
4311 
4312 #if defined(CONFIG_USBFS)
4313 #if HOST_LONG_BITS > 64
4314 #error USBDEVFS thunks do not support >64 bit hosts yet.
4315 #endif
4316 struct live_urb {
4317     uint64_t target_urb_adr;
4318     uint64_t target_buf_adr;
4319     char *target_buf_ptr;
4320     struct usbdevfs_urb host_urb;
4321 };
4322 
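/*
 * Lifecycle of a live_urb: do_ioctl_usbdevfs_submiturb() allocates one,
 * converts the guest URB into host_urb, locks the transfer buffer and
 * inserts the entry into the hash table below; USBDEVFS_DISCARDURB looks it
 * up again by target address, and USBDEVFS_REAPURB recovers it from the
 * host urb pointer returned by the kernel (via offsetof arithmetic),
 * unlocks the buffer and copies the results back to the guest.
 */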
4323 static GHashTable *usbdevfs_urb_hashtable(void)
4324 {
4325     static GHashTable *urb_hashtable;
4326 
4327     if (!urb_hashtable) {
4328         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4329     }
4330     return urb_hashtable;
4331 }
4332 
4333 static void urb_hashtable_insert(struct live_urb *urb)
4334 {
4335     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4336     g_hash_table_insert(urb_hashtable, urb, urb);
4337 }
4338 
4339 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4340 {
4341     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4342     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4343 }
4344 
4345 static void urb_hashtable_remove(struct live_urb *urb)
4346 {
4347     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4348     g_hash_table_remove(urb_hashtable, urb);
4349 }
4350 
4351 static abi_long
4352 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4353                           int fd, int cmd, abi_long arg)
4354 {
4355     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4356     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4357     struct live_urb *lurb;
4358     void *argptr;
4359     uint64_t hurb;
4360     int target_size;
4361     uintptr_t target_urb_adr;
4362     abi_long ret;
4363 
4364     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4365 
4366     memset(buf_temp, 0, sizeof(uint64_t));
4367     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4368     if (is_error(ret)) {
4369         return ret;
4370     }
4371 
4372     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4373     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4374     if (!lurb->target_urb_adr) {
4375         return -TARGET_EFAULT;
4376     }
4377     urb_hashtable_remove(lurb);
4378     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4379         lurb->host_urb.buffer_length);
4380     lurb->target_buf_ptr = NULL;
4381 
4382     /* restore the guest buffer pointer */
4383     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4384 
4385     /* update the guest urb struct */
4386     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4387     if (!argptr) {
4388         g_free(lurb);
4389         return -TARGET_EFAULT;
4390     }
4391     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4392     unlock_user(argptr, lurb->target_urb_adr, target_size);
4393 
4394     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4395     /* write back the urb handle */
4396     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4397     if (!argptr) {
4398         g_free(lurb);
4399         return -TARGET_EFAULT;
4400     }
4401 
4402     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4403     target_urb_adr = lurb->target_urb_adr;
4404     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4405     unlock_user(argptr, arg, target_size);
4406 
4407     g_free(lurb);
4408     return ret;
4409 }
4410 
4411 static abi_long
4412 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4413                              uint8_t *buf_temp __attribute__((unused)),
4414                              int fd, int cmd, abi_long arg)
4415 {
4416     struct live_urb *lurb;
4417 
4418     /* map target address back to host URB with metadata. */
4419     lurb = urb_hashtable_lookup(arg);
4420     if (!lurb) {
4421         return -TARGET_EFAULT;
4422     }
4423     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4424 }
4425 
4426 static abi_long
4427 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4428                             int fd, int cmd, abi_long arg)
4429 {
4430     const argtype *arg_type = ie->arg_type;
4431     int target_size;
4432     abi_long ret;
4433     void *argptr;
4434     int rw_dir;
4435     struct live_urb *lurb;
4436 
4437     /*
4438      * each submitted URB needs to map to a unique ID for the
4439      * kernel, and that unique ID needs to be a pointer to
4440      * host memory.  hence, we need to malloc for each URB.
4441      * isochronous transfers have a variable length struct.
4442      */
4443     arg_type++;
4444     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4445 
4446     /* construct host copy of urb and metadata */
4447     lurb = g_try_malloc0(sizeof(struct live_urb));
4448     if (!lurb) {
4449         return -TARGET_ENOMEM;
4450     }
4451 
4452     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4453     if (!argptr) {
4454         g_free(lurb);
4455         return -TARGET_EFAULT;
4456     }
4457     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4458     unlock_user(argptr, arg, 0);
4459 
4460     lurb->target_urb_adr = arg;
4461     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4462 
4463     /* buffer space used depends on endpoint type so lock the entire buffer */
4464     /* control type urbs should check the buffer contents for true direction */
4465     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4466     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4467         lurb->host_urb.buffer_length, 1);
4468     if (lurb->target_buf_ptr == NULL) {
4469         g_free(lurb);
4470         return -TARGET_EFAULT;
4471     }
4472 
4473     /* update buffer pointer in host copy */
4474     lurb->host_urb.buffer = lurb->target_buf_ptr;
4475 
4476     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4477     if (is_error(ret)) {
4478         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4479         g_free(lurb);
4480     } else {
4481         urb_hashtable_insert(lurb);
4482     }
4483 
4484     return ret;
4485 }
4486 #endif /* CONFIG_USBFS */
4487 
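/*
 * Device-mapper ioctls carry a struct dm_ioctl header followed by a
 * command-specific, variable-sized payload at data_start. The handler below
 * converts the header with the generic thunk code but has to marshal the
 * payload by hand in both directions, since its layout (target specs, name
 * lists, version lists) depends on the individual DM command.
 */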
4488 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4489                             int cmd, abi_long arg)
4490 {
4491     void *argptr;
4492     struct dm_ioctl *host_dm;
4493     abi_long guest_data;
4494     uint32_t guest_data_size;
4495     int target_size;
4496     const argtype *arg_type = ie->arg_type;
4497     abi_long ret;
4498     void *big_buf = NULL;
4499     char *host_data;
4500 
4501     arg_type++;
4502     target_size = thunk_type_size(arg_type, 0);
4503     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4504     if (!argptr) {
4505         ret = -TARGET_EFAULT;
4506         goto out;
4507     }
4508     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4509     unlock_user(argptr, arg, 0);
4510 
4511     /* buf_temp is too small, so fetch things into a bigger buffer */
4512     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4513     memcpy(big_buf, buf_temp, target_size);
4514     buf_temp = big_buf;
4515     host_dm = big_buf;
4516 
4517     guest_data = arg + host_dm->data_start;
4518     if ((guest_data - arg) < 0) {
4519         ret = -TARGET_EINVAL;
4520         goto out;
4521     }
4522     guest_data_size = host_dm->data_size - host_dm->data_start;
4523     host_data = (char*)host_dm + host_dm->data_start;
4524 
4525     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4526     if (!argptr) {
4527         ret = -TARGET_EFAULT;
4528         goto out;
4529     }
4530 
4531     switch (ie->host_cmd) {
4532     case DM_REMOVE_ALL:
4533     case DM_LIST_DEVICES:
4534     case DM_DEV_CREATE:
4535     case DM_DEV_REMOVE:
4536     case DM_DEV_SUSPEND:
4537     case DM_DEV_STATUS:
4538     case DM_DEV_WAIT:
4539     case DM_TABLE_STATUS:
4540     case DM_TABLE_CLEAR:
4541     case DM_TABLE_DEPS:
4542     case DM_LIST_VERSIONS:
4543         /* no input data */
4544         break;
4545     case DM_DEV_RENAME:
4546     case DM_DEV_SET_GEOMETRY:
4547         /* data contains only strings */
4548         memcpy(host_data, argptr, guest_data_size);
4549         break;
4550     case DM_TARGET_MSG:
4551         memcpy(host_data, argptr, guest_data_size);
4552         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4553         break;
4554     case DM_TABLE_LOAD:
4555     {
4556         void *gspec = argptr;
4557         void *cur_data = host_data;
4558         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4559         int spec_size = thunk_type_size(arg_type, 0);
4560         int i;
4561 
4562         for (i = 0; i < host_dm->target_count; i++) {
4563             struct dm_target_spec *spec = cur_data;
4564             uint32_t next;
4565             int slen;
4566 
4567             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4568             slen = strlen((char*)gspec + spec_size) + 1;
4569             next = spec->next;
4570             spec->next = sizeof(*spec) + slen;
4571             strcpy((char*)&spec[1], gspec + spec_size);
4572             gspec += next;
4573             cur_data += spec->next;
4574         }
4575         break;
4576     }
4577     default:
4578         ret = -TARGET_EINVAL;
4579         unlock_user(argptr, guest_data, 0);
4580         goto out;
4581     }
4582     unlock_user(argptr, guest_data, 0);
4583 
4584     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4585     if (!is_error(ret)) {
4586         guest_data = arg + host_dm->data_start;
4587         guest_data_size = host_dm->data_size - host_dm->data_start;
4588         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4589         switch (ie->host_cmd) {
4590         case DM_REMOVE_ALL:
4591         case DM_DEV_CREATE:
4592         case DM_DEV_REMOVE:
4593         case DM_DEV_RENAME:
4594         case DM_DEV_SUSPEND:
4595         case DM_DEV_STATUS:
4596         case DM_TABLE_LOAD:
4597         case DM_TABLE_CLEAR:
4598         case DM_TARGET_MSG:
4599         case DM_DEV_SET_GEOMETRY:
4600             /* no return data */
4601             break;
4602         case DM_LIST_DEVICES:
4603         {
4604             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4605             uint32_t remaining_data = guest_data_size;
4606             void *cur_data = argptr;
4607             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4608             int nl_size = 12; /* can't use thunk_size due to alignment */
4609 
4610             while (1) {
4611                 uint32_t next = nl->next;
4612                 if (next) {
4613                     nl->next = nl_size + (strlen(nl->name) + 1);
4614                 }
4615                 if (remaining_data < nl->next) {
4616                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4617                     break;
4618                 }
4619                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4620                 strcpy(cur_data + nl_size, nl->name);
4621                 cur_data += nl->next;
4622                 remaining_data -= nl->next;
4623                 if (!next) {
4624                     break;
4625                 }
4626                 nl = (void*)nl + next;
4627             }
4628             break;
4629         }
4630         case DM_DEV_WAIT:
4631         case DM_TABLE_STATUS:
4632         {
4633             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4634             void *cur_data = argptr;
4635             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4636             int spec_size = thunk_type_size(arg_type, 0);
4637             int i;
4638 
4639             for (i = 0; i < host_dm->target_count; i++) {
4640                 uint32_t next = spec->next;
4641                 int slen = strlen((char*)&spec[1]) + 1;
4642                 spec->next = (cur_data - argptr) + spec_size + slen;
4643                 if (guest_data_size < spec->next) {
4644                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4645                     break;
4646                 }
4647                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4648                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4649                 cur_data = argptr + spec->next;
4650                 spec = (void*)host_dm + host_dm->data_start + next;
4651             }
4652             break;
4653         }
4654         case DM_TABLE_DEPS:
4655         {
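                 /* dm_target_deps is converted by hand: a 32-bit count
                  * followed, 8 bytes in, by an array of 64-bit device numbers.
                  */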
4656             void *hdata = (void*)host_dm + host_dm->data_start;
4657             int count = *(uint32_t*)hdata;
4658             uint64_t *hdev = hdata + 8;
4659             uint64_t *gdev = argptr + 8;
4660             int i;
4661 
4662             *(uint32_t*)argptr = tswap32(count);
4663             for (i = 0; i < count; i++) {
4664                 *gdev = tswap64(*hdev);
4665                 gdev++;
4666                 hdev++;
4667             }
4668             break;
4669         }
4670         case DM_LIST_VERSIONS:
4671         {
4672             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4673             uint32_t remaining_data = guest_data_size;
4674             void *cur_data = argptr;
4675             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4676             int vers_size = thunk_type_size(arg_type, 0);
4677 
4678             while (1) {
4679                 uint32_t next = vers->next;
4680                 if (next) {
4681                     vers->next = vers_size + (strlen(vers->name) + 1);
4682                 }
4683                 if (remaining_data < vers->next) {
4684                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4685                     break;
4686                 }
4687                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4688                 strcpy(cur_data + vers_size, vers->name);
4689                 cur_data += vers->next;
4690                 remaining_data -= vers->next;
4691                 if (!next) {
4692                     break;
4693                 }
4694                 vers = (void*)vers + next;
4695             }
4696             break;
4697         }
4698         default:
4699             unlock_user(argptr, guest_data, 0);
4700             ret = -TARGET_EINVAL;
4701             goto out;
4702         }
4703         unlock_user(argptr, guest_data, guest_data_size);
4704 
4705         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4706         if (!argptr) {
4707             ret = -TARGET_EFAULT;
4708             goto out;
4709         }
4710         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4711         unlock_user(argptr, arg, target_size);
4712     }
4713 out:
4714     g_free(big_buf);
4715     return ret;
4716 }
4717 
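     /* BLKPG: the ioctl argument is a struct blkpg_ioctl_arg whose 'data'
      * member points at a struct blkpg_partition.  Convert both levels from
      * the guest, repoint 'data' at our local host copy, and only then issue
      * the host ioctl.
      */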
4718 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4719                                int cmd, abi_long arg)
4720 {
4721     void *argptr;
4722     int target_size;
4723     const argtype *arg_type = ie->arg_type;
4724     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4725     abi_long ret;
4726 
4727     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4728     struct blkpg_partition host_part;
4729 
4730     /* Read and convert blkpg */
4731     arg_type++;
4732     target_size = thunk_type_size(arg_type, 0);
4733     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4734     if (!argptr) {
4735         ret = -TARGET_EFAULT;
4736         goto out;
4737     }
4738     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4739     unlock_user(argptr, arg, 0);
4740 
4741     switch (host_blkpg->op) {
4742     case BLKPG_ADD_PARTITION:
4743     case BLKPG_DEL_PARTITION:
4744         /* payload is struct blkpg_partition */
4745         break;
4746     default:
4747         /* Unknown opcode */
4748         ret = -TARGET_EINVAL;
4749         goto out;
4750     }
4751 
4752     /* Read and convert blkpg->data */
4753     arg = (abi_long)(uintptr_t)host_blkpg->data;
4754     target_size = thunk_type_size(part_arg_type, 0);
4755     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4756     if (!argptr) {
4757         ret = -TARGET_EFAULT;
4758         goto out;
4759     }
4760     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4761     unlock_user(argptr, arg, 0);
4762 
4763     /* Swizzle the data pointer to our local copy and call! */
4764     host_blkpg->data = &host_part;
4765     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4766 
4767 out:
4768     return ret;
4769 }
4770 
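     /* SIOCADDRT/SIOCDELRT: struct rtentry embeds an rt_dev string pointer,
      * so the generic struct conversion cannot be used.  Convert the struct
      * field by field and, when we reach rt_dev, lock the guest string and
      * substitute the host pointer.
      */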
4771 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4772                                 int fd, int cmd, abi_long arg)
4773 {
4774     const argtype *arg_type = ie->arg_type;
4775     const StructEntry *se;
4776     const argtype *field_types;
4777     const int *dst_offsets, *src_offsets;
4778     int target_size;
4779     void *argptr;
4780     abi_ulong *target_rt_dev_ptr = NULL;
4781     unsigned long *host_rt_dev_ptr = NULL;
4782     abi_long ret;
4783     int i;
4784 
4785     assert(ie->access == IOC_W);
4786     assert(*arg_type == TYPE_PTR);
4787     arg_type++;
4788     assert(*arg_type == TYPE_STRUCT);
4789     target_size = thunk_type_size(arg_type, 0);
4790     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4791     if (!argptr) {
4792         return -TARGET_EFAULT;
4793     }
4794     arg_type++;
4795     assert(*arg_type == (int)STRUCT_rtentry);
4796     se = struct_entries + *arg_type++;
4797     assert(se->convert[0] == NULL);
4798     /* convert struct here to be able to catch rt_dev string */
4799     field_types = se->field_types;
4800     dst_offsets = se->field_offsets[THUNK_HOST];
4801     src_offsets = se->field_offsets[THUNK_TARGET];
4802     for (i = 0; i < se->nb_fields; i++) {
4803         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4804             assert(*field_types == TYPE_PTRVOID);
4805             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4806             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4807             if (*target_rt_dev_ptr != 0) {
4808                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4809                                                   tswapal(*target_rt_dev_ptr));
4810                 if (!*host_rt_dev_ptr) {
4811                     unlock_user(argptr, arg, 0);
4812                     return -TARGET_EFAULT;
4813                 }
4814             } else {
4815                 *host_rt_dev_ptr = 0;
4816             }
4817             field_types++;
4818             continue;
4819         }
4820         field_types = thunk_convert(buf_temp + dst_offsets[i],
4821                                     argptr + src_offsets[i],
4822                                     field_types, THUNK_HOST);
4823     }
4824     unlock_user(argptr, arg, 0);
4825 
4826     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4827 
4828     assert(host_rt_dev_ptr != NULL);
4829     assert(target_rt_dev_ptr != NULL);
4830     if (*host_rt_dev_ptr != 0) {
4831         unlock_user((void *)*host_rt_dev_ptr,
4832                     *target_rt_dev_ptr, 0);
4833     }
4834     return ret;
4835 }
4836 
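     /* KDSIGACCEPT takes a signal number, which must be remapped from the
      * target's numbering to the host's before the ioctl is issued.
      */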
4837 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4838                                      int fd, int cmd, abi_long arg)
4839 {
4840     int sig = target_to_host_signal(arg);
4841     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4842 }
4843 
4844 #ifdef TIOCGPTPEER
4845 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4846                                      int fd, int cmd, abi_long arg)
4847 {
4848     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4849     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4850 }
4851 #endif
4852 
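     /* The ioctl table is generated from ioctls.h: IOCTL() entries use the
      * generic thunk-based converter below, IOCTL_SPECIAL() entries supply
      * their own do_ioctl callback, and IOCTL_IGNORE() entries are recognized
      * but have no host counterpart.  A zero target_cmd terminates the table.
      */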
4853 static IOCTLEntry ioctl_entries[] = {
4854 #define IOCTL(cmd, access, ...) \
4855     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4856 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4857     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4858 #define IOCTL_IGNORE(cmd) \
4859     { TARGET_ ## cmd, 0, #cmd },
4860 #include "ioctls.h"
4861     { 0, 0, },
4862 };
4863 
4864 /* ??? Implement proper locking for ioctls.  */
4865 /* do_ioctl() must return target values and target errnos. */
4866 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4867 {
4868     const IOCTLEntry *ie;
4869     const argtype *arg_type;
4870     abi_long ret;
4871     uint8_t buf_temp[MAX_STRUCT_SIZE];
4872     int target_size;
4873     void *argptr;
4874 
4875     ie = ioctl_entries;
4876     for(;;) {
4877         if (ie->target_cmd == 0) {
4878             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4879             return -TARGET_ENOSYS;
4880         }
4881         if (ie->target_cmd == cmd)
4882             break;
4883         ie++;
4884     }
4885     arg_type = ie->arg_type;
4886     if (ie->do_ioctl) {
4887         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4888     } else if (!ie->host_cmd) {
4889         /* Some architectures define BSD ioctls in their headers
4890            that are not implemented in Linux.  */
4891         return -TARGET_ENOSYS;
4892     }
4893 
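         /* Generic conversion: scalar arguments are passed straight through,
          * while TYPE_PTR arguments are thunk-converted into buf_temp and/or
          * back out again depending on the IOC_R/IOC_W/IOC_RW access mode.
          */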
4894     switch(arg_type[0]) {
4895     case TYPE_NULL:
4896         /* no argument */
4897         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4898         break;
4899     case TYPE_PTRVOID:
4900     case TYPE_INT:
4901         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4902         break;
4903     case TYPE_PTR:
4904         arg_type++;
4905         target_size = thunk_type_size(arg_type, 0);
4906         switch(ie->access) {
4907         case IOC_R:
4908             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4909             if (!is_error(ret)) {
4910                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4911                 if (!argptr)
4912                     return -TARGET_EFAULT;
4913                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4914                 unlock_user(argptr, arg, target_size);
4915             }
4916             break;
4917         case IOC_W:
4918             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4919             if (!argptr)
4920                 return -TARGET_EFAULT;
4921             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4922             unlock_user(argptr, arg, 0);
4923             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4924             break;
4925         default:
4926         case IOC_RW:
4927             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4928             if (!argptr)
4929                 return -TARGET_EFAULT;
4930             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4931             unlock_user(argptr, arg, 0);
4932             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4933             if (!is_error(ret)) {
4934                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4935                 if (!argptr)
4936                     return -TARGET_EFAULT;
4937                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4938                 unlock_user(argptr, arg, target_size);
4939             }
4940             break;
4941         }
4942         break;
4943     default:
4944         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4945                  (long)cmd, arg_type[0]);
4946         ret = -TARGET_ENOSYS;
4947         break;
4948     }
4949     return ret;
4950 }
4951 
4952 static const bitmask_transtbl iflag_tbl[] = {
4953         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4954         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4955         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4956         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4957         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4958         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4959         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4960         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4961         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4962         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4963         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4964         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4965         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4966         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4967         { 0, 0, 0, 0 }
4968 };
4969 
4970 static const bitmask_transtbl oflag_tbl[] = {
4971 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4972 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4973 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4974 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4975 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4976 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4977 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4978 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4979 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4980 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4981 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4982 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4983 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4984 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4985 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4986 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4987 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4988 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4989 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4990 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4991 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4992 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4993 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4994 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4995 	{ 0, 0, 0, 0 }
4996 };
4997 
4998 static const bitmask_transtbl cflag_tbl[] = {
4999 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5000 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5001 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5002 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5003 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5004 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5005 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5006 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5007 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5008 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5009 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5010 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5011 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5012 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5013 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5014 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5015 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5016 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5017 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5018 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5019 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5020 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5021 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5022 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5023 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5024 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5025 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5026 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5027 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5028 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5029 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5030 	{ 0, 0, 0, 0 }
5031 };
5032 
5033 static const bitmask_transtbl lflag_tbl[] = {
5034 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5035 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5036 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5037 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5038 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5039 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5040 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5041 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5042 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5043 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5044 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5045 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5046 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5047 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5048 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5049 	{ 0, 0, 0, 0 }
5050 };
5051 
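     /* termios conversion: each flag word is translated bit by bit through
      * the tables above, and the c_cc control characters are copied
      * individually because the target and host VINTR/VQUIT/... indices can
      * differ.
      */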
5052 static void target_to_host_termios (void *dst, const void *src)
5053 {
5054     struct host_termios *host = dst;
5055     const struct target_termios *target = src;
5056 
5057     host->c_iflag =
5058         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5059     host->c_oflag =
5060         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5061     host->c_cflag =
5062         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5063     host->c_lflag =
5064         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5065     host->c_line = target->c_line;
5066 
5067     memset(host->c_cc, 0, sizeof(host->c_cc));
5068     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5069     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5070     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5071     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5072     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5073     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5074     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5075     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5076     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5077     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5078     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5079     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5080     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5081     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5082     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5083     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5084     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5085 }
5086 
5087 static void host_to_target_termios (void *dst, const void *src)
5088 {
5089     struct target_termios *target = dst;
5090     const struct host_termios *host = src;
5091 
5092     target->c_iflag =
5093         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5094     target->c_oflag =
5095         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5096     target->c_cflag =
5097         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5098     target->c_lflag =
5099         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5100     target->c_line = host->c_line;
5101 
5102     memset(target->c_cc, 0, sizeof(target->c_cc));
5103     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5104     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5105     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5106     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5107     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5108     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5109     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5110     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5111     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5112     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5113     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5114     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5115     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5116     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5117     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5118     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5119     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5120 }
5121 
5122 static const StructEntry struct_termios_def = {
5123     .convert = { host_to_target_termios, target_to_host_termios },
5124     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5125     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5126 };
5127 
5128 static bitmask_transtbl mmap_flags_tbl[] = {
5129     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5130     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5131     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5132     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5133       MAP_ANONYMOUS, MAP_ANONYMOUS },
5134     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5135       MAP_GROWSDOWN, MAP_GROWSDOWN },
5136     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5137       MAP_DENYWRITE, MAP_DENYWRITE },
5138     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5139       MAP_EXECUTABLE, MAP_EXECUTABLE },
5140     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5141     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5142       MAP_NORESERVE, MAP_NORESERVE },
5143     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5144     /* MAP_STACK had been ignored by the kernel for quite some time.
5145        Recognize it for the target insofar as we do not want to pass
5146        it through to the host.  */
5147     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5148     { 0, 0, 0, 0 }
5149 };
5150 
5151 #if defined(TARGET_I386)
5152 
5153 /* NOTE: there is really only one LDT, shared by all the threads */
5154 static uint8_t *ldt_table;
5155 
5156 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5157 {
5158     int size;
5159     void *p;
5160 
5161     if (!ldt_table)
5162         return 0;
5163     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5164     if (size > bytecount)
5165         size = bytecount;
5166     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5167     if (!p)
5168         return -TARGET_EFAULT;
5169     /* ??? Should this be byteswapped?  */
5170     memcpy(p, ldt_table, size);
5171     unlock_user(p, ptr, size);
5172     return size;
5173 }
5174 
5175 /* XXX: add locking support */
5176 static abi_long write_ldt(CPUX86State *env,
5177                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5178 {
5179     struct target_modify_ldt_ldt_s ldt_info;
5180     struct target_modify_ldt_ldt_s *target_ldt_info;
5181     int seg_32bit, contents, read_exec_only, limit_in_pages;
5182     int seg_not_present, useable, lm;
5183     uint32_t *lp, entry_1, entry_2;
5184 
5185     if (bytecount != sizeof(ldt_info))
5186         return -TARGET_EINVAL;
5187     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5188         return -TARGET_EFAULT;
5189     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5190     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5191     ldt_info.limit = tswap32(target_ldt_info->limit);
5192     ldt_info.flags = tswap32(target_ldt_info->flags);
5193     unlock_user_struct(target_ldt_info, ptr, 0);
5194 
5195     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5196         return -TARGET_EINVAL;
5197     seg_32bit = ldt_info.flags & 1;
5198     contents = (ldt_info.flags >> 1) & 3;
5199     read_exec_only = (ldt_info.flags >> 3) & 1;
5200     limit_in_pages = (ldt_info.flags >> 4) & 1;
5201     seg_not_present = (ldt_info.flags >> 5) & 1;
5202     useable = (ldt_info.flags >> 6) & 1;
5203 #ifdef TARGET_ABI32
5204     lm = 0;
5205 #else
5206     lm = (ldt_info.flags >> 7) & 1;
5207 #endif
5208     if (contents == 3) {
5209         if (oldmode)
5210             return -TARGET_EINVAL;
5211         if (seg_not_present == 0)
5212             return -TARGET_EINVAL;
5213     }
5214     /* allocate the LDT */
5215     if (!ldt_table) {
5216         env->ldt.base = target_mmap(0,
5217                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5218                                     PROT_READ|PROT_WRITE,
5219                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5220         if (env->ldt.base == -1)
5221             return -TARGET_ENOMEM;
5222         memset(g2h(env->ldt.base), 0,
5223                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5224         env->ldt.limit = 0xffff;
5225         ldt_table = g2h(env->ldt.base);
5226     }
5227 
5228     /* NOTE: same code as Linux kernel */
5229     /* Allow LDTs to be cleared by the user. */
5230     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5231         if (oldmode ||
5232             (contents == 0		&&
5233              read_exec_only == 1	&&
5234              seg_32bit == 0		&&
5235              limit_in_pages == 0	&&
5236              seg_not_present == 1	&&
5237              useable == 0 )) {
5238             entry_1 = 0;
5239             entry_2 = 0;
5240             goto install;
5241         }
5242     }
5243 
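         /* Pack the descriptor the same way the kernel does: entry_1 holds
          * the low 16 bits of base and limit, entry_2 packs the high base and
          * limit bits together with the type, present, 32-bit, granularity
          * and AVL flag bits.
          */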
5244     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5245         (ldt_info.limit & 0x0ffff);
5246     entry_2 = (ldt_info.base_addr & 0xff000000) |
5247         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5248         (ldt_info.limit & 0xf0000) |
5249         ((read_exec_only ^ 1) << 9) |
5250         (contents << 10) |
5251         ((seg_not_present ^ 1) << 15) |
5252         (seg_32bit << 22) |
5253         (limit_in_pages << 23) |
5254         (lm << 21) |
5255         0x7000;
5256     if (!oldmode)
5257         entry_2 |= (useable << 20);
5258 
5259     /* Install the new entry ...  */
5260 install:
5261     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5262     lp[0] = tswap32(entry_1);
5263     lp[1] = tswap32(entry_2);
5264     return 0;
5265 }
5266 
5267 /* specific and weird i386 syscalls */
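     /* modify_ldt: func 0 reads the LDT, func 1 writes an entry with the
      * legacy ("oldmode") semantics, and func 0x11 writes with the new-mode
      * semantics.
      */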
5268 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5269                               unsigned long bytecount)
5270 {
5271     abi_long ret;
5272 
5273     switch (func) {
5274     case 0:
5275         ret = read_ldt(ptr, bytecount);
5276         break;
5277     case 1:
5278         ret = write_ldt(env, ptr, bytecount, 1);
5279         break;
5280     case 0x11:
5281         ret = write_ldt(env, ptr, bytecount, 0);
5282         break;
5283     default:
5284         ret = -TARGET_ENOSYS;
5285         break;
5286     }
5287     return ret;
5288 }
5289 
5290 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5291 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5292 {
5293     uint64_t *gdt_table = g2h(env->gdt.base);
5294     struct target_modify_ldt_ldt_s ldt_info;
5295     struct target_modify_ldt_ldt_s *target_ldt_info;
5296     int seg_32bit, contents, read_exec_only, limit_in_pages;
5297     int seg_not_present, useable, lm;
5298     uint32_t *lp, entry_1, entry_2;
5299     int i;
5300 
5301     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5302     if (!target_ldt_info)
5303         return -TARGET_EFAULT;
5304     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5305     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5306     ldt_info.limit = tswap32(target_ldt_info->limit);
5307     ldt_info.flags = tswap32(target_ldt_info->flags);
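         /* An entry_number of -1 means "pick a free TLS slot in the GDT";
          * report the chosen slot back to the guest before unlocking.
          */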
5308     if (ldt_info.entry_number == -1) {
5309         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5310             if (gdt_table[i] == 0) {
5311                 ldt_info.entry_number = i;
5312                 target_ldt_info->entry_number = tswap32(i);
5313                 break;
5314             }
5315         }
5316     }
5317     unlock_user_struct(target_ldt_info, ptr, 1);
5318 
5319     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5320         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5321            return -TARGET_EINVAL;
5322     seg_32bit = ldt_info.flags & 1;
5323     contents = (ldt_info.flags >> 1) & 3;
5324     read_exec_only = (ldt_info.flags >> 3) & 1;
5325     limit_in_pages = (ldt_info.flags >> 4) & 1;
5326     seg_not_present = (ldt_info.flags >> 5) & 1;
5327     useable = (ldt_info.flags >> 6) & 1;
5328 #ifdef TARGET_ABI32
5329     lm = 0;
5330 #else
5331     lm = (ldt_info.flags >> 7) & 1;
5332 #endif
5333 
5334     if (contents == 3) {
5335         if (seg_not_present == 0)
5336             return -TARGET_EINVAL;
5337     }
5338 
5339     /* NOTE: same code as Linux kernel */
5340     /* Allow LDTs to be cleared by the user. */
5341     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5342         if ((contents == 0             &&
5343              read_exec_only == 1       &&
5344              seg_32bit == 0            &&
5345              limit_in_pages == 0       &&
5346              seg_not_present == 1      &&
5347              useable == 0 )) {
5348             entry_1 = 0;
5349             entry_2 = 0;
5350             goto install;
5351         }
5352     }
5353 
5354     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5355         (ldt_info.limit & 0x0ffff);
5356     entry_2 = (ldt_info.base_addr & 0xff000000) |
5357         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5358         (ldt_info.limit & 0xf0000) |
5359         ((read_exec_only ^ 1) << 9) |
5360         (contents << 10) |
5361         ((seg_not_present ^ 1) << 15) |
5362         (seg_32bit << 22) |
5363         (limit_in_pages << 23) |
5364         (useable << 20) |
5365         (lm << 21) |
5366         0x7000;
5367 
5368     /* Install the new entry ...  */
5369 install:
5370     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5371     lp[0] = tswap32(entry_1);
5372     lp[1] = tswap32(entry_2);
5373     return 0;
5374 }
5375 
5376 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5377 {
5378     struct target_modify_ldt_ldt_s *target_ldt_info;
5379     uint64_t *gdt_table = g2h(env->gdt.base);
5380     uint32_t base_addr, limit, flags;
5381     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5382     int seg_not_present, useable, lm;
5383     uint32_t *lp, entry_1, entry_2;
5384 
5385     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5386     if (!target_ldt_info)
5387         return -TARGET_EFAULT;
5388     idx = tswap32(target_ldt_info->entry_number);
5389     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5390         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5391         unlock_user_struct(target_ldt_info, ptr, 1);
5392         return -TARGET_EINVAL;
5393     }
5394     lp = (uint32_t *)(gdt_table + idx);
5395     entry_1 = tswap32(lp[0]);
5396     entry_2 = tswap32(lp[1]);
5397 
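         /* Reverse the descriptor encoding used by do_set_thread_area() so
          * the guest gets back the flags, limit and base it supplied.
          */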
5398     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5399     contents = (entry_2 >> 10) & 3;
5400     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5401     seg_32bit = (entry_2 >> 22) & 1;
5402     limit_in_pages = (entry_2 >> 23) & 1;
5403     useable = (entry_2 >> 20) & 1;
5404 #ifdef TARGET_ABI32
5405     lm = 0;
5406 #else
5407     lm = (entry_2 >> 21) & 1;
5408 #endif
5409     flags = (seg_32bit << 0) | (contents << 1) |
5410         (read_exec_only << 3) | (limit_in_pages << 4) |
5411         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5412     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5413     base_addr = (entry_1 >> 16) |
5414         (entry_2 & 0xff000000) |
5415         ((entry_2 & 0xff) << 16);
5416     target_ldt_info->base_addr = tswapal(base_addr);
5417     target_ldt_info->limit = tswap32(limit);
5418     target_ldt_info->flags = tswap32(flags);
5419     unlock_user_struct(target_ldt_info, ptr, 1);
5420     return 0;
5421 }
5422 #endif /* TARGET_I386 && TARGET_ABI32 */
5423 
5424 #ifndef TARGET_ABI32
5425 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5426 {
5427     abi_long ret = 0;
5428     abi_ulong val;
5429     int idx;
5430 
5431     switch(code) {
5432     case TARGET_ARCH_SET_GS:
5433     case TARGET_ARCH_SET_FS:
5434         if (code == TARGET_ARCH_SET_GS)
5435             idx = R_GS;
5436         else
5437             idx = R_FS;
5438         cpu_x86_load_seg(env, idx, 0);
5439         env->segs[idx].base = addr;
5440         break;
5441     case TARGET_ARCH_GET_GS:
5442     case TARGET_ARCH_GET_FS:
5443         if (code == TARGET_ARCH_GET_GS)
5444             idx = R_GS;
5445         else
5446             idx = R_FS;
5447         val = env->segs[idx].base;
5448         if (put_user(val, addr, abi_ulong))
5449             ret = -TARGET_EFAULT;
5450         break;
5451     default:
5452         ret = -TARGET_EINVAL;
5453         break;
5454     }
5455     return ret;
5456 }
5457 #endif
5458 
5459 #endif /* defined(TARGET_I386) */
5460 
5461 #define NEW_STACK_SIZE 0x40000
5462 
5463 
5464 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
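     /* State handed from do_fork() to the child thread; the mutex/cond pair
      * acts as a start barrier so the parent can pick up the child's TID
      * before returning to the guest.
      */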
5465 typedef struct {
5466     CPUArchState *env;
5467     pthread_mutex_t mutex;
5468     pthread_cond_t cond;
5469     pthread_t thread;
5470     uint32_t tid;
5471     abi_ulong child_tidptr;
5472     abi_ulong parent_tidptr;
5473     sigset_t sigmask;
5474 } new_thread_info;
5475 
5476 static void *clone_func(void *arg)
5477 {
5478     new_thread_info *info = arg;
5479     CPUArchState *env;
5480     CPUState *cpu;
5481     TaskState *ts;
5482 
5483     rcu_register_thread();
5484     tcg_register_thread();
5485     env = info->env;
5486     cpu = env_cpu(env);
5487     thread_cpu = cpu;
5488     ts = (TaskState *)cpu->opaque;
5489     info->tid = sys_gettid();
5490     task_settid(ts);
5491     if (info->child_tidptr)
5492         put_user_u32(info->tid, info->child_tidptr);
5493     if (info->parent_tidptr)
5494         put_user_u32(info->tid, info->parent_tidptr);
5495     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5496     /* Enable signals.  */
5497     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5498     /* Signal to the parent that we're ready.  */
5499     pthread_mutex_lock(&info->mutex);
5500     pthread_cond_broadcast(&info->cond);
5501     pthread_mutex_unlock(&info->mutex);
5502     /* Wait until the parent has finished initializing the tls state.  */
5503     pthread_mutex_lock(&clone_lock);
5504     pthread_mutex_unlock(&clone_lock);
5505     cpu_loop(env);
5506     /* never exits */
5507     return NULL;
5508 }
5509 
5510 /* do_fork() must return host values and target errnos (unlike most
5511    do_*() functions). */
5512 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5513                    abi_ulong parent_tidptr, target_ulong newtls,
5514                    abi_ulong child_tidptr)
5515 {
5516     CPUState *cpu = env_cpu(env);
5517     int ret;
5518     TaskState *ts;
5519     CPUState *new_cpu;
5520     CPUArchState *new_env;
5521     sigset_t sigmask;
5522 
5523     flags &= ~CLONE_IGNORED_FLAGS;
5524 
5525     /* Emulate vfork() with fork() */
5526     if (flags & CLONE_VFORK)
5527         flags &= ~(CLONE_VFORK | CLONE_VM);
5528 
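         /* CLONE_VM means the guest wants a thread sharing its address space;
          * emulate that with a host pthread running its own copy of the CPU
          * state.  Anything else falls through to a plain fork() below.
          */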
5529     if (flags & CLONE_VM) {
5530         TaskState *parent_ts = (TaskState *)cpu->opaque;
5531         new_thread_info info;
5532         pthread_attr_t attr;
5533 
5534         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5535             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5536             return -TARGET_EINVAL;
5537         }
5538 
5539         ts = g_new0(TaskState, 1);
5540         init_task_state(ts);
5541 
5542         /* Grab a mutex so that thread setup appears atomic.  */
5543         pthread_mutex_lock(&clone_lock);
5544 
5545         /* we create a new CPU instance. */
5546         new_env = cpu_copy(env);
5547         /* Init regs that differ from the parent.  */
5548         cpu_clone_regs(new_env, newsp);
5549         new_cpu = env_cpu(new_env);
5550         new_cpu->opaque = ts;
5551         ts->bprm = parent_ts->bprm;
5552         ts->info = parent_ts->info;
5553         ts->signal_mask = parent_ts->signal_mask;
5554 
5555         if (flags & CLONE_CHILD_CLEARTID) {
5556             ts->child_tidptr = child_tidptr;
5557         }
5558 
5559         if (flags & CLONE_SETTLS) {
5560             cpu_set_tls (new_env, newtls);
5561         }
5562 
5563         memset(&info, 0, sizeof(info));
5564         pthread_mutex_init(&info.mutex, NULL);
5565         pthread_mutex_lock(&info.mutex);
5566         pthread_cond_init(&info.cond, NULL);
5567         info.env = new_env;
5568         if (flags & CLONE_CHILD_SETTID) {
5569             info.child_tidptr = child_tidptr;
5570         }
5571         if (flags & CLONE_PARENT_SETTID) {
5572             info.parent_tidptr = parent_tidptr;
5573         }
5574 
5575         ret = pthread_attr_init(&attr);
5576         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5577         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5578         /* It is not safe to deliver signals until the child has finished
5579            initializing, so temporarily block all signals.  */
5580         sigfillset(&sigmask);
5581         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5582         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5583 
5584         /* If this is our first additional thread, we need to ensure we
5585          * generate code for parallel execution and flush old translations.
5586          */
5587         if (!parallel_cpus) {
5588             parallel_cpus = true;
5589             tb_flush(cpu);
5590         }
5591 
5592         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5593         /* TODO: Free new CPU state if thread creation failed.  */
5594 
5595         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5596         pthread_attr_destroy(&attr);
5597         if (ret == 0) {
5598             /* Wait for the child to initialize.  */
5599             pthread_cond_wait(&info.cond, &info.mutex);
5600             ret = info.tid;
5601         } else {
5602             ret = -1;
5603         }
5604         pthread_mutex_unlock(&info.mutex);
5605         pthread_cond_destroy(&info.cond);
5606         pthread_mutex_destroy(&info.mutex);
5607         pthread_mutex_unlock(&clone_lock);
5608     } else {
5609         /* if no CLONE_VM, we consider it a fork */
5610         if (flags & CLONE_INVALID_FORK_FLAGS) {
5611             return -TARGET_EINVAL;
5612         }
5613 
5614         /* We can't support custom termination signals */
5615         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5616             return -TARGET_EINVAL;
5617         }
5618 
5619         if (block_signals()) {
5620             return -TARGET_ERESTARTSYS;
5621         }
5622 
5623         fork_start();
5624         ret = fork();
5625         if (ret == 0) {
5626             /* Child Process.  */
5627             cpu_clone_regs(env, newsp);
5628             fork_end(1);
5629             /* There is a race condition here.  The parent process could
5630                theoretically read the TID in the child process before the child
5631                tid is set.  This would require using either ptrace
5632                (not implemented) or having *_tidptr to point at a shared memory
5633                mapping.  We can't repeat the spinlock hack used above because
5634                the child process gets its own copy of the lock.  */
5635             if (flags & CLONE_CHILD_SETTID)
5636                 put_user_u32(sys_gettid(), child_tidptr);
5637             if (flags & CLONE_PARENT_SETTID)
5638                 put_user_u32(sys_gettid(), parent_tidptr);
5639             ts = (TaskState *)cpu->opaque;
5640             if (flags & CLONE_SETTLS)
5641                 cpu_set_tls (env, newtls);
5642             if (flags & CLONE_CHILD_CLEARTID)
5643                 ts->child_tidptr = child_tidptr;
5644         } else {
5645             fork_end(0);
5646         }
5647     }
5648     return ret;
5649 }
5650 
5651 /* warning: doesn't handle Linux-specific flags... */
5652 static int target_to_host_fcntl_cmd(int cmd)
5653 {
5654     int ret;
5655 
5656     switch(cmd) {
5657     case TARGET_F_DUPFD:
5658     case TARGET_F_GETFD:
5659     case TARGET_F_SETFD:
5660     case TARGET_F_GETFL:
5661     case TARGET_F_SETFL:
5662         ret = cmd;
5663         break;
5664     case TARGET_F_GETLK:
5665         ret = F_GETLK64;
5666         break;
5667     case TARGET_F_SETLK:
5668         ret = F_SETLK64;
5669         break;
5670     case TARGET_F_SETLKW:
5671         ret = F_SETLKW64;
5672         break;
5673     case TARGET_F_GETOWN:
5674         ret = F_GETOWN;
5675         break;
5676     case TARGET_F_SETOWN:
5677         ret = F_SETOWN;
5678         break;
5679     case TARGET_F_GETSIG:
5680         ret = F_GETSIG;
5681         break;
5682     case TARGET_F_SETSIG:
5683         ret = F_SETSIG;
5684         break;
5685 #if TARGET_ABI_BITS == 32
5686     case TARGET_F_GETLK64:
5687         ret = F_GETLK64;
5688         break;
5689     case TARGET_F_SETLK64:
5690         ret = F_SETLK64;
5691         break;
5692     case TARGET_F_SETLKW64:
5693         ret = F_SETLKW64;
5694         break;
5695 #endif
5696     case TARGET_F_SETLEASE:
5697         ret = F_SETLEASE;
5698         break;
5699     case TARGET_F_GETLEASE:
5700         ret = F_GETLEASE;
5701         break;
5702 #ifdef F_DUPFD_CLOEXEC
5703     case TARGET_F_DUPFD_CLOEXEC:
5704         ret = F_DUPFD_CLOEXEC;
5705         break;
5706 #endif
5707     case TARGET_F_NOTIFY:
5708         ret = F_NOTIFY;
5709         break;
5710 #ifdef F_GETOWN_EX
5711     case TARGET_F_GETOWN_EX:
5712         ret = F_GETOWN_EX;
5713         break;
5714 #endif
5715 #ifdef F_SETOWN_EX
5716     case TARGET_F_SETOWN_EX:
5717         ret = F_SETOWN_EX;
5718         break;
5719 #endif
5720 #ifdef F_SETPIPE_SZ
5721     case TARGET_F_SETPIPE_SZ:
5722         ret = F_SETPIPE_SZ;
5723         break;
5724     case TARGET_F_GETPIPE_SZ:
5725         ret = F_GETPIPE_SZ;
5726         break;
5727 #endif
5728     default:
5729         ret = -TARGET_EINVAL;
5730         break;
5731     }
5732 
5733 #if defined(__powerpc64__)
5734     /* On PPC64, the glibc headers define the F_*LK* values as 12, 13 and 14,
5735      * which the kernel does not support. The glibc fcntl call actually
5736      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
5737      * the syscall directly, adjust to what the kernel supports.
5738      */
5739     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5740         ret -= F_GETLK64 - 5;
5741     }
5742 #endif
5743 
5744     return ret;
5745 }
5746 
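     /* FLOCK_TRANSTBL expands to a switch over the lock type constants;
      * target_to_host_flock() and host_to_target_flock() instantiate it with
      * TRANSTBL_CONVERT defined for each direction of the mapping.
      */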
5747 #define FLOCK_TRANSTBL \
5748     switch (type) { \
5749     TRANSTBL_CONVERT(F_RDLCK); \
5750     TRANSTBL_CONVERT(F_WRLCK); \
5751     TRANSTBL_CONVERT(F_UNLCK); \
5752     TRANSTBL_CONVERT(F_EXLCK); \
5753     TRANSTBL_CONVERT(F_SHLCK); \
5754     }
5755 
5756 static int target_to_host_flock(int type)
5757 {
5758 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5759     FLOCK_TRANSTBL
5760 #undef  TRANSTBL_CONVERT
5761     return -TARGET_EINVAL;
5762 }
5763 
5764 static int host_to_target_flock(int type)
5765 {
5766 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5767     FLOCK_TRANSTBL
5768 #undef  TRANSTBL_CONVERT
5769     /* if we don't know how to convert the value coming
5770      * from the host, we copy it to the target field as-is
5771      */
5772     return type;
5773 }
5774 
5775 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5776                                             abi_ulong target_flock_addr)
5777 {
5778     struct target_flock *target_fl;
5779     int l_type;
5780 
5781     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5782         return -TARGET_EFAULT;
5783     }
5784 
5785     __get_user(l_type, &target_fl->l_type);
5786     l_type = target_to_host_flock(l_type);
5787     if (l_type < 0) {
5788         return l_type;
5789     }
5790     fl->l_type = l_type;
5791     __get_user(fl->l_whence, &target_fl->l_whence);
5792     __get_user(fl->l_start, &target_fl->l_start);
5793     __get_user(fl->l_len, &target_fl->l_len);
5794     __get_user(fl->l_pid, &target_fl->l_pid);
5795     unlock_user_struct(target_fl, target_flock_addr, 0);
5796     return 0;
5797 }
5798 
5799 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5800                                           const struct flock64 *fl)
5801 {
5802     struct target_flock *target_fl;
5803     short l_type;
5804 
5805     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5806         return -TARGET_EFAULT;
5807     }
5808 
5809     l_type = host_to_target_flock(fl->l_type);
5810     __put_user(l_type, &target_fl->l_type);
5811     __put_user(fl->l_whence, &target_fl->l_whence);
5812     __put_user(fl->l_start, &target_fl->l_start);
5813     __put_user(fl->l_len, &target_fl->l_len);
5814     __put_user(fl->l_pid, &target_fl->l_pid);
5815     unlock_user_struct(target_fl, target_flock_addr, 1);
5816     return 0;
5817 }
5818 
5819 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5820 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5821 
5822 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5823 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5824                                                    abi_ulong target_flock_addr)
5825 {
5826     struct target_oabi_flock64 *target_fl;
5827     int l_type;
5828 
5829     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5830         return -TARGET_EFAULT;
5831     }
5832 
5833     __get_user(l_type, &target_fl->l_type);
5834     l_type = target_to_host_flock(l_type);
5835     if (l_type < 0) {
5836         return l_type;
5837     }
5838     fl->l_type = l_type;
5839     __get_user(fl->l_whence, &target_fl->l_whence);
5840     __get_user(fl->l_start, &target_fl->l_start);
5841     __get_user(fl->l_len, &target_fl->l_len);
5842     __get_user(fl->l_pid, &target_fl->l_pid);
5843     unlock_user_struct(target_fl, target_flock_addr, 0);
5844     return 0;
5845 }
5846 
5847 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5848                                                  const struct flock64 *fl)
5849 {
5850     struct target_oabi_flock64 *target_fl;
5851     short l_type;
5852 
5853     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5854         return -TARGET_EFAULT;
5855     }
5856 
5857     l_type = host_to_target_flock(fl->l_type);
5858     __put_user(l_type, &target_fl->l_type);
5859     __put_user(fl->l_whence, &target_fl->l_whence);
5860     __put_user(fl->l_start, &target_fl->l_start);
5861     __put_user(fl->l_len, &target_fl->l_len);
5862     __put_user(fl->l_pid, &target_fl->l_pid);
5863     unlock_user_struct(target_fl, target_flock_addr, 1);
5864     return 0;
5865 }
5866 #endif
5867 
5868 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5869                                               abi_ulong target_flock_addr)
5870 {
5871     struct target_flock64 *target_fl;
5872     int l_type;
5873 
5874     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5875         return -TARGET_EFAULT;
5876     }
5877 
5878     __get_user(l_type, &target_fl->l_type);
5879     l_type = target_to_host_flock(l_type);
5880     if (l_type < 0) {
5881         return l_type;
5882     }
5883     fl->l_type = l_type;
5884     __get_user(fl->l_whence, &target_fl->l_whence);
5885     __get_user(fl->l_start, &target_fl->l_start);
5886     __get_user(fl->l_len, &target_fl->l_len);
5887     __get_user(fl->l_pid, &target_fl->l_pid);
5888     unlock_user_struct(target_fl, target_flock_addr, 0);
5889     return 0;
5890 }
5891 
5892 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5893                                             const struct flock64 *fl)
5894 {
5895     struct target_flock64 *target_fl;
5896     short l_type;
5897 
5898     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5899         return -TARGET_EFAULT;
5900     }
5901 
5902     l_type = host_to_target_flock(fl->l_type);
5903     __put_user(l_type, &target_fl->l_type);
5904     __put_user(fl->l_whence, &target_fl->l_whence);
5905     __put_user(fl->l_start, &target_fl->l_start);
5906     __put_user(fl->l_len, &target_fl->l_len);
5907     __put_user(fl->l_pid, &target_fl->l_pid);
5908     unlock_user_struct(target_fl, target_flock_addr, 1);
5909     return 0;
5910 }
5911 
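     /* do_fcntl() funnels every lock operation through a host struct flock64
      * so 64-bit offsets survive, converting between the guest's flock and
      * flock64 layouts and the host's at the boundaries.
      */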
5912 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5913 {
5914     struct flock64 fl64;
5915 #ifdef F_GETOWN_EX
5916     struct f_owner_ex fox;
5917     struct target_f_owner_ex *target_fox;
5918 #endif
5919     abi_long ret;
5920     int host_cmd = target_to_host_fcntl_cmd(cmd);
5921 
5922     if (host_cmd == -TARGET_EINVAL)
5923 	    return host_cmd;
5924 
5925     switch(cmd) {
5926     case TARGET_F_GETLK:
5927         ret = copy_from_user_flock(&fl64, arg);
5928         if (ret) {
5929             return ret;
5930         }
5931         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5932         if (ret == 0) {
5933             ret = copy_to_user_flock(arg, &fl64);
5934         }
5935         break;
5936 
5937     case TARGET_F_SETLK:
5938     case TARGET_F_SETLKW:
5939         ret = copy_from_user_flock(&fl64, arg);
5940         if (ret) {
5941             return ret;
5942         }
5943         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5944         break;
5945 
5946     case TARGET_F_GETLK64:
5947         ret = copy_from_user_flock64(&fl64, arg);
5948         if (ret) {
5949             return ret;
5950         }
5951         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5952         if (ret == 0) {
5953             ret = copy_to_user_flock64(arg, &fl64);
5954         }
5955         break;
5956     case TARGET_F_SETLK64:
5957     case TARGET_F_SETLKW64:
5958         ret = copy_from_user_flock64(&fl64, arg);
5959         if (ret) {
5960             return ret;
5961         }
5962         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5963         break;
5964 
5965     case TARGET_F_GETFL:
5966         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5967         if (ret >= 0) {
5968             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5969         }
5970         break;
5971 
5972     case TARGET_F_SETFL:
5973         ret = get_errno(safe_fcntl(fd, host_cmd,
5974                                    target_to_host_bitmask(arg,
5975                                                           fcntl_flags_tbl)));
5976         break;
5977 
5978 #ifdef F_GETOWN_EX
5979     case TARGET_F_GETOWN_EX:
5980         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5981         if (ret >= 0) {
5982             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5983                 return -TARGET_EFAULT;
5984             target_fox->type = tswap32(fox.type);
5985             target_fox->pid = tswap32(fox.pid);
5986             unlock_user_struct(target_fox, arg, 1);
5987         }
5988         break;
5989 #endif
5990 
5991 #ifdef F_SETOWN_EX
5992     case TARGET_F_SETOWN_EX:
5993         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5994             return -TARGET_EFAULT;
5995         fox.type = tswap32(target_fox->type);
5996         fox.pid = tswap32(target_fox->pid);
5997         unlock_user_struct(target_fox, arg, 0);
5998         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5999         break;
6000 #endif
6001 
6002     case TARGET_F_SETOWN:
6003     case TARGET_F_GETOWN:
6004     case TARGET_F_SETSIG:
6005     case TARGET_F_GETSIG:
6006     case TARGET_F_SETLEASE:
6007     case TARGET_F_GETLEASE:
6008     case TARGET_F_SETPIPE_SZ:
6009     case TARGET_F_GETPIPE_SZ:
6010         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6011         break;
6012 
6013     default:
6014         ret = get_errno(safe_fcntl(fd, cmd, arg));
6015         break;
6016     }
6017     return ret;
6018 }
6019 
6020 #ifdef USE_UID16
6021 
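     /* With 16-bit UID syscalls, IDs above 65535 cannot be represented; clamp
      * them to 65534 (the traditional overflow ID), and treat (uint16_t)-1
      * specially so that "don't change" arguments keep their meaning.
      */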
6022 static inline int high2lowuid(int uid)
6023 {
6024     if (uid > 65535)
6025         return 65534;
6026     else
6027         return uid;
6028 }
6029 
6030 static inline int high2lowgid(int gid)
6031 {
6032     if (gid > 65535)
6033         return 65534;
6034     else
6035         return gid;
6036 }
6037 
6038 static inline int low2highuid(int uid)
6039 {
6040     if ((int16_t)uid == -1)
6041         return -1;
6042     else
6043         return uid;
6044 }
6045 
6046 static inline int low2highgid(int gid)
6047 {
6048     if ((int16_t)gid == -1)
6049         return -1;
6050     else
6051         return gid;
6052 }
6053 static inline int tswapid(int id)
6054 {
6055     return tswap16(id);
6056 }
6057 
6058 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6059 
6060 #else /* !USE_UID16 */
6061 static inline int high2lowuid(int uid)
6062 {
6063     return uid;
6064 }
6065 static inline int high2lowgid(int gid)
6066 {
6067     return gid;
6068 }
6069 static inline int low2highuid(int uid)
6070 {
6071     return uid;
6072 }
6073 static inline int low2highgid(int gid)
6074 {
6075     return gid;
6076 }
6077 static inline int tswapid(int id)
6078 {
6079     return tswap32(id);
6080 }
6081 
6082 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6083 
6084 #endif /* USE_UID16 */
6085 
6086 /* We must do direct syscalls for setting UID/GID, because we want to
6087  * implement the Linux system call semantics of "change only for this thread",
6088  * not the libc/POSIX semantics of "change for all threads in process".
6089  * (See http://ewontfix.com/17/ for more details.)
6090  * We use the 32-bit version of the syscalls if present; if it is not
6091  * then either the host architecture supports 32-bit UIDs natively with
6092  * the standard syscall, or the 16-bit UID is the best we can do.
6093  */
6094 #ifdef __NR_setuid32
6095 #define __NR_sys_setuid __NR_setuid32
6096 #else
6097 #define __NR_sys_setuid __NR_setuid
6098 #endif
6099 #ifdef __NR_setgid32
6100 #define __NR_sys_setgid __NR_setgid32
6101 #else
6102 #define __NR_sys_setgid __NR_setgid
6103 #endif
6104 #ifdef __NR_setresuid32
6105 #define __NR_sys_setresuid __NR_setresuid32
6106 #else
6107 #define __NR_sys_setresuid __NR_setresuid
6108 #endif
6109 #ifdef __NR_setresgid32
6110 #define __NR_sys_setresgid __NR_setresgid32
6111 #else
6112 #define __NR_sys_setresgid __NR_setresgid
6113 #endif
6114 
6115 _syscall1(int, sys_setuid, uid_t, uid)
6116 _syscall1(int, sys_setgid, gid_t, gid)
6117 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6118 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6119 
6120 void syscall_init(void)
6121 {
6122     IOCTLEntry *ie;
6123     const argtype *arg_type;
6124     int size;
6125     int i;
6126 
6127     thunk_init(STRUCT_MAX);
6128 
6129 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6130 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6131 #include "syscall_types.h"
6132 #undef STRUCT
6133 #undef STRUCT_SPECIAL
6134 
6135     /* Build target_to_host_errno_table[] table from
6136      * host_to_target_errno_table[]. */
6137     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6138         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6139     }
6140 
6141     /* we patch the ioctl size if necessary. We rely on the fact that
6142        no ioctl has all the bits at '1' in the size field */
6143     ie = ioctl_entries;
6144     while (ie->target_cmd != 0) {
6145         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6146             TARGET_IOC_SIZEMASK) {
6147             arg_type = ie->arg_type;
6148             if (arg_type[0] != TYPE_PTR) {
6149                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6150                         ie->target_cmd);
6151                 exit(1);
6152             }
6153             arg_type++;
6154             size = thunk_type_size(arg_type, 0);
6155             ie->target_cmd = (ie->target_cmd &
6156                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6157                 (size << TARGET_IOC_SIZESHIFT);
6158         }
6159 
6160         /* automatic consistency check if same arch */
6161 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6162     (defined(__x86_64__) && defined(TARGET_X86_64))
6163         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6164             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6165                     ie->name, ie->target_cmd, ie->host_cmd);
6166         }
6167 #endif
6168         ie++;
6169     }
6170 }
6171 
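     /* On 32-bit ABIs a 64-bit file offset arrives split across two registers
      * whose order depends on the target's endianness; target_offset64()
      * reassembles it.  On 64-bit ABIs the first word already holds the value.
      */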
6172 #if TARGET_ABI_BITS == 32
6173 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6174 {
6175 #ifdef TARGET_WORDS_BIGENDIAN
6176     return ((uint64_t)word0 << 32) | word1;
6177 #else
6178     return ((uint64_t)word1 << 32) | word0;
6179 #endif
6180 }
6181 #else /* TARGET_ABI_BITS == 32 */
6182 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6183 {
6184     return word0;
6185 }
6186 #endif /* TARGET_ABI_BITS != 32 */
6187 
6188 #ifdef TARGET_NR_truncate64
6189 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6190                                          abi_long arg2,
6191                                          abi_long arg3,
6192                                          abi_long arg4)
6193 {
6194     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6195         arg2 = arg3;
6196         arg3 = arg4;
6197     }
6198     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6199 }
6200 #endif
6201 
6202 #ifdef TARGET_NR_ftruncate64
6203 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6204                                           abi_long arg2,
6205                                           abi_long arg3,
6206                                           abi_long arg4)
6207 {
6208     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6209         arg2 = arg3;
6210         arg3 = arg4;
6211     }
6212     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6213 }
6214 #endif
6215 
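/* Copy a struct timespec between guest memory and the host representation,
 * in either direction; returns 0 on success or -TARGET_EFAULT if the guest
 * address cannot be accessed. */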
6216 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6217                                                abi_ulong target_addr)
6218 {
6219     struct target_timespec *target_ts;
6220 
6221     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6222         return -TARGET_EFAULT;
6223     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6224     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6225     unlock_user_struct(target_ts, target_addr, 0);
6226     return 0;
6227 }
6228 
6229 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6230                                                struct timespec *host_ts)
6231 {
6232     struct target_timespec *target_ts;
6233 
6234     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6235         return -TARGET_EFAULT;
6236     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6237     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6238     unlock_user_struct(target_ts, target_addr, 1);
6239     return 0;
6240 }
6241 
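/* Likewise for struct itimerspec (interval timers): both it_interval and
 * it_value are converted field by field. */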
6242 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6243                                                  abi_ulong target_addr)
6244 {
6245     struct target_itimerspec *target_itspec;
6246 
6247     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6248         return -TARGET_EFAULT;
6249     }
6250 
6251     host_itspec->it_interval.tv_sec =
6252                             tswapal(target_itspec->it_interval.tv_sec);
6253     host_itspec->it_interval.tv_nsec =
6254                             tswapal(target_itspec->it_interval.tv_nsec);
6255     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6256     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6257 
6258     unlock_user_struct(target_itspec, target_addr, 1);
6259     return 0;
6260 }
6261 
6262 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6263                                                struct itimerspec *host_its)
6264 {
6265     struct target_itimerspec *target_itspec;
6266 
6267     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6268         return -TARGET_EFAULT;
6269     }
6270 
6271     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6272     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6273 
6274     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6275     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6276 
6277     unlock_user_struct(target_itspec, target_addr, 0);
6278     return 0;
6279 }
6280 
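/* Converters for struct timex, used by adjtimex(): each field is copied
 * individually because the guest and host layouts can differ. */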
6281 static inline abi_long target_to_host_timex(struct timex *host_tx,
6282                                             abi_long target_addr)
6283 {
6284     struct target_timex *target_tx;
6285 
6286     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6287         return -TARGET_EFAULT;
6288     }
6289 
6290     __get_user(host_tx->modes, &target_tx->modes);
6291     __get_user(host_tx->offset, &target_tx->offset);
6292     __get_user(host_tx->freq, &target_tx->freq);
6293     __get_user(host_tx->maxerror, &target_tx->maxerror);
6294     __get_user(host_tx->esterror, &target_tx->esterror);
6295     __get_user(host_tx->status, &target_tx->status);
6296     __get_user(host_tx->constant, &target_tx->constant);
6297     __get_user(host_tx->precision, &target_tx->precision);
6298     __get_user(host_tx->tolerance, &target_tx->tolerance);
6299     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6300     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6301     __get_user(host_tx->tick, &target_tx->tick);
6302     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6303     __get_user(host_tx->jitter, &target_tx->jitter);
6304     __get_user(host_tx->shift, &target_tx->shift);
6305     __get_user(host_tx->stabil, &target_tx->stabil);
6306     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6307     __get_user(host_tx->calcnt, &target_tx->calcnt);
6308     __get_user(host_tx->errcnt, &target_tx->errcnt);
6309     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6310     __get_user(host_tx->tai, &target_tx->tai);
6311 
6312     unlock_user_struct(target_tx, target_addr, 0);
6313     return 0;
6314 }
6315 
6316 static inline abi_long host_to_target_timex(abi_long target_addr,
6317                                             struct timex *host_tx)
6318 {
6319     struct target_timex *target_tx;
6320 
6321     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6322         return -TARGET_EFAULT;
6323     }
6324 
6325     __put_user(host_tx->modes, &target_tx->modes);
6326     __put_user(host_tx->offset, &target_tx->offset);
6327     __put_user(host_tx->freq, &target_tx->freq);
6328     __put_user(host_tx->maxerror, &target_tx->maxerror);
6329     __put_user(host_tx->esterror, &target_tx->esterror);
6330     __put_user(host_tx->status, &target_tx->status);
6331     __put_user(host_tx->constant, &target_tx->constant);
6332     __put_user(host_tx->precision, &target_tx->precision);
6333     __put_user(host_tx->tolerance, &target_tx->tolerance);
6334     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6335     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6336     __put_user(host_tx->tick, &target_tx->tick);
6337     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6338     __put_user(host_tx->jitter, &target_tx->jitter);
6339     __put_user(host_tx->shift, &target_tx->shift);
6340     __put_user(host_tx->stabil, &target_tx->stabil);
6341     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6342     __put_user(host_tx->calcnt, &target_tx->calcnt);
6343     __put_user(host_tx->errcnt, &target_tx->errcnt);
6344     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6345     __put_user(host_tx->tai, &target_tx->tai);
6346 
6347     unlock_user_struct(target_tx, target_addr, 1);
6348     return 0;
6349 }
6350 
6351 
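/* Read a struct sigevent from guest memory and convert it to the host
 * layout, translating the signal number and notification thread ID. */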
6352 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6353                                                abi_ulong target_addr)
6354 {
6355     struct target_sigevent *target_sevp;
6356 
6357     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6358         return -TARGET_EFAULT;
6359     }
6360 
6361     /* This union is awkward on 64 bit systems because it has a 32 bit
6362      * integer and a pointer in it; we follow the conversion approach
6363      * used for handling sigval types in signal.c so the guest should get
6364      * the correct value back even if we did a 64 bit byteswap and it's
6365      * using the 32 bit integer.
6366      */
6367     host_sevp->sigev_value.sival_ptr =
6368         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6369     host_sevp->sigev_signo =
6370         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6371     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6372     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6373 
6374     unlock_user_struct(target_sevp, target_addr, 1);
6375     return 0;
6376 }
6377 
6378 #if defined(TARGET_NR_mlockall)
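/* Translate the guest mlockall() flag bits into the host MCL_* values. */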
6379 static inline int target_to_host_mlockall_arg(int arg)
6380 {
6381     int result = 0;
6382 
6383     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6384         result |= MCL_CURRENT;
6385     }
6386     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6387         result |= MCL_FUTURE;
6388     }
6389     return result;
6390 }
6391 #endif
6392 
6393 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6394      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6395      defined(TARGET_NR_newfstatat))
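/* Write a host struct stat back to guest memory in the target's stat64
 * layout (or plain stat if the target has no stat64); 32-bit ARM guests
 * additionally distinguish the EABI variant. */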
6396 static inline abi_long host_to_target_stat64(void *cpu_env,
6397                                              abi_ulong target_addr,
6398                                              struct stat *host_st)
6399 {
6400 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6401     if (((CPUARMState *)cpu_env)->eabi) {
6402         struct target_eabi_stat64 *target_st;
6403 
6404         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6405             return -TARGET_EFAULT;
6406         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6407         __put_user(host_st->st_dev, &target_st->st_dev);
6408         __put_user(host_st->st_ino, &target_st->st_ino);
6409 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6410         __put_user(host_st->st_ino, &target_st->__st_ino);
6411 #endif
6412         __put_user(host_st->st_mode, &target_st->st_mode);
6413         __put_user(host_st->st_nlink, &target_st->st_nlink);
6414         __put_user(host_st->st_uid, &target_st->st_uid);
6415         __put_user(host_st->st_gid, &target_st->st_gid);
6416         __put_user(host_st->st_rdev, &target_st->st_rdev);
6417         __put_user(host_st->st_size, &target_st->st_size);
6418         __put_user(host_st->st_blksize, &target_st->st_blksize);
6419         __put_user(host_st->st_blocks, &target_st->st_blocks);
6420         __put_user(host_st->st_atime, &target_st->target_st_atime);
6421         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6422         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6423 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6424         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6425         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6426         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6427 #endif
6428         unlock_user_struct(target_st, target_addr, 1);
6429     } else
6430 #endif
6431     {
6432 #if defined(TARGET_HAS_STRUCT_STAT64)
6433         struct target_stat64 *target_st;
6434 #else
6435         struct target_stat *target_st;
6436 #endif
6437 
6438         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6439             return -TARGET_EFAULT;
6440         memset(target_st, 0, sizeof(*target_st));
6441         __put_user(host_st->st_dev, &target_st->st_dev);
6442         __put_user(host_st->st_ino, &target_st->st_ino);
6443 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6444         __put_user(host_st->st_ino, &target_st->__st_ino);
6445 #endif
6446         __put_user(host_st->st_mode, &target_st->st_mode);
6447         __put_user(host_st->st_nlink, &target_st->st_nlink);
6448         __put_user(host_st->st_uid, &target_st->st_uid);
6449         __put_user(host_st->st_gid, &target_st->st_gid);
6450         __put_user(host_st->st_rdev, &target_st->st_rdev);
6451         /* XXX: better use of kernel struct */
6452         __put_user(host_st->st_size, &target_st->st_size);
6453         __put_user(host_st->st_blksize, &target_st->st_blksize);
6454         __put_user(host_st->st_blocks, &target_st->st_blocks);
6455         __put_user(host_st->st_atime, &target_st->target_st_atime);
6456         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6457         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6458 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6459         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6460         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6461         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6462 #endif
6463         unlock_user_struct(target_st, target_addr, 1);
6464     }
6465 
6466     return 0;
6467 }
6468 #endif
6469 
6470 /* ??? Using host futex calls even when target atomic operations
6471    are not really atomic probably breaks things.  However, implementing
6472    futexes locally would make futexes shared between multiple processes
6473    tricky.  They are probably useless anyway, because guest atomic
6474    operations won't work either.  */
6475 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6476                     target_ulong uaddr2, int val3)
6477 {
6478     struct timespec ts, *pts;
6479     int base_op;
6480 
6481     /* ??? We assume FUTEX_* constants are the same on both host
6482        and target.  */
6483 #ifdef FUTEX_CMD_MASK
6484     base_op = op & FUTEX_CMD_MASK;
6485 #else
6486     base_op = op;
6487 #endif
6488     switch (base_op) {
6489     case FUTEX_WAIT:
6490     case FUTEX_WAIT_BITSET:
6491         if (timeout) {
6492             pts = &ts;
6493             target_to_host_timespec(pts, timeout);
6494         } else {
6495             pts = NULL;
6496         }
6497         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6498                          pts, NULL, val3));
6499     case FUTEX_WAKE:
6500         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6501     case FUTEX_FD:
6502         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6503     case FUTEX_REQUEUE:
6504     case FUTEX_CMP_REQUEUE:
6505     case FUTEX_WAKE_OP:
6506         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6507            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6508            But the prototype takes a `struct timespec *'; insert casts
6509            to satisfy the compiler.  We do not need to tswap TIMEOUT
6510            since it's not compared to guest memory.  */
6511         pts = (struct timespec *)(uintptr_t) timeout;
6512         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6513                                     g2h(uaddr2),
6514                                     (base_op == FUTEX_CMP_REQUEUE
6515                                      ? tswap32(val3)
6516                                      : val3)));
6517     default:
6518         return -TARGET_ENOSYS;
6519     }
6520 }
6521 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
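/* Emulate name_to_handle_at(): the variable-sized struct file_handle is
 * copied between guest and host with handle_bytes and handle_type
 * byteswapped, and the mount ID is written back to guest memory. */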
6522 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6523                                      abi_long handle, abi_long mount_id,
6524                                      abi_long flags)
6525 {
6526     struct file_handle *target_fh;
6527     struct file_handle *fh;
6528     int mid = 0;
6529     abi_long ret;
6530     char *name;
6531     unsigned int size, total_size;
6532 
6533     if (get_user_s32(size, handle)) {
6534         return -TARGET_EFAULT;
6535     }
6536 
6537     name = lock_user_string(pathname);
6538     if (!name) {
6539         return -TARGET_EFAULT;
6540     }
6541 
6542     total_size = sizeof(struct file_handle) + size;
6543     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6544     if (!target_fh) {
6545         unlock_user(name, pathname, 0);
6546         return -TARGET_EFAULT;
6547     }
6548 
6549     fh = g_malloc0(total_size);
6550     fh->handle_bytes = size;
6551 
6552     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6553     unlock_user(name, pathname, 0);
6554 
6555     /* man name_to_handle_at(2):
6556      * Other than the use of the handle_bytes field, the caller should treat
6557      * the file_handle structure as an opaque data type
6558      */
6559 
6560     memcpy(target_fh, fh, total_size);
6561     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6562     target_fh->handle_type = tswap32(fh->handle_type);
6563     g_free(fh);
6564     unlock_user(target_fh, handle, total_size);
6565 
6566     if (put_user_s32(mid, mount_id)) {
6567         return -TARGET_EFAULT;
6568     }
6569 
6570     return ret;
6571 
6572 }
6573 #endif
6574 
6575 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
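/* Emulate open_by_handle_at(): rebuild the host file_handle from the guest
 * copy and translate the open flags before calling the host syscall. */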
6576 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6577                                      abi_long flags)
6578 {
6579     struct file_handle *target_fh;
6580     struct file_handle *fh;
6581     unsigned int size, total_size;
6582     abi_long ret;
6583 
6584     if (get_user_s32(size, handle)) {
6585         return -TARGET_EFAULT;
6586     }
6587 
6588     total_size = sizeof(struct file_handle) + size;
6589     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6590     if (!target_fh) {
6591         return -TARGET_EFAULT;
6592     }
6593 
6594     fh = g_memdup(target_fh, total_size);
6595     fh->handle_bytes = size;
6596     fh->handle_type = tswap32(target_fh->handle_type);
6597 
6598     ret = get_errno(open_by_handle_at(mount_fd, fh,
6599                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6600 
6601     g_free(fh);
6602 
6603     unlock_user(target_fh, handle, total_size);
6604 
6605     return ret;
6606 }
6607 #endif
6608 
6609 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6610 
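/* Common implementation of signalfd and signalfd4: convert the guest signal
 * mask and flags, then register a translator for data read from the new fd. */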
6611 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6612 {
6613     int host_flags;
6614     target_sigset_t *target_mask;
6615     sigset_t host_mask;
6616     abi_long ret;
6617 
6618     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6619         return -TARGET_EINVAL;
6620     }
6621     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6622         return -TARGET_EFAULT;
6623     }
6624 
6625     target_to_host_sigset(&host_mask, target_mask);
6626 
6627     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6628 
6629     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6630     if (ret >= 0) {
6631         fd_trans_register(ret, &target_signalfd_trans);
6632     }
6633 
6634     unlock_user_struct(target_mask, mask, 0);
6635 
6636     return ret;
6637 }
6638 #endif
6639 
6640 /* Map host to target signal numbers for the wait family of syscalls.
6641    Assume all other status bits are the same.  */
6642 int host_to_target_waitstatus(int status)
6643 {
6644     if (WIFSIGNALED(status)) {
6645         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6646     }
6647     if (WIFSTOPPED(status)) {
6648         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6649                | (status & 0xff);
6650     }
6651     return status;
6652 }
6653 
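/* The open_self_*() helpers below synthesize the contents of various
 * /proc/self/ files from the guest's point of view, since the host's own
 * versions would describe QEMU rather than the emulated program. */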
6654 static int open_self_cmdline(void *cpu_env, int fd)
6655 {
6656     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6657     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6658     int i;
6659 
6660     for (i = 0; i < bprm->argc; i++) {
6661         size_t len = strlen(bprm->argv[i]) + 1;
6662 
6663         if (write(fd, bprm->argv[i], len) != len) {
6664             return -1;
6665         }
6666     }
6667 
6668     return 0;
6669 }
6670 
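/* Generate /proc/self/maps by rereading the host's maps and keeping only
 * the ranges that are valid guest addresses, translated back into guest
 * virtual addresses. */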
6671 static int open_self_maps(void *cpu_env, int fd)
6672 {
6673     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6674     TaskState *ts = cpu->opaque;
6675     FILE *fp;
6676     char *line = NULL;
6677     size_t len = 0;
6678     ssize_t read;
6679 
6680     fp = fopen("/proc/self/maps", "r");
6681     if (fp == NULL) {
6682         return -1;
6683     }
6684 
6685     while ((read = getline(&line, &len, fp)) != -1) {
6686         int fields, dev_maj, dev_min, inode;
6687         uint64_t min, max, offset;
6688         char flag_r, flag_w, flag_x, flag_p;
6689         char path[512] = "";
6690         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6691                         " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
6692                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6693 
6694         if ((fields < 10) || (fields > 11)) {
6695             continue;
6696         }
6697         if (h2g_valid(min)) {
6698             int flags = page_get_flags(h2g(min));
6699             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6700             if (page_check_range(h2g(min), max - min, flags) == -1) {
6701                 continue;
6702             }
6703             if (h2g(min) == ts->info->stack_limit) {
6704                 pstrcpy(path, sizeof(path), "      [stack]");
6705             }
6706             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6707                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6708                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6709                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6710                     path[0] ? "         " : "", path);
6711         }
6712     }
6713 
6714     free(line);
6715     fclose(fp);
6716 
6717     return 0;
6718 }
6719 
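/* Synthesize /proc/self/stat: only the pid, command name and stack start
 * fields are filled in; every other field reads as 0. */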
6720 static int open_self_stat(void *cpu_env, int fd)
6721 {
6722     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6723     TaskState *ts = cpu->opaque;
6724     abi_ulong start_stack = ts->info->start_stack;
6725     int i;
6726 
6727     for (i = 0; i < 44; i++) {
6728       char buf[128];
6729       int len;
6730       uint64_t val = 0;
6731 
6732       if (i == 0) {
6733         /* pid */
6734         val = getpid();
6735         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6736       } else if (i == 1) {
6737         /* app name */
6738         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6739       } else if (i == 27) {
6740         /* stack bottom */
6741         val = start_stack;
6742         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6743       } else {
6744         /* all other fields are reported as 0 */
6745         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6746       }
6747 
6748       len = strlen(buf);
6749       if (write(fd, buf, len) != len) {
6750           return -1;
6751       }
6752     }
6753 
6754     return 0;
6755 }
6756 
6757 static int open_self_auxv(void *cpu_env, int fd)
6758 {
6759     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6760     TaskState *ts = cpu->opaque;
6761     abi_ulong auxv = ts->info->saved_auxv;
6762     abi_ulong len = ts->info->auxv_len;
6763     char *ptr;
6764 
6765     /*
6766      * The auxiliary vector is stored on the target process's stack;
6767      * read the whole auxv vector and copy it to the file.
6768      */
6769     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6770     if (ptr != NULL) {
6771         while (len > 0) {
6772             ssize_t r;
6773             r = write(fd, ptr, len);
6774             if (r <= 0) {
6775                 break;
6776             }
6777             len -= r;
6778             ptr += r;
6779         }
6780         lseek(fd, 0, SEEK_SET);
6781         unlock_user(ptr, auxv, len);
6782     }
6783 
6784     return 0;
6785 }
6786 
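/* Return 1 if filename names the given entry under /proc/self/ or under
 * /proc/<pid>/ for our own pid, 0 otherwise. */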
6787 static int is_proc_myself(const char *filename, const char *entry)
6788 {
6789     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6790         filename += strlen("/proc/");
6791         if (!strncmp(filename, "self/", strlen("self/"))) {
6792             filename += strlen("self/");
6793         } else if (*filename >= '1' && *filename <= '9') {
6794             char myself[80];
6795             snprintf(myself, sizeof(myself), "%d/", getpid());
6796             if (!strncmp(filename, myself, strlen(myself))) {
6797                 filename += strlen(myself);
6798             } else {
6799                 return 0;
6800             }
6801         } else {
6802             return 0;
6803         }
6804         if (!strcmp(filename, entry)) {
6805             return 1;
6806         }
6807     }
6808     return 0;
6809 }
6810 
6811 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6812     defined(TARGET_SPARC) || defined(TARGET_M68K)
6813 static int is_proc(const char *filename, const char *entry)
6814 {
6815     return strcmp(filename, entry) == 0;
6816 }
6817 #endif
6818 
6819 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
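/* /proc/net/route only needs to be emulated when host and guest endianness
 * differ, because the 32-bit addresses in it are shown in native byte order
 * and must be byteswapped for the guest. */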
6820 static int open_net_route(void *cpu_env, int fd)
6821 {
6822     FILE *fp;
6823     char *line = NULL;
6824     size_t len = 0;
6825     ssize_t read;
6826 
6827     fp = fopen("/proc/net/route", "r");
6828     if (fp == NULL) {
6829         return -1;
6830     }
6831 
6832     /* read header */
6833 
6834     read = getline(&line, &len, fp);
6835     dprintf(fd, "%s", line);
6836 
6837     /* read routes */
6838 
6839     while ((read = getline(&line, &len, fp)) != -1) {
6840         char iface[16];
6841         uint32_t dest, gw, mask;
6842         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6843         int fields;
6844 
6845         fields = sscanf(line,
6846                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6847                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6848                         &mask, &mtu, &window, &irtt);
6849         if (fields != 11) {
6850             continue;
6851         }
6852         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6853                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6854                 metric, tswap32(mask), mtu, window, irtt);
6855     }
6856 
6857     free(line);
6858     fclose(fp);
6859 
6860     return 0;
6861 }
6862 #endif
6863 
6864 #if defined(TARGET_SPARC)
6865 static int open_cpuinfo(void *cpu_env, int fd)
6866 {
6867     dprintf(fd, "type\t\t: sun4u\n");
6868     return 0;
6869 }
6870 #endif
6871 
6872 #if defined(TARGET_M68K)
6873 static int open_hardware(void *cpu_env, int fd)
6874 {
6875     dprintf(fd, "Model:\t\tqemu-m68k\n");
6876     return 0;
6877 }
6878 #endif
6879 
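/* openat() emulation: /proc paths that must be presented from the guest's
 * perspective are redirected to a temporary file filled in by the matching
 * fake_open handler; everything else is passed through to the host. */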
6880 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6881 {
6882     struct fake_open {
6883         const char *filename;
6884         int (*fill)(void *cpu_env, int fd);
6885         int (*cmp)(const char *s1, const char *s2);
6886     };
6887     const struct fake_open *fake_open;
6888     static const struct fake_open fakes[] = {
6889         { "maps", open_self_maps, is_proc_myself },
6890         { "stat", open_self_stat, is_proc_myself },
6891         { "auxv", open_self_auxv, is_proc_myself },
6892         { "cmdline", open_self_cmdline, is_proc_myself },
6893 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6894         { "/proc/net/route", open_net_route, is_proc },
6895 #endif
6896 #if defined(TARGET_SPARC)
6897         { "/proc/cpuinfo", open_cpuinfo, is_proc },
6898 #endif
6899 #if defined(TARGET_M68K)
6900         { "/proc/hardware", open_hardware, is_proc },
6901 #endif
6902         { NULL, NULL, NULL }
6903     };
6904 
6905     if (is_proc_myself(pathname, "exe")) {
6906         int execfd = qemu_getauxval(AT_EXECFD);
6907         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6908     }
6909 
6910     for (fake_open = fakes; fake_open->filename; fake_open++) {
6911         if (fake_open->cmp(pathname, fake_open->filename)) {
6912             break;
6913         }
6914     }
6915 
6916     if (fake_open->filename) {
6917         const char *tmpdir;
6918         char filename[PATH_MAX];
6919         int fd, r;
6920 
6921         /* create a temporary file to hold the synthesized contents */
6922         tmpdir = getenv("TMPDIR");
6923         if (!tmpdir)
6924             tmpdir = "/tmp";
6925         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6926         fd = mkstemp(filename);
6927         if (fd < 0) {
6928             return fd;
6929         }
6930         unlink(filename);
6931 
6932         if ((r = fake_open->fill(cpu_env, fd))) {
6933             int e = errno;
6934             close(fd);
6935             errno = e;
6936             return r;
6937         }
6938         lseek(fd, 0, SEEK_SET);
6939 
6940         return fd;
6941     }
6942 
6943     return safe_openat(dirfd, path(pathname), flags, mode);
6944 }
6945 
6946 #define TIMER_MAGIC 0x0caf0000
6947 #define TIMER_MAGIC_MASK 0xffff0000
6948 
6949 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6950 static target_timer_t get_timer_id(abi_long arg)
6951 {
6952     target_timer_t timerid = arg;
6953 
6954     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6955         return -TARGET_EINVAL;
6956     }
6957 
6958     timerid &= 0xffff;
6959 
6960     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6961         return -TARGET_EINVAL;
6962     }
6963 
6964     return timerid;
6965 }
6966 
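/* Convert CPU affinity masks, as used by sched_getaffinity() and
 * sched_setaffinity(), between guest and host representations bit by bit,
 * since abi_ulong and the host's unsigned long may differ in width. */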
6967 static int target_to_host_cpu_mask(unsigned long *host_mask,
6968                                    size_t host_size,
6969                                    abi_ulong target_addr,
6970                                    size_t target_size)
6971 {
6972     unsigned target_bits = sizeof(abi_ulong) * 8;
6973     unsigned host_bits = sizeof(*host_mask) * 8;
6974     abi_ulong *target_mask;
6975     unsigned i, j;
6976 
6977     assert(host_size >= target_size);
6978 
6979     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6980     if (!target_mask) {
6981         return -TARGET_EFAULT;
6982     }
6983     memset(host_mask, 0, host_size);
6984 
6985     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6986         unsigned bit = i * target_bits;
6987         abi_ulong val;
6988 
6989         __get_user(val, &target_mask[i]);
6990         for (j = 0; j < target_bits; j++, bit++) {
6991             if (val & (1UL << j)) {
6992                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6993             }
6994         }
6995     }
6996 
6997     unlock_user(target_mask, target_addr, 0);
6998     return 0;
6999 }
7000 
7001 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7002                                    size_t host_size,
7003                                    abi_ulong target_addr,
7004                                    size_t target_size)
7005 {
7006     unsigned target_bits = sizeof(abi_ulong) * 8;
7007     unsigned host_bits = sizeof(*host_mask) * 8;
7008     abi_ulong *target_mask;
7009     unsigned i, j;
7010 
7011     assert(host_size >= target_size);
7012 
7013     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7014     if (!target_mask) {
7015         return -TARGET_EFAULT;
7016     }
7017 
7018     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7019         unsigned bit = i * target_bits;
7020         abi_ulong val = 0;
7021 
7022         for (j = 0; j < target_bits; j++, bit++) {
7023             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7024                 val |= 1UL << j;
7025             }
7026         }
7027         __put_user(val, &target_mask[i]);
7028     }
7029 
7030     unlock_user(target_mask, target_addr, target_size);
7031     return 0;
7032 }
7033 
7034 /* This is an internal helper for do_syscall so that it is easier
7035  * to have a single return point, allowing actions such as logging
7036  * of syscall results to be performed.
7037  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7038  */
7039 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7040                             abi_long arg2, abi_long arg3, abi_long arg4,
7041                             abi_long arg5, abi_long arg6, abi_long arg7,
7042                             abi_long arg8)
7043 {
7044     CPUState *cpu = env_cpu(cpu_env);
7045     abi_long ret;
7046 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7047     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7048     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7049     struct stat st;
7050 #endif
7051 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7052     || defined(TARGET_NR_fstatfs)
7053     struct statfs stfs;
7054 #endif
7055     void *p;
7056 
7057     switch(num) {
7058     case TARGET_NR_exit:
7059         /* In old applications this may be used to implement _exit(2).
7060            However, in threaded applications it is used for thread termination,
7061            and _exit_group is used for application termination.
7062            Do thread termination if we have more than one thread.  */
7063 
7064         if (block_signals()) {
7065             return -TARGET_ERESTARTSYS;
7066         }
7067 
7068         cpu_list_lock();
7069 
7070         if (CPU_NEXT(first_cpu)) {
7071             TaskState *ts;
7072 
7073             /* Remove the CPU from the list.  */
7074             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7075 
7076             cpu_list_unlock();
7077 
7078             ts = cpu->opaque;
7079             if (ts->child_tidptr) {
7080                 put_user_u32(0, ts->child_tidptr);
7081                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7082                           NULL, NULL, 0);
7083             }
7084             thread_cpu = NULL;
7085             object_unref(OBJECT(cpu));
7086             g_free(ts);
7087             rcu_unregister_thread();
7088             pthread_exit(NULL);
7089         }
7090 
7091         cpu_list_unlock();
7092         preexit_cleanup(cpu_env, arg1);
7093         _exit(arg1);
7094         return 0; /* avoid warning */
7095     case TARGET_NR_read:
7096         if (arg2 == 0 && arg3 == 0) {
7097             return get_errno(safe_read(arg1, 0, 0));
7098         } else {
7099             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7100                 return -TARGET_EFAULT;
7101             ret = get_errno(safe_read(arg1, p, arg3));
7102             if (ret >= 0 &&
7103                 fd_trans_host_to_target_data(arg1)) {
7104                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7105             }
7106             unlock_user(p, arg2, ret);
7107         }
7108         return ret;
7109     case TARGET_NR_write:
7110         if (arg2 == 0 && arg3 == 0) {
7111             return get_errno(safe_write(arg1, 0, 0));
7112         }
7113         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7114             return -TARGET_EFAULT;
7115         if (fd_trans_target_to_host_data(arg1)) {
7116             void *copy = g_malloc(arg3);
7117             memcpy(copy, p, arg3);
7118             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7119             if (ret >= 0) {
7120                 ret = get_errno(safe_write(arg1, copy, ret));
7121             }
7122             g_free(copy);
7123         } else {
7124             ret = get_errno(safe_write(arg1, p, arg3));
7125         }
7126         unlock_user(p, arg2, 0);
7127         return ret;
7128 
7129 #ifdef TARGET_NR_open
7130     case TARGET_NR_open:
7131         if (!(p = lock_user_string(arg1)))
7132             return -TARGET_EFAULT;
7133         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7134                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7135                                   arg3));
7136         fd_trans_unregister(ret);
7137         unlock_user(p, arg1, 0);
7138         return ret;
7139 #endif
7140     case TARGET_NR_openat:
7141         if (!(p = lock_user_string(arg2)))
7142             return -TARGET_EFAULT;
7143         ret = get_errno(do_openat(cpu_env, arg1, p,
7144                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7145                                   arg4));
7146         fd_trans_unregister(ret);
7147         unlock_user(p, arg2, 0);
7148         return ret;
7149 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7150     case TARGET_NR_name_to_handle_at:
7151         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7152         return ret;
7153 #endif
7154 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7155     case TARGET_NR_open_by_handle_at:
7156         ret = do_open_by_handle_at(arg1, arg2, arg3);
7157         fd_trans_unregister(ret);
7158         return ret;
7159 #endif
7160     case TARGET_NR_close:
7161         fd_trans_unregister(arg1);
7162         return get_errno(close(arg1));
7163 
7164     case TARGET_NR_brk:
7165         return do_brk(arg1);
7166 #ifdef TARGET_NR_fork
7167     case TARGET_NR_fork:
7168         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7169 #endif
7170 #ifdef TARGET_NR_waitpid
7171     case TARGET_NR_waitpid:
7172         {
7173             int status;
7174             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7175             if (!is_error(ret) && arg2 && ret
7176                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7177                 return -TARGET_EFAULT;
7178         }
7179         return ret;
7180 #endif
7181 #ifdef TARGET_NR_waitid
7182     case TARGET_NR_waitid:
7183         {
7184             siginfo_t info;
7185             info.si_pid = 0;
7186             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7187             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7188                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7189                     return -TARGET_EFAULT;
7190                 host_to_target_siginfo(p, &info);
7191                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7192             }
7193         }
7194         return ret;
7195 #endif
7196 #ifdef TARGET_NR_creat /* not on alpha */
7197     case TARGET_NR_creat:
7198         if (!(p = lock_user_string(arg1)))
7199             return -TARGET_EFAULT;
7200         ret = get_errno(creat(p, arg2));
7201         fd_trans_unregister(ret);
7202         unlock_user(p, arg1, 0);
7203         return ret;
7204 #endif
7205 #ifdef TARGET_NR_link
7206     case TARGET_NR_link:
7207         {
7208             void * p2;
7209             p = lock_user_string(arg1);
7210             p2 = lock_user_string(arg2);
7211             if (!p || !p2)
7212                 ret = -TARGET_EFAULT;
7213             else
7214                 ret = get_errno(link(p, p2));
7215             unlock_user(p2, arg2, 0);
7216             unlock_user(p, arg1, 0);
7217         }
7218         return ret;
7219 #endif
7220 #if defined(TARGET_NR_linkat)
7221     case TARGET_NR_linkat:
7222         {
7223             void * p2 = NULL;
7224             if (!arg2 || !arg4)
7225                 return -TARGET_EFAULT;
7226             p  = lock_user_string(arg2);
7227             p2 = lock_user_string(arg4);
7228             if (!p || !p2)
7229                 ret = -TARGET_EFAULT;
7230             else
7231                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7232             unlock_user(p, arg2, 0);
7233             unlock_user(p2, arg4, 0);
7234         }
7235         return ret;
7236 #endif
7237 #ifdef TARGET_NR_unlink
7238     case TARGET_NR_unlink:
7239         if (!(p = lock_user_string(arg1)))
7240             return -TARGET_EFAULT;
7241         ret = get_errno(unlink(p));
7242         unlock_user(p, arg1, 0);
7243         return ret;
7244 #endif
7245 #if defined(TARGET_NR_unlinkat)
7246     case TARGET_NR_unlinkat:
7247         if (!(p = lock_user_string(arg2)))
7248             return -TARGET_EFAULT;
7249         ret = get_errno(unlinkat(arg1, p, arg3));
7250         unlock_user(p, arg2, 0);
7251         return ret;
7252 #endif
7253     case TARGET_NR_execve:
7254         {
7255             char **argp, **envp;
7256             int argc, envc;
7257             abi_ulong gp;
7258             abi_ulong guest_argp;
7259             abi_ulong guest_envp;
7260             abi_ulong addr;
7261             char **q;
7262             int total_size = 0;
7263 
7264             argc = 0;
7265             guest_argp = arg2;
7266             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7267                 if (get_user_ual(addr, gp))
7268                     return -TARGET_EFAULT;
7269                 if (!addr)
7270                     break;
7271                 argc++;
7272             }
7273             envc = 0;
7274             guest_envp = arg3;
7275             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7276                 if (get_user_ual(addr, gp))
7277                     return -TARGET_EFAULT;
7278                 if (!addr)
7279                     break;
7280                 envc++;
7281             }
7282 
7283             argp = g_new0(char *, argc + 1);
7284             envp = g_new0(char *, envc + 1);
7285 
7286             for (gp = guest_argp, q = argp; gp;
7287                   gp += sizeof(abi_ulong), q++) {
7288                 if (get_user_ual(addr, gp))
7289                     goto execve_efault;
7290                 if (!addr)
7291                     break;
7292                 if (!(*q = lock_user_string(addr)))
7293                     goto execve_efault;
7294                 total_size += strlen(*q) + 1;
7295             }
7296             *q = NULL;
7297 
7298             for (gp = guest_envp, q = envp; gp;
7299                   gp += sizeof(abi_ulong), q++) {
7300                 if (get_user_ual(addr, gp))
7301                     goto execve_efault;
7302                 if (!addr)
7303                     break;
7304                 if (!(*q = lock_user_string(addr)))
7305                     goto execve_efault;
7306                 total_size += strlen(*q) + 1;
7307             }
7308             *q = NULL;
7309 
7310             if (!(p = lock_user_string(arg1)))
7311                 goto execve_efault;
7312             /* Although execve() is not an interruptible syscall it is
7313              * a special case where we must use the safe_syscall wrapper:
7314              * if we allow a signal to happen before we make the host
7315              * syscall then we will 'lose' it, because at the point of
7316              * execve the process leaves QEMU's control. So we use the
7317              * safe syscall wrapper to ensure that we either take the
7318              * signal as a guest signal, or else it does not happen
7319              * before the execve completes and makes it the other
7320              * program's problem.
7321              */
7322             ret = get_errno(safe_execve(p, argp, envp));
7323             unlock_user(p, arg1, 0);
7324 
7325             goto execve_end;
7326 
7327         execve_efault:
7328             ret = -TARGET_EFAULT;
7329 
7330         execve_end:
7331             for (gp = guest_argp, q = argp; *q;
7332                   gp += sizeof(abi_ulong), q++) {
7333                 if (get_user_ual(addr, gp)
7334                     || !addr)
7335                     break;
7336                 unlock_user(*q, addr, 0);
7337             }
7338             for (gp = guest_envp, q = envp; *q;
7339                   gp += sizeof(abi_ulong), q++) {
7340                 if (get_user_ual(addr, gp)
7341                     || !addr)
7342                     break;
7343                 unlock_user(*q, addr, 0);
7344             }
7345 
7346             g_free(argp);
7347             g_free(envp);
7348         }
7349         return ret;
7350     case TARGET_NR_chdir:
7351         if (!(p = lock_user_string(arg1)))
7352             return -TARGET_EFAULT;
7353         ret = get_errno(chdir(p));
7354         unlock_user(p, arg1, 0);
7355         return ret;
7356 #ifdef TARGET_NR_time
7357     case TARGET_NR_time:
7358         {
7359             time_t host_time;
7360             ret = get_errno(time(&host_time));
7361             if (!is_error(ret)
7362                 && arg1
7363                 && put_user_sal(host_time, arg1))
7364                 return -TARGET_EFAULT;
7365         }
7366         return ret;
7367 #endif
7368 #ifdef TARGET_NR_mknod
7369     case TARGET_NR_mknod:
7370         if (!(p = lock_user_string(arg1)))
7371             return -TARGET_EFAULT;
7372         ret = get_errno(mknod(p, arg2, arg3));
7373         unlock_user(p, arg1, 0);
7374         return ret;
7375 #endif
7376 #if defined(TARGET_NR_mknodat)
7377     case TARGET_NR_mknodat:
7378         if (!(p = lock_user_string(arg2)))
7379             return -TARGET_EFAULT;
7380         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7381         unlock_user(p, arg2, 0);
7382         return ret;
7383 #endif
7384 #ifdef TARGET_NR_chmod
7385     case TARGET_NR_chmod:
7386         if (!(p = lock_user_string(arg1)))
7387             return -TARGET_EFAULT;
7388         ret = get_errno(chmod(p, arg2));
7389         unlock_user(p, arg1, 0);
7390         return ret;
7391 #endif
7392 #ifdef TARGET_NR_lseek
7393     case TARGET_NR_lseek:
7394         return get_errno(lseek(arg1, arg2, arg3));
7395 #endif
7396 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7397     /* Alpha specific */
7398     case TARGET_NR_getxpid:
7399         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7400         return get_errno(getpid());
7401 #endif
7402 #ifdef TARGET_NR_getpid
7403     case TARGET_NR_getpid:
7404         return get_errno(getpid());
7405 #endif
7406     case TARGET_NR_mount:
7407         {
7408             /* need to look at the data field */
7409             void *p2, *p3;
7410 
7411             if (arg1) {
7412                 p = lock_user_string(arg1);
7413                 if (!p) {
7414                     return -TARGET_EFAULT;
7415                 }
7416             } else {
7417                 p = NULL;
7418             }
7419 
7420             p2 = lock_user_string(arg2);
7421             if (!p2) {
7422                 if (arg1) {
7423                     unlock_user(p, arg1, 0);
7424                 }
7425                 return -TARGET_EFAULT;
7426             }
7427 
7428             if (arg3) {
7429                 p3 = lock_user_string(arg3);
7430                 if (!p3) {
7431                     if (arg1) {
7432                         unlock_user(p, arg1, 0);
7433                     }
7434                     unlock_user(p2, arg2, 0);
7435                     return -TARGET_EFAULT;
7436                 }
7437             } else {
7438                 p3 = NULL;
7439             }
7440 
7441             /* FIXME - arg5 should be locked, but it isn't clear how to
7442              * do that since it's not guaranteed to be a NULL-terminated
7443              * string.
7444              */
7445             if (!arg5) {
7446                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7447             } else {
7448                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7449             }
7450             ret = get_errno(ret);
7451 
7452             if (arg1) {
7453                 unlock_user(p, arg1, 0);
7454             }
7455             unlock_user(p2, arg2, 0);
7456             if (arg3) {
7457                 unlock_user(p3, arg3, 0);
7458             }
7459         }
7460         return ret;
7461 #ifdef TARGET_NR_umount
7462     case TARGET_NR_umount:
7463         if (!(p = lock_user_string(arg1)))
7464             return -TARGET_EFAULT;
7465         ret = get_errno(umount(p));
7466         unlock_user(p, arg1, 0);
7467         return ret;
7468 #endif
7469 #ifdef TARGET_NR_stime /* not on alpha */
7470     case TARGET_NR_stime:
7471         {
7472             time_t host_time;
7473             if (get_user_sal(host_time, arg1))
7474                 return -TARGET_EFAULT;
7475             return get_errno(stime(&host_time));
7476         }
7477 #endif
7478 #ifdef TARGET_NR_alarm /* not on alpha */
7479     case TARGET_NR_alarm:
7480         return alarm(arg1);
7481 #endif
7482 #ifdef TARGET_NR_pause /* not on alpha */
7483     case TARGET_NR_pause:
7484         if (!block_signals()) {
7485             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7486         }
7487         return -TARGET_EINTR;
7488 #endif
7489 #ifdef TARGET_NR_utime
7490     case TARGET_NR_utime:
7491         {
7492             struct utimbuf tbuf, *host_tbuf;
7493             struct target_utimbuf *target_tbuf;
7494             if (arg2) {
7495                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7496                     return -TARGET_EFAULT;
7497                 tbuf.actime = tswapal(target_tbuf->actime);
7498                 tbuf.modtime = tswapal(target_tbuf->modtime);
7499                 unlock_user_struct(target_tbuf, arg2, 0);
7500                 host_tbuf = &tbuf;
7501             } else {
7502                 host_tbuf = NULL;
7503             }
7504             if (!(p = lock_user_string(arg1)))
7505                 return -TARGET_EFAULT;
7506             ret = get_errno(utime(p, host_tbuf));
7507             unlock_user(p, arg1, 0);
7508         }
7509         return ret;
7510 #endif
7511 #ifdef TARGET_NR_utimes
7512     case TARGET_NR_utimes:
7513         {
7514             struct timeval *tvp, tv[2];
7515             if (arg2) {
7516                 if (copy_from_user_timeval(&tv[0], arg2)
7517                     || copy_from_user_timeval(&tv[1],
7518                                               arg2 + sizeof(struct target_timeval)))
7519                     return -TARGET_EFAULT;
7520                 tvp = tv;
7521             } else {
7522                 tvp = NULL;
7523             }
7524             if (!(p = lock_user_string(arg1)))
7525                 return -TARGET_EFAULT;
7526             ret = get_errno(utimes(p, tvp));
7527             unlock_user(p, arg1, 0);
7528         }
7529         return ret;
7530 #endif
7531 #if defined(TARGET_NR_futimesat)
7532     case TARGET_NR_futimesat:
7533         {
7534             struct timeval *tvp, tv[2];
7535             if (arg3) {
7536                 if (copy_from_user_timeval(&tv[0], arg3)
7537                     || copy_from_user_timeval(&tv[1],
7538                                               arg3 + sizeof(struct target_timeval)))
7539                     return -TARGET_EFAULT;
7540                 tvp = tv;
7541             } else {
7542                 tvp = NULL;
7543             }
7544             if (!(p = lock_user_string(arg2))) {
7545                 return -TARGET_EFAULT;
7546             }
7547             ret = get_errno(futimesat(arg1, path(p), tvp));
7548             unlock_user(p, arg2, 0);
7549         }
7550         return ret;
7551 #endif
7552 #ifdef TARGET_NR_access
7553     case TARGET_NR_access:
7554         if (!(p = lock_user_string(arg1))) {
7555             return -TARGET_EFAULT;
7556         }
7557         ret = get_errno(access(path(p), arg2));
7558         unlock_user(p, arg1, 0);
7559         return ret;
7560 #endif
7561 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7562     case TARGET_NR_faccessat:
7563         if (!(p = lock_user_string(arg2))) {
7564             return -TARGET_EFAULT;
7565         }
7566         ret = get_errno(faccessat(arg1, p, arg3, 0));
7567         unlock_user(p, arg2, 0);
7568         return ret;
7569 #endif
7570 #ifdef TARGET_NR_nice /* not on alpha */
7571     case TARGET_NR_nice:
7572         return get_errno(nice(arg1));
7573 #endif
7574     case TARGET_NR_sync:
7575         sync();
7576         return 0;
7577 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7578     case TARGET_NR_syncfs:
7579         return get_errno(syncfs(arg1));
7580 #endif
7581     case TARGET_NR_kill:
7582         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7583 #ifdef TARGET_NR_rename
7584     case TARGET_NR_rename:
7585         {
7586             void *p2;
7587             p = lock_user_string(arg1);
7588             p2 = lock_user_string(arg2);
7589             if (!p || !p2)
7590                 ret = -TARGET_EFAULT;
7591             else
7592                 ret = get_errno(rename(p, p2));
7593             unlock_user(p2, arg2, 0);
7594             unlock_user(p, arg1, 0);
7595         }
7596         return ret;
7597 #endif
7598 #if defined(TARGET_NR_renameat)
7599     case TARGET_NR_renameat:
7600         {
7601             void *p2;
7602             p  = lock_user_string(arg2);
7603             p2 = lock_user_string(arg4);
7604             if (!p || !p2)
7605                 ret = -TARGET_EFAULT;
7606             else
7607                 ret = get_errno(renameat(arg1, p, arg3, p2));
7608             unlock_user(p2, arg4, 0);
7609             unlock_user(p, arg2, 0);
7610         }
7611         return ret;
7612 #endif
7613 #if defined(TARGET_NR_renameat2)
7614     case TARGET_NR_renameat2:
7615         {
7616             void *p2;
7617             p  = lock_user_string(arg2);
7618             p2 = lock_user_string(arg4);
7619             if (!p || !p2) {
7620                 ret = -TARGET_EFAULT;
7621             } else {
7622                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7623             }
7624             unlock_user(p2, arg4, 0);
7625             unlock_user(p, arg2, 0);
7626         }
7627         return ret;
7628 #endif
7629 #ifdef TARGET_NR_mkdir
7630     case TARGET_NR_mkdir:
7631         if (!(p = lock_user_string(arg1)))
7632             return -TARGET_EFAULT;
7633         ret = get_errno(mkdir(p, arg2));
7634         unlock_user(p, arg1, 0);
7635         return ret;
7636 #endif
7637 #if defined(TARGET_NR_mkdirat)
7638     case TARGET_NR_mkdirat:
7639         if (!(p = lock_user_string(arg2)))
7640             return -TARGET_EFAULT;
7641         ret = get_errno(mkdirat(arg1, p, arg3));
7642         unlock_user(p, arg2, 0);
7643         return ret;
7644 #endif
7645 #ifdef TARGET_NR_rmdir
7646     case TARGET_NR_rmdir:
7647         if (!(p = lock_user_string(arg1)))
7648             return -TARGET_EFAULT;
7649         ret = get_errno(rmdir(p));
7650         unlock_user(p, arg1, 0);
7651         return ret;
7652 #endif
7653     case TARGET_NR_dup:
7654         ret = get_errno(dup(arg1));
7655         if (ret >= 0) {
7656             fd_trans_dup(arg1, ret);
7657         }
7658         return ret;
7659 #ifdef TARGET_NR_pipe
7660     case TARGET_NR_pipe:
7661         return do_pipe(cpu_env, arg1, 0, 0);
7662 #endif
7663 #ifdef TARGET_NR_pipe2
7664     case TARGET_NR_pipe2:
7665         return do_pipe(cpu_env, arg1,
7666                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7667 #endif
7668     case TARGET_NR_times:
7669         {
7670             struct target_tms *tmsp;
7671             struct tms tms;
7672             ret = get_errno(times(&tms));
7673             if (arg1) {
7674                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7675                 if (!tmsp)
7676                     return -TARGET_EFAULT;
7677                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7678                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7679                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7680                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7681             }
7682             if (!is_error(ret))
7683                 ret = host_to_target_clock_t(ret);
7684         }
7685         return ret;
7686     case TARGET_NR_acct:
7687         if (arg1 == 0) {
7688             ret = get_errno(acct(NULL));
7689         } else {
7690             if (!(p = lock_user_string(arg1))) {
7691                 return -TARGET_EFAULT;
7692             }
7693             ret = get_errno(acct(path(p)));
7694             unlock_user(p, arg1, 0);
7695         }
7696         return ret;
7697 #ifdef TARGET_NR_umount2
7698     case TARGET_NR_umount2:
7699         if (!(p = lock_user_string(arg1)))
7700             return -TARGET_EFAULT;
7701         ret = get_errno(umount2(p, arg2));
7702         unlock_user(p, arg1, 0);
7703         return ret;
7704 #endif
7705     case TARGET_NR_ioctl:
7706         return do_ioctl(arg1, arg2, arg3);
7707 #ifdef TARGET_NR_fcntl
7708     case TARGET_NR_fcntl:
7709         return do_fcntl(arg1, arg2, arg3);
7710 #endif
7711     case TARGET_NR_setpgid:
7712         return get_errno(setpgid(arg1, arg2));
7713     case TARGET_NR_umask:
7714         return get_errno(umask(arg1));
7715     case TARGET_NR_chroot:
7716         if (!(p = lock_user_string(arg1)))
7717             return -TARGET_EFAULT;
7718         ret = get_errno(chroot(p));
7719         unlock_user(p, arg1, 0);
7720         return ret;
7721 #ifdef TARGET_NR_dup2
7722     case TARGET_NR_dup2:
7723         ret = get_errno(dup2(arg1, arg2));
7724         if (ret >= 0) {
7725             fd_trans_dup(arg1, arg2);
7726         }
7727         return ret;
7728 #endif
7729 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7730     case TARGET_NR_dup3:
7731     {
7732         int host_flags;
7733 
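             /* As in the kernel, the only flag dup3() accepts is O_CLOEXEC. */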
7734         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7735             return -TARGET_EINVAL;
7736         }
7737         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7738         ret = get_errno(dup3(arg1, arg2, host_flags));
7739         if (ret >= 0) {
7740             fd_trans_dup(arg1, arg2);
7741         }
7742         return ret;
7743     }
7744 #endif
7745 #ifdef TARGET_NR_getppid /* not on alpha */
7746     case TARGET_NR_getppid:
7747         return get_errno(getppid());
7748 #endif
7749 #ifdef TARGET_NR_getpgrp
7750     case TARGET_NR_getpgrp:
7751         return get_errno(getpgrp());
7752 #endif
7753     case TARGET_NR_setsid:
7754         return get_errno(setsid());
7755 #ifdef TARGET_NR_sigaction
7756     case TARGET_NR_sigaction:
7757         {
7758 #if defined(TARGET_ALPHA)
7759             struct target_sigaction act, oact, *pact = 0;
7760             struct target_old_sigaction *old_act;
7761             if (arg2) {
7762                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7763                     return -TARGET_EFAULT;
7764                 act._sa_handler = old_act->_sa_handler;
7765                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7766                 act.sa_flags = old_act->sa_flags;
7767                 act.sa_restorer = 0;
7768                 unlock_user_struct(old_act, arg2, 0);
7769                 pact = &act;
7770             }
7771             ret = get_errno(do_sigaction(arg1, pact, &oact));
7772             if (!is_error(ret) && arg3) {
7773                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7774                     return -TARGET_EFAULT;
7775                 old_act->_sa_handler = oact._sa_handler;
7776                 old_act->sa_mask = oact.sa_mask.sig[0];
7777                 old_act->sa_flags = oact.sa_flags;
7778                 unlock_user_struct(old_act, arg3, 1);
7779             }
7780 #elif defined(TARGET_MIPS)
7781             struct target_sigaction act, oact, *pact, *old_act;
7782 
7783             if (arg2) {
7784                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7785                     return -TARGET_EFAULT;
7786                 act._sa_handler = old_act->_sa_handler;
7787                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7788                 act.sa_flags = old_act->sa_flags;
7789                 unlock_user_struct(old_act, arg2, 0);
7790                 pact = &act;
7791             } else {
7792                 pact = NULL;
7793             }
7794 
7795             ret = get_errno(do_sigaction(arg1, pact, &oact));
7796 
7797             if (!is_error(ret) && arg3) {
7798                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7799                     return -TARGET_EFAULT;
7800                 old_act->_sa_handler = oact._sa_handler;
7801                 old_act->sa_flags = oact.sa_flags;
7802                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7803                 old_act->sa_mask.sig[1] = 0;
7804                 old_act->sa_mask.sig[2] = 0;
7805                 old_act->sa_mask.sig[3] = 0;
7806                 unlock_user_struct(old_act, arg3, 1);
7807             }
7808 #else
7809             struct target_old_sigaction *old_act;
7810             struct target_sigaction act, oact, *pact;
7811             if (arg2) {
7812                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7813                     return -TARGET_EFAULT;
7814                 act._sa_handler = old_act->_sa_handler;
7815                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7816                 act.sa_flags = old_act->sa_flags;
7817                 act.sa_restorer = old_act->sa_restorer;
7818 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7819                 act.ka_restorer = 0;
7820 #endif
7821                 unlock_user_struct(old_act, arg2, 0);
7822                 pact = &act;
7823             } else {
7824                 pact = NULL;
7825             }
7826             ret = get_errno(do_sigaction(arg1, pact, &oact));
7827             if (!is_error(ret) && arg3) {
7828                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7829                     return -TARGET_EFAULT;
7830                 old_act->_sa_handler = oact._sa_handler;
7831                 old_act->sa_mask = oact.sa_mask.sig[0];
7832                 old_act->sa_flags = oact.sa_flags;
7833                 old_act->sa_restorer = oact.sa_restorer;
7834                 unlock_user_struct(old_act, arg3, 1);
7835             }
7836 #endif
7837         }
7838         return ret;
7839 #endif
7840     case TARGET_NR_rt_sigaction:
7841         {
7842 #if defined(TARGET_ALPHA)
7843             /* For Alpha and SPARC this is a 5 argument syscall, with
7844              * a 'restorer' parameter which must be copied into the
7845              * sa_restorer field of the sigaction struct.
7846              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7847              * and arg5 is the sigsetsize.
7848              * Alpha also has a separate rt_sigaction struct that it uses
7849              * here; SPARC uses the usual sigaction struct.
7850              */
7851             struct target_rt_sigaction *rt_act;
7852             struct target_sigaction act, oact, *pact = 0;
7853 
7854             if (arg4 != sizeof(target_sigset_t)) {
7855                 return -TARGET_EINVAL;
7856             }
7857             if (arg2) {
7858                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7859                     return -TARGET_EFAULT;
7860                 act._sa_handler = rt_act->_sa_handler;
7861                 act.sa_mask = rt_act->sa_mask;
7862                 act.sa_flags = rt_act->sa_flags;
7863                 act.sa_restorer = arg5;
7864                 unlock_user_struct(rt_act, arg2, 0);
7865                 pact = &act;
7866             }
7867             ret = get_errno(do_sigaction(arg1, pact, &oact));
7868             if (!is_error(ret) && arg3) {
7869                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7870                     return -TARGET_EFAULT;
7871                 rt_act->_sa_handler = oact._sa_handler;
7872                 rt_act->sa_mask = oact.sa_mask;
7873                 rt_act->sa_flags = oact.sa_flags;
7874                 unlock_user_struct(rt_act, arg3, 1);
7875             }
7876 #else
7877 #ifdef TARGET_SPARC
7878             target_ulong restorer = arg4;
7879             target_ulong sigsetsize = arg5;
7880 #else
7881             target_ulong sigsetsize = arg4;
7882 #endif
7883             struct target_sigaction *act;
7884             struct target_sigaction *oact;
7885 
7886             if (sigsetsize != sizeof(target_sigset_t)) {
7887                 return -TARGET_EINVAL;
7888             }
7889             if (arg2) {
7890                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7891                     return -TARGET_EFAULT;
7892                 }
7893 #ifdef TARGET_ARCH_HAS_KA_RESTORER
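                     /* Only SPARC defines this; the restorer trampoline arrives in arg4 (see above). */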
7894                 act->ka_restorer = restorer;
7895 #endif
7896             } else {
7897                 act = NULL;
7898             }
7899             if (arg3) {
7900                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7901                     ret = -TARGET_EFAULT;
7902                     goto rt_sigaction_fail;
7903                 }
7904             } else
7905                 oact = NULL;
7906             ret = get_errno(do_sigaction(arg1, act, oact));
7907         rt_sigaction_fail:
7908             if (act)
7909                 unlock_user_struct(act, arg2, 0);
7910             if (oact)
7911                 unlock_user_struct(oact, arg3, 1);
7912 #endif
7913         }
7914         return ret;
7915 #ifdef TARGET_NR_sgetmask /* not on alpha */
7916     case TARGET_NR_sgetmask:
7917         {
7918             sigset_t cur_set;
7919             abi_ulong target_set;
7920             ret = do_sigprocmask(0, NULL, &cur_set);
7921             if (!ret) {
7922                 host_to_target_old_sigset(&target_set, &cur_set);
7923                 ret = target_set;
7924             }
7925         }
7926         return ret;
7927 #endif
7928 #ifdef TARGET_NR_ssetmask /* not on alpha */
7929     case TARGET_NR_ssetmask:
7930         {
7931             sigset_t set, oset;
7932             abi_ulong target_set = arg1;
7933             target_to_host_old_sigset(&set, &target_set);
7934             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7935             if (!ret) {
7936                 host_to_target_old_sigset(&target_set, &oset);
7937                 ret = target_set;
7938             }
7939         }
7940         return ret;
7941 #endif
7942 #ifdef TARGET_NR_sigprocmask
7943     case TARGET_NR_sigprocmask:
7944         {
7945 #if defined(TARGET_ALPHA)
7946             sigset_t set, oldset;
7947             abi_ulong mask;
7948             int how;
7949 
7950             switch (arg1) {
7951             case TARGET_SIG_BLOCK:
7952                 how = SIG_BLOCK;
7953                 break;
7954             case TARGET_SIG_UNBLOCK:
7955                 how = SIG_UNBLOCK;
7956                 break;
7957             case TARGET_SIG_SETMASK:
7958                 how = SIG_SETMASK;
7959                 break;
7960             default:
7961                 return -TARGET_EINVAL;
7962             }
7963             mask = arg2;
7964             target_to_host_old_sigset(&set, &mask);
7965 
7966             ret = do_sigprocmask(how, &set, &oldset);
7967             if (!is_error(ret)) {
7968                 host_to_target_old_sigset(&mask, &oldset);
7969                 ret = mask;
7970                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7971             }
7972 #else
7973             sigset_t set, oldset, *set_ptr;
7974             int how;
7975 
7976             if (arg2) {
7977                 switch (arg1) {
7978                 case TARGET_SIG_BLOCK:
7979                     how = SIG_BLOCK;
7980                     break;
7981                 case TARGET_SIG_UNBLOCK:
7982                     how = SIG_UNBLOCK;
7983                     break;
7984                 case TARGET_SIG_SETMASK:
7985                     how = SIG_SETMASK;
7986                     break;
7987                 default:
7988                     return -TARGET_EINVAL;
7989                 }
7990                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7991                     return -TARGET_EFAULT;
7992                 target_to_host_old_sigset(&set, p);
7993                 unlock_user(p, arg2, 0);
7994                 set_ptr = &set;
7995             } else {
7996                 how = 0;
7997                 set_ptr = NULL;
7998             }
7999             ret = do_sigprocmask(how, set_ptr, &oldset);
8000             if (!is_error(ret) && arg3) {
8001                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8002                     return -TARGET_EFAULT;
8003                 host_to_target_old_sigset(p, &oldset);
8004                 unlock_user(p, arg3, sizeof(target_sigset_t));
8005             }
8006 #endif
8007         }
8008         return ret;
8009 #endif
8010     case TARGET_NR_rt_sigprocmask:
8011         {
8012             int how = arg1;
8013             sigset_t set, oldset, *set_ptr;
8014 
8015             if (arg4 != sizeof(target_sigset_t)) {
8016                 return -TARGET_EINVAL;
8017             }
8018 
8019             if (arg2) {
8020                 switch(how) {
8021                 case TARGET_SIG_BLOCK:
8022                     how = SIG_BLOCK;
8023                     break;
8024                 case TARGET_SIG_UNBLOCK:
8025                     how = SIG_UNBLOCK;
8026                     break;
8027                 case TARGET_SIG_SETMASK:
8028                     how = SIG_SETMASK;
8029                     break;
8030                 default:
8031                     return -TARGET_EINVAL;
8032                 }
8033                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8034                     return -TARGET_EFAULT;
8035                 target_to_host_sigset(&set, p);
8036                 unlock_user(p, arg2, 0);
8037                 set_ptr = &set;
8038             } else {
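                     /* No new mask supplied: 'how' is ignored and we only read the old mask. */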
8039                 how = 0;
8040                 set_ptr = NULL;
8041             }
8042             ret = do_sigprocmask(how, set_ptr, &oldset);
8043             if (!is_error(ret) && arg3) {
8044                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8045                     return -TARGET_EFAULT;
8046                 host_to_target_sigset(p, &oldset);
8047                 unlock_user(p, arg3, sizeof(target_sigset_t));
8048             }
8049         }
8050         return ret;
8051 #ifdef TARGET_NR_sigpending
8052     case TARGET_NR_sigpending:
8053         {
8054             sigset_t set;
8055             ret = get_errno(sigpending(&set));
8056             if (!is_error(ret)) {
8057                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8058                     return -TARGET_EFAULT;
8059                 host_to_target_old_sigset(p, &set);
8060                 unlock_user(p, arg1, sizeof(target_sigset_t));
8061             }
8062         }
8063         return ret;
8064 #endif
8065     case TARGET_NR_rt_sigpending:
8066         {
8067             sigset_t set;
8068 
8069             /* Yes, this check is >, not != like most. We follow the kernel's
8070              * logic and it does it like this because it implements
8071              * NR_sigpending through the same code path, and in that case
8072              * the old_sigset_t is smaller in size.
8073              */
8074             if (arg2 > sizeof(target_sigset_t)) {
8075                 return -TARGET_EINVAL;
8076             }
8077 
8078             ret = get_errno(sigpending(&set));
8079             if (!is_error(ret)) {
8080                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8081                     return -TARGET_EFAULT;
8082                 host_to_target_sigset(p, &set);
8083                 unlock_user(p, arg1, sizeof(target_sigset_t));
8084             }
8085         }
8086         return ret;
8087 #ifdef TARGET_NR_sigsuspend
8088     case TARGET_NR_sigsuspend:
8089         {
8090             TaskState *ts = cpu->opaque;
8091 #if defined(TARGET_ALPHA)
8092             abi_ulong mask = arg1;
8093             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8094 #else
8095             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8096                 return -TARGET_EFAULT;
8097             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8098             unlock_user(p, arg1, 0);
8099 #endif
8100             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8101                                                SIGSET_T_SIZE));
8102             if (ret != -TARGET_ERESTARTSYS) {
8103                 ts->in_sigsuspend = 1;
8104             }
8105         }
8106         return ret;
8107 #endif
8108     case TARGET_NR_rt_sigsuspend:
8109         {
8110             TaskState *ts = cpu->opaque;
8111 
8112             if (arg2 != sizeof(target_sigset_t)) {
8113                 return -TARGET_EINVAL;
8114             }
8115             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8116                 return -TARGET_EFAULT;
8117             target_to_host_sigset(&ts->sigsuspend_mask, p);
8118             unlock_user(p, arg1, 0);
8119             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8120                                                SIGSET_T_SIZE));
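                 /* Don't record the suspend if the syscall is just going to be restarted. */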
8121             if (ret != -TARGET_ERESTARTSYS) {
8122                 ts->in_sigsuspend = 1;
8123             }
8124         }
8125         return ret;
8126     case TARGET_NR_rt_sigtimedwait:
8127         {
8128             sigset_t set;
8129             struct timespec uts, *puts;
8130             siginfo_t uinfo;
8131 
8132             if (arg4 != sizeof(target_sigset_t)) {
8133                 return -TARGET_EINVAL;
8134             }
8135 
8136             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8137                 return -TARGET_EFAULT;
8138             target_to_host_sigset(&set, p);
8139             unlock_user(p, arg1, 0);
8140             if (arg3) {
8141                 puts = &uts;
8142                 target_to_host_timespec(puts, arg3);
8143             } else {
8144                 puts = NULL;
8145             }
8146             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8147                                                  SIGSET_T_SIZE));
8148             if (!is_error(ret)) {
8149                 if (arg2) {
8150                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8151                                   0);
8152                     if (!p) {
8153                         return -TARGET_EFAULT;
8154                     }
8155                     host_to_target_siginfo(p, &uinfo);
8156                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8157                 }
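                     /* The host signal number is returned; convert it to target numbering. */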
8158                 ret = host_to_target_signal(ret);
8159             }
8160         }
8161         return ret;
8162     case TARGET_NR_rt_sigqueueinfo:
8163         {
8164             siginfo_t uinfo;
8165 
8166             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8167             if (!p) {
8168                 return -TARGET_EFAULT;
8169             }
8170             target_to_host_siginfo(&uinfo, p);
8171             unlock_user(p, arg3, 0);
8172             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8173         }
8174         return ret;
8175     case TARGET_NR_rt_tgsigqueueinfo:
8176         {
8177             siginfo_t uinfo;
8178 
8179             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8180             if (!p) {
8181                 return -TARGET_EFAULT;
8182             }
8183             target_to_host_siginfo(&uinfo, p);
8184             unlock_user(p, arg4, 0);
8185             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8186         }
8187         return ret;
8188 #ifdef TARGET_NR_sigreturn
8189     case TARGET_NR_sigreturn:
8190         if (block_signals()) {
8191             return -TARGET_ERESTARTSYS;
8192         }
8193         return do_sigreturn(cpu_env);
8194 #endif
8195     case TARGET_NR_rt_sigreturn:
8196         if (block_signals()) {
8197             return -TARGET_ERESTARTSYS;
8198         }
8199         return do_rt_sigreturn(cpu_env);
8200     case TARGET_NR_sethostname:
8201         if (!(p = lock_user_string(arg1)))
8202             return -TARGET_EFAULT;
8203         ret = get_errno(sethostname(p, arg2));
8204         unlock_user(p, arg1, 0);
8205         return ret;
8206 #ifdef TARGET_NR_setrlimit
8207     case TARGET_NR_setrlimit:
8208         {
8209             int resource = target_to_host_resource(arg1);
8210             struct target_rlimit *target_rlim;
8211             struct rlimit rlim;
8212             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8213                 return -TARGET_EFAULT;
8214             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8215             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8216             unlock_user_struct(target_rlim, arg2, 0);
8217             /*
8218              * If we just passed through resource limit settings for memory then
8219              * they would also apply to QEMU's own allocations, and QEMU will
8220              * crash or hang or die if its allocations fail. Ideally we would
8221              * track the guest allocations in QEMU and apply the limits ourselves.
8222              * For now, just tell the guest the call succeeded but don't actually
8223              * limit anything.
8224              */
8225             if (resource != RLIMIT_AS &&
8226                 resource != RLIMIT_DATA &&
8227                 resource != RLIMIT_STACK) {
8228                 return get_errno(setrlimit(resource, &rlim));
8229             } else {
8230                 return 0;
8231             }
8232         }
8233 #endif
8234 #ifdef TARGET_NR_getrlimit
8235     case TARGET_NR_getrlimit:
8236         {
8237             int resource = target_to_host_resource(arg1);
8238             struct target_rlimit *target_rlim;
8239             struct rlimit rlim;
8240 
8241             ret = get_errno(getrlimit(resource, &rlim));
8242             if (!is_error(ret)) {
8243                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8244                     return -TARGET_EFAULT;
8245                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8246                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8247                 unlock_user_struct(target_rlim, arg2, 1);
8248             }
8249         }
8250         return ret;
8251 #endif
8252     case TARGET_NR_getrusage:
8253         {
8254             struct rusage rusage;
8255             ret = get_errno(getrusage(arg1, &rusage));
8256             if (!is_error(ret)) {
8257                 ret = host_to_target_rusage(arg2, &rusage);
8258             }
8259         }
8260         return ret;
8261     case TARGET_NR_gettimeofday:
8262         {
8263             struct timeval tv;
8264             ret = get_errno(gettimeofday(&tv, NULL));
8265             if (!is_error(ret)) {
8266                 if (copy_to_user_timeval(arg1, &tv))
8267                     return -TARGET_EFAULT;
8268             }
8269         }
8270         return ret;
8271     case TARGET_NR_settimeofday:
8272         {
8273             struct timeval tv, *ptv = NULL;
8274             struct timezone tz, *ptz = NULL;
8275 
8276             if (arg1) {
8277                 if (copy_from_user_timeval(&tv, arg1)) {
8278                     return -TARGET_EFAULT;
8279                 }
8280                 ptv = &tv;
8281             }
8282 
8283             if (arg2) {
8284                 if (copy_from_user_timezone(&tz, arg2)) {
8285                     return -TARGET_EFAULT;
8286                 }
8287                 ptz = &tz;
8288             }
8289 
8290             return get_errno(settimeofday(ptv, ptz));
8291         }
8292 #if defined(TARGET_NR_select)
8293     case TARGET_NR_select:
8294 #if defined(TARGET_WANT_NI_OLD_SELECT)
8295         /* some architectures used to have old_select here
8296          * but now return ENOSYS for it.
8297          */
8298         ret = -TARGET_ENOSYS;
8299 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8300         ret = do_old_select(arg1);
8301 #else
8302         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8303 #endif
8304         return ret;
8305 #endif
8306 #ifdef TARGET_NR_pselect6
8307     case TARGET_NR_pselect6:
8308         {
8309             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8310             fd_set rfds, wfds, efds;
8311             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8312             struct timespec ts, *ts_ptr;
8313 
8314             /*
8315              * The 6th arg is actually two args smashed together,
8316              * so we cannot use the C library.
8317              */
8318             sigset_t set;
8319             struct {
8320                 sigset_t *set;
8321                 size_t size;
8322             } sig, *sig_ptr;
8323 
8324             abi_ulong arg_sigset, arg_sigsize, *arg7;
8325             target_sigset_t *target_sigset;
8326 
8327             n = arg1;
8328             rfd_addr = arg2;
8329             wfd_addr = arg3;
8330             efd_addr = arg4;
8331             ts_addr = arg5;
8332 
8333             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8334             if (ret) {
8335                 return ret;
8336             }
8337             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8338             if (ret) {
8339                 return ret;
8340             }
8341             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8342             if (ret) {
8343                 return ret;
8344             }
8345 
8346             /*
8347              * This takes a timespec, and not a timeval, so we cannot
8348              * use the do_select() helper ...
8349              */
8350             if (ts_addr) {
8351                 if (target_to_host_timespec(&ts, ts_addr)) {
8352                     return -TARGET_EFAULT;
8353                 }
8354                 ts_ptr = &ts;
8355             } else {
8356                 ts_ptr = NULL;
8357             }
8358 
8359             /* Extract the two packed args for the sigset */
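                 /* arg6 points to { sigset pointer, sigset size } in guest memory. */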
8360             if (arg6) {
8361                 sig_ptr = &sig;
8362                 sig.size = SIGSET_T_SIZE;
8363 
8364                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8365                 if (!arg7) {
8366                     return -TARGET_EFAULT;
8367                 }
8368                 arg_sigset = tswapal(arg7[0]);
8369                 arg_sigsize = tswapal(arg7[1]);
8370                 unlock_user(arg7, arg6, 0);
8371 
8372                 if (arg_sigset) {
8373                     sig.set = &set;
8374                     if (arg_sigsize != sizeof(*target_sigset)) {
8375                         /* Like the kernel, we enforce correct size sigsets */
8376                         return -TARGET_EINVAL;
8377                     }
8378                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8379                                               sizeof(*target_sigset), 1);
8380                     if (!target_sigset) {
8381                         return -TARGET_EFAULT;
8382                     }
8383                     target_to_host_sigset(&set, target_sigset);
8384                     unlock_user(target_sigset, arg_sigset, 0);
8385                 } else {
8386                     sig.set = NULL;
8387                 }
8388             } else {
8389                 sig_ptr = NULL;
8390             }
8391 
8392             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8393                                           ts_ptr, sig_ptr));
8394 
8395             if (!is_error(ret)) {
8396                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8397                     return -TARGET_EFAULT;
8398                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8399                     return -TARGET_EFAULT;
8400                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8401                     return -TARGET_EFAULT;
8402 
8403                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8404                     return -TARGET_EFAULT;
8405             }
8406         }
8407         return ret;
8408 #endif
8409 #ifdef TARGET_NR_symlink
8410     case TARGET_NR_symlink:
8411         {
8412             void *p2;
8413             p = lock_user_string(arg1);
8414             p2 = lock_user_string(arg2);
8415             if (!p || !p2)
8416                 ret = -TARGET_EFAULT;
8417             else
8418                 ret = get_errno(symlink(p, p2));
8419             unlock_user(p2, arg2, 0);
8420             unlock_user(p, arg1, 0);
8421         }
8422         return ret;
8423 #endif
8424 #if defined(TARGET_NR_symlinkat)
8425     case TARGET_NR_symlinkat:
8426         {
8427             void *p2;
8428             p  = lock_user_string(arg1);
8429             p2 = lock_user_string(arg3);
8430             if (!p || !p2)
8431                 ret = -TARGET_EFAULT;
8432             else
8433                 ret = get_errno(symlinkat(p, arg2, p2));
8434             unlock_user(p2, arg3, 0);
8435             unlock_user(p, arg1, 0);
8436         }
8437         return ret;
8438 #endif
8439 #ifdef TARGET_NR_readlink
8440     case TARGET_NR_readlink:
8441         {
8442             void *p2;
8443             p = lock_user_string(arg1);
8444             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8445             if (!p || !p2) {
8446                 ret = -TARGET_EFAULT;
8447             } else if (!arg3) {
8448                 /* A zero-length buffer is EINVAL; short circuit it before the magic exe check. */
8449                 ret = -TARGET_EINVAL;
8450             } else if (is_proc_myself((const char *)p, "exe")) {
8451                 char real[PATH_MAX], *temp;
8452                 temp = realpath(exec_path, real);
8453                 /* Return value is # of bytes that we wrote to the buffer. */
8454                 if (temp == NULL) {
8455                     ret = get_errno(-1);
8456                 } else {
8457                     /* Don't worry about sign mismatch as earlier mapping
8458                      * logic would have thrown a bad address error. */
8459                     ret = MIN(strlen(real), arg3);
8460                     /* We cannot NUL terminate the string. */
8461                     memcpy(p2, real, ret);
8462                 }
8463             } else {
8464                 ret = get_errno(readlink(path(p), p2, arg3));
8465             }
8466             unlock_user(p2, arg2, ret);
8467             unlock_user(p, arg1, 0);
8468         }
8469         return ret;
8470 #endif
8471 #if defined(TARGET_NR_readlinkat)
8472     case TARGET_NR_readlinkat:
8473         {
8474             void *p2;
8475             p  = lock_user_string(arg2);
8476             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8477             if (!p || !p2) {
8478                 ret = -TARGET_EFAULT;
8479             } else if (is_proc_myself((const char *)p, "exe")) {
8480                 char real[PATH_MAX], *temp = realpath(exec_path, real);
8481                 /* As for readlink above: truncate to arg4 and do not NUL terminate. */
8482                 ret = temp == NULL ? get_errno(-1) : MIN(strlen(real), arg4);
8483                 if (temp) memcpy(p2, real, ret);
8484             } else {
8485                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8486             }
8487             unlock_user(p2, arg3, ret);
8488             unlock_user(p, arg2, 0);
8489         }
8490         return ret;
8491 #endif
8492 #ifdef TARGET_NR_swapon
8493     case TARGET_NR_swapon:
8494         if (!(p = lock_user_string(arg1)))
8495             return -TARGET_EFAULT;
8496         ret = get_errno(swapon(p, arg2));
8497         unlock_user(p, arg1, 0);
8498         return ret;
8499 #endif
8500     case TARGET_NR_reboot:
8501         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8502            /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2 and must be ignored otherwise */
8503            p = lock_user_string(arg4);
8504            if (!p) {
8505                return -TARGET_EFAULT;
8506            }
8507            ret = get_errno(reboot(arg1, arg2, arg3, p));
8508            unlock_user(p, arg4, 0);
8509         } else {
8510            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8511         }
8512         return ret;
8513 #ifdef TARGET_NR_mmap
8514     case TARGET_NR_mmap:
8515 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8516     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8517     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8518     || defined(TARGET_S390X)
8519         {
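                 /* Old-style mmap: arg1 points to an array of the six real arguments. */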
8520             abi_ulong *v;
8521             abi_ulong v1, v2, v3, v4, v5, v6;
8522             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8523                 return -TARGET_EFAULT;
8524             v1 = tswapal(v[0]);
8525             v2 = tswapal(v[1]);
8526             v3 = tswapal(v[2]);
8527             v4 = tswapal(v[3]);
8528             v5 = tswapal(v[4]);
8529             v6 = tswapal(v[5]);
8530             unlock_user(v, arg1, 0);
8531             ret = get_errno(target_mmap(v1, v2, v3,
8532                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8533                                         v5, v6));
8534         }
8535 #else
8536         ret = get_errno(target_mmap(arg1, arg2, arg3,
8537                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8538                                     arg5,
8539                                     arg6));
8540 #endif
8541         return ret;
8542 #endif
8543 #ifdef TARGET_NR_mmap2
8544     case TARGET_NR_mmap2:
8545 #ifndef MMAP_SHIFT
8546 #define MMAP_SHIFT 12
8547 #endif
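             /* The last mmap2 argument is an offset in units of 1 << MMAP_SHIFT bytes. */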
8548         ret = target_mmap(arg1, arg2, arg3,
8549                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8550                           arg5, arg6 << MMAP_SHIFT);
8551         return get_errno(ret);
8552 #endif
8553     case TARGET_NR_munmap:
8554         return get_errno(target_munmap(arg1, arg2));
8555     case TARGET_NR_mprotect:
8556         {
8557             TaskState *ts = cpu->opaque;
8558             /* Special hack to detect libc making the stack executable.  */
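                 /* Drop PROT_GROWSDOWN and widen the range to the recorded stack limit ourselves. */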
8559             if ((arg3 & PROT_GROWSDOWN)
8560                 && arg1 >= ts->info->stack_limit
8561                 && arg1 <= ts->info->start_stack) {
8562                 arg3 &= ~PROT_GROWSDOWN;
8563                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8564                 arg1 = ts->info->stack_limit;
8565             }
8566         }
8567         return get_errno(target_mprotect(arg1, arg2, arg3));
8568 #ifdef TARGET_NR_mremap
8569     case TARGET_NR_mremap:
8570         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8571 #endif
8572         /* ??? msync/mlock/munlock are broken for softmmu.  */
8573 #ifdef TARGET_NR_msync
8574     case TARGET_NR_msync:
8575         return get_errno(msync(g2h(arg1), arg2, arg3));
8576 #endif
8577 #ifdef TARGET_NR_mlock
8578     case TARGET_NR_mlock:
8579         return get_errno(mlock(g2h(arg1), arg2));
8580 #endif
8581 #ifdef TARGET_NR_munlock
8582     case TARGET_NR_munlock:
8583         return get_errno(munlock(g2h(arg1), arg2));
8584 #endif
8585 #ifdef TARGET_NR_mlockall
8586     case TARGET_NR_mlockall:
8587         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8588 #endif
8589 #ifdef TARGET_NR_munlockall
8590     case TARGET_NR_munlockall:
8591         return get_errno(munlockall());
8592 #endif
8593 #ifdef TARGET_NR_truncate
8594     case TARGET_NR_truncate:
8595         if (!(p = lock_user_string(arg1)))
8596             return -TARGET_EFAULT;
8597         ret = get_errno(truncate(p, arg2));
8598         unlock_user(p, arg1, 0);
8599         return ret;
8600 #endif
8601 #ifdef TARGET_NR_ftruncate
8602     case TARGET_NR_ftruncate:
8603         return get_errno(ftruncate(arg1, arg2));
8604 #endif
8605     case TARGET_NR_fchmod:
8606         return get_errno(fchmod(arg1, arg2));
8607 #if defined(TARGET_NR_fchmodat)
8608     case TARGET_NR_fchmodat:
8609         if (!(p = lock_user_string(arg2)))
8610             return -TARGET_EFAULT;
8611         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8612         unlock_user(p, arg2, 0);
8613         return ret;
8614 #endif
8615     case TARGET_NR_getpriority:
8616         /* Note that negative values are valid for getpriority, so we must
8617            differentiate based on errno settings.  */
8618         errno = 0;
8619         ret = getpriority(arg1, arg2);
8620         if (ret == -1 && errno != 0) {
8621             return -host_to_target_errno(errno);
8622         }
8623 #ifdef TARGET_ALPHA
8624         /* Return value is the unbiased priority.  Signal no error.  */
8625         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8626 #else
8627         /* Return value is a biased priority to avoid negative numbers.  */
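             /* Kernel convention: 20 - nice, i.e. a value in the range 1..40. */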
8628         ret = 20 - ret;
8629 #endif
8630         return ret;
8631     case TARGET_NR_setpriority:
8632         return get_errno(setpriority(arg1, arg2, arg3));
8633 #ifdef TARGET_NR_statfs
8634     case TARGET_NR_statfs:
8635         if (!(p = lock_user_string(arg1))) {
8636             return -TARGET_EFAULT;
8637         }
8638         ret = get_errno(statfs(path(p), &stfs));
8639         unlock_user(p, arg1, 0);
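         /* fstatfs joins here via goto with ret and stfs already filled in. */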
8640     convert_statfs:
8641         if (!is_error(ret)) {
8642             struct target_statfs *target_stfs;
8643 
8644             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8645                 return -TARGET_EFAULT;
8646             __put_user(stfs.f_type, &target_stfs->f_type);
8647             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8648             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8649             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8650             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8651             __put_user(stfs.f_files, &target_stfs->f_files);
8652             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8653             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8654             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8655             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8656             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8657 #ifdef _STATFS_F_FLAGS
8658             __put_user(stfs.f_flags, &target_stfs->f_flags);
8659 #else
8660             __put_user(0, &target_stfs->f_flags);
8661 #endif
8662             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8663             unlock_user_struct(target_stfs, arg2, 1);
8664         }
8665         return ret;
8666 #endif
8667 #ifdef TARGET_NR_fstatfs
8668     case TARGET_NR_fstatfs:
8669         ret = get_errno(fstatfs(arg1, &stfs));
8670         goto convert_statfs;
8671 #endif
8672 #ifdef TARGET_NR_statfs64
8673     case TARGET_NR_statfs64:
8674         if (!(p = lock_user_string(arg1))) {
8675             return -TARGET_EFAULT;
8676         }
8677         ret = get_errno(statfs(path(p), &stfs));
8678         unlock_user(p, arg1, 0);
8679     convert_statfs64:
8680         if (!is_error(ret)) {
8681             struct target_statfs64 *target_stfs;
8682 
8683             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8684                 return -TARGET_EFAULT;
8685             __put_user(stfs.f_type, &target_stfs->f_type);
8686             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8687             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8688             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8689             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8690             __put_user(stfs.f_files, &target_stfs->f_files);
8691             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8692             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8693             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8694             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8695             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8696             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8697             unlock_user_struct(target_stfs, arg3, 1);
8698         }
8699         return ret;
8700     case TARGET_NR_fstatfs64:
8701         ret = get_errno(fstatfs(arg1, &stfs));
8702         goto convert_statfs64;
8703 #endif
8704 #ifdef TARGET_NR_socketcall
8705     case TARGET_NR_socketcall:
8706         return do_socketcall(arg1, arg2);
8707 #endif
8708 #ifdef TARGET_NR_accept
8709     case TARGET_NR_accept:
8710         return do_accept4(arg1, arg2, arg3, 0);
8711 #endif
8712 #ifdef TARGET_NR_accept4
8713     case TARGET_NR_accept4:
8714         return do_accept4(arg1, arg2, arg3, arg4);
8715 #endif
8716 #ifdef TARGET_NR_bind
8717     case TARGET_NR_bind:
8718         return do_bind(arg1, arg2, arg3);
8719 #endif
8720 #ifdef TARGET_NR_connect
8721     case TARGET_NR_connect:
8722         return do_connect(arg1, arg2, arg3);
8723 #endif
8724 #ifdef TARGET_NR_getpeername
8725     case TARGET_NR_getpeername:
8726         return do_getpeername(arg1, arg2, arg3);
8727 #endif
8728 #ifdef TARGET_NR_getsockname
8729     case TARGET_NR_getsockname:
8730         return do_getsockname(arg1, arg2, arg3);
8731 #endif
8732 #ifdef TARGET_NR_getsockopt
8733     case TARGET_NR_getsockopt:
8734         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8735 #endif
8736 #ifdef TARGET_NR_listen
8737     case TARGET_NR_listen:
8738         return get_errno(listen(arg1, arg2));
8739 #endif
8740 #ifdef TARGET_NR_recv
8741     case TARGET_NR_recv:
8742         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8743 #endif
8744 #ifdef TARGET_NR_recvfrom
8745     case TARGET_NR_recvfrom:
8746         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8747 #endif
8748 #ifdef TARGET_NR_recvmsg
8749     case TARGET_NR_recvmsg:
8750         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8751 #endif
8752 #ifdef TARGET_NR_send
8753     case TARGET_NR_send:
8754         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8755 #endif
8756 #ifdef TARGET_NR_sendmsg
8757     case TARGET_NR_sendmsg:
8758         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8759 #endif
8760 #ifdef TARGET_NR_sendmmsg
8761     case TARGET_NR_sendmmsg:
8762         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8763     case TARGET_NR_recvmmsg:
8764         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8765 #endif
8766 #ifdef TARGET_NR_sendto
8767     case TARGET_NR_sendto:
8768         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8769 #endif
8770 #ifdef TARGET_NR_shutdown
8771     case TARGET_NR_shutdown:
8772         return get_errno(shutdown(arg1, arg2));
8773 #endif
8774 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8775     case TARGET_NR_getrandom:
8776         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8777         if (!p) {
8778             return -TARGET_EFAULT;
8779         }
8780         ret = get_errno(getrandom(p, arg2, arg3));
8781         unlock_user(p, arg1, ret);
8782         return ret;
8783 #endif
8784 #ifdef TARGET_NR_socket
8785     case TARGET_NR_socket:
8786         return do_socket(arg1, arg2, arg3);
8787 #endif
8788 #ifdef TARGET_NR_socketpair
8789     case TARGET_NR_socketpair:
8790         return do_socketpair(arg1, arg2, arg3, arg4);
8791 #endif
8792 #ifdef TARGET_NR_setsockopt
8793     case TARGET_NR_setsockopt:
8794         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8795 #endif
8796 #if defined(TARGET_NR_syslog)
8797     case TARGET_NR_syslog:
8798         {
8799             int len = arg2;
8800 
8801             switch (arg1) {
8802             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8803             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8804             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8805             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8806             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8807             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8808             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8809             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8810                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8811             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8812             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8813             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8814                 {
8815                     if (len < 0) {
8816                         return -TARGET_EINVAL;
8817                     }
8818                     if (len == 0) {
8819                         return 0;
8820                     }
8821                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8822                     if (!p) {
8823                         return -TARGET_EFAULT;
8824                     }
8825                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8826                     unlock_user(p, arg2, arg3);
8827                 }
8828                 return ret;
8829             default:
8830                 return -TARGET_EINVAL;
8831             }
8832         }
8833         break;
8834 #endif
8835     case TARGET_NR_setitimer:
8836         {
8837             struct itimerval value, ovalue, *pvalue;
8838 
8839             if (arg2) {
8840                 pvalue = &value;
8841                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8842                     || copy_from_user_timeval(&pvalue->it_value,
8843                                               arg2 + sizeof(struct target_timeval)))
8844                     return -TARGET_EFAULT;
8845             } else {
8846                 pvalue = NULL;
8847             }
8848             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8849             if (!is_error(ret) && arg3) {
8850                 if (copy_to_user_timeval(arg3,
8851                                          &ovalue.it_interval)
8852                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8853                                             &ovalue.it_value))
8854                     return -TARGET_EFAULT;
8855             }
8856         }
8857         return ret;
8858     case TARGET_NR_getitimer:
8859         {
8860             struct itimerval value;
8861 
8862             ret = get_errno(getitimer(arg1, &value));
8863             if (!is_error(ret) && arg2) {
8864                 if (copy_to_user_timeval(arg2,
8865                                          &value.it_interval)
8866                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8867                                             &value.it_value))
8868                     return -TARGET_EFAULT;
8869             }
8870         }
8871         return ret;
8872 #ifdef TARGET_NR_stat
8873     case TARGET_NR_stat:
8874         if (!(p = lock_user_string(arg1))) {
8875             return -TARGET_EFAULT;
8876         }
8877         ret = get_errno(stat(path(p), &st));
8878         unlock_user(p, arg1, 0);
8879         goto do_stat;
8880 #endif
8881 #ifdef TARGET_NR_lstat
8882     case TARGET_NR_lstat:
8883         if (!(p = lock_user_string(arg1))) {
8884             return -TARGET_EFAULT;
8885         }
8886         ret = get_errno(lstat(path(p), &st));
8887         unlock_user(p, arg1, 0);
8888         goto do_stat;
8889 #endif
8890 #ifdef TARGET_NR_fstat
8891     case TARGET_NR_fstat:
8892         {
8893             ret = get_errno(fstat(arg1, &st));
8894 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8895         do_stat:
8896 #endif
8897             if (!is_error(ret)) {
8898                 struct target_stat *target_st;
8899 
8900                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8901                     return -TARGET_EFAULT;
8902                 memset(target_st, 0, sizeof(*target_st));
8903                 __put_user(st.st_dev, &target_st->st_dev);
8904                 __put_user(st.st_ino, &target_st->st_ino);
8905                 __put_user(st.st_mode, &target_st->st_mode);
8906                 __put_user(st.st_uid, &target_st->st_uid);
8907                 __put_user(st.st_gid, &target_st->st_gid);
8908                 __put_user(st.st_nlink, &target_st->st_nlink);
8909                 __put_user(st.st_rdev, &target_st->st_rdev);
8910                 __put_user(st.st_size, &target_st->st_size);
8911                 __put_user(st.st_blksize, &target_st->st_blksize);
8912                 __put_user(st.st_blocks, &target_st->st_blocks);
8913                 __put_user(st.st_atime, &target_st->target_st_atime);
8914                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8915                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8916 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
8917     defined(TARGET_STAT_HAVE_NSEC)
8918                 __put_user(st.st_atim.tv_nsec,
8919                            &target_st->target_st_atime_nsec);
8920                 __put_user(st.st_mtim.tv_nsec,
8921                            &target_st->target_st_mtime_nsec);
8922                 __put_user(st.st_ctim.tv_nsec,
8923                            &target_st->target_st_ctime_nsec);
8924 #endif
8925                 unlock_user_struct(target_st, arg2, 1);
8926             }
8927         }
8928         return ret;
8929 #endif
8930     case TARGET_NR_vhangup:
8931         return get_errno(vhangup());
8932 #ifdef TARGET_NR_syscall
8933     case TARGET_NR_syscall:
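             /* Indirect syscall: arg1 is the real number, remaining args shift down by one. */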
8934         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8935                           arg6, arg7, arg8, 0);
8936 #endif
8937     case TARGET_NR_wait4:
8938         {
8939             int status;
8940             abi_long status_ptr = arg2;
8941             struct rusage rusage, *rusage_ptr;
8942             abi_ulong target_rusage = arg4;
8943             abi_long rusage_err;
8944             if (target_rusage)
8945                 rusage_ptr = &rusage;
8946             else
8947                 rusage_ptr = NULL;
8948             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8949             if (!is_error(ret)) {
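                     /* Only copy a status back if a child was actually reaped (ret != 0). */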
8950                 if (status_ptr && ret) {
8951                     status = host_to_target_waitstatus(status);
8952                     if (put_user_s32(status, status_ptr))
8953                         return -TARGET_EFAULT;
8954                 }
8955                 if (target_rusage) {
8956                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8957                     if (rusage_err) {
8958                         ret = rusage_err;
8959                     }
8960                 }
8961             }
8962         }
8963         return ret;
8964 #ifdef TARGET_NR_swapoff
8965     case TARGET_NR_swapoff:
8966         if (!(p = lock_user_string(arg1)))
8967             return -TARGET_EFAULT;
8968         ret = get_errno(swapoff(p));
8969         unlock_user(p, arg1, 0);
8970         return ret;
8971 #endif
8972     case TARGET_NR_sysinfo:
8973         {
8974             struct target_sysinfo *target_value;
8975             struct sysinfo value;
8976             ret = get_errno(sysinfo(&value));
8977             if (!is_error(ret) && arg1)
8978             {
8979                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8980                     return -TARGET_EFAULT;
8981                 __put_user(value.uptime, &target_value->uptime);
8982                 __put_user(value.loads[0], &target_value->loads[0]);
8983                 __put_user(value.loads[1], &target_value->loads[1]);
8984                 __put_user(value.loads[2], &target_value->loads[2]);
8985                 __put_user(value.totalram, &target_value->totalram);
8986                 __put_user(value.freeram, &target_value->freeram);
8987                 __put_user(value.sharedram, &target_value->sharedram);
8988                 __put_user(value.bufferram, &target_value->bufferram);
8989                 __put_user(value.totalswap, &target_value->totalswap);
8990                 __put_user(value.freeswap, &target_value->freeswap);
8991                 __put_user(value.procs, &target_value->procs);
8992                 __put_user(value.totalhigh, &target_value->totalhigh);
8993                 __put_user(value.freehigh, &target_value->freehigh);
8994                 __put_user(value.mem_unit, &target_value->mem_unit);
8995                 unlock_user_struct(target_value, arg1, 1);
8996             }
8997         }
8998         return ret;
8999 #ifdef TARGET_NR_ipc
9000     case TARGET_NR_ipc:
9001         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9002 #endif
9003 #ifdef TARGET_NR_semget
9004     case TARGET_NR_semget:
9005         return get_errno(semget(arg1, arg2, arg3));
9006 #endif
9007 #ifdef TARGET_NR_semop
9008     case TARGET_NR_semop:
9009         return do_semop(arg1, arg2, arg3);
9010 #endif
9011 #ifdef TARGET_NR_semctl
9012     case TARGET_NR_semctl:
9013         return do_semctl(arg1, arg2, arg3, arg4);
9014 #endif
9015 #ifdef TARGET_NR_msgctl
9016     case TARGET_NR_msgctl:
9017         return do_msgctl(arg1, arg2, arg3);
9018 #endif
9019 #ifdef TARGET_NR_msgget
9020     case TARGET_NR_msgget:
9021         return get_errno(msgget(arg1, arg2));
9022 #endif
9023 #ifdef TARGET_NR_msgrcv
9024     case TARGET_NR_msgrcv:
9025         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9026 #endif
9027 #ifdef TARGET_NR_msgsnd
9028     case TARGET_NR_msgsnd:
9029         return do_msgsnd(arg1, arg2, arg3, arg4);
9030 #endif
9031 #ifdef TARGET_NR_shmget
9032     case TARGET_NR_shmget:
9033         return get_errno(shmget(arg1, arg2, arg3));
9034 #endif
9035 #ifdef TARGET_NR_shmctl
9036     case TARGET_NR_shmctl:
9037         return do_shmctl(arg1, arg2, arg3);
9038 #endif
9039 #ifdef TARGET_NR_shmat
9040     case TARGET_NR_shmat:
9041         return do_shmat(cpu_env, arg1, arg2, arg3);
9042 #endif
9043 #ifdef TARGET_NR_shmdt
9044     case TARGET_NR_shmdt:
9045         return do_shmdt(arg1);
9046 #endif
9047     case TARGET_NR_fsync:
9048         return get_errno(fsync(arg1));
9049     case TARGET_NR_clone:
9050         /* Linux manages to have three different orderings for its
9051          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9052          * match the kernel's CONFIG_CLONE_* settings.
9053          * Microblaze is further special in that it uses a sixth
9054          * implicit argument to clone for the TLS pointer.
9055          */
9056 #if defined(TARGET_MICROBLAZE)
9057         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9058 #elif defined(TARGET_CLONE_BACKWARDS)
9059         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9060 #elif defined(TARGET_CLONE_BACKWARDS2)
9061         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9062 #else
9063         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9064 #endif
9065         return ret;
9066 #ifdef __NR_exit_group
9067         /* new thread calls */
9068     case TARGET_NR_exit_group:
9069         preexit_cleanup(cpu_env, arg1);
9070         return get_errno(exit_group(arg1));
9071 #endif
9072     case TARGET_NR_setdomainname:
9073         if (!(p = lock_user_string(arg1)))
9074             return -TARGET_EFAULT;
9075         ret = get_errno(setdomainname(p, arg2));
9076         unlock_user(p, arg1, 0);
9077         return ret;
9078     case TARGET_NR_uname:
9079         /* no need to transcode because we use the linux syscall */
9080         {
9081             struct new_utsname * buf;
9082 
9083             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9084                 return -TARGET_EFAULT;
9085             ret = get_errno(sys_uname(buf));
9086             if (!is_error(ret)) {
9087                 /* Overwrite the native machine name with whatever is being
9088                    emulated. */
9089                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9090                           sizeof(buf->machine));
9091                 /* Allow the user to override the reported release.  */
9092                 if (qemu_uname_release && *qemu_uname_release) {
9093                     g_strlcpy(buf->release, qemu_uname_release,
9094                               sizeof(buf->release));
9095                 }
9096             }
9097             unlock_user_struct(buf, arg1, 1);
9098         }
9099         return ret;
9100 #ifdef TARGET_I386
9101     case TARGET_NR_modify_ldt:
9102         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9103 #if !defined(TARGET_X86_64)
9104     case TARGET_NR_vm86:
9105         return do_vm86(cpu_env, arg1, arg2);
9106 #endif
9107 #endif
9108     case TARGET_NR_adjtimex:
9109         {
9110             struct timex host_buf;
9111 
9112             if (target_to_host_timex(&host_buf, arg1) != 0) {
9113                 return -TARGET_EFAULT;
9114             }
9115             ret = get_errno(adjtimex(&host_buf));
9116             if (!is_error(ret)) {
9117                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9118                     return -TARGET_EFAULT;
9119                 }
9120             }
9121         }
9122         return ret;
9123 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9124     case TARGET_NR_clock_adjtime:
9125         {
9126             struct timex htx, *phtx = &htx;
9127 
9128             if (target_to_host_timex(phtx, arg2) != 0) {
9129                 return -TARGET_EFAULT;
9130             }
9131             ret = get_errno(clock_adjtime(arg1, phtx));
9132             if (!is_error(ret) && phtx) {
9133                 if (host_to_target_timex(arg2, phtx) != 0) {
9134                     return -TARGET_EFAULT;
9135                 }
9136             }
9137         }
9138         return ret;
9139 #endif
9140     case TARGET_NR_getpgid:
9141         return get_errno(getpgid(arg1));
9142     case TARGET_NR_fchdir:
9143         return get_errno(fchdir(arg1));
9144     case TARGET_NR_personality:
9145         return get_errno(personality(arg1));
9146 #ifdef TARGET_NR__llseek /* Not on alpha */
9147     case TARGET_NR__llseek:
9148         {
9149             int64_t res;
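            /*
             * arg2/arg3 hold the high and low 32-bit halves of the offset;
             * the resulting 64-bit file position is written back through
             * the pointer in arg4 (see the put_user_s64() below).
             */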
9150 #if !defined(__NR_llseek)
9151             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9152             if (res == -1) {
9153                 ret = get_errno(res);
9154             } else {
9155                 ret = 0;
9156             }
9157 #else
9158             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9159 #endif
9160             if ((ret == 0) && put_user_s64(res, arg4)) {
9161                 return -TARGET_EFAULT;
9162             }
9163         }
9164         return ret;
9165 #endif
9166 #ifdef TARGET_NR_getdents
9167     case TARGET_NR_getdents:
9168 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9169 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9170         {
9171             struct target_dirent *target_dirp;
9172             struct linux_dirent *dirp;
9173             abi_long count = arg3;
9174 
9175             dirp = g_try_malloc(count);
9176             if (!dirp) {
9177                 return -TARGET_ENOMEM;
9178             }
9179 
9180             ret = get_errno(sys_getdents(arg1, dirp, count));
9181             if (!is_error(ret)) {
9182                 struct linux_dirent *de;
9183                 struct target_dirent *tde;
9184                 int len = ret;
9185                 int reclen, treclen;
9186                 int count1, tnamelen;
9187 
9188                 count1 = 0;
9189                 de = dirp;
9190                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9191                     return -TARGET_EFAULT;
9192                 tde = target_dirp;
9193                 while (len > 0) {
9194                     reclen = de->d_reclen;
9195                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9196                     assert(tnamelen >= 0);
9197                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9198                     assert(count1 + treclen <= count);
9199                     tde->d_reclen = tswap16(treclen);
9200                     tde->d_ino = tswapal(de->d_ino);
9201                     tde->d_off = tswapal(de->d_off);
9202                     memcpy(tde->d_name, de->d_name, tnamelen);
9203                     de = (struct linux_dirent *)((char *)de + reclen);
9204                     len -= reclen;
9205                     tde = (struct target_dirent *)((char *)tde + treclen);
9206                     count1 += treclen;
9207                 }
9208                 ret = count1;
9209                 unlock_user(target_dirp, arg2, ret);
9210             }
9211             g_free(dirp);
9212         }
9213 #else
9214         {
9215             struct linux_dirent *dirp;
9216             abi_long count = arg3;
9217 
9218             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9219                 return -TARGET_EFAULT;
9220             ret = get_errno(sys_getdents(arg1, dirp, count));
9221             if (!is_error(ret)) {
9222                 struct linux_dirent *de;
9223                 int len = ret;
9224                 int reclen;
9225                 de = dirp;
9226                 while (len > 0) {
9227                     reclen = de->d_reclen;
9228                     if (reclen > len)
9229                         break;
9230                     de->d_reclen = tswap16(reclen);
9231                     tswapls(&de->d_ino);
9232                     tswapls(&de->d_off);
9233                     de = (struct linux_dirent *)((char *)de + reclen);
9234                     len -= reclen;
9235                 }
9236             }
9237             unlock_user(dirp, arg2, ret);
9238         }
9239 #endif
9240 #else
9241         /* Implement getdents in terms of getdents64 */
9242         {
9243             struct linux_dirent64 *dirp;
9244             abi_long count = arg3;
9245 
9246             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9247             if (!dirp) {
9248                 return -TARGET_EFAULT;
9249             }
9250             ret = get_errno(sys_getdents64(arg1, dirp, count));
9251             if (!is_error(ret)) {
9252                 /* Convert the dirent64 structs to target dirent.  We do this
9253                  * in-place, since we can guarantee that a target_dirent is no
9254                  * larger than a dirent64; however this means we have to be
9255                  * careful to read everything before writing in the new format.
9256                  */
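                /*
                 * Layout sketch (assuming the usual definitions): struct
                 * linux_dirent64 keeps d_type between d_reclen and d_name,
                 * while the old dirent format has no d_type field of its
                 * own.  Hence the "+ 2" below (NUL terminator plus one byte
                 * for the type) and the write of 'type' into the last byte
                 * of each converted record.
                 */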
9257                 struct linux_dirent64 *de;
9258                 struct target_dirent *tde;
9259                 int len = ret;
9260                 int tlen = 0;
9261 
9262                 de = dirp;
9263                 tde = (struct target_dirent *)dirp;
9264                 while (len > 0) {
9265                     int namelen, treclen;
9266                     int reclen = de->d_reclen;
9267                     uint64_t ino = de->d_ino;
9268                     int64_t off = de->d_off;
9269                     uint8_t type = de->d_type;
9270 
9271                     namelen = strlen(de->d_name);
9272                     treclen = offsetof(struct target_dirent, d_name)
9273                         + namelen + 2;
9274                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9275 
9276                     memmove(tde->d_name, de->d_name, namelen + 1);
9277                     tde->d_ino = tswapal(ino);
9278                     tde->d_off = tswapal(off);
9279                     tde->d_reclen = tswap16(treclen);
9280                     /* The target d_type field is stored in what was formerly
9281                      * a padding byte at the end of the structure.
9282                      */
9283                     *(((char *)tde) + treclen - 1) = type;
9284 
9285                     de = (struct linux_dirent64 *)((char *)de + reclen);
9286                     tde = (struct target_dirent *)((char *)tde + treclen);
9287                     len -= reclen;
9288                     tlen += treclen;
9289                 }
9290                 ret = tlen;
9291             }
9292             unlock_user(dirp, arg2, ret);
9293         }
9294 #endif
9295         return ret;
9296 #endif /* TARGET_NR_getdents */
9297 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9298     case TARGET_NR_getdents64:
9299         {
9300             struct linux_dirent64 *dirp;
9301             abi_long count = arg3;
9302             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9303                 return -TARGET_EFAULT;
9304             ret = get_errno(sys_getdents64(arg1, dirp, count));
9305             if (!is_error(ret)) {
9306                 struct linux_dirent64 *de;
9307                 int len = ret;
9308                 int reclen;
9309                 de = dirp;
9310                 while (len > 0) {
9311                     reclen = de->d_reclen;
9312                     if (reclen > len)
9313                         break;
9314                     de->d_reclen = tswap16(reclen);
9315                     tswap64s((uint64_t *)&de->d_ino);
9316                     tswap64s((uint64_t *)&de->d_off);
9317                     de = (struct linux_dirent64 *)((char *)de + reclen);
9318                     len -= reclen;
9319                 }
9320             }
9321             unlock_user(dirp, arg2, ret);
9322         }
9323         return ret;
9324 #endif /* TARGET_NR_getdents64 */
9325 #if defined(TARGET_NR__newselect)
9326     case TARGET_NR__newselect:
9327         return do_select(arg1, arg2, arg3, arg4, arg5);
9328 #endif
9329 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9330 # ifdef TARGET_NR_poll
9331     case TARGET_NR_poll:
9332 # endif
9333 # ifdef TARGET_NR_ppoll
9334     case TARGET_NR_ppoll:
9335 # endif
9336         {
9337             struct target_pollfd *target_pfd;
9338             unsigned int nfds = arg2;
9339             struct pollfd *pfd;
9340             unsigned int i;
9341 
9342             pfd = NULL;
9343             target_pfd = NULL;
9344             if (nfds) {
9345                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9346                     return -TARGET_EINVAL;
9347                 }
9348 
9349                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9350                                        sizeof(struct target_pollfd) * nfds, 1);
9351                 if (!target_pfd) {
9352                     return -TARGET_EFAULT;
9353                 }
9354 
9355                 pfd = alloca(sizeof(struct pollfd) * nfds);
9356                 for (i = 0; i < nfds; i++) {
9357                     pfd[i].fd = tswap32(target_pfd[i].fd);
9358                     pfd[i].events = tswap16(target_pfd[i].events);
9359                 }
9360             }
9361 
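            /*
             * Both poll() and ppoll() are funnelled through safe_ppoll():
             * plain poll()'s millisecond timeout is converted to a struct
             * timespec below, and ppoll() additionally converts the guest
             * timespec and sigset arguments.
             */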
9362             switch (num) {
9363 # ifdef TARGET_NR_ppoll
9364             case TARGET_NR_ppoll:
9365             {
9366                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9367                 target_sigset_t *target_set;
9368                 sigset_t _set, *set = &_set;
9369 
9370                 if (arg3) {
9371                     if (target_to_host_timespec(timeout_ts, arg3)) {
9372                         unlock_user(target_pfd, arg1, 0);
9373                         return -TARGET_EFAULT;
9374                     }
9375                 } else {
9376                     timeout_ts = NULL;
9377                 }
9378 
9379                 if (arg4) {
9380                     if (arg5 != sizeof(target_sigset_t)) {
9381                         unlock_user(target_pfd, arg1, 0);
9382                         return -TARGET_EINVAL;
9383                     }
9384 
9385                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9386                     if (!target_set) {
9387                         unlock_user(target_pfd, arg1, 0);
9388                         return -TARGET_EFAULT;
9389                     }
9390                     target_to_host_sigset(set, target_set);
9391                 } else {
9392                     set = NULL;
9393                 }
9394 
9395                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9396                                            set, SIGSET_T_SIZE));
9397 
9398                 if (!is_error(ret) && arg3) {
9399                     host_to_target_timespec(arg3, timeout_ts);
9400                 }
9401                 if (arg4) {
9402                     unlock_user(target_set, arg4, 0);
9403                 }
9404                 break;
9405             }
9406 # endif
9407 # ifdef TARGET_NR_poll
9408             case TARGET_NR_poll:
9409             {
9410                 struct timespec ts, *pts;
9411 
9412                 if (arg3 >= 0) {
9413                     /* Convert ms to secs, ns */
9414                     ts.tv_sec = arg3 / 1000;
9415                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9416                     pts = &ts;
9417                 } else {
9418                     /* A negative poll() timeout means "infinite" */
9419                     pts = NULL;
9420                 }
9421                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9422                 break;
9423             }
9424 # endif
9425             default:
9426                 g_assert_not_reached();
9427             }
9428 
9429             if (!is_error(ret)) {
9430                 for (i = 0; i < nfds; i++) {
9431                     target_pfd[i].revents = tswap16(pfd[i].revents);
9432                 }
9433             }
9434             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9435         }
9436         return ret;
9437 #endif
9438     case TARGET_NR_flock:
9439         /* NOTE: the flock constant seems to be the same for every
9440            Linux platform */
9441         return get_errno(safe_flock(arg1, arg2));
9442     case TARGET_NR_readv:
9443         {
9444             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9445             if (vec != NULL) {
9446                 ret = get_errno(safe_readv(arg1, vec, arg3));
9447                 unlock_iovec(vec, arg2, arg3, 1);
9448             } else {
9449                 ret = -host_to_target_errno(errno);
9450             }
9451         }
9452         return ret;
9453     case TARGET_NR_writev:
9454         {
9455             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9456             if (vec != NULL) {
9457                 ret = get_errno(safe_writev(arg1, vec, arg3));
9458                 unlock_iovec(vec, arg2, arg3, 0);
9459             } else {
9460                 ret = -host_to_target_errno(errno);
9461             }
9462         }
9463         return ret;
9464 #if defined(TARGET_NR_preadv)
9465     case TARGET_NR_preadv:
9466         {
9467             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9468             if (vec != NULL) {
9469                 unsigned long low, high;
9470 
9471                 target_to_host_low_high(arg4, arg5, &low, &high);
9472                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9473                 unlock_iovec(vec, arg2, arg3, 1);
9474             } else {
9475                 ret = -host_to_target_errno(errno);
9476             }
9477         }
9478         return ret;
9479 #endif
9480 #if defined(TARGET_NR_pwritev)
9481     case TARGET_NR_pwritev:
9482         {
9483             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9484             if (vec != NULL) {
9485                 unsigned long low, high;
9486 
9487                 target_to_host_low_high(arg4, arg5, &low, &high);
9488                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9489                 unlock_iovec(vec, arg2, arg3, 0);
9490             } else {
9491                 ret = -host_to_target_errno(errno);
9492             }
9493         }
9494         return ret;
9495 #endif
9496     case TARGET_NR_getsid:
9497         return get_errno(getsid(arg1));
9498 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9499     case TARGET_NR_fdatasync:
9500         return get_errno(fdatasync(arg1));
9501 #endif
9502 #ifdef TARGET_NR__sysctl
9503     case TARGET_NR__sysctl:
9504         /* We don't implement this, but ENOTDIR is always a safe
9505            return value. */
9506         return -TARGET_ENOTDIR;
9507 #endif
9508     case TARGET_NR_sched_getaffinity:
9509         {
9510             unsigned int mask_size;
9511             unsigned long *mask;
9512 
9513             /*
9514              * sched_getaffinity needs multiples of ulong, so need to take
9515              * care of mismatches between target ulong and host ulong sizes.
9516              */
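            /*
             * Worked example (hypothetical values): a 32-bit guest passing
             * arg2 == 12 on a 64-bit host rounds mask_size up to 16 here,
             * and the "ret > arg2" case below then decides whether the
             * extra 4 bytes can simply be dropped.
             */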
9517             if (arg2 & (sizeof(abi_ulong) - 1)) {
9518                 return -TARGET_EINVAL;
9519             }
9520             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9521 
9522             mask = alloca(mask_size);
9523             memset(mask, 0, mask_size);
9524             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9525 
9526             if (!is_error(ret)) {
9527                 if (ret > arg2) {
9528                     /* More data returned than the caller's buffer will fit.
9529                      * This only happens if sizeof(abi_long) < sizeof(long)
9530                      * and the caller passed us a buffer holding an odd number
9531                      * of abi_longs. If the host kernel is actually using the
9532                      * extra 4 bytes then fail EINVAL; otherwise we can just
9533                      * ignore them and only copy the interesting part.
9534                      */
9535                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9536                     if (numcpus > arg2 * 8) {
9537                         return -TARGET_EINVAL;
9538                     }
9539                     ret = arg2;
9540                 }
9541 
9542                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9543                     return -TARGET_EFAULT;
9544                 }
9545             }
9546         }
9547         return ret;
9548     case TARGET_NR_sched_setaffinity:
9549         {
9550             unsigned int mask_size;
9551             unsigned long *mask;
9552 
9553             /*
9554              * sched_setaffinity needs multiples of ulong, so need to take
9555              * care of mismatches between target ulong and host ulong sizes.
9556              */
9557             if (arg2 & (sizeof(abi_ulong) - 1)) {
9558                 return -TARGET_EINVAL;
9559             }
9560             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9561             mask = alloca(mask_size);
9562 
9563             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9564             if (ret) {
9565                 return ret;
9566             }
9567 
9568             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9569         }
9570     case TARGET_NR_getcpu:
9571         {
9572             unsigned cpu, node;
9573             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9574                                        arg2 ? &node : NULL,
9575                                        NULL));
9576             if (is_error(ret)) {
9577                 return ret;
9578             }
9579             if (arg1 && put_user_u32(cpu, arg1)) {
9580                 return -TARGET_EFAULT;
9581             }
9582             if (arg2 && put_user_u32(node, arg2)) {
9583                 return -TARGET_EFAULT;
9584             }
9585         }
9586         return ret;
9587     case TARGET_NR_sched_setparam:
9588         {
9589             struct sched_param *target_schp;
9590             struct sched_param schp;
9591 
9592             if (arg2 == 0) {
9593                 return -TARGET_EINVAL;
9594             }
9595             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9596                 return -TARGET_EFAULT;
9597             schp.sched_priority = tswap32(target_schp->sched_priority);
9598             unlock_user_struct(target_schp, arg2, 0);
9599             return get_errno(sched_setparam(arg1, &schp));
9600         }
9601     case TARGET_NR_sched_getparam:
9602         {
9603             struct sched_param *target_schp;
9604             struct sched_param schp;
9605 
9606             if (arg2 == 0) {
9607                 return -TARGET_EINVAL;
9608             }
9609             ret = get_errno(sched_getparam(arg1, &schp));
9610             if (!is_error(ret)) {
9611                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9612                     return -TARGET_EFAULT;
9613                 target_schp->sched_priority = tswap32(schp.sched_priority);
9614                 unlock_user_struct(target_schp, arg2, 1);
9615             }
9616         }
9617         return ret;
9618     case TARGET_NR_sched_setscheduler:
9619         {
9620             struct sched_param *target_schp;
9621             struct sched_param schp;
9622             if (arg3 == 0) {
9623                 return -TARGET_EINVAL;
9624             }
9625             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9626                 return -TARGET_EFAULT;
9627             schp.sched_priority = tswap32(target_schp->sched_priority);
9628             unlock_user_struct(target_schp, arg3, 0);
9629             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9630         }
9631     case TARGET_NR_sched_getscheduler:
9632         return get_errno(sched_getscheduler(arg1));
9633     case TARGET_NR_sched_yield:
9634         return get_errno(sched_yield());
9635     case TARGET_NR_sched_get_priority_max:
9636         return get_errno(sched_get_priority_max(arg1));
9637     case TARGET_NR_sched_get_priority_min:
9638         return get_errno(sched_get_priority_min(arg1));
9639     case TARGET_NR_sched_rr_get_interval:
9640         {
9641             struct timespec ts;
9642             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9643             if (!is_error(ret)) {
9644                 ret = host_to_target_timespec(arg2, &ts);
9645             }
9646         }
9647         return ret;
9648     case TARGET_NR_nanosleep:
9649         {
9650             struct timespec req, rem;
9651             target_to_host_timespec(&req, arg1);
9652             ret = get_errno(safe_nanosleep(&req, &rem));
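            /*
             * The remaining time is only meaningful when the sleep was
             * interrupted, so it is copied back to the guest only on the
             * error path (and only if a rem pointer was supplied).
             */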
9653             if (is_error(ret) && arg2) {
9654                 host_to_target_timespec(arg2, &rem);
9655             }
9656         }
9657         return ret;
9658     case TARGET_NR_prctl:
9659         switch (arg1) {
9660         case PR_GET_PDEATHSIG:
9661         {
9662             int deathsig;
9663             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9664             if (!is_error(ret) && arg2
9665                 && put_user_ual(deathsig, arg2)) {
9666                 return -TARGET_EFAULT;
9667             }
9668             return ret;
9669         }
9670 #ifdef PR_GET_NAME
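        /*
         * The fixed 16-byte buffer matches the kernel's TASK_COMM_LEN
         * (assumed here); both directions pass the guest buffer straight
         * through to the host prctl().
         */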
9671         case PR_GET_NAME:
9672         {
9673             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9674             if (!name) {
9675                 return -TARGET_EFAULT;
9676             }
9677             ret = get_errno(prctl(arg1, (unsigned long)name,
9678                                   arg3, arg4, arg5));
9679             unlock_user(name, arg2, 16);
9680             return ret;
9681         }
9682         case PR_SET_NAME:
9683         {
9684             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9685             if (!name) {
9686                 return -TARGET_EFAULT;
9687             }
9688             ret = get_errno(prctl(arg1, (unsigned long)name,
9689                                   arg3, arg4, arg5));
9690             unlock_user(name, arg2, 0);
9691             return ret;
9692         }
9693 #endif
9694 #ifdef TARGET_MIPS
9695         case TARGET_PR_GET_FP_MODE:
9696         {
9697             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9698             ret = 0;
9699             if (env->CP0_Status & (1 << CP0St_FR)) {
9700                 ret |= TARGET_PR_FP_MODE_FR;
9701             }
9702             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9703                 ret |= TARGET_PR_FP_MODE_FRE;
9704             }
9705             return ret;
9706         }
9707         case TARGET_PR_SET_FP_MODE:
9708         {
9709             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9710             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9711             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9712             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9713             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9714 
9715             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9716                                             TARGET_PR_FP_MODE_FRE;
9717 
9718             /* If nothing to change, return right away, successfully.  */
9719             if (old_fr == new_fr && old_fre == new_fre) {
9720                 return 0;
9721             }
9722             /* Check the value is valid */
9723             if (arg2 & ~known_bits) {
9724                 return -TARGET_EOPNOTSUPP;
9725             }
9726             /* Setting FRE without FR is not supported.  */
9727             if (new_fre && !new_fr) {
9728                 return -TARGET_EOPNOTSUPP;
9729             }
9730             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9731                 /* FR1 is not supported */
9732                 return -TARGET_EOPNOTSUPP;
9733             }
9734             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9735                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9736                 /* cannot set FR=0 */
9737                 return -TARGET_EOPNOTSUPP;
9738             }
9739             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9740                 /* Cannot set FRE=1 */
9741                 return -TARGET_EOPNOTSUPP;
9742             }
9743 
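            /*
             * When flipping FR, each odd-numbered single register moves
             * between its own register (FR=0) and the upper half of the
             * preceding even-numbered double (FR=1); the loop below copies
             * the values accordingly so the guest's data is preserved.
             */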
9744             int i;
9745             fpr_t *fpr = env->active_fpu.fpr;
9746             for (i = 0; i < 32 ; i += 2) {
9747                 if (!old_fr && new_fr) {
9748                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9749                 } else if (old_fr && !new_fr) {
9750                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9751                 }
9752             }
9753 
9754             if (new_fr) {
9755                 env->CP0_Status |= (1 << CP0St_FR);
9756                 env->hflags |= MIPS_HFLAG_F64;
9757             } else {
9758                 env->CP0_Status &= ~(1 << CP0St_FR);
9759                 env->hflags &= ~MIPS_HFLAG_F64;
9760             }
9761             if (new_fre) {
9762                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9763                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9764                     env->hflags |= MIPS_HFLAG_FRE;
9765                 }
9766             } else {
9767                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9768                 env->hflags &= ~MIPS_HFLAG_FRE;
9769             }
9770 
9771             return 0;
9772         }
9773 #endif /* MIPS */
9774 #ifdef TARGET_AARCH64
9775         case TARGET_PR_SVE_SET_VL:
9776             /*
9777              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9778              * PR_SVE_VL_INHERIT.  Note the kernel definition
9779              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9780              * even though the current architectural maximum is VQ=16.
9781              */
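            /*
             * Example (hypothetical values): arg2 == 64 requests a 64-byte
             * vector length, i.e. vq == 4, which is then clamped to the
             * CPU's sve_max_vq and written to ZCR_EL1 as vq - 1.
             */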
9782             ret = -TARGET_EINVAL;
9783             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9784                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9785                 CPUARMState *env = cpu_env;
9786                 ARMCPU *cpu = env_archcpu(env);
9787                 uint32_t vq, old_vq;
9788 
9789                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9790                 vq = MAX(arg2 / 16, 1);
9791                 vq = MIN(vq, cpu->sve_max_vq);
9792 
9793                 if (vq < old_vq) {
9794                     aarch64_sve_narrow_vq(env, vq);
9795                 }
9796                 env->vfp.zcr_el[1] = vq - 1;
9797                 ret = vq * 16;
9798             }
9799             return ret;
9800         case TARGET_PR_SVE_GET_VL:
9801             ret = -TARGET_EINVAL;
9802             {
9803                 ARMCPU *cpu = env_archcpu(cpu_env);
9804                 if (cpu_isar_feature(aa64_sve, cpu)) {
9805                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9806                 }
9807             }
9808             return ret;
9809         case TARGET_PR_PAC_RESET_KEYS:
9810             {
9811                 CPUARMState *env = cpu_env;
9812                 ARMCPU *cpu = env_archcpu(env);
9813 
9814                 if (arg3 || arg4 || arg5) {
9815                     return -TARGET_EINVAL;
9816                 }
9817                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9818                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9819                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9820                                TARGET_PR_PAC_APGAKEY);
9821                     int ret = 0;
9822                     Error *err = NULL;
9823 
9824                     if (arg2 == 0) {
9825                         arg2 = all;
9826                     } else if (arg2 & ~all) {
9827                         return -TARGET_EINVAL;
9828                     }
9829                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9830                         ret |= qemu_guest_getrandom(&env->keys.apia,
9831                                                     sizeof(ARMPACKey), &err);
9832                     }
9833                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9834                         ret |= qemu_guest_getrandom(&env->keys.apib,
9835                                                     sizeof(ARMPACKey), &err);
9836                     }
9837                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9838                         ret |= qemu_guest_getrandom(&env->keys.apda,
9839                                                     sizeof(ARMPACKey), &err);
9840                     }
9841                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9842                         ret |= qemu_guest_getrandom(&env->keys.apdb,
9843                                                     sizeof(ARMPACKey), &err);
9844                     }
9845                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9846                         ret |= qemu_guest_getrandom(&env->keys.apga,
9847                                                     sizeof(ARMPACKey), &err);
9848                     }
9849                     if (ret != 0) {
9850                         /*
9851                          * Some unknown failure in the crypto.  The best
9852                          * we can do is log it and fail the syscall.
9853                          * The real syscall cannot fail this way.
9854                          */
9855                         qemu_log_mask(LOG_UNIMP,
9856                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
9857                                       error_get_pretty(err));
9858                         error_free(err);
9859                         return -TARGET_EIO;
9860                     }
9861                     return 0;
9862                 }
9863             }
9864             return -TARGET_EINVAL;
9865 #endif /* AARCH64 */
9866         case PR_GET_SECCOMP:
9867         case PR_SET_SECCOMP:
9868             /* Disable seccomp to prevent the target from disabling
9869              * syscalls we need. */
9870             return -TARGET_EINVAL;
9871         default:
9872             /* Most prctl options have no pointer arguments */
9873             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9874         }
9875         break;
9876 #ifdef TARGET_NR_arch_prctl
9877     case TARGET_NR_arch_prctl:
9878 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9879         return do_arch_prctl(cpu_env, arg1, arg2);
9880 #else
9881 #error unreachable
9882 #endif
9883 #endif
9884 #ifdef TARGET_NR_pread64
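    /*
     * On ABIs where 64-bit syscall arguments must sit in aligned register
     * pairs, the offset occupies a later pair of argument registers, so
     * arg4/arg5 are re-loaded from arg5/arg6 below before target_offset64()
     * recombines them.
     */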
9885     case TARGET_NR_pread64:
9886         if (regpairs_aligned(cpu_env, num)) {
9887             arg4 = arg5;
9888             arg5 = arg6;
9889         }
9890         if (arg2 == 0 && arg3 == 0) {
9891             /* Special-case NULL buffer and zero length, which should succeed */
9892             p = 0;
9893         } else {
9894             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9895             if (!p) {
9896                 return -TARGET_EFAULT;
9897             }
9898         }
9899         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9900         unlock_user(p, arg2, ret);
9901         return ret;
9902     case TARGET_NR_pwrite64:
9903         if (regpairs_aligned(cpu_env, num)) {
9904             arg4 = arg5;
9905             arg5 = arg6;
9906         }
9907         if (arg2 == 0 && arg3 == 0) {
9908             /* Special-case NULL buffer and zero length, which should succeed */
9909             p = 0;
9910         } else {
9911             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9912             if (!p) {
9913                 return -TARGET_EFAULT;
9914             }
9915         }
9916         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9917         unlock_user(p, arg2, 0);
9918         return ret;
9919 #endif
9920     case TARGET_NR_getcwd:
9921         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9922             return -TARGET_EFAULT;
9923         ret = get_errno(sys_getcwd1(p, arg2));
9924         unlock_user(p, arg1, ret);
9925         return ret;
9926     case TARGET_NR_capget:
9927     case TARGET_NR_capset:
9928     {
9929         struct target_user_cap_header *target_header;
9930         struct target_user_cap_data *target_data = NULL;
9931         struct __user_cap_header_struct header;
9932         struct __user_cap_data_struct data[2];
9933         struct __user_cap_data_struct *dataptr = NULL;
9934         int i, target_datalen;
9935         int data_items = 1;
9936 
9937         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9938             return -TARGET_EFAULT;
9939         }
9940         header.version = tswap32(target_header->version);
9941         header.pid = tswap32(target_header->pid);
9942 
9943         if (header.version != _LINUX_CAPABILITY_VERSION) {
9944             /* Versions 2 and up take a pointer to two user_data structs */
9945             data_items = 2;
9946         }
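        /*
         * i.e. _LINUX_CAPABILITY_VERSION (v1) uses a single 32-bit
         * capability set, while v2/v3 use two structs to cover 64
         * capability bits (assuming the usual kernel ABI).
         */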
9947 
9948         target_datalen = sizeof(*target_data) * data_items;
9949 
9950         if (arg2) {
9951             if (num == TARGET_NR_capget) {
9952                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9953             } else {
9954                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9955             }
9956             if (!target_data) {
9957                 unlock_user_struct(target_header, arg1, 0);
9958                 return -TARGET_EFAULT;
9959             }
9960 
9961             if (num == TARGET_NR_capset) {
9962                 for (i = 0; i < data_items; i++) {
9963                     data[i].effective = tswap32(target_data[i].effective);
9964                     data[i].permitted = tswap32(target_data[i].permitted);
9965                     data[i].inheritable = tswap32(target_data[i].inheritable);
9966                 }
9967             }
9968 
9969             dataptr = data;
9970         }
9971 
9972         if (num == TARGET_NR_capget) {
9973             ret = get_errno(capget(&header, dataptr));
9974         } else {
9975             ret = get_errno(capset(&header, dataptr));
9976         }
9977 
9978         /* The kernel always updates version for both capget and capset */
9979         target_header->version = tswap32(header.version);
9980         unlock_user_struct(target_header, arg1, 1);
9981 
9982         if (arg2) {
9983             if (num == TARGET_NR_capget) {
9984                 for (i = 0; i < data_items; i++) {
9985                     target_data[i].effective = tswap32(data[i].effective);
9986                     target_data[i].permitted = tswap32(data[i].permitted);
9987                     target_data[i].inheritable = tswap32(data[i].inheritable);
9988                 }
9989                 unlock_user(target_data, arg2, target_datalen);
9990             } else {
9991                 unlock_user(target_data, arg2, 0);
9992             }
9993         }
9994         return ret;
9995     }
9996     case TARGET_NR_sigaltstack:
9997         return do_sigaltstack(arg1, arg2,
9998                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9999 
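    /*
     * sendfile() passes the offset as a target long while sendfile64()
     * uses a full 64-bit value; both are forwarded to the host sendfile(),
     * and the (possibly updated) offset is copied back on success.
     */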
10000 #ifdef CONFIG_SENDFILE
10001 #ifdef TARGET_NR_sendfile
10002     case TARGET_NR_sendfile:
10003     {
10004         off_t *offp = NULL;
10005         off_t off;
10006         if (arg3) {
10007             ret = get_user_sal(off, arg3);
10008             if (is_error(ret)) {
10009                 return ret;
10010             }
10011             offp = &off;
10012         }
10013         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10014         if (!is_error(ret) && arg3) {
10015             abi_long ret2 = put_user_sal(off, arg3);
10016             if (is_error(ret2)) {
10017                 ret = ret2;
10018             }
10019         }
10020         return ret;
10021     }
10022 #endif
10023 #ifdef TARGET_NR_sendfile64
10024     case TARGET_NR_sendfile64:
10025     {
10026         off_t *offp = NULL;
10027         off_t off;
10028         if (arg3) {
10029             ret = get_user_s64(off, arg3);
10030             if (is_error(ret)) {
10031                 return ret;
10032             }
10033             offp = &off;
10034         }
10035         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10036         if (!is_error(ret) && arg3) {
10037             abi_long ret2 = put_user_s64(off, arg3);
10038             if (is_error(ret2)) {
10039                 ret = ret2;
10040             }
10041         }
10042         return ret;
10043     }
10044 #endif
10045 #endif
10046 #ifdef TARGET_NR_vfork
10047     case TARGET_NR_vfork:
10048         return get_errno(do_fork(cpu_env,
10049                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10050                          0, 0, 0, 0));
10051 #endif
10052 #ifdef TARGET_NR_ugetrlimit
10053     case TARGET_NR_ugetrlimit:
10054     {
10055         struct rlimit rlim;
10056         int resource = target_to_host_resource(arg1);
10057         ret = get_errno(getrlimit(resource, &rlim));
10058         if (!is_error(ret)) {
10059             struct target_rlimit *target_rlim;
10060             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10061                 return -TARGET_EFAULT;
10062             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10063             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10064             unlock_user_struct(target_rlim, arg2, 1);
10065         }
10066         return ret;
10067     }
10068 #endif
10069 #ifdef TARGET_NR_truncate64
10070     case TARGET_NR_truncate64:
10071         if (!(p = lock_user_string(arg1)))
10072             return -TARGET_EFAULT;
10073         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10074         unlock_user(p, arg1, 0);
10075         return ret;
10076 #endif
10077 #ifdef TARGET_NR_ftruncate64
10078     case TARGET_NR_ftruncate64:
10079         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10080 #endif
10081 #ifdef TARGET_NR_stat64
10082     case TARGET_NR_stat64:
10083         if (!(p = lock_user_string(arg1))) {
10084             return -TARGET_EFAULT;
10085         }
10086         ret = get_errno(stat(path(p), &st));
10087         unlock_user(p, arg1, 0);
10088         if (!is_error(ret))
10089             ret = host_to_target_stat64(cpu_env, arg2, &st);
10090         return ret;
10091 #endif
10092 #ifdef TARGET_NR_lstat64
10093     case TARGET_NR_lstat64:
10094         if (!(p = lock_user_string(arg1))) {
10095             return -TARGET_EFAULT;
10096         }
10097         ret = get_errno(lstat(path(p), &st));
10098         unlock_user(p, arg1, 0);
10099         if (!is_error(ret))
10100             ret = host_to_target_stat64(cpu_env, arg2, &st);
10101         return ret;
10102 #endif
10103 #ifdef TARGET_NR_fstat64
10104     case TARGET_NR_fstat64:
10105         ret = get_errno(fstat(arg1, &st));
10106         if (!is_error(ret))
10107             ret = host_to_target_stat64(cpu_env, arg2, &st);
10108         return ret;
10109 #endif
10110 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10111 #ifdef TARGET_NR_fstatat64
10112     case TARGET_NR_fstatat64:
10113 #endif
10114 #ifdef TARGET_NR_newfstatat
10115     case TARGET_NR_newfstatat:
10116 #endif
10117         if (!(p = lock_user_string(arg2))) {
10118             return -TARGET_EFAULT;
10119         }
10120         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10121         unlock_user(p, arg2, 0);
10122         if (!is_error(ret))
10123             ret = host_to_target_stat64(cpu_env, arg3, &st);
10124         return ret;
10125 #endif
10126 #ifdef TARGET_NR_lchown
10127     case TARGET_NR_lchown:
10128         if (!(p = lock_user_string(arg1)))
10129             return -TARGET_EFAULT;
10130         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10131         unlock_user(p, arg1, 0);
10132         return ret;
10133 #endif
10134 #ifdef TARGET_NR_getuid
10135     case TARGET_NR_getuid:
10136         return get_errno(high2lowuid(getuid()));
10137 #endif
10138 #ifdef TARGET_NR_getgid
10139     case TARGET_NR_getgid:
10140         return get_errno(high2lowgid(getgid()));
10141 #endif
10142 #ifdef TARGET_NR_geteuid
10143     case TARGET_NR_geteuid:
10144         return get_errno(high2lowuid(geteuid()));
10145 #endif
10146 #ifdef TARGET_NR_getegid
10147     case TARGET_NR_getegid:
10148         return get_errno(high2lowgid(getegid()));
10149 #endif
10150     case TARGET_NR_setreuid:
10151         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10152     case TARGET_NR_setregid:
10153         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10154     case TARGET_NR_getgroups:
10155         {
10156             int gidsetsize = arg1;
10157             target_id *target_grouplist;
10158             gid_t *grouplist;
10159             int i;
10160 
10161             grouplist = alloca(gidsetsize * sizeof(gid_t));
10162             ret = get_errno(getgroups(gidsetsize, grouplist));
10163             if (gidsetsize == 0)
10164                 return ret;
10165             if (!is_error(ret)) {
10166                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10167                 if (!target_grouplist)
10168                     return -TARGET_EFAULT;
10169                 for (i = 0; i < ret; i++)
10170                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10171                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10172             }
10173         }
10174         return ret;
10175     case TARGET_NR_setgroups:
10176         {
10177             int gidsetsize = arg1;
10178             target_id *target_grouplist;
10179             gid_t *grouplist = NULL;
10180             int i;
10181             if (gidsetsize) {
10182                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10183                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10184                 if (!target_grouplist) {
10185                     return -TARGET_EFAULT;
10186                 }
10187                 for (i = 0; i < gidsetsize; i++) {
10188                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10189                 }
10190                 unlock_user(target_grouplist, arg2, 0);
10191             }
10192             return get_errno(setgroups(gidsetsize, grouplist));
10193         }
10194     case TARGET_NR_fchown:
10195         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10196 #if defined(TARGET_NR_fchownat)
10197     case TARGET_NR_fchownat:
10198         if (!(p = lock_user_string(arg2)))
10199             return -TARGET_EFAULT;
10200         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10201                                  low2highgid(arg4), arg5));
10202         unlock_user(p, arg2, 0);
10203         return ret;
10204 #endif
10205 #ifdef TARGET_NR_setresuid
10206     case TARGET_NR_setresuid:
10207         return get_errno(sys_setresuid(low2highuid(arg1),
10208                                        low2highuid(arg2),
10209                                        low2highuid(arg3)));
10210 #endif
10211 #ifdef TARGET_NR_getresuid
10212     case TARGET_NR_getresuid:
10213         {
10214             uid_t ruid, euid, suid;
10215             ret = get_errno(getresuid(&ruid, &euid, &suid));
10216             if (!is_error(ret)) {
10217                 if (put_user_id(high2lowuid(ruid), arg1)
10218                     || put_user_id(high2lowuid(euid), arg2)
10219                     || put_user_id(high2lowuid(suid), arg3))
10220                     return -TARGET_EFAULT;
10221             }
10222         }
10223         return ret;
10224 #endif
10225 #ifdef TARGET_NR_getresgid
10226     case TARGET_NR_setresgid:
10227         return get_errno(sys_setresgid(low2highgid(arg1),
10228                                        low2highgid(arg2),
10229                                        low2highgid(arg3)));
10230 #endif
10231 #ifdef TARGET_NR_getresgid
10232     case TARGET_NR_getresgid:
10233         {
10234             gid_t rgid, egid, sgid;
10235             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10236             if (!is_error(ret)) {
10237                 if (put_user_id(high2lowgid(rgid), arg1)
10238                     || put_user_id(high2lowgid(egid), arg2)
10239                     || put_user_id(high2lowgid(sgid), arg3))
10240                     return -TARGET_EFAULT;
10241             }
10242         }
10243         return ret;
10244 #endif
10245 #ifdef TARGET_NR_chown
10246     case TARGET_NR_chown:
10247         if (!(p = lock_user_string(arg1)))
10248             return -TARGET_EFAULT;
10249         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10250         unlock_user(p, arg1, 0);
10251         return ret;
10252 #endif
10253     case TARGET_NR_setuid:
10254         return get_errno(sys_setuid(low2highuid(arg1)));
10255     case TARGET_NR_setgid:
10256         return get_errno(sys_setgid(low2highgid(arg1)));
10257     case TARGET_NR_setfsuid:
10258         return get_errno(setfsuid(arg1));
10259     case TARGET_NR_setfsgid:
10260         return get_errno(setfsgid(arg1));
10261 
10262 #ifdef TARGET_NR_lchown32
10263     case TARGET_NR_lchown32:
10264         if (!(p = lock_user_string(arg1)))
10265             return -TARGET_EFAULT;
10266         ret = get_errno(lchown(p, arg2, arg3));
10267         unlock_user(p, arg1, 0);
10268         return ret;
10269 #endif
10270 #ifdef TARGET_NR_getuid32
10271     case TARGET_NR_getuid32:
10272         return get_errno(getuid());
10273 #endif
10274 
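    /*
     * The Alpha getx?id calls return two values: the real ID as the
     * syscall result and the effective ID in register a4 (hence the
     * explicit write to ir[IR_A4] below).
     */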
10275 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10276     /* Alpha specific */
10277     case TARGET_NR_getxuid:
10278         {
10279             uid_t euid;
10280             euid = geteuid();
10281             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10282         }
10283         return get_errno(getuid());
10284 #endif
10285 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10286     /* Alpha specific */
10287     case TARGET_NR_getxgid:
10288         {
10289             gid_t egid;
10290             egid = getegid();
10291             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10292         }
10293         return get_errno(getgid());
10294 #endif
10295 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10296     /* Alpha specific */
10297     case TARGET_NR_osf_getsysinfo:
10298         ret = -TARGET_EOPNOTSUPP;
10299         switch (arg1) {
10300           case TARGET_GSI_IEEE_FP_CONTROL:
10301             {
10302                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10303                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10304 
10305                 swcr &= ~SWCR_STATUS_MASK;
10306                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10307 
10308                 if (put_user_u64 (swcr, arg2))
10309                         return -TARGET_EFAULT;
10310                 ret = 0;
10311             }
10312             break;
10313 
10314           /* case GSI_IEEE_STATE_AT_SIGNAL:
10315              -- Not implemented in linux kernel.
10316              case GSI_UACPROC:
10317              -- Retrieves current unaligned access state; not much used.
10318              case GSI_PROC_TYPE:
10319              -- Retrieves implver information; surely not used.
10320              case GSI_GET_HWRPB:
10321              -- Grabs a copy of the HWRPB; surely not used.
10322           */
10323         }
10324         return ret;
10325 #endif
10326 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10327     /* Alpha specific */
10328     case TARGET_NR_osf_setsysinfo:
10329         ret = -TARGET_EOPNOTSUPP;
10330         switch (arg1) {
10331           case TARGET_SSI_IEEE_FP_CONTROL:
10332             {
10333                 uint64_t swcr, fpcr;
10334 
10335                 if (get_user_u64 (swcr, arg2)) {
10336                     return -TARGET_EFAULT;
10337                 }
10338 
10339                 /*
10340                  * The kernel calls swcr_update_status to update the
10341                  * status bits from the fpcr at every point that it
10342                  * could be queried.  Therefore, we store the status
10343                  * bits only in FPCR.
10344                  */
10345                 ((CPUAlphaState *)cpu_env)->swcr
10346                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10347 
10348                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10349                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10350                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10351                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10352                 ret = 0;
10353             }
10354             break;
10355 
10356           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10357             {
10358                 uint64_t exc, fpcr, fex;
10359 
10360                 if (get_user_u64(exc, arg2)) {
10361                     return -TARGET_EFAULT;
10362                 }
10363                 exc &= SWCR_STATUS_MASK;
10364                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10365 
10366                 /* Old exceptions are not signaled.  */
10367                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10368                 fex = exc & ~fex;
10369                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10370                 fex &= ((CPUArchState *)cpu_env)->swcr;
10371 
10372                 /* Update the hardware fpcr.  */
10373                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10374                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10375 
10376                 if (fex) {
10377                     int si_code = TARGET_FPE_FLTUNK;
10378                     target_siginfo_t info;
10379 
10380                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10381                         si_code = TARGET_FPE_FLTUND;
10382                     }
10383                     if (fex & SWCR_TRAP_ENABLE_INE) {
10384                         si_code = TARGET_FPE_FLTRES;
10385                     }
10386                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10387                         si_code = TARGET_FPE_FLTUND;
10388                     }
10389                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10390                         si_code = TARGET_FPE_FLTOVF;
10391                     }
10392                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10393                         si_code = TARGET_FPE_FLTDIV;
10394                     }
10395                     if (fex & SWCR_TRAP_ENABLE_INV) {
10396                         si_code = TARGET_FPE_FLTINV;
10397                     }
10398 
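                    /*
                     * The checks above run in order, so when several
                     * enabled exceptions are pending the last match wins
                     * when picking si_code (invalid operation last).
                     */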
10399                     info.si_signo = SIGFPE;
10400                     info.si_errno = 0;
10401                     info.si_code = si_code;
10402                     info._sifields._sigfault._addr
10403                         = ((CPUArchState *)cpu_env)->pc;
10404                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10405                                  QEMU_SI_FAULT, &info);
10406                 }
10407                 ret = 0;
10408             }
10409             break;
10410 
10411           /* case SSI_NVPAIRS:
10412              -- Used with SSIN_UACPROC to enable unaligned accesses.
10413              case SSI_IEEE_STATE_AT_SIGNAL:
10414              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10415              -- Not implemented in linux kernel
10416           */
10417         }
10418         return ret;
10419 #endif
10420 #ifdef TARGET_NR_osf_sigprocmask
10421     /* Alpha specific.  */
10422     case TARGET_NR_osf_sigprocmask:
10423         {
10424             abi_ulong mask;
10425             int how;
10426             sigset_t set, oldset;
10427 
10428             switch(arg1) {
10429             case TARGET_SIG_BLOCK:
10430                 how = SIG_BLOCK;
10431                 break;
10432             case TARGET_SIG_UNBLOCK:
10433                 how = SIG_UNBLOCK;
10434                 break;
10435             case TARGET_SIG_SETMASK:
10436                 how = SIG_SETMASK;
10437                 break;
10438             default:
10439                 return -TARGET_EINVAL;
10440             }
10441             mask = arg2;
10442             target_to_host_old_sigset(&set, &mask);
10443             ret = do_sigprocmask(how, &set, &oldset);
10444             if (!ret) {
10445                 host_to_target_old_sigset(&mask, &oldset);
10446                 ret = mask;
10447             }
10448         }
10449         return ret;
10450 #endif
10451 
10452 #ifdef TARGET_NR_getgid32
10453     case TARGET_NR_getgid32:
10454         return get_errno(getgid());
10455 #endif
10456 #ifdef TARGET_NR_geteuid32
10457     case TARGET_NR_geteuid32:
10458         return get_errno(geteuid());
10459 #endif
10460 #ifdef TARGET_NR_getegid32
10461     case TARGET_NR_getegid32:
10462         return get_errno(getegid());
10463 #endif
10464 #ifdef TARGET_NR_setreuid32
10465     case TARGET_NR_setreuid32:
10466         return get_errno(setreuid(arg1, arg2));
10467 #endif
10468 #ifdef TARGET_NR_setregid32
10469     case TARGET_NR_setregid32:
10470         return get_errno(setregid(arg1, arg2));
10471 #endif
10472 #ifdef TARGET_NR_getgroups32
10473     case TARGET_NR_getgroups32:
10474         {
10475             int gidsetsize = arg1;
10476             uint32_t *target_grouplist;
10477             gid_t *grouplist;
10478             int i;
10479 
10480             grouplist = alloca(gidsetsize * sizeof(gid_t));
10481             ret = get_errno(getgroups(gidsetsize, grouplist));
10482             if (gidsetsize == 0)
10483                 return ret;
10484             if (!is_error(ret)) {
10485                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10486                 if (!target_grouplist) {
10487                     return -TARGET_EFAULT;
10488                 }
10489                 for (i = 0; i < ret; i++)
10490                     target_grouplist[i] = tswap32(grouplist[i]);
10491                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10492             }
10493         }
10494         return ret;
10495 #endif
10496 #ifdef TARGET_NR_setgroups32
10497     case TARGET_NR_setgroups32:
10498         {
10499             int gidsetsize = arg1;
10500             uint32_t *target_grouplist;
10501             gid_t *grouplist;
10502             int i;
10503 
10504             grouplist = alloca(gidsetsize * sizeof(gid_t));
10505             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10506             if (!target_grouplist) {
10507                 return -TARGET_EFAULT;
10508             }
10509             for (i = 0; i < gidsetsize; i++)
10510                 grouplist[i] = tswap32(target_grouplist[i]);
10511             unlock_user(target_grouplist, arg2, 0);
10512             return get_errno(setgroups(gidsetsize, grouplist));
10513         }
10514 #endif
10515 #ifdef TARGET_NR_fchown32
10516     case TARGET_NR_fchown32:
10517         return get_errno(fchown(arg1, arg2, arg3));
10518 #endif
10519 #ifdef TARGET_NR_setresuid32
10520     case TARGET_NR_setresuid32:
10521         return get_errno(sys_setresuid(arg1, arg2, arg3));
10522 #endif
10523 #ifdef TARGET_NR_getresuid32
10524     case TARGET_NR_getresuid32:
10525         {
10526             uid_t ruid, euid, suid;
10527             ret = get_errno(getresuid(&ruid, &euid, &suid));
10528             if (!is_error(ret)) {
10529                 if (put_user_u32(ruid, arg1)
10530                     || put_user_u32(euid, arg2)
10531                     || put_user_u32(suid, arg3))
10532                     return -TARGET_EFAULT;
10533             }
10534         }
10535         return ret;
10536 #endif
10537 #ifdef TARGET_NR_setresgid32
10538     case TARGET_NR_setresgid32:
10539         return get_errno(sys_setresgid(arg1, arg2, arg3));
10540 #endif
10541 #ifdef TARGET_NR_getresgid32
10542     case TARGET_NR_getresgid32:
10543         {
10544             gid_t rgid, egid, sgid;
10545             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10546             if (!is_error(ret)) {
10547                 if (put_user_u32(rgid, arg1)
10548                     || put_user_u32(egid, arg2)
10549                     || put_user_u32(sgid, arg3))
10550                     return -TARGET_EFAULT;
10551             }
10552         }
10553         return ret;
10554 #endif
10555 #ifdef TARGET_NR_chown32
10556     case TARGET_NR_chown32:
10557         if (!(p = lock_user_string(arg1)))
10558             return -TARGET_EFAULT;
10559         ret = get_errno(chown(p, arg2, arg3));
10560         unlock_user(p, arg1, 0);
10561         return ret;
10562 #endif
10563 #ifdef TARGET_NR_setuid32
10564     case TARGET_NR_setuid32:
10565         return get_errno(sys_setuid(arg1));
10566 #endif
10567 #ifdef TARGET_NR_setgid32
10568     case TARGET_NR_setgid32:
10569         return get_errno(sys_setgid(arg1));
10570 #endif
10571 #ifdef TARGET_NR_setfsuid32
10572     case TARGET_NR_setfsuid32:
10573         return get_errno(setfsuid(arg1));
10574 #endif
10575 #ifdef TARGET_NR_setfsgid32
10576     case TARGET_NR_setfsgid32:
10577         return get_errno(setfsgid(arg1));
10578 #endif
10579 #ifdef TARGET_NR_mincore
10580     case TARGET_NR_mincore:
10581         {
10582             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10583             if (!a) {
10584                 return -TARGET_ENOMEM;
10585             }
10586             p = lock_user_string(arg3);
10587             if (!p) {
10588                 ret = -TARGET_EFAULT;
10589             } else {
10590                 ret = get_errno(mincore(a, arg2, p));
10591                 unlock_user(p, arg3, ret);
10592             }
10593             unlock_user(a, arg1, 0);
10594         }
10595         return ret;
10596 #endif
10597 #ifdef TARGET_NR_arm_fadvise64_64
10598     case TARGET_NR_arm_fadvise64_64:
10599         /* arm_fadvise64_64 looks like fadvise64_64 but
10600          * with different argument order: fd, advice, offset, len
10601          * rather than the usual fd, offset, len, advice.
10602          * Note that offset and len are both 64-bit so appear as
10603          * pairs of 32-bit registers.
10604          */
10605         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10606                             target_offset64(arg5, arg6), arg2);
10607         return -host_to_target_errno(ret);
10608 #endif
10609 
10610 #if TARGET_ABI_BITS == 32
10611 
10612 #ifdef TARGET_NR_fadvise64_64
10613     case TARGET_NR_fadvise64_64:
10614 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10615         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10616         ret = arg2;
10617         arg2 = arg3;
10618         arg3 = arg4;
10619         arg4 = arg5;
10620         arg5 = arg6;
10621         arg6 = ret;
10622 #else
10623         /* 6 args: fd, offset (high, low), len (high, low), advice */
10624         if (regpairs_aligned(cpu_env, num)) {
10625             /* offset is in (3,4), len in (5,6) and advice in 7 */
10626             arg2 = arg3;
10627             arg3 = arg4;
10628             arg4 = arg5;
10629             arg5 = arg6;
10630             arg6 = arg7;
10631         }
10632 #endif
10633         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10634                             target_offset64(arg4, arg5), arg6);
10635         return -host_to_target_errno(ret);
10636 #endif
10637 
10638 #ifdef TARGET_NR_fadvise64
10639     case TARGET_NR_fadvise64:
10640         /* 5 args: fd, offset (high, low), len, advice */
10641         if (regpairs_aligned(cpu_env, num)) {
10642             /* offset is in (3,4), len in 5 and advice in 6 */
10643             arg2 = arg3;
10644             arg3 = arg4;
10645             arg4 = arg5;
10646             arg5 = arg6;
10647         }
10648         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10649         return -host_to_target_errno(ret);
10650 #endif
10651 
10652 #else /* not a 32-bit ABI */
10653 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10654 #ifdef TARGET_NR_fadvise64_64
10655     case TARGET_NR_fadvise64_64:
10656 #endif
10657 #ifdef TARGET_NR_fadvise64
10658     case TARGET_NR_fadvise64:
10659 #endif
10660 #ifdef TARGET_S390X
10661         switch (arg4) {
10662         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10663         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10664         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10665         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10666         default: break;
10667         }
10668 #endif
10669         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10670 #endif
10671 #endif /* end of 64-bit ABI fadvise handling */
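    /*
     * Background for the fadvise cases above: on several 32-bit ABIs a
     * 64-bit syscall argument must sit in an aligned (even/odd) register
     * pair, which is what regpairs_aligned() reports; when a pad register
     * is inserted, the remaining arguments all shift up one slot, hence the
     * argN reshuffling.  target_offset64() then reassembles the value from
     * its two 32-bit halves, roughly (which half is the high word depends
     * on the target's endianness):
     *
     *     off = ((uint64_t)hi << 32) | lo;    // hi/lo: illustrative names
     */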
10672 
10673 #ifdef TARGET_NR_madvise
10674     case TARGET_NR_madvise:
10675         /* A straight passthrough may not be safe because qemu sometimes
10676            turns private file-backed mappings into anonymous mappings.
10677            This will break MADV_DONTNEED.
10678            This is a hint, so ignoring and returning success is ok.  */
10679         return 0;
10680 #endif
10681 #if TARGET_ABI_BITS == 32
10682     case TARGET_NR_fcntl64:
10683     {
10684         int cmd;
10685         struct flock64 fl;
10686         from_flock64_fn *copyfrom = copy_from_user_flock64;
10687         to_flock64_fn *copyto = copy_to_user_flock64;
10688 
10689 #ifdef TARGET_ARM
10690         if (!((CPUARMState *)cpu_env)->eabi) {
10691             copyfrom = copy_from_user_oabi_flock64;
10692             copyto = copy_to_user_oabi_flock64;
10693         }
10694 #endif
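        /*
         * The OABI variants are needed because the old ARM OABI lays out
         * struct flock64 without the padding that EABI's 8-byte alignment
         * of 64-bit members introduces, so the copy helpers essentially
         * differ in the field offsets they use, not in the fields.
         */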
10695 
10696         cmd = target_to_host_fcntl_cmd(arg2);
10697         if (cmd == -TARGET_EINVAL) {
10698             return cmd;
10699         }
10700 
10701         switch (arg2) {
10702         case TARGET_F_GETLK64:
10703             ret = copyfrom(&fl, arg3);
10704             if (ret) {
10705                 break;
10706             }
10707             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10708             if (ret == 0) {
10709                 ret = copyto(arg3, &fl);
10710             }
10711             break;
10712 
10713         case TARGET_F_SETLK64:
10714         case TARGET_F_SETLKW64:
10715             ret = copyfrom(&fl, arg3);
10716             if (ret) {
10717                 break;
10718             }
10719             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10720             break;
10721         default:
10722             ret = do_fcntl(arg1, arg2, arg3);
10723             break;
10724         }
10725         return ret;
10726     }
10727 #endif
10728 #ifdef TARGET_NR_cacheflush
10729     case TARGET_NR_cacheflush:
10730         /* self-modifying code is handled automatically, so nothing needed */
10731         return 0;
10732 #endif
10733 #ifdef TARGET_NR_getpagesize
10734     case TARGET_NR_getpagesize:
10735         return TARGET_PAGE_SIZE;
10736 #endif
10737     case TARGET_NR_gettid:
10738         return get_errno(sys_gettid());
10739 #ifdef TARGET_NR_readahead
10740     case TARGET_NR_readahead:
10741 #if TARGET_ABI_BITS == 32
10742         if (regpairs_aligned(cpu_env, num)) {
10743             arg2 = arg3;
10744             arg3 = arg4;
10745             arg4 = arg5;
10746         }
10747         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10748 #else
10749         ret = get_errno(readahead(arg1, arg2, arg3));
10750 #endif
10751         return ret;
10752 #endif
10753 #ifdef CONFIG_ATTR
10754 #ifdef TARGET_NR_setxattr
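    /*
     * The extended-attribute cases below all follow one pattern: lock the
     * guest name/value buffers into host memory, call the corresponding
     * host xattr function directly (names and values are opaque byte
     * strings, so no conversion is needed beyond the copy), and return
     * -TARGET_EFAULT if any guest pointer cannot be locked.
     */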
10755     case TARGET_NR_listxattr:
10756     case TARGET_NR_llistxattr:
10757     {
10758         void *p, *b = 0;
10759         if (arg2) {
10760             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10761             if (!b) {
10762                 return -TARGET_EFAULT;
10763             }
10764         }
10765         p = lock_user_string(arg1);
10766         if (p) {
10767             if (num == TARGET_NR_listxattr) {
10768                 ret = get_errno(listxattr(p, b, arg3));
10769             } else {
10770                 ret = get_errno(llistxattr(p, b, arg3));
10771             }
10772         } else {
10773             ret = -TARGET_EFAULT;
10774         }
10775         unlock_user(p, arg1, 0);
10776         unlock_user(b, arg2, arg3);
10777         return ret;
10778     }
10779     case TARGET_NR_flistxattr:
10780     {
10781         void *b = 0;
10782         if (arg2) {
10783             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10784             if (!b) {
10785                 return -TARGET_EFAULT;
10786             }
10787         }
10788         ret = get_errno(flistxattr(arg1, b, arg3));
10789         unlock_user(b, arg2, arg3);
10790         return ret;
10791     }
10792     case TARGET_NR_setxattr:
10793     case TARGET_NR_lsetxattr:
10794         {
10795             void *p, *n, *v = 0;
10796             if (arg3) {
10797                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10798                 if (!v) {
10799                     return -TARGET_EFAULT;
10800                 }
10801             }
10802             p = lock_user_string(arg1);
10803             n = lock_user_string(arg2);
10804             if (p && n) {
10805                 if (num == TARGET_NR_setxattr) {
10806                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10807                 } else {
10808                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10809                 }
10810             } else {
10811                 ret = -TARGET_EFAULT;
10812             }
10813             unlock_user(p, arg1, 0);
10814             unlock_user(n, arg2, 0);
10815             unlock_user(v, arg3, 0);
10816         }
10817         return ret;
10818     case TARGET_NR_fsetxattr:
10819         {
10820             void *n, *v = 0;
10821             if (arg3) {
10822                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10823                 if (!v) {
10824                     return -TARGET_EFAULT;
10825                 }
10826             }
10827             n = lock_user_string(arg2);
10828             if (n) {
10829                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10830             } else {
10831                 ret = -TARGET_EFAULT;
10832             }
10833             unlock_user(n, arg2, 0);
10834             unlock_user(v, arg3, 0);
10835         }
10836         return ret;
10837     case TARGET_NR_getxattr:
10838     case TARGET_NR_lgetxattr:
10839         {
10840             void *p, *n, *v = 0;
10841             if (arg3) {
10842                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10843                 if (!v) {
10844                     return -TARGET_EFAULT;
10845                 }
10846             }
10847             p = lock_user_string(arg1);
10848             n = lock_user_string(arg2);
10849             if (p && n) {
10850                 if (num == TARGET_NR_getxattr) {
10851                     ret = get_errno(getxattr(p, n, v, arg4));
10852                 } else {
10853                     ret = get_errno(lgetxattr(p, n, v, arg4));
10854                 }
10855             } else {
10856                 ret = -TARGET_EFAULT;
10857             }
10858             unlock_user(p, arg1, 0);
10859             unlock_user(n, arg2, 0);
10860             unlock_user(v, arg3, arg4);
10861         }
10862         return ret;
10863     case TARGET_NR_fgetxattr:
10864         {
10865             void *n, *v = 0;
10866             if (arg3) {
10867                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10868                 if (!v) {
10869                     return -TARGET_EFAULT;
10870                 }
10871             }
10872             n = lock_user_string(arg2);
10873             if (n) {
10874                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10875             } else {
10876                 ret = -TARGET_EFAULT;
10877             }
10878             unlock_user(n, arg2, 0);
10879             unlock_user(v, arg3, arg4);
10880         }
10881         return ret;
10882     case TARGET_NR_removexattr:
10883     case TARGET_NR_lremovexattr:
10884         {
10885             void *p, *n;
10886             p = lock_user_string(arg1);
10887             n = lock_user_string(arg2);
10888             if (p && n) {
10889                 if (num == TARGET_NR_removexattr) {
10890                     ret = get_errno(removexattr(p, n));
10891                 } else {
10892                     ret = get_errno(lremovexattr(p, n));
10893                 }
10894             } else {
10895                 ret = -TARGET_EFAULT;
10896             }
10897             unlock_user(p, arg1, 0);
10898             unlock_user(n, arg2, 0);
10899         }
10900         return ret;
10901     case TARGET_NR_fremovexattr:
10902         {
10903             void *n;
10904             n = lock_user_string(arg2);
10905             if (n) {
10906                 ret = get_errno(fremovexattr(arg1, n));
10907             } else {
10908                 ret = -TARGET_EFAULT;
10909             }
10910             unlock_user(n, arg2, 0);
10911         }
10912         return ret;
10913 #endif
10914 #endif /* CONFIG_ATTR */
10915 #ifdef TARGET_NR_set_thread_area
10916     case TARGET_NR_set_thread_area:
10917 #if defined(TARGET_MIPS)
10918       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10919       return 0;
10920 #elif defined(TARGET_CRIS)
10921       if (arg1 & 0xff)
10922           ret = -TARGET_EINVAL;
10923       else {
10924           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10925           ret = 0;
10926       }
10927       return ret;
10928 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10929       return do_set_thread_area(cpu_env, arg1);
10930 #elif defined(TARGET_M68K)
10931       {
10932           TaskState *ts = cpu->opaque;
10933           ts->tp_value = arg1;
10934           return 0;
10935       }
10936 #else
10937       return -TARGET_ENOSYS;
10938 #endif
10939 #endif
10940 #ifdef TARGET_NR_get_thread_area
10941     case TARGET_NR_get_thread_area:
10942 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10943         return do_get_thread_area(cpu_env, arg1);
10944 #elif defined(TARGET_M68K)
10945         {
10946             TaskState *ts = cpu->opaque;
10947             return ts->tp_value;
10948         }
10949 #else
10950         return -TARGET_ENOSYS;
10951 #endif
10952 #endif
10953 #ifdef TARGET_NR_getdomainname
10954     case TARGET_NR_getdomainname:
10955         return -TARGET_ENOSYS;
10956 #endif
10957 
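    /*
     * The clock_* cases below rely on target_to_host_timespec() and
     * host_to_target_timespec() to translate between the guest's
     * struct target_timespec (essentially a pair of abi_long fields in
     * guest byte order) and the host struct timespec; the values
     * themselves are passed through unchanged.
     */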
10958 #ifdef TARGET_NR_clock_settime
10959     case TARGET_NR_clock_settime:
10960     {
10961         struct timespec ts;
10962 
10963         ret = target_to_host_timespec(&ts, arg2);
10964         if (!is_error(ret)) {
10965             ret = get_errno(clock_settime(arg1, &ts));
10966         }
10967         return ret;
10968     }
10969 #endif
10970 #ifdef TARGET_NR_clock_gettime
10971     case TARGET_NR_clock_gettime:
10972     {
10973         struct timespec ts;
10974         ret = get_errno(clock_gettime(arg1, &ts));
10975         if (!is_error(ret)) {
10976             ret = host_to_target_timespec(arg2, &ts);
10977         }
10978         return ret;
10979     }
10980 #endif
10981 #ifdef TARGET_NR_clock_getres
10982     case TARGET_NR_clock_getres:
10983     {
10984         struct timespec ts;
10985         ret = get_errno(clock_getres(arg1, &ts));
10986         if (!is_error(ret)) {
10987             host_to_target_timespec(arg2, &ts);
10988         }
10989         return ret;
10990     }
10991 #endif
10992 #ifdef TARGET_NR_clock_nanosleep
10993     case TARGET_NR_clock_nanosleep:
10994     {
10995         struct timespec ts;
10996         target_to_host_timespec(&ts, arg3);
10997         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10998                                              &ts, arg4 ? &ts : NULL));
10999         if (arg4)
11000             host_to_target_timespec(arg4, &ts);
11001 
11002 #if defined(TARGET_PPC)
11003         /* clock_nanosleep is odd in that it returns positive errno values.
11004          * On PPC, CR0 bit 3 should be set in such a situation. */
11005         if (ret && ret != -TARGET_ERESTARTSYS) {
11006             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11007         }
11008 #endif
11009         return ret;
11010     }
11011 #endif
11012 
11013 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11014     case TARGET_NR_set_tid_address:
11015         return get_errno(set_tid_address((int *)g2h(arg1)));
11016 #endif
11017 
11018     case TARGET_NR_tkill:
11019         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11020 
11021     case TARGET_NR_tgkill:
11022         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11023                          target_to_host_signal(arg3)));
11024 
11025 #ifdef TARGET_NR_set_robust_list
11026     case TARGET_NR_set_robust_list:
11027     case TARGET_NR_get_robust_list:
11028         /* The ABI for supporting robust futexes has userspace pass
11029          * the kernel a pointer to a linked list which is updated by
11030          * userspace after the syscall; the list is walked by the kernel
11031          * when the thread exits. Since the linked list in QEMU guest
11032          * memory isn't a valid linked list for the host and we have
11033          * no way to reliably intercept the thread-death event, we can't
11034          * support these. Silently return ENOSYS so that guest userspace
11035          * falls back to a non-robust futex implementation (which should
11036          * be OK except in the corner case of the guest crashing while
11037          * holding a mutex that is shared with another process via
11038          * shared memory).
11039          */
11040         return -TARGET_ENOSYS;
11041 #endif
11042 
11043 #if defined(TARGET_NR_utimensat)
11044     case TARGET_NR_utimensat:
11045         {
11046             struct timespec *tsp, ts[2];
11047             if (!arg3) {
11048                 tsp = NULL;
11049             } else {
11050                 target_to_host_timespec(ts, arg3);
11051                 target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
11052                 tsp = ts;
11053             }
11054             if (!arg2)
11055                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11056             else {
11057                 if (!(p = lock_user_string(arg2))) {
11058                     return -TARGET_EFAULT;
11059                 }
11060                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11061                 unlock_user(p, arg2, 0);
11062             }
11063         }
11064         return ret;
11065 #endif
11066     case TARGET_NR_futex:
11067         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11068 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11069     case TARGET_NR_inotify_init:
11070         ret = get_errno(sys_inotify_init());
11071         if (ret >= 0) {
11072             fd_trans_register(ret, &target_inotify_trans);
11073         }
11074         return ret;
11075 #endif
11076 #ifdef CONFIG_INOTIFY1
11077 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11078     case TARGET_NR_inotify_init1:
11079         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11080                                           fcntl_flags_tbl)));
11081         if (ret >= 0) {
11082             fd_trans_register(ret, &target_inotify_trans);
11083         }
11084         return ret;
11085 #endif
11086 #endif
11087 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11088     case TARGET_NR_inotify_add_watch:
11089         p = lock_user_string(arg2);
11090         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11091         unlock_user(p, arg2, 0);
11092         return ret;
11093 #endif
11094 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11095     case TARGET_NR_inotify_rm_watch:
11096         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11097 #endif
11098 
11099 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11100     case TARGET_NR_mq_open:
11101         {
11102             struct mq_attr posix_mq_attr;
11103             struct mq_attr *pposix_mq_attr;
11104             int host_flags;
11105 
11106             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11107             pposix_mq_attr = NULL;
11108             if (arg4) {
11109                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11110                     return -TARGET_EFAULT;
11111                 }
11112                 pposix_mq_attr = &posix_mq_attr;
11113             }
11114             p = lock_user_string(arg1 - 1);
11115             if (!p) {
11116                 return -TARGET_EFAULT;
11117             }
11118             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11119             unlock_user(p, arg1, 0);
11120         }
11121         return ret;
11122 
11123     case TARGET_NR_mq_unlink:
11124         p = lock_user_string(arg1 - 1);
11125         if (!p) {
11126             return -TARGET_EFAULT;
11127         }
11128         ret = get_errno(mq_unlink(p));
11129         unlock_user(p, arg1, 0);
11130         return ret;
11131 
11132     case TARGET_NR_mq_timedsend:
11133         {
11134             struct timespec ts;
11135 
11136             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11137             if (arg5 != 0) {
11138                 target_to_host_timespec(&ts, arg5);
11139                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11140                 host_to_target_timespec(arg5, &ts);
11141             } else {
11142                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11143             }
11144             unlock_user(p, arg2, arg3);
11145         }
11146         return ret;
11147 
11148     case TARGET_NR_mq_timedreceive:
11149         {
11150             struct timespec ts;
11151             unsigned int prio;
11152 
11153             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11154             if (arg5 != 0) {
11155                 target_to_host_timespec(&ts, arg5);
11156                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11157                                                      &prio, &ts));
11158                 host_to_target_timespec(arg5, &ts);
11159             } else {
11160                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11161                                                      &prio, NULL));
11162             }
11163             unlock_user(p, arg2, arg3);
11164             if (arg4 != 0)
11165                 put_user_u32(prio, arg4);
11166         }
11167         return ret;
11168 
11169     /* Not implemented for now... */
11170 /*     case TARGET_NR_mq_notify: */
11171 /*         break; */
11172 
11173     case TARGET_NR_mq_getsetattr:
11174         {
11175             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11176             ret = 0;
11177             if (arg2 != 0) {
11178                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11179                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11180                                            &posix_mq_attr_out));
11181             } else if (arg3 != 0) {
11182                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11183             }
11184             if (ret == 0 && arg3 != 0) {
11185                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11186             }
11187         }
11188         return ret;
11189 #endif
11190 
11191 #ifdef CONFIG_SPLICE
11192 #ifdef TARGET_NR_tee
11193     case TARGET_NR_tee:
11194         {
11195             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11196         }
11197         return ret;
11198 #endif
11199 #ifdef TARGET_NR_splice
11200     case TARGET_NR_splice:
11201         {
11202             loff_t loff_in, loff_out;
11203             loff_t *ploff_in = NULL, *ploff_out = NULL;
11204             if (arg2) {
11205                 if (get_user_u64(loff_in, arg2)) {
11206                     return -TARGET_EFAULT;
11207                 }
11208                 ploff_in = &loff_in;
11209             }
11210             if (arg4) {
11211                 if (get_user_u64(loff_out, arg4)) {
11212                     return -TARGET_EFAULT;
11213                 }
11214                 ploff_out = &loff_out;
11215             }
11216             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11217             if (arg2) {
11218                 if (put_user_u64(loff_in, arg2)) {
11219                     return -TARGET_EFAULT;
11220                 }
11221             }
11222             if (arg4) {
11223                 if (put_user_u64(loff_out, arg4)) {
11224                     return -TARGET_EFAULT;
11225                 }
11226             }
11227         }
11228         return ret;
11229 #endif
11230 #ifdef TARGET_NR_vmsplice
11231     case TARGET_NR_vmsplice:
11232         {
11233             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11234             if (vec != NULL) {
11235                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11236                 unlock_iovec(vec, arg2, arg3, 0);
11237             } else {
11238                 ret = -host_to_target_errno(errno);
11239             }
11240         }
11241         return ret;
11242 #endif
11243 #endif /* CONFIG_SPLICE */
11244 #ifdef CONFIG_EVENTFD
11245 #if defined(TARGET_NR_eventfd)
11246     case TARGET_NR_eventfd:
11247         ret = get_errno(eventfd(arg1, 0));
11248         if (ret >= 0) {
11249             fd_trans_register(ret, &target_eventfd_trans);
11250         }
11251         return ret;
11252 #endif
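    /*
     * eventfd2 translates its flags by hand below: EFD_NONBLOCK and
     * EFD_CLOEXEC share their numeric values with O_NONBLOCK/O_CLOEXEC,
     * and the guest's TARGET_O_* constants need not match the host's, so
     * each flag is tested and remapped individually rather than passed
     * straight through.
     */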
11253 #if defined(TARGET_NR_eventfd2)
11254     case TARGET_NR_eventfd2:
11255     {
11256         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11257         if (arg2 & TARGET_O_NONBLOCK) {
11258             host_flags |= O_NONBLOCK;
11259         }
11260         if (arg2 & TARGET_O_CLOEXEC) {
11261             host_flags |= O_CLOEXEC;
11262         }
11263         ret = get_errno(eventfd(arg1, host_flags));
11264         if (ret >= 0) {
11265             fd_trans_register(ret, &target_eventfd_trans);
11266         }
11267         return ret;
11268     }
11269 #endif
11270 #endif /* CONFIG_EVENTFD  */
11271 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11272     case TARGET_NR_fallocate:
11273 #if TARGET_ABI_BITS == 32
11274         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11275                                   target_offset64(arg5, arg6)));
11276 #else
11277         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11278 #endif
11279         return ret;
11280 #endif
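    /*
     * In the sync_file_range case below, 32-bit MIPS is special-cased
     * because its o32 calling convention wants 64-bit arguments in even/odd
     * register pairs: a pad slot after the fd pushes offset into
     * (arg3, arg4), nbytes into (arg5, arg6) and the flags out to arg7.
     */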
11281 #if defined(CONFIG_SYNC_FILE_RANGE)
11282 #if defined(TARGET_NR_sync_file_range)
11283     case TARGET_NR_sync_file_range:
11284 #if TARGET_ABI_BITS == 32
11285 #if defined(TARGET_MIPS)
11286         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11287                                         target_offset64(arg5, arg6), arg7));
11288 #else
11289         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11290                                         target_offset64(arg4, arg5), arg6));
11291 #endif /* !TARGET_MIPS */
11292 #else
11293         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11294 #endif
11295         return ret;
11296 #endif
11297 #if defined(TARGET_NR_sync_file_range2)
11298     case TARGET_NR_sync_file_range2:
11299         /* This is like sync_file_range but the arguments are reordered */
11300 #if TARGET_ABI_BITS == 32
11301         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11302                                         target_offset64(arg5, arg6), arg2));
11303 #else
11304         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11305 #endif
11306         return ret;
11307 #endif
11308 #endif
11309 #if defined(TARGET_NR_signalfd4)
11310     case TARGET_NR_signalfd4:
11311         return do_signalfd4(arg1, arg2, arg4);
11312 #endif
11313 #if defined(TARGET_NR_signalfd)
11314     case TARGET_NR_signalfd:
11315         return do_signalfd4(arg1, arg2, 0);
11316 #endif
11317 #if defined(CONFIG_EPOLL)
11318 #if defined(TARGET_NR_epoll_create)
11319     case TARGET_NR_epoll_create:
11320         return get_errno(epoll_create(arg1));
11321 #endif
11322 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11323     case TARGET_NR_epoll_create1:
11324         return get_errno(epoll_create1(arg1));
11325 #endif
11326 #if defined(TARGET_NR_epoll_ctl)
11327     case TARGET_NR_epoll_ctl:
11328     {
11329         struct epoll_event ep;
11330         struct epoll_event *epp = 0;
11331         if (arg4) {
11332             struct target_epoll_event *target_ep;
11333             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11334                 return -TARGET_EFAULT;
11335             }
11336             ep.events = tswap32(target_ep->events);
11337             /* The epoll_data_t union is just opaque data to the kernel,
11338              * so we transfer all 64 bits across and need not worry what
11339              * actual data type it is.
11340              */
11341             ep.data.u64 = tswap64(target_ep->data.u64);
11342             unlock_user_struct(target_ep, arg4, 0);
11343             epp = &ep;
11344         }
11345         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11346     }
11347 #endif
11348 
11349 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11350 #if defined(TARGET_NR_epoll_wait)
11351     case TARGET_NR_epoll_wait:
11352 #endif
11353 #if defined(TARGET_NR_epoll_pwait)
11354     case TARGET_NR_epoll_pwait:
11355 #endif
11356     {
11357         struct target_epoll_event *target_ep;
11358         struct epoll_event *ep;
11359         int epfd = arg1;
11360         int maxevents = arg3;
11361         int timeout = arg4;
11362 
11363         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11364             return -TARGET_EINVAL;
11365         }
11366 
11367         target_ep = lock_user(VERIFY_WRITE, arg2,
11368                               maxevents * sizeof(struct target_epoll_event), 1);
11369         if (!target_ep) {
11370             return -TARGET_EFAULT;
11371         }
11372 
11373         ep = g_try_new(struct epoll_event, maxevents);
11374         if (!ep) {
11375             unlock_user(target_ep, arg2, 0);
11376             return -TARGET_ENOMEM;
11377         }
11378 
11379         switch (num) {
11380 #if defined(TARGET_NR_epoll_pwait)
11381         case TARGET_NR_epoll_pwait:
11382         {
11383             target_sigset_t *target_set;
11384             sigset_t _set, *set = &_set;
11385 
11386             if (arg5) {
11387                 if (arg6 != sizeof(target_sigset_t)) {
11388                     ret = -TARGET_EINVAL;
11389                     break;
11390                 }
11391 
11392                 target_set = lock_user(VERIFY_READ, arg5,
11393                                        sizeof(target_sigset_t), 1);
11394                 if (!target_set) {
11395                     ret = -TARGET_EFAULT;
11396                     break;
11397                 }
11398                 target_to_host_sigset(set, target_set);
11399                 unlock_user(target_set, arg5, 0);
11400             } else {
11401                 set = NULL;
11402             }
11403 
11404             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11405                                              set, SIGSET_T_SIZE));
11406             break;
11407         }
11408 #endif
11409 #if defined(TARGET_NR_epoll_wait)
11410         case TARGET_NR_epoll_wait:
11411             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11412                                              NULL, 0));
11413             break;
11414 #endif
11415         default:
11416             ret = -TARGET_ENOSYS;
11417         }
11418         if (!is_error(ret)) {
11419             int i;
11420             for (i = 0; i < ret; i++) {
11421                 target_ep[i].events = tswap32(ep[i].events);
11422                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11423             }
11424             unlock_user(target_ep, arg2,
11425                         ret * sizeof(struct target_epoll_event));
11426         } else {
11427             unlock_user(target_ep, arg2, 0);
11428         }
11429         g_free(ep);
11430         return ret;
11431     }
11432 #endif
11433 #endif
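    /*
     * prlimit64 below needs no structural translation: both struct
     * target_rlimit64 and struct host_rlimit64 carry rlim_cur/rlim_max as
     * explicit 64-bit fields, so the limits only have to be byte-swapped
     * with tswap64() in each direction.
     */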
11434 #ifdef TARGET_NR_prlimit64
11435     case TARGET_NR_prlimit64:
11436     {
11437         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11438         struct target_rlimit64 *target_rnew, *target_rold;
11439         struct host_rlimit64 rnew, rold, *rnewp = 0;
11440         int resource = target_to_host_resource(arg2);
11441         if (arg3) {
11442             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11443                 return -TARGET_EFAULT;
11444             }
11445             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11446             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11447             unlock_user_struct(target_rnew, arg3, 0);
11448             rnewp = &rnew;
11449         }
11450 
11451         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11452         if (!is_error(ret) && arg4) {
11453             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11454                 return -TARGET_EFAULT;
11455             }
11456             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11457             target_rold->rlim_max = tswap64(rold.rlim_max);
11458             unlock_user_struct(target_rold, arg4, 1);
11459         }
11460         return ret;
11461     }
11462 #endif
11463 #ifdef TARGET_NR_gethostname
11464     case TARGET_NR_gethostname:
11465     {
11466         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11467         if (name) {
11468             ret = get_errno(gethostname(name, arg2));
11469             unlock_user(name, arg1, arg2);
11470         } else {
11471             ret = -TARGET_EFAULT;
11472         }
11473         return ret;
11474     }
11475 #endif
11476 #ifdef TARGET_NR_atomic_cmpxchg_32
11477     case TARGET_NR_atomic_cmpxchg_32:
11478     {
11479         /* should use start_exclusive from main.c */
11480         abi_ulong mem_value;
11481         if (get_user_u32(mem_value, arg6)) {
11482             target_siginfo_t info;
11483             info.si_signo = SIGSEGV;
11484             info.si_errno = 0;
11485             info.si_code = TARGET_SEGV_MAPERR;
11486             info._sifields._sigfault._addr = arg6;
11487             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11488                          QEMU_SI_FAULT, &info);
11489             ret = 0xdeadbeef;
11490 
11491         }
11492         if (mem_value == arg2)
11493             put_user_u32(arg1, arg6);
11494         return mem_value;
11495     }
11496 #endif
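    /*
     * Note that the cmpxchg emulation above is not atomic with respect to
     * other guest threads: the word can change between the get_user_u32()
     * and the put_user_u32().  Wrapping the sequence in start_exclusive()
     * and end_exclusive(), as the in-case comment suggests, would keep the
     * other vCPUs stopped for the duration of the update.
     */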
11497 #ifdef TARGET_NR_atomic_barrier
11498     case TARGET_NR_atomic_barrier:
11499         /* Like the kernel implementation and the
11500            qemu arm barrier, this can simply be a no-op. */
11501         return 0;
11502 #endif
11503 
11504 #ifdef TARGET_NR_timer_create
11505     case TARGET_NR_timer_create:
11506     {
11507         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11508 
11509         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11510 
11511         int clkid = arg1;
11512         int timer_index = next_free_host_timer();
11513 
11514         if (timer_index < 0) {
11515             ret = -TARGET_EAGAIN;
11516         } else {
11517             timer_t *phtimer = g_posix_timers + timer_index;
11518 
11519             if (arg2) {
11520                 phost_sevp = &host_sevp;
11521                 ret = target_to_host_sigevent(phost_sevp, arg2);
11522                 if (ret != 0) {
11523                     return ret;
11524                 }
11525             }
11526 
11527             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11528             if (ret) {
11529                 phtimer = NULL;
11530             } else {
11531                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11532                     return -TARGET_EFAULT;
11533                 }
11534             }
11535         }
11536         return ret;
11537     }
11538 #endif
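    /*
     * The guest-visible timer id written out by timer_create above is not
     * a host timer_t: it is an index into g_posix_timers[] tagged with
     * TIMER_MAGIC.  The timer_* cases below recover it via get_timer_id(),
     * which roughly does:
     *
     *     if ((id & TIMER_MAGIC_MASK) != TIMER_MAGIC) return -TARGET_EINVAL;
     *     return id & 0xffff;            // the index, also range-checked
     */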
11539 
11540 #ifdef TARGET_NR_timer_settime
11541     case TARGET_NR_timer_settime:
11542     {
11543         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11544          * struct itimerspec * old_value */
11545         target_timer_t timerid = get_timer_id(arg1);
11546 
11547         if (timerid < 0) {
11548             ret = timerid;
11549         } else if (arg3 == 0) {
11550             ret = -TARGET_EINVAL;
11551         } else {
11552             timer_t htimer = g_posix_timers[timerid];
11553             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11554 
11555             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11556                 return -TARGET_EFAULT;
11557             }
11558             ret = get_errno(
11559                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11560             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11561                 return -TARGET_EFAULT;
11562             }
11563         }
11564         return ret;
11565     }
11566 #endif
11567 
11568 #ifdef TARGET_NR_timer_gettime
11569     case TARGET_NR_timer_gettime:
11570     {
11571         /* args: timer_t timerid, struct itimerspec *curr_value */
11572         target_timer_t timerid = get_timer_id(arg1);
11573 
11574         if (timerid < 0) {
11575             ret = timerid;
11576         } else if (!arg2) {
11577             ret = -TARGET_EFAULT;
11578         } else {
11579             timer_t htimer = g_posix_timers[timerid];
11580             struct itimerspec hspec;
11581             ret = get_errno(timer_gettime(htimer, &hspec));
11582 
11583             if (host_to_target_itimerspec(arg2, &hspec)) {
11584                 ret = -TARGET_EFAULT;
11585             }
11586         }
11587         return ret;
11588     }
11589 #endif
11590 
11591 #ifdef TARGET_NR_timer_getoverrun
11592     case TARGET_NR_timer_getoverrun:
11593     {
11594         /* args: timer_t timerid */
11595         target_timer_t timerid = get_timer_id(arg1);
11596 
11597         if (timerid < 0) {
11598             ret = timerid;
11599         } else {
11600             timer_t htimer = g_posix_timers[timerid];
11601             ret = get_errno(timer_getoverrun(htimer));
11602         }
11603         fd_trans_unregister(ret);
11604         return ret;
11605     }
11606 #endif
11607 
11608 #ifdef TARGET_NR_timer_delete
11609     case TARGET_NR_timer_delete:
11610     {
11611         /* args: timer_t timerid */
11612         target_timer_t timerid = get_timer_id(arg1);
11613 
11614         if (timerid < 0) {
11615             ret = timerid;
11616         } else {
11617             timer_t htimer = g_posix_timers[timerid];
11618             ret = get_errno(timer_delete(htimer));
11619             g_posix_timers[timerid] = 0;
11620         }
11621         return ret;
11622     }
11623 #endif
11624 
11625 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11626     case TARGET_NR_timerfd_create:
11627         return get_errno(timerfd_create(arg1,
11628                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11629 #endif
11630 
11631 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11632     case TARGET_NR_timerfd_gettime:
11633         {
11634             struct itimerspec its_curr;
11635 
11636             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11637 
11638             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11639                 return -TARGET_EFAULT;
11640             }
11641         }
11642         return ret;
11643 #endif
11644 
11645 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11646     case TARGET_NR_timerfd_settime:
11647         {
11648             struct itimerspec its_new, its_old, *p_new;
11649 
11650             if (arg3) {
11651                 if (target_to_host_itimerspec(&its_new, arg3)) {
11652                     return -TARGET_EFAULT;
11653                 }
11654                 p_new = &its_new;
11655             } else {
11656                 p_new = NULL;
11657             }
11658 
11659             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11660 
11661             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11662                 return -TARGET_EFAULT;
11663             }
11664         }
11665         return ret;
11666 #endif
11667 
11668 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11669     case TARGET_NR_ioprio_get:
11670         return get_errno(ioprio_get(arg1, arg2));
11671 #endif
11672 
11673 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11674     case TARGET_NR_ioprio_set:
11675         return get_errno(ioprio_set(arg1, arg2, arg3));
11676 #endif
11677 
11678 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11679     case TARGET_NR_setns:
11680         return get_errno(setns(arg1, arg2));
11681 #endif
11682 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11683     case TARGET_NR_unshare:
11684         return get_errno(unshare(arg1));
11685 #endif
11686 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11687     case TARGET_NR_kcmp:
11688         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11689 #endif
11690 #ifdef TARGET_NR_swapcontext
11691     case TARGET_NR_swapcontext:
11692         /* PowerPC specific.  */
11693         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11694 #endif
11695 
11696     default:
11697         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11698         return -TARGET_ENOSYS;
11699     }
11700     return ret;
11701 }
11702 
11703 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11704                     abi_long arg2, abi_long arg3, abi_long arg4,
11705                     abi_long arg5, abi_long arg6, abi_long arg7,
11706                     abi_long arg8)
11707 {
11708     CPUState *cpu = env_cpu(cpu_env);
11709     abi_long ret;
11710 
11711 #ifdef DEBUG_ERESTARTSYS
11712     /* Debug-only code for exercising the syscall-restart code paths
11713      * in the per-architecture cpu main loops: restart every syscall
11714      * the guest makes once before letting it through.
11715      */
11716     {
11717         static bool flag;
11718         flag = !flag;
11719         if (flag) {
11720             return -TARGET_ERESTARTSYS;
11721         }
11722     }
11723 #endif
11724 
11725     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11726                              arg5, arg6, arg7, arg8);
11727 
11728     if (unlikely(do_strace)) {
11729         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11730         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11731                           arg5, arg6, arg7, arg8);
11732         print_syscall_ret(num, ret);
11733     } else {
11734         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11735                           arg5, arg6, arg7, arg8);
11736     }
11737 
11738     trace_guest_user_syscall_ret(cpu, num, ret);
11739     return ret;
11740 }
11741