xref: /qemu/linux-user/syscall.c (revision 1394dc06)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #include "linux_loop.h"
116 #include "uname.h"
117 
118 #include "qemu.h"
119 #include "qemu/guest-random.h"
120 #include "user/syscall-trace.h"
121 #include "qapi/error.h"
122 #include "fd-trans.h"
123 #include "tcg/tcg.h"
124 
125 #ifndef CLONE_IO
126 #define CLONE_IO                0x80000000      /* Clone io context */
127 #endif
128 
129 /* We can't directly call the host clone syscall, because this will
130  * badly confuse libc (breaking mutexes, for example). So we must
131  * divide clone flags into:
132  *  * flag combinations that look like pthread_create()
133  *  * flag combinations that look like fork()
134  *  * flags we can implement within QEMU itself
135  *  * flags we can't support and will return an error for
136  */
137 /* For thread creation, all these flags must be present; for
138  * fork, none must be present.
139  */
140 #define CLONE_THREAD_FLAGS                              \
141     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
142      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
143 
144 /* These flags are ignored:
145  * CLONE_DETACHED is now ignored by the kernel;
146  * CLONE_IO is just an optimisation hint to the I/O scheduler
147  */
148 #define CLONE_IGNORED_FLAGS                     \
149     (CLONE_DETACHED | CLONE_IO)
150 
151 /* Flags for fork which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_FORK_FLAGS               \
153     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
154      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
155 
156 /* Flags for thread creation which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
158     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
159      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
160 
161 #define CLONE_INVALID_FORK_FLAGS                                        \
162     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
163 
164 #define CLONE_INVALID_THREAD_FLAGS                                      \
165     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
166        CLONE_IGNORED_FLAGS))
167 
168 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
169  * have almost all been allocated. We cannot support any of
170  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
171  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
172  * The checks against the invalid thread masks above will catch these.
173  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
174  */
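/*
 * Illustrative sketch of how the masks above are meant to be consumed.  The
 * real checks live in do_fork(), which is not part of this excerpt, and the
 * helper below is purely hypothetical:
 */
#if 0
static int clone_flags_are_supported(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): only optional/ignored bits may remain. */
        return !(flags & CLONE_INVALID_THREAD_FLAGS);
    }
    if (!(flags & CLONE_THREAD_FLAGS)) {
        /* Looks like fork(): likewise, no unsupported bits allowed. */
        return !(flags & CLONE_INVALID_FORK_FLAGS);
    }
    /* A partial set of the thread flags cannot be emulated. */
    return 0;
}
#endif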
175 
176 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
177  * once. This exercises the codepaths for restart.
178  */
179 //#define DEBUG_ERESTARTSYS
180 
181 //#include <linux/msdos_fs.h>
182 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
183 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
184 
185 #undef _syscall0
186 #undef _syscall1
187 #undef _syscall2
188 #undef _syscall3
189 #undef _syscall4
190 #undef _syscall5
191 #undef _syscall6
192 
193 #define _syscall0(type,name)		\
194 static type name (void)			\
195 {					\
196 	return syscall(__NR_##name);	\
197 }
198 
199 #define _syscall1(type,name,type1,arg1)		\
200 static type name (type1 arg1)			\
201 {						\
202 	return syscall(__NR_##name, arg1);	\
203 }
204 
205 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
206 static type name (type1 arg1,type2 arg2)		\
207 {							\
208 	return syscall(__NR_##name, arg1, arg2);	\
209 }
210 
211 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
212 static type name (type1 arg1,type2 arg2,type3 arg3)		\
213 {								\
214 	return syscall(__NR_##name, arg1, arg2, arg3);		\
215 }
216 
217 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
219 {										\
220 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
221 }
222 
223 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
224 		  type5,arg5)							\
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
228 }
229 
230 
231 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
232 		  type5,arg5,type6,arg6)					\
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
234                   type6 arg6)							\
235 {										\
236 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
237 }
238 
239 
240 #define __NR_sys_uname __NR_uname
241 #define __NR_sys_getcwd1 __NR_getcwd
242 #define __NR_sys_getdents __NR_getdents
243 #define __NR_sys_getdents64 __NR_getdents64
244 #define __NR_sys_getpriority __NR_getpriority
245 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
246 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
247 #define __NR_sys_syslog __NR_syslog
248 #if defined(__NR_futex)
249 # define __NR_sys_futex __NR_futex
250 #endif
251 #if defined(__NR_futex_time64)
252 # define __NR_sys_futex_time64 __NR_futex_time64
253 #endif
254 #define __NR_sys_inotify_init __NR_inotify_init
255 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
256 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
257 #define __NR_sys_statx __NR_statx
258 
259 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
260 #define __NR__llseek __NR_lseek
261 #endif
262 
263 /* Newer kernel ports have llseek() instead of _llseek() */
264 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
265 #define TARGET_NR__llseek TARGET_NR_llseek
266 #endif
267 
268 #define __NR_sys_gettid __NR_gettid
269 _syscall0(int, sys_gettid)
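/*
 * For reference, the _syscall0() invocation above expands (via the macros
 * defined earlier in this file) to:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * and __NR_sys_gettid was just #defined as an alias for __NR_gettid, so the
 * wrapper simply issues the host gettid syscall.
 */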
270 
271 /* For the 64-bit guest on 32-bit host case we must emulate
272  * getdents using getdents64, because otherwise the host
273  * might hand us back more dirent records than we can fit
274  * into the guest buffer after structure format conversion.
275  * Otherwise we emulate the guest getdents with the host getdents if available.
276  */
277 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
278 #define EMULATE_GETDENTS_WITH_GETDENTS
279 #endif
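/*
 * Worked example of the problem described above: on a 32-bit host,
 * struct linux_dirent starts with 32-bit d_ino and d_off fields, while a
 * 64-bit guest expects 64-bit fields there, so every record grows by about
 * 8 bytes (plus alignment) during conversion.  A host getdents() sized to
 * the guest buffer could therefore return more records than fit once they
 * are converted; getdents64 uses 64-bit fields on both sides and avoids
 * that overflow.
 */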
280 
281 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
282 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
283 #endif
284 #if (defined(TARGET_NR_getdents) && \
285       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
286     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
287 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
288 #endif
289 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
290 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
291           loff_t *, res, uint, wh);
292 #endif
293 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
294 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
295           siginfo_t *, uinfo)
296 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
297 #ifdef __NR_exit_group
298 _syscall1(int,exit_group,int,error_code)
299 #endif
300 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
301 _syscall1(int,set_tid_address,int *,tidptr)
302 #endif
303 #if defined(__NR_futex)
304 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
305           const struct timespec *,timeout,int *,uaddr2,int,val3)
306 #endif
307 #if defined(__NR_futex_time64)
308 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
309           const struct timespec *,timeout,int *,uaddr2,int,val3)
310 #endif
311 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
312 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
313           unsigned long *, user_mask_ptr);
314 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
315 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
316           unsigned long *, user_mask_ptr);
317 #define __NR_sys_getcpu __NR_getcpu
318 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
319 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
320           void *, arg);
321 _syscall2(int, capget, struct __user_cap_header_struct *, header,
322           struct __user_cap_data_struct *, data);
323 _syscall2(int, capset, struct __user_cap_header_struct *, header,
324           struct __user_cap_data_struct *, data);
325 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
326 _syscall2(int, ioprio_get, int, which, int, who)
327 #endif
328 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
329 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
330 #endif
331 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
332 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
333 #endif
334 
335 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
336 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
337           unsigned long, idx1, unsigned long, idx2)
338 #endif
339 
340 /*
341  * It is assumed that struct statx is architecture independent.
342  */
343 #if defined(TARGET_NR_statx) && defined(__NR_statx)
344 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
345           unsigned int, mask, struct target_statx *, statxbuf)
346 #endif
347 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
348 _syscall2(int, membarrier, int, cmd, int, flags)
349 #endif
350 
351 static bitmask_transtbl fcntl_flags_tbl[] = {
352   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
353   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
354   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
355   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
356   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
357   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
358   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
359   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
360   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
361   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
362   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
363   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
364   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
365 #if defined(O_DIRECT)
366   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
367 #endif
368 #if defined(O_NOATIME)
369   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
370 #endif
371 #if defined(O_CLOEXEC)
372   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
373 #endif
374 #if defined(O_PATH)
375   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
376 #endif
377 #if defined(O_TMPFILE)
378   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
379 #endif
380   /* Don't terminate the list prematurely on 64-bit host+guest.  */
381 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
382   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
383 #endif
384   { 0, 0, 0, 0 }
385 };
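/*
 * Minimal sketch of how this table is consumed.  target_to_host_bitmask()
 * and its inverse are provided elsewhere in the linux-user code (not in
 * this excerpt); they walk the table, mask out each entry's bits and
 * substitute the corresponding bits for the other side:
 */
#if 0
    int target_flags = TARGET_O_WRONLY | TARGET_O_CREAT | TARGET_O_TRUNC;
    int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
    /* host_flags now holds O_WRONLY | O_CREAT | O_TRUNC in host encoding. */
#endif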
386 
387 static int sys_getcwd1(char *buf, size_t size)
388 {
389   if (getcwd(buf, size) == NULL) {
390       /* getcwd() sets errno */
391       return (-1);
392   }
393   return strlen(buf)+1;
394 }
395 
396 #ifdef TARGET_NR_utimensat
397 #if defined(__NR_utimensat)
398 #define __NR_sys_utimensat __NR_utimensat
399 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
400           const struct timespec *,tsp,int,flags)
401 #else
402 static int sys_utimensat(int dirfd, const char *pathname,
403                          const struct timespec times[2], int flags)
404 {
405     errno = ENOSYS;
406     return -1;
407 }
408 #endif
409 #endif /* TARGET_NR_utimensat */
410 
411 #ifdef TARGET_NR_renameat2
412 #if defined(__NR_renameat2)
413 #define __NR_sys_renameat2 __NR_renameat2
414 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
415           const char *, new, unsigned int, flags)
416 #else
417 static int sys_renameat2(int oldfd, const char *old,
418                          int newfd, const char *new, int flags)
419 {
420     if (flags == 0) {
421         return renameat(oldfd, old, newfd, new);
422     }
423     errno = ENOSYS;
424     return -1;
425 }
426 #endif
427 #endif /* TARGET_NR_renameat2 */
428 
429 #ifdef CONFIG_INOTIFY
430 #include <sys/inotify.h>
431 
432 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
433 static int sys_inotify_init(void)
434 {
435   return (inotify_init());
436 }
437 #endif
438 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
439 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
440 {
441   return (inotify_add_watch(fd, pathname, mask));
442 }
443 #endif
444 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
445 static int sys_inotify_rm_watch(int fd, int32_t wd)
446 {
447   return (inotify_rm_watch(fd, wd));
448 }
449 #endif
450 #ifdef CONFIG_INOTIFY1
451 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
452 static int sys_inotify_init1(int flags)
453 {
454   return (inotify_init1(flags));
455 }
456 #endif
457 #endif
458 #else
459 /* Userspace can usually survive runtime without inotify */
460 #undef TARGET_NR_inotify_init
461 #undef TARGET_NR_inotify_init1
462 #undef TARGET_NR_inotify_add_watch
463 #undef TARGET_NR_inotify_rm_watch
464 #endif /* CONFIG_INOTIFY  */
465 
466 #if defined(TARGET_NR_prlimit64)
467 #ifndef __NR_prlimit64
468 # define __NR_prlimit64 -1
469 #endif
470 #define __NR_sys_prlimit64 __NR_prlimit64
471 /* The glibc rlimit structure may not match the one used by the underlying syscall */
472 struct host_rlimit64 {
473     uint64_t rlim_cur;
474     uint64_t rlim_max;
475 };
476 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
477           const struct host_rlimit64 *, new_limit,
478           struct host_rlimit64 *, old_limit)
479 #endif
480 
481 
482 #if defined(TARGET_NR_timer_create)
483 /* Maximum of 32 active POSIX timers allowed at any one time. */
484 static timer_t g_posix_timers[32] = { 0, };
485 
486 static inline int next_free_host_timer(void)
487 {
488     int k;
489     /* FIXME: Does finding the next free slot require a lock? */
490     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
491         if (g_posix_timers[k] == 0) {
492             g_posix_timers[k] = (timer_t) 1;
493             return k;
494         }
495     }
496     return -1;
497 }
498 #endif
499 
500 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
501 #ifdef TARGET_ARM
502 static inline int regpairs_aligned(void *cpu_env, int num)
503 {
504     return ((((CPUARMState *)cpu_env)->eabi) == 1);
505 }
506 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
507 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
508 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
509 /* SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even pairs
510  * of registers which translates to the same as ARM/MIPS, because we start with
511  * r3 as arg1 */
512 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
513 #elif defined(TARGET_SH4)
514 /* SH4 doesn't align register pairs, except for p{read,write}64 */
515 static inline int regpairs_aligned(void *cpu_env, int num)
516 {
517     switch (num) {
518     case TARGET_NR_pread64:
519     case TARGET_NR_pwrite64:
520         return 1;
521 
522     default:
523         return 0;
524     }
525 }
526 #elif defined(TARGET_XTENSA)
527 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
528 #else
529 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
530 #endif
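/*
 * Illustrative sketch of how regpairs_aligned() is used when reassembling a
 * 64-bit offset, e.g. for pread64.  The real handling sits further down in
 * do_syscall() (not part of this excerpt); argN are the raw syscall
 * argument registers and target_offset64() combines the two halves:
 */
#if 0
    if (regpairs_aligned(cpu_env, TARGET_NR_pread64)) {
        /* The ABI inserted a padding register, so the pair starts one slot later. */
        arg4 = arg5;
        arg5 = arg6;
    }
    offset = target_offset64(arg4, arg5);
#endif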
531 
532 #define ERRNO_TABLE_SIZE 1200
533 
534 /* target_to_host_errno_table[] is initialized from
535  * host_to_target_errno_table[] in syscall_init(). */
536 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
537 };
538 
539 /*
540  * This list is the union of errno values overridden in asm-<arch>/errno.h
541  * minus the errnos that are not actually generic to all archs.
542  */
543 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
544     [EAGAIN]		= TARGET_EAGAIN,
545     [EIDRM]		= TARGET_EIDRM,
546     [ECHRNG]		= TARGET_ECHRNG,
547     [EL2NSYNC]		= TARGET_EL2NSYNC,
548     [EL3HLT]		= TARGET_EL3HLT,
549     [EL3RST]		= TARGET_EL3RST,
550     [ELNRNG]		= TARGET_ELNRNG,
551     [EUNATCH]		= TARGET_EUNATCH,
552     [ENOCSI]		= TARGET_ENOCSI,
553     [EL2HLT]		= TARGET_EL2HLT,
554     [EDEADLK]		= TARGET_EDEADLK,
555     [ENOLCK]		= TARGET_ENOLCK,
556     [EBADE]		= TARGET_EBADE,
557     [EBADR]		= TARGET_EBADR,
558     [EXFULL]		= TARGET_EXFULL,
559     [ENOANO]		= TARGET_ENOANO,
560     [EBADRQC]		= TARGET_EBADRQC,
561     [EBADSLT]		= TARGET_EBADSLT,
562     [EBFONT]		= TARGET_EBFONT,
563     [ENOSTR]		= TARGET_ENOSTR,
564     [ENODATA]		= TARGET_ENODATA,
565     [ETIME]		= TARGET_ETIME,
566     [ENOSR]		= TARGET_ENOSR,
567     [ENONET]		= TARGET_ENONET,
568     [ENOPKG]		= TARGET_ENOPKG,
569     [EREMOTE]		= TARGET_EREMOTE,
570     [ENOLINK]		= TARGET_ENOLINK,
571     [EADV]		= TARGET_EADV,
572     [ESRMNT]		= TARGET_ESRMNT,
573     [ECOMM]		= TARGET_ECOMM,
574     [EPROTO]		= TARGET_EPROTO,
575     [EDOTDOT]		= TARGET_EDOTDOT,
576     [EMULTIHOP]		= TARGET_EMULTIHOP,
577     [EBADMSG]		= TARGET_EBADMSG,
578     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
579     [EOVERFLOW]		= TARGET_EOVERFLOW,
580     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
581     [EBADFD]		= TARGET_EBADFD,
582     [EREMCHG]		= TARGET_EREMCHG,
583     [ELIBACC]		= TARGET_ELIBACC,
584     [ELIBBAD]		= TARGET_ELIBBAD,
585     [ELIBSCN]		= TARGET_ELIBSCN,
586     [ELIBMAX]		= TARGET_ELIBMAX,
587     [ELIBEXEC]		= TARGET_ELIBEXEC,
588     [EILSEQ]		= TARGET_EILSEQ,
589     [ENOSYS]		= TARGET_ENOSYS,
590     [ELOOP]		= TARGET_ELOOP,
591     [ERESTART]		= TARGET_ERESTART,
592     [ESTRPIPE]		= TARGET_ESTRPIPE,
593     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
594     [EUSERS]		= TARGET_EUSERS,
595     [ENOTSOCK]		= TARGET_ENOTSOCK,
596     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
597     [EMSGSIZE]		= TARGET_EMSGSIZE,
598     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
599     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
600     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
601     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
602     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
603     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
604     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
605     [EADDRINUSE]	= TARGET_EADDRINUSE,
606     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
607     [ENETDOWN]		= TARGET_ENETDOWN,
608     [ENETUNREACH]	= TARGET_ENETUNREACH,
609     [ENETRESET]		= TARGET_ENETRESET,
610     [ECONNABORTED]	= TARGET_ECONNABORTED,
611     [ECONNRESET]	= TARGET_ECONNRESET,
612     [ENOBUFS]		= TARGET_ENOBUFS,
613     [EISCONN]		= TARGET_EISCONN,
614     [ENOTCONN]		= TARGET_ENOTCONN,
615     [EUCLEAN]		= TARGET_EUCLEAN,
616     [ENOTNAM]		= TARGET_ENOTNAM,
617     [ENAVAIL]		= TARGET_ENAVAIL,
618     [EISNAM]		= TARGET_EISNAM,
619     [EREMOTEIO]		= TARGET_EREMOTEIO,
620     [EDQUOT]            = TARGET_EDQUOT,
621     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
622     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
623     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
624     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
625     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
626     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
627     [EALREADY]		= TARGET_EALREADY,
628     [EINPROGRESS]	= TARGET_EINPROGRESS,
629     [ESTALE]		= TARGET_ESTALE,
630     [ECANCELED]		= TARGET_ECANCELED,
631     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
632     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
633 #ifdef ENOKEY
634     [ENOKEY]		= TARGET_ENOKEY,
635 #endif
636 #ifdef EKEYEXPIRED
637     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
638 #endif
639 #ifdef EKEYREVOKED
640     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
641 #endif
642 #ifdef EKEYREJECTED
643     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
644 #endif
645 #ifdef EOWNERDEAD
646     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
647 #endif
648 #ifdef ENOTRECOVERABLE
649     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
650 #endif
651 #ifdef ENOMSG
652     [ENOMSG]            = TARGET_ENOMSG,
653 #endif
654 #ifdef ERFKILL
655     [ERFKILL]           = TARGET_ERFKILL,
656 #endif
657 #ifdef EHWPOISON
658     [EHWPOISON]         = TARGET_EHWPOISON,
659 #endif
660 };
661 
662 static inline int host_to_target_errno(int err)
663 {
664     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
665         host_to_target_errno_table[err]) {
666         return host_to_target_errno_table[err];
667     }
668     return err;
669 }
670 
671 static inline int target_to_host_errno(int err)
672 {
673     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
674         target_to_host_errno_table[err]) {
675         return target_to_host_errno_table[err];
676     }
677     return err;
678 }
679 
680 static inline abi_long get_errno(abi_long ret)
681 {
682     if (ret == -1)
683         return -host_to_target_errno(errno);
684     else
685         return ret;
686 }
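/*
 * Typical usage pattern for the helpers above.  is_error() comes from the
 * linux-user headers and simply tests for a value in the -errno range; the
 * variable names below are placeholders:
 */
#if 0
    abi_long ret = get_errno(open(host_path, host_flags, mode));
    if (is_error(ret)) {
        /* ret already holds a negated *target* errno, e.g. -TARGET_ENOENT. */
        return ret;
    }
#endif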
687 
688 const char *target_strerror(int err)
689 {
690     if (err == TARGET_ERESTARTSYS) {
691         return "To be restarted";
692     }
693     if (err == TARGET_QEMU_ESIGRETURN) {
694         return "Successful exit from sigreturn";
695     }
696 
697     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
698         return NULL;
699     }
700     return strerror(target_to_host_errno(err));
701 }
702 
703 #define safe_syscall0(type, name) \
704 static type safe_##name(void) \
705 { \
706     return safe_syscall(__NR_##name); \
707 }
708 
709 #define safe_syscall1(type, name, type1, arg1) \
710 static type safe_##name(type1 arg1) \
711 { \
712     return safe_syscall(__NR_##name, arg1); \
713 }
714 
715 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
716 static type safe_##name(type1 arg1, type2 arg2) \
717 { \
718     return safe_syscall(__NR_##name, arg1, arg2); \
719 }
720 
721 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
723 { \
724     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
725 }
726 
727 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
728     type4, arg4) \
729 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
730 { \
731     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
732 }
733 
734 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
735     type4, arg4, type5, arg5) \
736 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
737     type5 arg5) \
738 { \
739     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
740 }
741 
742 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
743     type4, arg4, type5, arg5, type6, arg6) \
744 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
745     type5 arg5, type6 arg6) \
746 { \
747     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
748 }
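/*
 * safe_syscall() itself is the per-host assembly helper provided elsewhere
 * in linux-user/ (not in this file); it issues the raw host syscall in a
 * way that allows a guest signal to interrupt it cleanly, so an interrupted
 * guest syscall can be restarted.  As an example of the wrappers generated
 * here, the safe_syscall3() line for read just below expands to:
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 */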
749 
750 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
751 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
752 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
753               int, flags, mode_t, mode)
754 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
755 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
756               struct rusage *, rusage)
757 #endif
758 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
759               int, options, struct rusage *, rusage)
760 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
761 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
762     defined(TARGET_NR_pselect6)
763 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
764               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
765 #endif
766 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
767 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
768               struct timespec *, tsp, const sigset_t *, sigmask,
769               size_t, sigsetsize)
770 #endif
771 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
772               int, maxevents, int, timeout, const sigset_t *, sigmask,
773               size_t, sigsetsize)
774 #if defined(__NR_futex)
775 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
776               const struct timespec *,timeout,int *,uaddr2,int,val3)
777 #endif
778 #if defined(__NR_futex_time64)
779 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
780               const struct timespec *,timeout,int *,uaddr2,int,val3)
781 #endif
782 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
783 safe_syscall2(int, kill, pid_t, pid, int, sig)
784 safe_syscall2(int, tkill, int, tid, int, sig)
785 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
786 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
787 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
788 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
789               unsigned long, pos_l, unsigned long, pos_h)
790 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
791               unsigned long, pos_l, unsigned long, pos_h)
792 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
793               socklen_t, addrlen)
794 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
795               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
796 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
797               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
798 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
799 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
800 safe_syscall2(int, flock, int, fd, int, operation)
801 #ifdef TARGET_NR_rt_sigtimedwait
802 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
803               const struct timespec *, uts, size_t, sigsetsize)
804 #endif
805 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
806               int, flags)
807 #if defined(TARGET_NR_nanosleep)
808 safe_syscall2(int, nanosleep, const struct timespec *, req,
809               struct timespec *, rem)
810 #endif
811 #ifdef TARGET_NR_clock_nanosleep
812 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
813               const struct timespec *, req, struct timespec *, rem)
814 #endif
815 #ifdef __NR_ipc
816 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
817               void *, ptr, long, fifth)
818 #endif
819 #ifdef __NR_msgsnd
820 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
821               int, flags)
822 #endif
823 #ifdef __NR_msgrcv
824 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
825               long, msgtype, int, flags)
826 #endif
827 #ifdef __NR_semtimedop
828 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
829               unsigned, nsops, const struct timespec *, timeout)
830 #endif
831 #ifdef TARGET_NR_mq_timedsend
832 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
833               size_t, len, unsigned, prio, const struct timespec *, timeout)
834 #endif
835 #ifdef TARGET_NR_mq_timedreceive
836 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
837               size_t, len, unsigned *, prio, const struct timespec *, timeout)
838 #endif
839 /* We do ioctl like this rather than via safe_syscall3 to preserve the
840  * "third argument might be integer or pointer or not present" behaviour of
841  * the libc function.
842  */
843 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
844 /* Similarly for fcntl. Note that callers must always:
845  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
846  *  use the flock64 struct rather than unsuffixed flock
847  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
848  */
849 #ifdef __NR_fcntl64
850 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
851 #else
852 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
853 #endif
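/*
 * For illustration, a caller honouring the rules above would lock a byte
 * range like this (struct flock64 and the F_*64 commands regardless of the
 * host word size; fd and ret are placeholders):
 */
#if 0
    struct flock64 fl = {
        .l_type   = F_WRLCK,
        .l_whence = SEEK_SET,
        .l_start  = 0,
        .l_len    = 0,          /* 0 length means "to end of file" */
    };
    ret = get_errno(safe_fcntl(fd, F_SETLKW64, &fl));
#endif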
854 
855 static inline int host_to_target_sock_type(int host_type)
856 {
857     int target_type;
858 
859     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
860     case SOCK_DGRAM:
861         target_type = TARGET_SOCK_DGRAM;
862         break;
863     case SOCK_STREAM:
864         target_type = TARGET_SOCK_STREAM;
865         break;
866     default:
867         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
868         break;
869     }
870 
871 #if defined(SOCK_CLOEXEC)
872     if (host_type & SOCK_CLOEXEC) {
873         target_type |= TARGET_SOCK_CLOEXEC;
874     }
875 #endif
876 
877 #if defined(SOCK_NONBLOCK)
878     if (host_type & SOCK_NONBLOCK) {
879         target_type |= TARGET_SOCK_NONBLOCK;
880     }
881 #endif
882 
883     return target_type;
884 }
885 
886 static abi_ulong target_brk;
887 static abi_ulong target_original_brk;
888 static abi_ulong brk_page;
889 
890 void target_set_brk(abi_ulong new_brk)
891 {
892     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
893     brk_page = HOST_PAGE_ALIGN(target_brk);
894 }
895 
896 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
897 #define DEBUGF_BRK(message, args...)
898 
899 /* do_brk() must return target values and target errnos. */
900 abi_long do_brk(abi_ulong new_brk)
901 {
902     abi_long mapped_addr;
903     abi_ulong new_alloc_size;
904 
905     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
906 
907     if (!new_brk) {
908         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
909         return target_brk;
910     }
911     if (new_brk < target_original_brk) {
912         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
913                    target_brk);
914         return target_brk;
915     }
916 
917     /* If the new brk is less than the highest page reserved to the
918      * target heap allocation, set it and we're almost done...  */
919     if (new_brk <= brk_page) {
920         /* Heap contents are initialized to zero, as for anonymous
921          * mapped pages.  */
922         if (new_brk > target_brk) {
923             memset(g2h(target_brk), 0, new_brk - target_brk);
924         }
925         target_brk = new_brk;
926         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
927         return target_brk;
928     }
929 
930     /* We need to allocate more memory after the brk... Note that
931      * we don't use MAP_FIXED because that will map over the top of
932      * any existing mapping (like the one with the host libc or qemu
933      * itself); instead we treat "mapped but at wrong address" as
934      * a failure and unmap again.
935      */
936     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
937     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
938                                         PROT_READ|PROT_WRITE,
939                                         MAP_ANON|MAP_PRIVATE, 0, 0));
940 
941     if (mapped_addr == brk_page) {
942         /* Heap contents are initialized to zero, as for anonymous
943          * mapped pages.  Technically the new pages are already
944          * initialized to zero since they *are* anonymous mapped
945          * pages, however we have to take care with the contents that
946          * come from the remaining part of the previous page: it may
947          * contain garbage data due to a previous heap usage (grown
948          * then shrunk).  */
949         memset(g2h(target_brk), 0, brk_page - target_brk);
950 
951         target_brk = new_brk;
952         brk_page = HOST_PAGE_ALIGN(target_brk);
953         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
954             target_brk);
955         return target_brk;
956     } else if (mapped_addr != -1) {
957         /* Mapped but at wrong address, meaning there wasn't actually
958          * enough space for this brk.
959          */
960         target_munmap(mapped_addr, new_alloc_size);
961         mapped_addr = -1;
962         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
963     }
964     else {
965         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
966     }
967 
968 #if defined(TARGET_ALPHA)
969     /* We (partially) emulate OSF/1 on Alpha, which requires we
970        return a proper errno, not an unchanged brk value.  */
971     return -TARGET_ENOMEM;
972 #endif
973     /* For everything else, return the previous break. */
974     return target_brk;
975 }
976 
977 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
978     defined(TARGET_NR_pselect6)
979 static inline abi_long copy_from_user_fdset(fd_set *fds,
980                                             abi_ulong target_fds_addr,
981                                             int n)
982 {
983     int i, nw, j, k;
984     abi_ulong b, *target_fds;
985 
986     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
987     if (!(target_fds = lock_user(VERIFY_READ,
988                                  target_fds_addr,
989                                  sizeof(abi_ulong) * nw,
990                                  1)))
991         return -TARGET_EFAULT;
992 
993     FD_ZERO(fds);
994     k = 0;
995     for (i = 0; i < nw; i++) {
996         /* grab the abi_ulong */
997         __get_user(b, &target_fds[i]);
998         for (j = 0; j < TARGET_ABI_BITS; j++) {
999             /* check the bit inside the abi_ulong */
1000             if ((b >> j) & 1)
1001                 FD_SET(k, fds);
1002             k++;
1003         }
1004     }
1005 
1006     unlock_user(target_fds, target_fds_addr, 0);
1007 
1008     return 0;
1009 }
1010 
1011 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1012                                                  abi_ulong target_fds_addr,
1013                                                  int n)
1014 {
1015     if (target_fds_addr) {
1016         if (copy_from_user_fdset(fds, target_fds_addr, n))
1017             return -TARGET_EFAULT;
1018         *fds_ptr = fds;
1019     } else {
1020         *fds_ptr = NULL;
1021     }
1022     return 0;
1023 }
1024 
1025 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1026                                           const fd_set *fds,
1027                                           int n)
1028 {
1029     int i, nw, j, k;
1030     abi_long v;
1031     abi_ulong *target_fds;
1032 
1033     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1034     if (!(target_fds = lock_user(VERIFY_WRITE,
1035                                  target_fds_addr,
1036                                  sizeof(abi_ulong) * nw,
1037                                  0)))
1038         return -TARGET_EFAULT;
1039 
1040     k = 0;
1041     for (i = 0; i < nw; i++) {
1042         v = 0;
1043         for (j = 0; j < TARGET_ABI_BITS; j++) {
1044             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1045             k++;
1046         }
1047         __put_user(v, &target_fds[i]);
1048     }
1049 
1050     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1051 
1052     return 0;
1053 }
1054 #endif
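/*
 * Worked example for the fd_set conversion above: with TARGET_ABI_BITS == 32
 * and n == 70 descriptors, nw = DIV_ROUND_UP(70, 32) = 3 abi_ulongs are
 * locked, i.e. 96 guest bits are scanned even though only the first 70 can
 * be meaningful.  Guest bit k lives in word k / 32 at bit position k % 32,
 * and each word is byte-swapped as a whole by __get_user()/__put_user().
 */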
1055 
1056 #if defined(__alpha__)
1057 #define HOST_HZ 1024
1058 #else
1059 #define HOST_HZ 100
1060 #endif
1061 
1062 static inline abi_long host_to_target_clock_t(long ticks)
1063 {
1064 #if HOST_HZ == TARGET_HZ
1065     return ticks;
1066 #else
1067     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1068 #endif
1069 }
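/*
 * Example: with TARGET_HZ == 1024 (as on Alpha) and HOST_HZ == 100, a value
 * of 250 host ticks converts to 250 * 1024 / 100 = 2560 target ticks; the
 * cast to int64_t keeps the intermediate product from overflowing on
 * 32-bit hosts.
 */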
1070 
1071 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1072                                              const struct rusage *rusage)
1073 {
1074     struct target_rusage *target_rusage;
1075 
1076     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1077         return -TARGET_EFAULT;
1078     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1079     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1080     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1081     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1082     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1083     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1084     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1085     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1086     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1087     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1088     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1089     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1090     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1091     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1092     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1093     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1094     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1095     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1096     unlock_user_struct(target_rusage, target_addr, 1);
1097 
1098     return 0;
1099 }
1100 
1101 #ifdef TARGET_NR_setrlimit
1102 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1103 {
1104     abi_ulong target_rlim_swap;
1105     rlim_t result;
1106 
1107     target_rlim_swap = tswapal(target_rlim);
1108     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1109         return RLIM_INFINITY;
1110 
1111     result = target_rlim_swap;
1112     if (target_rlim_swap != (rlim_t)result)
1113         return RLIM_INFINITY;
1114 
1115     return result;
1116 }
1117 #endif
1118 
1119 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1120 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1121 {
1122     abi_ulong target_rlim_swap;
1123     abi_ulong result;
1124 
1125     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1126         target_rlim_swap = TARGET_RLIM_INFINITY;
1127     else
1128         target_rlim_swap = rlim;
1129     result = tswapal(target_rlim_swap);
1130 
1131     return result;
1132 }
1133 #endif
1134 
1135 static inline int target_to_host_resource(int code)
1136 {
1137     switch (code) {
1138     case TARGET_RLIMIT_AS:
1139         return RLIMIT_AS;
1140     case TARGET_RLIMIT_CORE:
1141         return RLIMIT_CORE;
1142     case TARGET_RLIMIT_CPU:
1143         return RLIMIT_CPU;
1144     case TARGET_RLIMIT_DATA:
1145         return RLIMIT_DATA;
1146     case TARGET_RLIMIT_FSIZE:
1147         return RLIMIT_FSIZE;
1148     case TARGET_RLIMIT_LOCKS:
1149         return RLIMIT_LOCKS;
1150     case TARGET_RLIMIT_MEMLOCK:
1151         return RLIMIT_MEMLOCK;
1152     case TARGET_RLIMIT_MSGQUEUE:
1153         return RLIMIT_MSGQUEUE;
1154     case TARGET_RLIMIT_NICE:
1155         return RLIMIT_NICE;
1156     case TARGET_RLIMIT_NOFILE:
1157         return RLIMIT_NOFILE;
1158     case TARGET_RLIMIT_NPROC:
1159         return RLIMIT_NPROC;
1160     case TARGET_RLIMIT_RSS:
1161         return RLIMIT_RSS;
1162     case TARGET_RLIMIT_RTPRIO:
1163         return RLIMIT_RTPRIO;
1164     case TARGET_RLIMIT_SIGPENDING:
1165         return RLIMIT_SIGPENDING;
1166     case TARGET_RLIMIT_STACK:
1167         return RLIMIT_STACK;
1168     default:
1169         return code;
1170     }
1171 }
1172 
1173 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1174                                               abi_ulong target_tv_addr)
1175 {
1176     struct target_timeval *target_tv;
1177 
1178     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1179         return -TARGET_EFAULT;
1180     }
1181 
1182     __get_user(tv->tv_sec, &target_tv->tv_sec);
1183     __get_user(tv->tv_usec, &target_tv->tv_usec);
1184 
1185     unlock_user_struct(target_tv, target_tv_addr, 0);
1186 
1187     return 0;
1188 }
1189 
1190 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1191                                             const struct timeval *tv)
1192 {
1193     struct target_timeval *target_tv;
1194 
1195     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1196         return -TARGET_EFAULT;
1197     }
1198 
1199     __put_user(tv->tv_sec, &target_tv->tv_sec);
1200     __put_user(tv->tv_usec, &target_tv->tv_usec);
1201 
1202     unlock_user_struct(target_tv, target_tv_addr, 1);
1203 
1204     return 0;
1205 }
1206 
1207 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1208                                              const struct timeval *tv)
1209 {
1210     struct target__kernel_sock_timeval *target_tv;
1211 
1212     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1213         return -TARGET_EFAULT;
1214     }
1215 
1216     __put_user(tv->tv_sec, &target_tv->tv_sec);
1217     __put_user(tv->tv_usec, &target_tv->tv_usec);
1218 
1219     unlock_user_struct(target_tv, target_tv_addr, 1);
1220 
1221     return 0;
1222 }
1223 
1224 #if defined(TARGET_NR_futex) || \
1225     defined(TARGET_NR_rt_sigtimedwait) || \
1226     defined(TARGET_NR_pselect6) || \
1227     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1228     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1229     defined(TARGET_NR_mq_timedreceive)
1230 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1231                                                abi_ulong target_addr)
1232 {
1233     struct target_timespec *target_ts;
1234 
1235     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1236         return -TARGET_EFAULT;
1237     }
1238     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1239     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1240     unlock_user_struct(target_ts, target_addr, 0);
1241     return 0;
1242 }
1243 #endif
1244 
1245 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
1246 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1247                                                  abi_ulong target_addr)
1248 {
1249     struct target__kernel_timespec *target_ts;
1250 
1251     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1252         return -TARGET_EFAULT;
1253     }
1254     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1255     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1256     unlock_user_struct(target_ts, target_addr, 0);
1257     return 0;
1258 }
1259 #endif
1260 
1261 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1262                                                struct timespec *host_ts)
1263 {
1264     struct target_timespec *target_ts;
1265 
1266     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1267         return -TARGET_EFAULT;
1268     }
1269     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1270     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1271     unlock_user_struct(target_ts, target_addr, 1);
1272     return 0;
1273 }
1274 
1275 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1276                                                  struct timespec *host_ts)
1277 {
1278     struct target__kernel_timespec *target_ts;
1279 
1280     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1281         return -TARGET_EFAULT;
1282     }
1283     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1284     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1285     unlock_user_struct(target_ts, target_addr, 1);
1286     return 0;
1287 }
1288 
1289 #if defined(TARGET_NR_gettimeofday)
1290 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1291                                              struct timezone *tz)
1292 {
1293     struct target_timezone *target_tz;
1294 
1295     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1296         return -TARGET_EFAULT;
1297     }
1298 
1299     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1300     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1301 
1302     unlock_user_struct(target_tz, target_tz_addr, 1);
1303 
1304     return 0;
1305 }
1306 #endif
1307 
1308 #if defined(TARGET_NR_settimeofday)
1309 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1310                                                abi_ulong target_tz_addr)
1311 {
1312     struct target_timezone *target_tz;
1313 
1314     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1315         return -TARGET_EFAULT;
1316     }
1317 
1318     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1319     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1320 
1321     unlock_user_struct(target_tz, target_tz_addr, 0);
1322 
1323     return 0;
1324 }
1325 #endif
1326 
1327 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1328 #include <mqueue.h>
1329 
1330 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1331                                               abi_ulong target_mq_attr_addr)
1332 {
1333     struct target_mq_attr *target_mq_attr;
1334 
1335     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1336                           target_mq_attr_addr, 1))
1337         return -TARGET_EFAULT;
1338 
1339     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1340     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1341     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1342     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1343 
1344     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1345 
1346     return 0;
1347 }
1348 
1349 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1350                                             const struct mq_attr *attr)
1351 {
1352     struct target_mq_attr *target_mq_attr;
1353 
1354     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1355                           target_mq_attr_addr, 0))
1356         return -TARGET_EFAULT;
1357 
1358     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1359     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1360     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1361     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1362 
1363     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1364 
1365     return 0;
1366 }
1367 #endif
1368 
1369 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1370 /* do_select() must return target values and target errnos. */
1371 static abi_long do_select(int n,
1372                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1373                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1374 {
1375     fd_set rfds, wfds, efds;
1376     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1377     struct timeval tv;
1378     struct timespec ts, *ts_ptr;
1379     abi_long ret;
1380 
1381     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1382     if (ret) {
1383         return ret;
1384     }
1385     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1386     if (ret) {
1387         return ret;
1388     }
1389     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1390     if (ret) {
1391         return ret;
1392     }
1393 
1394     if (target_tv_addr) {
1395         if (copy_from_user_timeval(&tv, target_tv_addr))
1396             return -TARGET_EFAULT;
1397         ts.tv_sec = tv.tv_sec;
1398         ts.tv_nsec = tv.tv_usec * 1000;
1399         ts_ptr = &ts;
1400     } else {
1401         ts_ptr = NULL;
1402     }
1403 
1404     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1405                                   ts_ptr, NULL));
1406 
1407     if (!is_error(ret)) {
1408         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1409             return -TARGET_EFAULT;
1410         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1411             return -TARGET_EFAULT;
1412         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1413             return -TARGET_EFAULT;
1414 
1415         if (target_tv_addr) {
1416             tv.tv_sec = ts.tv_sec;
1417             tv.tv_usec = ts.tv_nsec / 1000;
1418             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1419                 return -TARGET_EFAULT;
1420             }
1421         }
1422     }
1423 
1424     return ret;
1425 }
1426 
1427 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1428 static abi_long do_old_select(abi_ulong arg1)
1429 {
1430     struct target_sel_arg_struct *sel;
1431     abi_ulong inp, outp, exp, tvp;
1432     long nsel;
1433 
1434     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1435         return -TARGET_EFAULT;
1436     }
1437 
1438     nsel = tswapal(sel->n);
1439     inp = tswapal(sel->inp);
1440     outp = tswapal(sel->outp);
1441     exp = tswapal(sel->exp);
1442     tvp = tswapal(sel->tvp);
1443 
1444     unlock_user_struct(sel, arg1, 0);
1445 
1446     return do_select(nsel, inp, outp, exp, tvp);
1447 }
1448 #endif
1449 #endif
1450 
1451 static abi_long do_pipe2(int host_pipe[], int flags)
1452 {
1453 #ifdef CONFIG_PIPE2
1454     return pipe2(host_pipe, flags);
1455 #else
1456     return -ENOSYS;
1457 #endif
1458 }
1459 
1460 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1461                         int flags, int is_pipe2)
1462 {
1463     int host_pipe[2];
1464     abi_long ret;
1465     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1466 
1467     if (is_error(ret))
1468         return get_errno(ret);
1469 
1470     /* Several targets have special calling conventions for the original
1471        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1472     if (!is_pipe2) {
1473 #if defined(TARGET_ALPHA)
1474         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1475         return host_pipe[0];
1476 #elif defined(TARGET_MIPS)
1477         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1478         return host_pipe[0];
1479 #elif defined(TARGET_SH4)
1480         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1481         return host_pipe[0];
1482 #elif defined(TARGET_SPARC)
1483         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1484         return host_pipe[0];
1485 #endif
1486     }
1487 
1488     if (put_user_s32(host_pipe[0], pipedes)
1489         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1490         return -TARGET_EFAULT;
1491     return get_errno(ret);
1492 }
1493 
1494 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1495                                               abi_ulong target_addr,
1496                                               socklen_t len)
1497 {
1498     struct target_ip_mreqn *target_smreqn;
1499 
1500     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1501     if (!target_smreqn)
1502         return -TARGET_EFAULT;
1503     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1504     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1505     if (len == sizeof(struct target_ip_mreqn))
1506         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1507     unlock_user(target_smreqn, target_addr, 0);
1508 
1509     return 0;
1510 }
1511 
1512 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1513                                                abi_ulong target_addr,
1514                                                socklen_t len)
1515 {
1516     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1517     sa_family_t sa_family;
1518     struct target_sockaddr *target_saddr;
1519 
1520     if (fd_trans_target_to_host_addr(fd)) {
1521         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1522     }
1523 
1524     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1525     if (!target_saddr)
1526         return -TARGET_EFAULT;
1527 
1528     sa_family = tswap16(target_saddr->sa_family);
1529 
1530     /* Oops. The caller might send an incomplete sun_path; sun_path
1531      * must be terminated by \0 (see the manual page), but
1532      * unfortunately it is quite common to specify sockaddr_un
1533      * length as "strlen(x->sun_path)" when it should be
1534      * "strlen(...) + 1". We'll fix that here if needed.
1535      * The Linux kernel has a similar feature.
1536      */
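    /*
     * For example, binding to "/tmp/sock" with
     *     len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock")
     * omits the trailing NUL: the check below sees a non-zero byte at
     * len - 1 followed by a zero byte at len, and extends len by one so the
     * host kernel receives a properly terminated path.
     */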
1537 
1538     if (sa_family == AF_UNIX) {
1539         if (len < unix_maxlen && len > 0) {
1540             char *cp = (char*)target_saddr;
1541 
1542             if ( cp[len-1] && !cp[len] )
1543                 len++;
1544         }
1545         if (len > unix_maxlen)
1546             len = unix_maxlen;
1547     }
1548 
1549     memcpy(addr, target_saddr, len);
1550     addr->sa_family = sa_family;
1551     if (sa_family == AF_NETLINK) {
1552         struct sockaddr_nl *nladdr;
1553 
1554         nladdr = (struct sockaddr_nl *)addr;
1555         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1556         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1557     } else if (sa_family == AF_PACKET) {
1558         struct target_sockaddr_ll *lladdr;
1559 
1560         lladdr = (struct target_sockaddr_ll *)addr;
1561         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1562         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1563     }
1564     unlock_user(target_saddr, target_addr, 0);
1565 
1566     return 0;
1567 }
1568 
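/* Copy a host sockaddr out to guest memory, byte-swapping the family
 * field and those family-specific members that are stored in CPU byte
 * order (AF_NETLINK pid/groups, AF_PACKET ifindex/hatype, the AF_INET6
 * scope id).
 */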
1569 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1570                                                struct sockaddr *addr,
1571                                                socklen_t len)
1572 {
1573     struct target_sockaddr *target_saddr;
1574 
1575     if (len == 0) {
1576         return 0;
1577     }
1578     assert(addr);
1579 
1580     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1581     if (!target_saddr)
1582         return -TARGET_EFAULT;
1583     memcpy(target_saddr, addr, len);
1584     if (len >= offsetof(struct target_sockaddr, sa_family) +
1585         sizeof(target_saddr->sa_family)) {
1586         target_saddr->sa_family = tswap16(addr->sa_family);
1587     }
1588     if (addr->sa_family == AF_NETLINK &&
1589         len >= sizeof(struct target_sockaddr_nl)) {
1590         struct target_sockaddr_nl *target_nl =
1591                (struct target_sockaddr_nl *)target_saddr;
1592         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1593         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1594     } else if (addr->sa_family == AF_PACKET) {
1595         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1596         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1597         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1598     } else if (addr->sa_family == AF_INET6 &&
1599                len >= sizeof(struct target_sockaddr_in6)) {
1600         struct target_sockaddr_in6 *target_in6 =
1601                (struct target_sockaddr_in6 *)target_saddr;
1602         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1603     }
1604     unlock_user(target_saddr, target_addr, len);
1605 
1606     return 0;
1607 }
1608 
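/* Convert the ancillary data of a guest sendmsg() into host format.
 * The host control buffer has already been allocated by the caller;
 * on return msgh->msg_controllen holds the space actually used.
 * SCM_RIGHTS and SCM_CREDENTIALS payloads are converted element by
 * element; anything else is copied through unchanged with a LOG_UNIMP
 * warning.
 */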
1609 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1610                                            struct target_msghdr *target_msgh)
1611 {
1612     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1613     abi_long msg_controllen;
1614     abi_ulong target_cmsg_addr;
1615     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1616     socklen_t space = 0;
1617 
1618     msg_controllen = tswapal(target_msgh->msg_controllen);
1619     if (msg_controllen < sizeof (struct target_cmsghdr))
1620         goto the_end;
1621     target_cmsg_addr = tswapal(target_msgh->msg_control);
1622     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1623     target_cmsg_start = target_cmsg;
1624     if (!target_cmsg)
1625         return -TARGET_EFAULT;
1626 
1627     while (cmsg && target_cmsg) {
1628         void *data = CMSG_DATA(cmsg);
1629         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1630 
1631         int len = tswapal(target_cmsg->cmsg_len)
1632             - sizeof(struct target_cmsghdr);
1633 
1634         space += CMSG_SPACE(len);
1635         if (space > msgh->msg_controllen) {
1636             space -= CMSG_SPACE(len);
1637             /* This is a QEMU bug, since we allocated the payload
1638              * area ourselves (unlike overflow in host-to-target
1639              * conversion, which is just the guest giving us a buffer
1640              * that's too small). It can't happen for the payload types
1641              * we currently support; if it becomes an issue in future
1642              * we would need to improve our allocation strategy to
1643              * something more intelligent than "twice the size of the
1644              * target buffer we're reading from".
1645              */
1646             qemu_log_mask(LOG_UNIMP,
1647                           ("Unsupported ancillary data %d/%d: "
1648                            "unhandled msg size\n"),
1649                           tswap32(target_cmsg->cmsg_level),
1650                           tswap32(target_cmsg->cmsg_type));
1651             break;
1652         }
1653 
1654         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1655             cmsg->cmsg_level = SOL_SOCKET;
1656         } else {
1657             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1658         }
1659         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1660         cmsg->cmsg_len = CMSG_LEN(len);
1661 
1662         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1663             int *fd = (int *)data;
1664             int *target_fd = (int *)target_data;
1665             int i, numfds = len / sizeof(int);
1666 
1667             for (i = 0; i < numfds; i++) {
1668                 __get_user(fd[i], target_fd + i);
1669             }
1670         } else if (cmsg->cmsg_level == SOL_SOCKET
1671                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1672             struct ucred *cred = (struct ucred *)data;
1673             struct target_ucred *target_cred =
1674                 (struct target_ucred *)target_data;
1675 
1676             __get_user(cred->pid, &target_cred->pid);
1677             __get_user(cred->uid, &target_cred->uid);
1678             __get_user(cred->gid, &target_cred->gid);
1679         } else {
1680             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1681                           cmsg->cmsg_level, cmsg->cmsg_type);
1682             memcpy(data, target_data, len);
1683         }
1684 
1685         cmsg = CMSG_NXTHDR(msgh, cmsg);
1686         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1687                                          target_cmsg_start);
1688     }
1689     unlock_user(target_cmsg, target_cmsg_addr, 0);
1690  the_end:
1691     msgh->msg_controllen = space;
1692     return 0;
1693 }
1694 
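/* Convert the ancillary data returned by a host recvmsg() into guest
 * format, truncating (and reporting MSG_CTRUNC) when the guest control
 * buffer is too small.  Payloads whose layout differs between host and
 * target, such as SO_TIMESTAMP's struct timeval, are converted field by
 * field; unknown types are copied through unchanged with a LOG_UNIMP
 * warning.
 */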
1695 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1696                                            struct msghdr *msgh)
1697 {
1698     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1699     abi_long msg_controllen;
1700     abi_ulong target_cmsg_addr;
1701     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1702     socklen_t space = 0;
1703 
1704     msg_controllen = tswapal(target_msgh->msg_controllen);
1705     if (msg_controllen < sizeof (struct target_cmsghdr))
1706         goto the_end;
1707     target_cmsg_addr = tswapal(target_msgh->msg_control);
1708     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1709     target_cmsg_start = target_cmsg;
1710     if (!target_cmsg)
1711         return -TARGET_EFAULT;
1712 
1713     while (cmsg && target_cmsg) {
1714         void *data = CMSG_DATA(cmsg);
1715         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1716 
1717         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1718         int tgt_len, tgt_space;
1719 
1720         /* We never copy a half-header but may copy half-data;
1721          * this is Linux's behaviour in put_cmsg(). Note that
1722          * truncation here is a guest problem (which we report
1723          * to the guest via the CTRUNC bit), unlike truncation
1724          * in target_to_host_cmsg, which is a QEMU bug.
1725          */
1726         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1727             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1728             break;
1729         }
1730 
1731         if (cmsg->cmsg_level == SOL_SOCKET) {
1732             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1733         } else {
1734             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1735         }
1736         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1737 
1738         /* Payload types which need a different size of payload on
1739          * the target must adjust tgt_len here.
1740          */
1741         tgt_len = len;
1742         switch (cmsg->cmsg_level) {
1743         case SOL_SOCKET:
1744             switch (cmsg->cmsg_type) {
1745             case SO_TIMESTAMP:
1746                 tgt_len = sizeof(struct target_timeval);
1747                 break;
1748             default:
1749                 break;
1750             }
1751             break;
1752         default:
1753             break;
1754         }
1755 
1756         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1757             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1758             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1759         }
1760 
1761         /* We must now copy-and-convert len bytes of payload
1762          * into tgt_len bytes of destination space. Bear in mind
1763          * that in both source and destination we may be dealing
1764          * with a truncated value!
1765          */
1766         switch (cmsg->cmsg_level) {
1767         case SOL_SOCKET:
1768             switch (cmsg->cmsg_type) {
1769             case SCM_RIGHTS:
1770             {
1771                 int *fd = (int *)data;
1772                 int *target_fd = (int *)target_data;
1773                 int i, numfds = tgt_len / sizeof(int);
1774 
1775                 for (i = 0; i < numfds; i++) {
1776                     __put_user(fd[i], target_fd + i);
1777                 }
1778                 break;
1779             }
1780             case SO_TIMESTAMP:
1781             {
1782                 struct timeval *tv = (struct timeval *)data;
1783                 struct target_timeval *target_tv =
1784                     (struct target_timeval *)target_data;
1785 
1786                 if (len != sizeof(struct timeval) ||
1787                     tgt_len != sizeof(struct target_timeval)) {
1788                     goto unimplemented;
1789                 }
1790 
1791                 /* copy struct timeval to target */
1792                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1793                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1794                 break;
1795             }
1796             case SCM_CREDENTIALS:
1797             {
1798                 struct ucred *cred = (struct ucred *)data;
1799                 struct target_ucred *target_cred =
1800                     (struct target_ucred *)target_data;
1801 
1802                 __put_user(cred->pid, &target_cred->pid);
1803                 __put_user(cred->uid, &target_cred->uid);
1804                 __put_user(cred->gid, &target_cred->gid);
1805                 break;
1806             }
1807             default:
1808                 goto unimplemented;
1809             }
1810             break;
1811 
1812         case SOL_IP:
1813             switch (cmsg->cmsg_type) {
1814             case IP_TTL:
1815             {
1816                 uint32_t *v = (uint32_t *)data;
1817                 uint32_t *t_int = (uint32_t *)target_data;
1818 
1819                 if (len != sizeof(uint32_t) ||
1820                     tgt_len != sizeof(uint32_t)) {
1821                     goto unimplemented;
1822                 }
1823                 __put_user(*v, t_int);
1824                 break;
1825             }
1826             case IP_RECVERR:
1827             {
1828                 struct errhdr_t {
1829                    struct sock_extended_err ee;
1830                    struct sockaddr_in offender;
1831                 };
1832                 struct errhdr_t *errh = (struct errhdr_t *)data;
1833                 struct errhdr_t *target_errh =
1834                     (struct errhdr_t *)target_data;
1835 
1836                 if (len != sizeof(struct errhdr_t) ||
1837                     tgt_len != sizeof(struct errhdr_t)) {
1838                     goto unimplemented;
1839                 }
1840                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1841                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1842                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1843                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1844                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1845                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1846                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1847                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1848                     (void *) &errh->offender, sizeof(errh->offender));
1849                 break;
1850             }
1851             default:
1852                 goto unimplemented;
1853             }
1854             break;
1855 
1856         case SOL_IPV6:
1857             switch (cmsg->cmsg_type) {
1858             case IPV6_HOPLIMIT:
1859             {
1860                 uint32_t *v = (uint32_t *)data;
1861                 uint32_t *t_int = (uint32_t *)target_data;
1862 
1863                 if (len != sizeof(uint32_t) ||
1864                     tgt_len != sizeof(uint32_t)) {
1865                     goto unimplemented;
1866                 }
1867                 __put_user(*v, t_int);
1868                 break;
1869             }
1870             case IPV6_RECVERR:
1871             {
1872                 struct errhdr6_t {
1873                    struct sock_extended_err ee;
1874                    struct sockaddr_in6 offender;
1875                 };
1876                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1877                 struct errhdr6_t *target_errh =
1878                     (struct errhdr6_t *)target_data;
1879 
1880                 if (len != sizeof(struct errhdr6_t) ||
1881                     tgt_len != sizeof(struct errhdr6_t)) {
1882                     goto unimplemented;
1883                 }
1884                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1885                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1886                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1887                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1888                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1889                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1890                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1891                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1892                     (void *) &errh->offender, sizeof(errh->offender));
1893                 break;
1894             }
1895             default:
1896                 goto unimplemented;
1897             }
1898             break;
1899 
1900         default:
1901         unimplemented:
1902             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1903                           cmsg->cmsg_level, cmsg->cmsg_type);
1904             memcpy(target_data, data, MIN(len, tgt_len));
1905             if (tgt_len > len) {
1906                 memset(target_data + len, 0, tgt_len - len);
1907             }
1908         }
1909 
1910         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1911         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1912         if (msg_controllen < tgt_space) {
1913             tgt_space = msg_controllen;
1914         }
1915         msg_controllen -= tgt_space;
1916         space += tgt_space;
1917         cmsg = CMSG_NXTHDR(msgh, cmsg);
1918         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1919                                          target_cmsg_start);
1920     }
1921     unlock_user(target_cmsg, target_cmsg_addr, space);
1922  the_end:
1923     target_msgh->msg_controllen = tswapal(space);
1924     return 0;
1925 }
1926 
1927 /* do_setsockopt() Must return target values and target errnos. */
1928 static abi_long do_setsockopt(int sockfd, int level, int optname,
1929                               abi_ulong optval_addr, socklen_t optlen)
1930 {
1931     abi_long ret;
1932     int val;
1933     struct ip_mreqn *ip_mreq;
1934     struct ip_mreq_source *ip_mreq_source;
1935 
1936     switch(level) {
1937     case SOL_TCP:
1938         /* TCP options all take an 'int' value.  */
1939         if (optlen < sizeof(uint32_t))
1940             return -TARGET_EINVAL;
1941 
1942         if (get_user_u32(val, optval_addr))
1943             return -TARGET_EFAULT;
1944         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1945         break;
1946     case SOL_IP:
1947         switch(optname) {
1948         case IP_TOS:
1949         case IP_TTL:
1950         case IP_HDRINCL:
1951         case IP_ROUTER_ALERT:
1952         case IP_RECVOPTS:
1953         case IP_RETOPTS:
1954         case IP_PKTINFO:
1955         case IP_MTU_DISCOVER:
1956         case IP_RECVERR:
1957         case IP_RECVTTL:
1958         case IP_RECVTOS:
1959 #ifdef IP_FREEBIND
1960         case IP_FREEBIND:
1961 #endif
1962         case IP_MULTICAST_TTL:
1963         case IP_MULTICAST_LOOP:
1964             val = 0;
1965             if (optlen >= sizeof(uint32_t)) {
1966                 if (get_user_u32(val, optval_addr))
1967                     return -TARGET_EFAULT;
1968             } else if (optlen >= 1) {
1969                 if (get_user_u8(val, optval_addr))
1970                     return -TARGET_EFAULT;
1971             }
1972             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1973             break;
1974         case IP_ADD_MEMBERSHIP:
1975         case IP_DROP_MEMBERSHIP:
1976             if (optlen < sizeof (struct target_ip_mreq) ||
1977                 optlen > sizeof (struct target_ip_mreqn))
1978                 return -TARGET_EINVAL;
1979 
1980             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1981             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1982             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1983             break;
1984 
1985         case IP_BLOCK_SOURCE:
1986         case IP_UNBLOCK_SOURCE:
1987         case IP_ADD_SOURCE_MEMBERSHIP:
1988         case IP_DROP_SOURCE_MEMBERSHIP:
1989             if (optlen != sizeof (struct target_ip_mreq_source))
1990                 return -TARGET_EINVAL;
1991 
1992             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1993             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1994             unlock_user(ip_mreq_source, optval_addr, 0);
1995             break;
1996 
1997         default:
1998             goto unimplemented;
1999         }
2000         break;
2001     case SOL_IPV6:
2002         switch (optname) {
2003         case IPV6_MTU_DISCOVER:
2004         case IPV6_MTU:
2005         case IPV6_V6ONLY:
2006         case IPV6_RECVPKTINFO:
2007         case IPV6_UNICAST_HOPS:
2008         case IPV6_MULTICAST_HOPS:
2009         case IPV6_MULTICAST_LOOP:
2010         case IPV6_RECVERR:
2011         case IPV6_RECVHOPLIMIT:
2012         case IPV6_2292HOPLIMIT:
2013         case IPV6_CHECKSUM:
2014         case IPV6_ADDRFORM:
2015         case IPV6_2292PKTINFO:
2016         case IPV6_RECVTCLASS:
2017         case IPV6_RECVRTHDR:
2018         case IPV6_2292RTHDR:
2019         case IPV6_RECVHOPOPTS:
2020         case IPV6_2292HOPOPTS:
2021         case IPV6_RECVDSTOPTS:
2022         case IPV6_2292DSTOPTS:
2023         case IPV6_TCLASS:
2024 #ifdef IPV6_RECVPATHMTU
2025         case IPV6_RECVPATHMTU:
2026 #endif
2027 #ifdef IPV6_TRANSPARENT
2028         case IPV6_TRANSPARENT:
2029 #endif
2030 #ifdef IPV6_FREEBIND
2031         case IPV6_FREEBIND:
2032 #endif
2033 #ifdef IPV6_RECVORIGDSTADDR
2034         case IPV6_RECVORIGDSTADDR:
2035 #endif
2036             val = 0;
2037             if (optlen < sizeof(uint32_t)) {
2038                 return -TARGET_EINVAL;
2039             }
2040             if (get_user_u32(val, optval_addr)) {
2041                 return -TARGET_EFAULT;
2042             }
2043             ret = get_errno(setsockopt(sockfd, level, optname,
2044                                        &val, sizeof(val)));
2045             break;
2046         case IPV6_PKTINFO:
2047         {
2048             struct in6_pktinfo pki;
2049 
2050             if (optlen < sizeof(pki)) {
2051                 return -TARGET_EINVAL;
2052             }
2053 
2054             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2055                 return -TARGET_EFAULT;
2056             }
2057 
2058             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2059 
2060             ret = get_errno(setsockopt(sockfd, level, optname,
2061                                        &pki, sizeof(pki)));
2062             break;
2063         }
2064         case IPV6_ADD_MEMBERSHIP:
2065         case IPV6_DROP_MEMBERSHIP:
2066         {
2067             struct ipv6_mreq ipv6mreq;
2068 
2069             if (optlen < sizeof(ipv6mreq)) {
2070                 return -TARGET_EINVAL;
2071             }
2072 
2073             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2074                 return -TARGET_EFAULT;
2075             }
2076 
2077             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2078 
2079             ret = get_errno(setsockopt(sockfd, level, optname,
2080                                        &ipv6mreq, sizeof(ipv6mreq)));
2081             break;
2082         }
2083         default:
2084             goto unimplemented;
2085         }
2086         break;
2087     case SOL_ICMPV6:
2088         switch (optname) {
2089         case ICMPV6_FILTER:
2090         {
2091             struct icmp6_filter icmp6f;
2092 
2093             if (optlen > sizeof(icmp6f)) {
2094                 optlen = sizeof(icmp6f);
2095             }
2096 
2097             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2098                 return -TARGET_EFAULT;
2099             }
2100 
2101             for (val = 0; val < 8; val++) {
2102                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2103             }
2104 
2105             ret = get_errno(setsockopt(sockfd, level, optname,
2106                                        &icmp6f, optlen));
2107             break;
2108         }
2109         default:
2110             goto unimplemented;
2111         }
2112         break;
2113     case SOL_RAW:
2114         switch (optname) {
2115         case ICMP_FILTER:
2116         case IPV6_CHECKSUM:
2117             /* these take a u32 value */
2118             if (optlen < sizeof(uint32_t)) {
2119                 return -TARGET_EINVAL;
2120             }
2121 
2122             if (get_user_u32(val, optval_addr)) {
2123                 return -TARGET_EFAULT;
2124             }
2125             ret = get_errno(setsockopt(sockfd, level, optname,
2126                                        &val, sizeof(val)));
2127             break;
2128 
2129         default:
2130             goto unimplemented;
2131         }
2132         break;
2133 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2134     case SOL_ALG:
2135         switch (optname) {
2136         case ALG_SET_KEY:
2137         {
2138             char *alg_key = g_malloc(optlen);
2139 
2140             if (!alg_key) {
2141                 return -TARGET_ENOMEM;
2142             }
2143             if (copy_from_user(alg_key, optval_addr, optlen)) {
2144                 g_free(alg_key);
2145                 return -TARGET_EFAULT;
2146             }
2147             ret = get_errno(setsockopt(sockfd, level, optname,
2148                                        alg_key, optlen));
2149             g_free(alg_key);
2150             break;
2151         }
2152         case ALG_SET_AEAD_AUTHSIZE:
2153         {
2154             ret = get_errno(setsockopt(sockfd, level, optname,
2155                                        NULL, optlen));
2156             break;
2157         }
2158         default:
2159             goto unimplemented;
2160         }
2161         break;
2162 #endif
2163     case TARGET_SOL_SOCKET:
2164         switch (optname) {
2165         case TARGET_SO_RCVTIMEO:
2166         {
2167                 struct timeval tv;
2168 
2169                 optname = SO_RCVTIMEO;
2170 
2171 set_timeout:
2172                 if (optlen != sizeof(struct target_timeval)) {
2173                     return -TARGET_EINVAL;
2174                 }
2175 
2176                 if (copy_from_user_timeval(&tv, optval_addr)) {
2177                     return -TARGET_EFAULT;
2178                 }
2179 
2180                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2181                                 &tv, sizeof(tv)));
2182                 return ret;
2183         }
2184         case TARGET_SO_SNDTIMEO:
2185                 optname = SO_SNDTIMEO;
2186                 goto set_timeout;
2187         case TARGET_SO_ATTACH_FILTER:
2188         {
2189                 struct target_sock_fprog *tfprog;
2190                 struct target_sock_filter *tfilter;
2191                 struct sock_fprog fprog;
2192                 struct sock_filter *filter;
2193                 int i;
2194 
2195                 if (optlen != sizeof(*tfprog)) {
2196                     return -TARGET_EINVAL;
2197                 }
2198                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2199                     return -TARGET_EFAULT;
2200                 }
2201                 if (!lock_user_struct(VERIFY_READ, tfilter,
2202                                       tswapal(tfprog->filter), 0)) {
2203                     unlock_user_struct(tfprog, optval_addr, 1);
2204                     return -TARGET_EFAULT;
2205                 }
2206 
2207                 fprog.len = tswap16(tfprog->len);
2208                 filter = g_try_new(struct sock_filter, fprog.len);
2209                 if (filter == NULL) {
2210                     unlock_user_struct(tfilter, tfprog->filter, 1);
2211                     unlock_user_struct(tfprog, optval_addr, 1);
2212                     return -TARGET_ENOMEM;
2213                 }
2214                 for (i = 0; i < fprog.len; i++) {
2215                     filter[i].code = tswap16(tfilter[i].code);
2216                     filter[i].jt = tfilter[i].jt;
2217                     filter[i].jf = tfilter[i].jf;
2218                     filter[i].k = tswap32(tfilter[i].k);
2219                 }
2220                 fprog.filter = filter;
2221 
2222                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2223                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2224                 g_free(filter);
2225 
2226                 unlock_user_struct(tfilter, tfprog->filter, 1);
2227                 unlock_user_struct(tfprog, optval_addr, 1);
2228                 return ret;
2229         }
2230         case TARGET_SO_BINDTODEVICE:
2231         {
2232                 char *dev_ifname, *addr_ifname;
2233 
2234                 if (optlen > IFNAMSIZ - 1) {
2235                     optlen = IFNAMSIZ - 1;
2236                 }
2237                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2238                 if (!dev_ifname) {
2239                     return -TARGET_EFAULT;
2240                 }
2241                 optname = SO_BINDTODEVICE;
2242                 addr_ifname = alloca(IFNAMSIZ);
2243                 memcpy(addr_ifname, dev_ifname, optlen);
2244                 addr_ifname[optlen] = 0;
2245                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2246                                            addr_ifname, optlen));
2247                 unlock_user(dev_ifname, optval_addr, 0);
2248                 return ret;
2249         }
2250         case TARGET_SO_LINGER:
2251         {
2252                 struct linger lg;
2253                 struct target_linger *tlg;
2254 
2255                 if (optlen != sizeof(struct target_linger)) {
2256                     return -TARGET_EINVAL;
2257                 }
2258                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2259                     return -TARGET_EFAULT;
2260                 }
2261                 __get_user(lg.l_onoff, &tlg->l_onoff);
2262                 __get_user(lg.l_linger, &tlg->l_linger);
2263                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2264                                 &lg, sizeof(lg)));
2265                 unlock_user_struct(tlg, optval_addr, 0);
2266                 return ret;
2267         }
2268             /* Options with 'int' argument.  */
2269         case TARGET_SO_DEBUG:
2270                 optname = SO_DEBUG;
2271                 break;
2272         case TARGET_SO_REUSEADDR:
2273                 optname = SO_REUSEADDR;
2274                 break;
2275 #ifdef SO_REUSEPORT
2276         case TARGET_SO_REUSEPORT:
2277                 optname = SO_REUSEPORT;
2278                 break;
2279 #endif
2280         case TARGET_SO_TYPE:
2281                 optname = SO_TYPE;
2282                 break;
2283         case TARGET_SO_ERROR:
2284                 optname = SO_ERROR;
2285                 break;
2286         case TARGET_SO_DONTROUTE:
2287                 optname = SO_DONTROUTE;
2288                 break;
2289         case TARGET_SO_BROADCAST:
2290                 optname = SO_BROADCAST;
2291                 break;
2292         case TARGET_SO_SNDBUF:
2293                 optname = SO_SNDBUF;
2294                 break;
2295         case TARGET_SO_SNDBUFFORCE:
2296                 optname = SO_SNDBUFFORCE;
2297                 break;
2298         case TARGET_SO_RCVBUF:
2299                 optname = SO_RCVBUF;
2300                 break;
2301         case TARGET_SO_RCVBUFFORCE:
2302                 optname = SO_RCVBUFFORCE;
2303                 break;
2304         case TARGET_SO_KEEPALIVE:
2305                 optname = SO_KEEPALIVE;
2306                 break;
2307         case TARGET_SO_OOBINLINE:
2308                 optname = SO_OOBINLINE;
2309                 break;
2310         case TARGET_SO_NO_CHECK:
2311                 optname = SO_NO_CHECK;
2312                 break;
2313         case TARGET_SO_PRIORITY:
2314                 optname = SO_PRIORITY;
2315                 break;
2316 #ifdef SO_BSDCOMPAT
2317         case TARGET_SO_BSDCOMPAT:
2318                 optname = SO_BSDCOMPAT;
2319                 break;
2320 #endif
2321         case TARGET_SO_PASSCRED:
2322                 optname = SO_PASSCRED;
2323                 break;
2324         case TARGET_SO_PASSSEC:
2325                 optname = SO_PASSSEC;
2326                 break;
2327         case TARGET_SO_TIMESTAMP:
2328                 optname = SO_TIMESTAMP;
2329                 break;
2330         case TARGET_SO_RCVLOWAT:
2331                 optname = SO_RCVLOWAT;
2332                 break;
2333         default:
2334             goto unimplemented;
2335         }
2336         if (optlen < sizeof(uint32_t))
2337             return -TARGET_EINVAL;
2338 
2339         if (get_user_u32(val, optval_addr))
2340             return -TARGET_EFAULT;
2341         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2342         break;
2343 #ifdef SOL_NETLINK
2344     case SOL_NETLINK:
2345         switch (optname) {
2346         case NETLINK_PKTINFO:
2347         case NETLINK_ADD_MEMBERSHIP:
2348         case NETLINK_DROP_MEMBERSHIP:
2349         case NETLINK_BROADCAST_ERROR:
2350         case NETLINK_NO_ENOBUFS:
2351 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2352         case NETLINK_LISTEN_ALL_NSID:
2353         case NETLINK_CAP_ACK:
2354 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2355 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2356         case NETLINK_EXT_ACK:
2357 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2359         case NETLINK_GET_STRICT_CHK:
2360 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2361             break;
2362         default:
2363             goto unimplemented;
2364         }
2365         val = 0;
2366         if (optlen < sizeof(uint32_t)) {
2367             return -TARGET_EINVAL;
2368         }
2369         if (get_user_u32(val, optval_addr)) {
2370             return -TARGET_EFAULT;
2371         }
2372         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2373                                    sizeof(val)));
2374         break;
2375 #endif /* SOL_NETLINK */
2376     default:
2377     unimplemented:
2378         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2379                       level, optname);
2380         ret = -TARGET_ENOPROTOOPT;
2381     }
2382     return ret;
2383 }
2384 
2385 /* do_getsockopt() Must return target values and target errnos. */
2386 static abi_long do_getsockopt(int sockfd, int level, int optname,
2387                               abi_ulong optval_addr, abi_ulong optlen)
2388 {
2389     abi_long ret;
2390     int len, val;
2391     socklen_t lv;
2392 
2393     switch(level) {
2394     case TARGET_SOL_SOCKET:
2395         level = SOL_SOCKET;
2396         switch (optname) {
2397         /* These don't just return a single integer */
2398         case TARGET_SO_PEERNAME:
2399             goto unimplemented;
2400         case TARGET_SO_RCVTIMEO: {
2401             struct timeval tv;
2402             socklen_t tvlen;
2403 
2404             optname = SO_RCVTIMEO;
2405 
2406 get_timeout:
2407             if (get_user_u32(len, optlen)) {
2408                 return -TARGET_EFAULT;
2409             }
2410             if (len < 0) {
2411                 return -TARGET_EINVAL;
2412             }
2413 
2414             tvlen = sizeof(tv);
2415             ret = get_errno(getsockopt(sockfd, level, optname,
2416                                        &tv, &tvlen));
2417             if (ret < 0) {
2418                 return ret;
2419             }
2420             if (len > sizeof(struct target_timeval)) {
2421                 len = sizeof(struct target_timeval);
2422             }
2423             if (copy_to_user_timeval(optval_addr, &tv)) {
2424                 return -TARGET_EFAULT;
2425             }
2426             if (put_user_u32(len, optlen)) {
2427                 return -TARGET_EFAULT;
2428             }
2429             break;
2430         }
2431         case TARGET_SO_SNDTIMEO:
2432             optname = SO_SNDTIMEO;
2433             goto get_timeout;
2434         case TARGET_SO_PEERCRED: {
2435             struct ucred cr;
2436             socklen_t crlen;
2437             struct target_ucred *tcr;
2438 
2439             if (get_user_u32(len, optlen)) {
2440                 return -TARGET_EFAULT;
2441             }
2442             if (len < 0) {
2443                 return -TARGET_EINVAL;
2444             }
2445 
2446             crlen = sizeof(cr);
2447             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2448                                        &cr, &crlen));
2449             if (ret < 0) {
2450                 return ret;
2451             }
2452             if (len > crlen) {
2453                 len = crlen;
2454             }
2455             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2456                 return -TARGET_EFAULT;
2457             }
2458             __put_user(cr.pid, &tcr->pid);
2459             __put_user(cr.uid, &tcr->uid);
2460             __put_user(cr.gid, &tcr->gid);
2461             unlock_user_struct(tcr, optval_addr, 1);
2462             if (put_user_u32(len, optlen)) {
2463                 return -TARGET_EFAULT;
2464             }
2465             break;
2466         }
2467         case TARGET_SO_PEERSEC: {
2468             char *name;
2469 
2470             if (get_user_u32(len, optlen)) {
2471                 return -TARGET_EFAULT;
2472             }
2473             if (len < 0) {
2474                 return -TARGET_EINVAL;
2475             }
2476             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2477             if (!name) {
2478                 return -TARGET_EFAULT;
2479             }
2480             lv = len;
2481             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2482                                        name, &lv));
2483             if (put_user_u32(lv, optlen)) {
2484                 ret = -TARGET_EFAULT;
2485             }
2486             unlock_user(name, optval_addr, lv);
2487             break;
2488         }
2489         case TARGET_SO_LINGER:
2490         {
2491             struct linger lg;
2492             socklen_t lglen;
2493             struct target_linger *tlg;
2494 
2495             if (get_user_u32(len, optlen)) {
2496                 return -TARGET_EFAULT;
2497             }
2498             if (len < 0) {
2499                 return -TARGET_EINVAL;
2500             }
2501 
2502             lglen = sizeof(lg);
2503             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2504                                        &lg, &lglen));
2505             if (ret < 0) {
2506                 return ret;
2507             }
2508             if (len > lglen) {
2509                 len = lglen;
2510             }
2511             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2512                 return -TARGET_EFAULT;
2513             }
2514             __put_user(lg.l_onoff, &tlg->l_onoff);
2515             __put_user(lg.l_linger, &tlg->l_linger);
2516             unlock_user_struct(tlg, optval_addr, 1);
2517             if (put_user_u32(len, optlen)) {
2518                 return -TARGET_EFAULT;
2519             }
2520             break;
2521         }
2522         /* Options with 'int' argument.  */
2523         case TARGET_SO_DEBUG:
2524             optname = SO_DEBUG;
2525             goto int_case;
2526         case TARGET_SO_REUSEADDR:
2527             optname = SO_REUSEADDR;
2528             goto int_case;
2529 #ifdef SO_REUSEPORT
2530         case TARGET_SO_REUSEPORT:
2531             optname = SO_REUSEPORT;
2532             goto int_case;
2533 #endif
2534         case TARGET_SO_TYPE:
2535             optname = SO_TYPE;
2536             goto int_case;
2537         case TARGET_SO_ERROR:
2538             optname = SO_ERROR;
2539             goto int_case;
2540         case TARGET_SO_DONTROUTE:
2541             optname = SO_DONTROUTE;
2542             goto int_case;
2543         case TARGET_SO_BROADCAST:
2544             optname = SO_BROADCAST;
2545             goto int_case;
2546         case TARGET_SO_SNDBUF:
2547             optname = SO_SNDBUF;
2548             goto int_case;
2549         case TARGET_SO_RCVBUF:
2550             optname = SO_RCVBUF;
2551             goto int_case;
2552         case TARGET_SO_KEEPALIVE:
2553             optname = SO_KEEPALIVE;
2554             goto int_case;
2555         case TARGET_SO_OOBINLINE:
2556             optname = SO_OOBINLINE;
2557             goto int_case;
2558         case TARGET_SO_NO_CHECK:
2559             optname = SO_NO_CHECK;
2560             goto int_case;
2561         case TARGET_SO_PRIORITY:
2562             optname = SO_PRIORITY;
2563             goto int_case;
2564 #ifdef SO_BSDCOMPAT
2565         case TARGET_SO_BSDCOMPAT:
2566             optname = SO_BSDCOMPAT;
2567             goto int_case;
2568 #endif
2569         case TARGET_SO_PASSCRED:
2570             optname = SO_PASSCRED;
2571             goto int_case;
2572         case TARGET_SO_TIMESTAMP:
2573             optname = SO_TIMESTAMP;
2574             goto int_case;
2575         case TARGET_SO_RCVLOWAT:
2576             optname = SO_RCVLOWAT;
2577             goto int_case;
2578         case TARGET_SO_ACCEPTCONN:
2579             optname = SO_ACCEPTCONN;
2580             goto int_case;
2581         default:
2582             goto int_case;
2583         }
2584         break;
2585     case SOL_TCP:
2586         /* TCP options all take an 'int' value.  */
2587     int_case:
2588         if (get_user_u32(len, optlen))
2589             return -TARGET_EFAULT;
2590         if (len < 0)
2591             return -TARGET_EINVAL;
2592         lv = sizeof(lv);
2593         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2594         if (ret < 0)
2595             return ret;
2596         if (optname == SO_TYPE) {
2597             val = host_to_target_sock_type(val);
2598         }
2599         if (len > lv)
2600             len = lv;
2601         if (len == 4) {
2602             if (put_user_u32(val, optval_addr))
2603                 return -TARGET_EFAULT;
2604         } else {
2605             if (put_user_u8(val, optval_addr))
2606                 return -TARGET_EFAULT;
2607         }
2608         if (put_user_u32(len, optlen))
2609             return -TARGET_EFAULT;
2610         break;
2611     case SOL_IP:
2612         switch(optname) {
2613         case IP_TOS:
2614         case IP_TTL:
2615         case IP_HDRINCL:
2616         case IP_ROUTER_ALERT:
2617         case IP_RECVOPTS:
2618         case IP_RETOPTS:
2619         case IP_PKTINFO:
2620         case IP_MTU_DISCOVER:
2621         case IP_RECVERR:
2622         case IP_RECVTOS:
2623 #ifdef IP_FREEBIND
2624         case IP_FREEBIND:
2625 #endif
2626         case IP_MULTICAST_TTL:
2627         case IP_MULTICAST_LOOP:
2628             if (get_user_u32(len, optlen))
2629                 return -TARGET_EFAULT;
2630             if (len < 0)
2631                 return -TARGET_EINVAL;
2632             lv = sizeof(lv);
2633             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2634             if (ret < 0)
2635                 return ret;
2636             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2637                 len = 1;
2638                 if (put_user_u32(len, optlen)
2639                     || put_user_u8(val, optval_addr))
2640                     return -TARGET_EFAULT;
2641             } else {
2642                 if (len > sizeof(int))
2643                     len = sizeof(int);
2644                 if (put_user_u32(len, optlen)
2645                     || put_user_u32(val, optval_addr))
2646                     return -TARGET_EFAULT;
2647             }
2648             break;
2649         default:
2650             ret = -TARGET_ENOPROTOOPT;
2651             break;
2652         }
2653         break;
2654     case SOL_IPV6:
2655         switch (optname) {
2656         case IPV6_MTU_DISCOVER:
2657         case IPV6_MTU:
2658         case IPV6_V6ONLY:
2659         case IPV6_RECVPKTINFO:
2660         case IPV6_UNICAST_HOPS:
2661         case IPV6_MULTICAST_HOPS:
2662         case IPV6_MULTICAST_LOOP:
2663         case IPV6_RECVERR:
2664         case IPV6_RECVHOPLIMIT:
2665         case IPV6_2292HOPLIMIT:
2666         case IPV6_CHECKSUM:
2667         case IPV6_ADDRFORM:
2668         case IPV6_2292PKTINFO:
2669         case IPV6_RECVTCLASS:
2670         case IPV6_RECVRTHDR:
2671         case IPV6_2292RTHDR:
2672         case IPV6_RECVHOPOPTS:
2673         case IPV6_2292HOPOPTS:
2674         case IPV6_RECVDSTOPTS:
2675         case IPV6_2292DSTOPTS:
2676         case IPV6_TCLASS:
2677 #ifdef IPV6_RECVPATHMTU
2678         case IPV6_RECVPATHMTU:
2679 #endif
2680 #ifdef IPV6_TRANSPARENT
2681         case IPV6_TRANSPARENT:
2682 #endif
2683 #ifdef IPV6_FREEBIND
2684         case IPV6_FREEBIND:
2685 #endif
2686 #ifdef IPV6_RECVORIGDSTADDR
2687         case IPV6_RECVORIGDSTADDR:
2688 #endif
2689             if (get_user_u32(len, optlen))
2690                 return -TARGET_EFAULT;
2691             if (len < 0)
2692                 return -TARGET_EINVAL;
2693             lv = sizeof(lv);
2694             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2695             if (ret < 0)
2696                 return ret;
2697             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2698                 len = 1;
2699                 if (put_user_u32(len, optlen)
2700                     || put_user_u8(val, optval_addr))
2701                     return -TARGET_EFAULT;
2702             } else {
2703                 if (len > sizeof(int))
2704                     len = sizeof(int);
2705                 if (put_user_u32(len, optlen)
2706                     || put_user_u32(val, optval_addr))
2707                     return -TARGET_EFAULT;
2708             }
2709             break;
2710         default:
2711             ret = -TARGET_ENOPROTOOPT;
2712             break;
2713         }
2714         break;
2715 #ifdef SOL_NETLINK
2716     case SOL_NETLINK:
2717         switch (optname) {
2718         case NETLINK_PKTINFO:
2719         case NETLINK_BROADCAST_ERROR:
2720         case NETLINK_NO_ENOBUFS:
2721 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2722         case NETLINK_LISTEN_ALL_NSID:
2723         case NETLINK_CAP_ACK:
2724 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2725 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2726         case NETLINK_EXT_ACK:
2727 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2729         case NETLINK_GET_STRICT_CHK:
2730 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2731             if (get_user_u32(len, optlen)) {
2732                 return -TARGET_EFAULT;
2733             }
2734             if (len != sizeof(val)) {
2735                 return -TARGET_EINVAL;
2736             }
2737             lv = len;
2738             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2739             if (ret < 0) {
2740                 return ret;
2741             }
2742             if (put_user_u32(lv, optlen)
2743                 || put_user_u32(val, optval_addr)) {
2744                 return -TARGET_EFAULT;
2745             }
2746             break;
2747 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2748         case NETLINK_LIST_MEMBERSHIPS:
2749         {
2750             uint32_t *results;
2751             int i;
2752             if (get_user_u32(len, optlen)) {
2753                 return -TARGET_EFAULT;
2754             }
2755             if (len < 0) {
2756                 return -TARGET_EINVAL;
2757             }
2758             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2759             if (!results) {
2760                 return -TARGET_EFAULT;
2761             }
2762             lv = len;
2763             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2764             if (ret < 0) {
2765                 unlock_user(results, optval_addr, 0);
2766                 return ret;
2767             }
2768             /* swap host endianness to target endianness. */
2769             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2770                 results[i] = tswap32(results[i]);
2771             }
2772             if (put_user_u32(lv, optlen)) {
2773                 return -TARGET_EFAULT;
2774             }
2775             unlock_user(results, optval_addr, 0);
2776             break;
2777         }
2778 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2779         default:
2780             goto unimplemented;
2781         }
2782         break;
2783 #endif /* SOL_NETLINK */
2784     default:
2785     unimplemented:
2786         qemu_log_mask(LOG_UNIMP,
2787                       "getsockopt level=%d optname=%d not yet supported\n",
2788                       level, optname);
2789         ret = -TARGET_EOPNOTSUPP;
2790         break;
2791     }
2792     return ret;
2793 }
2794 
2795 /* Convert target low/high pair representing file offset into the host
2796  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2797  * as the kernel doesn't handle them either.
2798  */
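/* For example, with a 32-bit guest on a 64-bit host, tlow = 0x89abcdef
 * and thigh = 0x01234567 combine into off = 0x0123456789abcdef, so
 * *hlow = 0x0123456789abcdef and *hhigh = 0.
 */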
2799 static void target_to_host_low_high(abi_ulong tlow,
2800                                     abi_ulong thigh,
2801                                     unsigned long *hlow,
2802                                     unsigned long *hhigh)
2803 {
2804     uint64_t off = tlow |
2805         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2806         TARGET_LONG_BITS / 2;
2807 
2808     *hlow = off;
2809     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2810 }
2811 
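/* Lock a guest iovec array of 'count' entries and build the matching
 * host struct iovec array.  A bad pointer in the first entry fails the
 * whole call with EFAULT; bad pointers in later entries become
 * zero-length slots so that the syscall performs a partial transfer.
 * The result must be released with unlock_iovec().
 */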
2812 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2813                                 abi_ulong count, int copy)
2814 {
2815     struct target_iovec *target_vec;
2816     struct iovec *vec;
2817     abi_ulong total_len, max_len;
2818     int i;
2819     int err = 0;
2820     bool bad_address = false;
2821 
2822     if (count == 0) {
2823         errno = 0;
2824         return NULL;
2825     }
2826     if (count > IOV_MAX) {
2827         errno = EINVAL;
2828         return NULL;
2829     }
2830 
2831     vec = g_try_new0(struct iovec, count);
2832     if (vec == NULL) {
2833         errno = ENOMEM;
2834         return NULL;
2835     }
2836 
2837     target_vec = lock_user(VERIFY_READ, target_addr,
2838                            count * sizeof(struct target_iovec), 1);
2839     if (target_vec == NULL) {
2840         err = EFAULT;
2841         goto fail2;
2842     }
2843 
2844     /* ??? If host page size > target page size, this will result in a
2845        value larger than what we can actually support.  */
2846     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2847     total_len = 0;
2848 
2849     for (i = 0; i < count; i++) {
2850         abi_ulong base = tswapal(target_vec[i].iov_base);
2851         abi_long len = tswapal(target_vec[i].iov_len);
2852 
2853         if (len < 0) {
2854             err = EINVAL;
2855             goto fail;
2856         } else if (len == 0) {
2857             /* Zero length pointer is ignored.  */
2858             vec[i].iov_base = 0;
2859         } else {
2860             vec[i].iov_base = lock_user(type, base, len, copy);
2861             /* If the first buffer pointer is bad, this is a fault.  But
2862              * subsequent bad buffers will result in a partial write; this
2863              * is realized by filling the vector with null pointers and
2864              * zero lengths. */
2865             if (!vec[i].iov_base) {
2866                 if (i == 0) {
2867                     err = EFAULT;
2868                     goto fail;
2869                 } else {
2870                     bad_address = true;
2871                 }
2872             }
2873             if (bad_address) {
2874                 len = 0;
2875             }
2876             if (len > max_len - total_len) {
2877                 len = max_len - total_len;
2878             }
2879         }
2880         vec[i].iov_len = len;
2881         total_len += len;
2882     }
2883 
2884     unlock_user(target_vec, target_addr, 0);
2885     return vec;
2886 
2887  fail:
2888     while (--i >= 0) {
2889         if (tswapal(target_vec[i].iov_len) > 0) {
2890             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2891         }
2892     }
2893     unlock_user(target_vec, target_addr, 0);
2894  fail2:
2895     g_free(vec);
2896     errno = err;
2897     return NULL;
2898 }
2899 
2900 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
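/* Undo lock_iovec(): unlock every guest buffer (copying data back to
 * the guest when 'copy' is set) and free the host iovec array.
 */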
2901                          abi_ulong count, int copy)
2902 {
2903     struct target_iovec *target_vec;
2904     int i;
2905 
2906     target_vec = lock_user(VERIFY_READ, target_addr,
2907                            count * sizeof(struct target_iovec), 1);
2908     if (target_vec) {
2909         for (i = 0; i < count; i++) {
2910             abi_ulong base = tswapal(target_vec[i].iov_base);
2911             abi_long len = tswapal(target_vec[i].iov_len);
2912             if (len < 0) {
2913                 break;
2914             }
2915             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2916         }
2917         unlock_user(target_vec, target_addr, 0);
2918     }
2919 
2920     g_free(vec);
2921 }
2922 
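/* Translate a guest socket type, including the TARGET_SOCK_CLOEXEC and
 * TARGET_SOCK_NONBLOCK flags, into the host value in place.  Returns
 * -TARGET_EINVAL if a requested flag cannot be expressed on this host.
 */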
2923 static inline int target_to_host_sock_type(int *type)
2924 {
2925     int host_type = 0;
2926     int target_type = *type;
2927 
2928     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2929     case TARGET_SOCK_DGRAM:
2930         host_type = SOCK_DGRAM;
2931         break;
2932     case TARGET_SOCK_STREAM:
2933         host_type = SOCK_STREAM;
2934         break;
2935     default:
2936         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2937         break;
2938     }
2939     if (target_type & TARGET_SOCK_CLOEXEC) {
2940 #if defined(SOCK_CLOEXEC)
2941         host_type |= SOCK_CLOEXEC;
2942 #else
2943         return -TARGET_EINVAL;
2944 #endif
2945     }
2946     if (target_type & TARGET_SOCK_NONBLOCK) {
2947 #if defined(SOCK_NONBLOCK)
2948         host_type |= SOCK_NONBLOCK;
2949 #elif !defined(O_NONBLOCK)
2950         return -TARGET_EINVAL;
2951 #endif
2952     }
2953     *type = host_type;
2954     return 0;
2955 }
2956 
2957 /* Try to emulate socket type flags after socket creation.  */
2958 static int sock_flags_fixup(int fd, int target_type)
2959 {
2960 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2961     if (target_type & TARGET_SOCK_NONBLOCK) {
2962         int flags = fcntl(fd, F_GETFL);
2963         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2964             close(fd);
2965             return -TARGET_EINVAL;
2966         }
2967     }
2968 #endif
2969     return fd;
2970 }
2971 
2972 /* do_socket() Must return target values and target errnos. */
2973 static abi_long do_socket(int domain, int type, int protocol)
2974 {
2975     int target_type = type;
2976     int ret;
2977 
2978     ret = target_to_host_sock_type(&type);
2979     if (ret) {
2980         return ret;
2981     }
2982 
2983     if (domain == PF_NETLINK && !(
2984 #ifdef CONFIG_RTNETLINK
2985          protocol == NETLINK_ROUTE ||
2986 #endif
2987          protocol == NETLINK_KOBJECT_UEVENT ||
2988          protocol == NETLINK_AUDIT)) {
2989         return -EPFNOSUPPORT;
2990     }
2991 
2992     if (domain == AF_PACKET ||
2993         (domain == AF_INET && type == SOCK_PACKET)) {
2994         protocol = tswap16(protocol);
2995     }
2996 
2997     ret = get_errno(socket(domain, type, protocol));
2998     if (ret >= 0) {
2999         ret = sock_flags_fixup(ret, target_type);
3000         if (type == SOCK_PACKET) {
3001             /* Handle an obsolete case: if the socket type is
3002              * SOCK_PACKET, bind by name.
3003              */
3004             fd_trans_register(ret, &target_packet_trans);
3005         } else if (domain == PF_NETLINK) {
3006             switch (protocol) {
3007 #ifdef CONFIG_RTNETLINK
3008             case NETLINK_ROUTE:
3009                 fd_trans_register(ret, &target_netlink_route_trans);
3010                 break;
3011 #endif
3012             case NETLINK_KOBJECT_UEVENT:
3013                 /* nothing to do: messages are strings */
3014                 break;
3015             case NETLINK_AUDIT:
3016                 fd_trans_register(ret, &target_netlink_audit_trans);
3017                 break;
3018             default:
3019                 g_assert_not_reached();
3020             }
3021         }
3022     }
3023     return ret;
3024 }
3025 
3026 /* do_bind() Must return target values and target errnos. */
3027 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3028                         socklen_t addrlen)
3029 {
3030     void *addr;
3031     abi_long ret;
3032 
3033     if ((int)addrlen < 0) {
3034         return -TARGET_EINVAL;
3035     }
3036 
3037     addr = alloca(addrlen+1);
3038 
3039     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3040     if (ret)
3041         return ret;
3042 
3043     return get_errno(bind(sockfd, addr, addrlen));
3044 }
3045 
3046 /* do_connect() Must return target values and target errnos. */
3047 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3048                            socklen_t addrlen)
3049 {
3050     void *addr;
3051     abi_long ret;
3052 
3053     if ((int)addrlen < 0) {
3054         return -TARGET_EINVAL;
3055     }
3056 
3057     addr = alloca(addrlen+1);
3058 
3059     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3060     if (ret)
3061         return ret;
3062 
3063     return get_errno(safe_connect(sockfd, addr, addrlen));
3064 }
3065 
3066 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3067 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3068                                       int flags, int send)
3069 {
3070     abi_long ret, len;
3071     struct msghdr msg;
3072     abi_ulong count;
3073     struct iovec *vec;
3074     abi_ulong target_vec;
3075 
3076     if (msgp->msg_name) {
3077         msg.msg_namelen = tswap32(msgp->msg_namelen);
3078         msg.msg_name = alloca(msg.msg_namelen+1);
3079         ret = target_to_host_sockaddr(fd, msg.msg_name,
3080                                       tswapal(msgp->msg_name),
3081                                       msg.msg_namelen);
3082         if (ret == -TARGET_EFAULT) {
3083             /* For connected sockets msg_name and msg_namelen must
3084              * be ignored, so returning EFAULT immediately is wrong.
3085              * Instead, pass a bad msg_name to the host kernel, and
3086              * let it decide whether to return EFAULT or not.
3087              */
3088             msg.msg_name = (void *)-1;
3089         } else if (ret) {
3090             goto out2;
3091         }
3092     } else {
3093         msg.msg_name = NULL;
3094         msg.msg_namelen = 0;
3095     }
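         /* Host control messages may be larger than the target's (different
          * CMSG alignment and field sizes), so reserve extra room for the
          * converted ancillary data.
          */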
3096     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3097     msg.msg_control = alloca(msg.msg_controllen);
3098     memset(msg.msg_control, 0, msg.msg_controllen);
3099 
3100     msg.msg_flags = tswap32(msgp->msg_flags);
3101 
3102     count = tswapal(msgp->msg_iovlen);
3103     target_vec = tswapal(msgp->msg_iov);
3104 
3105     if (count > IOV_MAX) {
3106         /* sendmsg/recvmsg return a different errno for this condition than
3107          * readv/writev, so we must catch it here before lock_iovec() does.
3108          */
3109         ret = -TARGET_EMSGSIZE;
3110         goto out2;
3111     }
3112 
3113     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3114                      target_vec, count, send);
3115     if (vec == NULL) {
3116         ret = -host_to_target_errno(errno);
3117         goto out2;
3118     }
3119     msg.msg_iovlen = count;
3120     msg.msg_iov = vec;
3121 
3122     if (send) {
3123         if (fd_trans_target_to_host_data(fd)) {
3124             void *host_msg;
3125 
3126             host_msg = g_malloc(msg.msg_iov->iov_len);
3127             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3128             ret = fd_trans_target_to_host_data(fd)(host_msg,
3129                                                    msg.msg_iov->iov_len);
3130             if (ret >= 0) {
3131                 msg.msg_iov->iov_base = host_msg;
3132                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3133             }
3134             g_free(host_msg);
3135         } else {
3136             ret = target_to_host_cmsg(&msg, msgp);
3137             if (ret == 0) {
3138                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3139             }
3140         }
3141     } else {
3142         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3143         if (!is_error(ret)) {
3144             len = ret;
3145             if (fd_trans_host_to_target_data(fd)) {
3146                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3147                                                MIN(msg.msg_iov->iov_len, len));
3148             } else {
3149                 ret = host_to_target_cmsg(msgp, &msg);
3150             }
3151             if (!is_error(ret)) {
3152                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3153                 msgp->msg_flags = tswap32(msg.msg_flags);
3154                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3155                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3156                                     msg.msg_name, msg.msg_namelen);
3157                     if (ret) {
3158                         goto out;
3159                     }
3160                 }
3161 
3162                 ret = len;
3163             }
3164         }
3165     }
3166 
3167 out:
3168     unlock_iovec(vec, target_vec, count, !send);
3169 out2:
3170     return ret;
3171 }
3172 
3173 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3174                                int flags, int send)
3175 {
3176     abi_long ret;
3177     struct target_msghdr *msgp;
3178 
3179     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3180                           msgp,
3181                           target_msg,
3182                           send ? 1 : 0)) {
3183         return -TARGET_EFAULT;
3184     }
3185     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3186     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3187     return ret;
3188 }
3189 
3190 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3191  * so it might not have this *mmsg-specific flag either.
3192  */
3193 #ifndef MSG_WAITFORONE
3194 #define MSG_WAITFORONE 0x10000
3195 #endif
3196 
3197 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3198                                 unsigned int vlen, unsigned int flags,
3199                                 int send)
3200 {
3201     struct target_mmsghdr *mmsgp;
3202     abi_long ret = 0;
3203     int i;
3204 
3205     if (vlen > UIO_MAXIOV) {
3206         vlen = UIO_MAXIOV;
3207     }
3208 
3209     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3210     if (!mmsgp) {
3211         return -TARGET_EFAULT;
3212     }
3213 
3214     for (i = 0; i < vlen; i++) {
3215         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3216         if (is_error(ret)) {
3217             break;
3218         }
3219         mmsgp[i].msg_len = tswap32(ret);
3220         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3221         if (flags & MSG_WAITFORONE) {
3222             flags |= MSG_DONTWAIT;
3223         }
3224     }
3225 
3226     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3227 
3228     /* Return number of datagrams sent if we sent any at all;
3229      * otherwise return the error.
3230      */
3231     if (i) {
3232         return i;
3233     }
3234     return ret;
3235 }
3236 
3237 /* do_accept4() must return target values and target errnos. */
3238 static abi_long do_accept4(int fd, abi_ulong target_addr,
3239                            abi_ulong target_addrlen_addr, int flags)
3240 {
3241     socklen_t addrlen, ret_addrlen;
3242     void *addr;
3243     abi_long ret;
3244     int host_flags;
3245 
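         /* accept4() flags are SOCK_NONBLOCK/SOCK_CLOEXEC, which share their
          * values with O_NONBLOCK/O_CLOEXEC, so the fcntl flag table can be
          * reused to translate them to host values.
          */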
3246     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3247 
3248     if (target_addr == 0) {
3249         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3250     }
3251 
3252     /* Linux returns EINVAL if the addrlen pointer is invalid */
3253     if (get_user_u32(addrlen, target_addrlen_addr))
3254         return -TARGET_EINVAL;
3255 
3256     if ((int)addrlen < 0) {
3257         return -TARGET_EINVAL;
3258     }
3259 
3260     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3261         return -TARGET_EINVAL;
3262 
3263     addr = alloca(addrlen);
3264 
3265     ret_addrlen = addrlen;
3266     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3267     if (!is_error(ret)) {
3268         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3269         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3270             ret = -TARGET_EFAULT;
3271         }
3272     }
3273     return ret;
3274 }
3275 
3276 /* do_getpeername() must return target values and target errnos. */
3277 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3278                                abi_ulong target_addrlen_addr)
3279 {
3280     socklen_t addrlen, ret_addrlen;
3281     void *addr;
3282     abi_long ret;
3283 
3284     if (get_user_u32(addrlen, target_addrlen_addr))
3285         return -TARGET_EFAULT;
3286 
3287     if ((int)addrlen < 0) {
3288         return -TARGET_EINVAL;
3289     }
3290 
3291     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3292         return -TARGET_EFAULT;
3293 
3294     addr = alloca(addrlen);
3295 
3296     ret_addrlen = addrlen;
3297     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3298     if (!is_error(ret)) {
3299         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3300         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3301             ret = -TARGET_EFAULT;
3302         }
3303     }
3304     return ret;
3305 }
3306 
3307 /* do_getsockname() must return target values and target errnos. */
3308 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3309                                abi_ulong target_addrlen_addr)
3310 {
3311     socklen_t addrlen, ret_addrlen;
3312     void *addr;
3313     abi_long ret;
3314 
3315     if (get_user_u32(addrlen, target_addrlen_addr))
3316         return -TARGET_EFAULT;
3317 
3318     if ((int)addrlen < 0) {
3319         return -TARGET_EINVAL;
3320     }
3321 
3322     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3323         return -TARGET_EFAULT;
3324 
3325     addr = alloca(addrlen);
3326 
3327     ret_addrlen = addrlen;
3328     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3329     if (!is_error(ret)) {
3330         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3331         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3332             ret = -TARGET_EFAULT;
3333         }
3334     }
3335     return ret;
3336 }
3337 
3338 /* do_socketpair() must return target values and target errnos. */
3339 static abi_long do_socketpair(int domain, int type, int protocol,
3340                               abi_ulong target_tab_addr)
3341 {
3342     int tab[2];
3343     abi_long ret;
3344 
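         /* Translate the target's SOCK_* type value (and any type flags)
          * to the host encoding before creating the pair.
          */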
3345     target_to_host_sock_type(&type);
3346 
3347     ret = get_errno(socketpair(domain, type, protocol, tab));
3348     if (!is_error(ret)) {
3349         if (put_user_s32(tab[0], target_tab_addr)
3350             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3351             ret = -TARGET_EFAULT;
3352     }
3353     return ret;
3354 }
3355 
3356 /* do_sendto() must return target values and target errnos. */
3357 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3358                           abi_ulong target_addr, socklen_t addrlen)
3359 {
3360     void *addr;
3361     void *host_msg;
3362     void *copy_msg = NULL;
3363     abi_long ret;
3364 
3365     if ((int)addrlen < 0) {
3366         return -TARGET_EINVAL;
3367     }
3368 
3369     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3370     if (!host_msg)
3371         return -TARGET_EFAULT;
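         /* If this fd has a data translator registered (e.g. a netlink
          * socket), convert the outgoing payload in a private host buffer
          * before sending it.
          */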
3372     if (fd_trans_target_to_host_data(fd)) {
3373         copy_msg = host_msg;
3374         host_msg = g_malloc(len);
3375         memcpy(host_msg, copy_msg, len);
3376         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3377         if (ret < 0) {
3378             goto fail;
3379         }
3380     }
3381     if (target_addr) {
3382         addr = alloca(addrlen+1);
3383         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3384         if (ret) {
3385             goto fail;
3386         }
3387         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3388     } else {
3389         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3390     }
3391 fail:
3392     if (copy_msg) {
3393         g_free(host_msg);
3394         host_msg = copy_msg;
3395     }
3396     unlock_user(host_msg, msg, 0);
3397     return ret;
3398 }
3399 
3400 /* do_recvfrom() must return target values and target errnos. */
3401 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3402                             abi_ulong target_addr,
3403                             abi_ulong target_addrlen)
3404 {
3405     socklen_t addrlen, ret_addrlen;
3406     void *addr;
3407     void *host_msg;
3408     abi_long ret;
3409 
3410     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3411     if (!host_msg)
3412         return -TARGET_EFAULT;
3413     if (target_addr) {
3414         if (get_user_u32(addrlen, target_addrlen)) {
3415             ret = -TARGET_EFAULT;
3416             goto fail;
3417         }
3418         if ((int)addrlen < 0) {
3419             ret = -TARGET_EINVAL;
3420             goto fail;
3421         }
3422         addr = alloca(addrlen);
3423         ret_addrlen = addrlen;
3424         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3425                                       addr, &ret_addrlen));
3426     } else {
3427         addr = NULL; /* To keep compiler quiet.  */
3428         addrlen = 0; /* To keep compiler quiet.  */
3429         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3430     }
3431     if (!is_error(ret)) {
3432         if (fd_trans_host_to_target_data(fd)) {
3433             abi_long trans;
3434             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3435             if (is_error(trans)) {
3436                 ret = trans;
3437                 goto fail;
3438             }
3439         }
3440         if (target_addr) {
3441             host_to_target_sockaddr(target_addr, addr,
3442                                     MIN(addrlen, ret_addrlen));
3443             if (put_user_u32(ret_addrlen, target_addrlen)) {
3444                 ret = -TARGET_EFAULT;
3445                 goto fail;
3446             }
3447         }
3448         unlock_user(host_msg, msg, len);
3449     } else {
3450 fail:
3451         unlock_user(host_msg, msg, 0);
3452     }
3453     return ret;
3454 }
3455 
3456 #ifdef TARGET_NR_socketcall
3457 /* do_socketcall() must return target values and target errnos. */
3458 static abi_long do_socketcall(int num, abi_ulong vptr)
3459 {
3460     static const unsigned nargs[] = { /* number of arguments per operation */
3461         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3462         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3463         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3464         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3465         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3466         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3467         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3468         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3469         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3470         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3471         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3472         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3473         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3474         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3475         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3476         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3477         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3478         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3479         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3480         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3481     };
3482     abi_long a[6]; /* max 6 args */
3483     unsigned i;
3484 
3485     /* check the range of the first argument num */
3486     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3487     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3488         return -TARGET_EINVAL;
3489     }
3490     /* ensure we have space for args */
3491     if (nargs[num] > ARRAY_SIZE(a)) {
3492         return -TARGET_EINVAL;
3493     }
3494     /* collect the arguments in a[] according to nargs[] */
3495     for (i = 0; i < nargs[num]; ++i) {
3496         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3497             return -TARGET_EFAULT;
3498         }
3499     }
3500     /* now that we have the args, invoke the appropriate underlying function */
3501     switch (num) {
3502     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3503         return do_socket(a[0], a[1], a[2]);
3504     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3505         return do_bind(a[0], a[1], a[2]);
3506     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3507         return do_connect(a[0], a[1], a[2]);
3508     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3509         return get_errno(listen(a[0], a[1]));
3510     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3511         return do_accept4(a[0], a[1], a[2], 0);
3512     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3513         return do_getsockname(a[0], a[1], a[2]);
3514     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3515         return do_getpeername(a[0], a[1], a[2]);
3516     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3517         return do_socketpair(a[0], a[1], a[2], a[3]);
3518     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3519         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3520     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3521         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3522     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3523         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3524     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3525         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3526     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3527         return get_errno(shutdown(a[0], a[1]));
3528     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3529         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3530     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3531         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3532     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3533         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3534     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3535         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3536     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3537         return do_accept4(a[0], a[1], a[2], a[3]);
3538     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3539         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3540     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3541         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3542     default:
3543         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3544         return -TARGET_EINVAL;
3545     }
3546 }
3547 #endif
3548 
3549 #define N_SHM_REGIONS	32
3550 
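     /* Table of active shmat() attachments, used by do_shmdt() to find the
      * size of the region being detached so its page flags can be cleared.
      */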
3551 static struct shm_region {
3552     abi_ulong start;
3553     abi_ulong size;
3554     bool in_use;
3555 } shm_regions[N_SHM_REGIONS];
3556 
3557 #ifndef TARGET_SEMID64_DS
3558 /* asm-generic version of this struct */
3559 struct target_semid64_ds
3560 {
3561   struct target_ipc_perm sem_perm;
3562   abi_ulong sem_otime;
3563 #if TARGET_ABI_BITS == 32
3564   abi_ulong __unused1;
3565 #endif
3566   abi_ulong sem_ctime;
3567 #if TARGET_ABI_BITS == 32
3568   abi_ulong __unused2;
3569 #endif
3570   abi_ulong sem_nsems;
3571   abi_ulong __unused3;
3572   abi_ulong __unused4;
3573 };
3574 #endif
3575 
3576 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3577                                                abi_ulong target_addr)
3578 {
3579     struct target_ipc_perm *target_ip;
3580     struct target_semid64_ds *target_sd;
3581 
3582     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3583         return -TARGET_EFAULT;
3584     target_ip = &(target_sd->sem_perm);
3585     host_ip->__key = tswap32(target_ip->__key);
3586     host_ip->uid = tswap32(target_ip->uid);
3587     host_ip->gid = tswap32(target_ip->gid);
3588     host_ip->cuid = tswap32(target_ip->cuid);
3589     host_ip->cgid = tswap32(target_ip->cgid);
3590 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3591     host_ip->mode = tswap32(target_ip->mode);
3592 #else
3593     host_ip->mode = tswap16(target_ip->mode);
3594 #endif
3595 #if defined(TARGET_PPC)
3596     host_ip->__seq = tswap32(target_ip->__seq);
3597 #else
3598     host_ip->__seq = tswap16(target_ip->__seq);
3599 #endif
3600     unlock_user_struct(target_sd, target_addr, 0);
3601     return 0;
3602 }
3603 
3604 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3605                                                struct ipc_perm *host_ip)
3606 {
3607     struct target_ipc_perm *target_ip;
3608     struct target_semid64_ds *target_sd;
3609 
3610     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3611         return -TARGET_EFAULT;
3612     target_ip = &(target_sd->sem_perm);
3613     target_ip->__key = tswap32(host_ip->__key);
3614     target_ip->uid = tswap32(host_ip->uid);
3615     target_ip->gid = tswap32(host_ip->gid);
3616     target_ip->cuid = tswap32(host_ip->cuid);
3617     target_ip->cgid = tswap32(host_ip->cgid);
3618 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3619     target_ip->mode = tswap32(host_ip->mode);
3620 #else
3621     target_ip->mode = tswap16(host_ip->mode);
3622 #endif
3623 #if defined(TARGET_PPC)
3624     target_ip->__seq = tswap32(host_ip->__seq);
3625 #else
3626     target_ip->__seq = tswap16(host_ip->__seq);
3627 #endif
3628     unlock_user_struct(target_sd, target_addr, 1);
3629     return 0;
3630 }
3631 
3632 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3633                                                abi_ulong target_addr)
3634 {
3635     struct target_semid64_ds *target_sd;
3636 
3637     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3638         return -TARGET_EFAULT;
3639     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3640         return -TARGET_EFAULT;
3641     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3642     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3643     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3644     unlock_user_struct(target_sd, target_addr, 0);
3645     return 0;
3646 }
3647 
3648 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3649                                                struct semid_ds *host_sd)
3650 {
3651     struct target_semid64_ds *target_sd;
3652 
3653     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3654         return -TARGET_EFAULT;
3655     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3656         return -TARGET_EFAULT;
3657     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3658     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3659     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3660     unlock_user_struct(target_sd, target_addr, 1);
3661     return 0;
3662 }
3663 
3664 struct target_seminfo {
3665     int semmap;
3666     int semmni;
3667     int semmns;
3668     int semmnu;
3669     int semmsl;
3670     int semopm;
3671     int semume;
3672     int semusz;
3673     int semvmx;
3674     int semaem;
3675 };
3676 
3677 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3678                                               struct seminfo *host_seminfo)
3679 {
3680     struct target_seminfo *target_seminfo;
3681     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3682         return -TARGET_EFAULT;
3683     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3684     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3685     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3686     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3687     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3688     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3689     __put_user(host_seminfo->semume, &target_seminfo->semume);
3690     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3691     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3692     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3693     unlock_user_struct(target_seminfo, target_addr, 1);
3694     return 0;
3695 }
3696 
3697 union semun {
3698 	int val;
3699 	struct semid_ds *buf;
3700 	unsigned short *array;
3701 	struct seminfo *__buf;
3702 };
3703 
3704 union target_semun {
3705 	int val;
3706 	abi_ulong buf;
3707 	abi_ulong array;
3708 	abi_ulong __buf;
3709 };
3710 
3711 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3712                                                abi_ulong target_addr)
3713 {
3714     int nsems;
3715     unsigned short *array;
3716     union semun semun;
3717     struct semid_ds semid_ds;
3718     int i, ret;
3719 
3720     semun.buf = &semid_ds;
3721 
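         /* Query the set with IPC_STAT just to learn how many semaphores it
          * holds, so the right amount of guest memory can be locked below.
          */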
3722     ret = semctl(semid, 0, IPC_STAT, semun);
3723     if (ret == -1)
3724         return get_errno(ret);
3725 
3726     nsems = semid_ds.sem_nsems;
3727 
3728     *host_array = g_try_new(unsigned short, nsems);
3729     if (!*host_array) {
3730         return -TARGET_ENOMEM;
3731     }
3732     array = lock_user(VERIFY_READ, target_addr,
3733                       nsems*sizeof(unsigned short), 1);
3734     if (!array) {
3735         g_free(*host_array);
3736         return -TARGET_EFAULT;
3737     }
3738 
3739     for(i=0; i<nsems; i++) {
3740         __get_user((*host_array)[i], &array[i]);
3741     }
3742     unlock_user(array, target_addr, 0);
3743 
3744     return 0;
3745 }
3746 
3747 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3748                                                unsigned short **host_array)
3749 {
3750     int nsems;
3751     unsigned short *array;
3752     union semun semun;
3753     struct semid_ds semid_ds;
3754     int i, ret;
3755 
3756     semun.buf = &semid_ds;
3757 
3758     ret = semctl(semid, 0, IPC_STAT, semun);
3759     if (ret == -1)
3760         return get_errno(ret);
3761 
3762     nsems = semid_ds.sem_nsems;
3763 
3764     array = lock_user(VERIFY_WRITE, target_addr,
3765                       nsems*sizeof(unsigned short), 0);
3766     if (!array)
3767         return -TARGET_EFAULT;
3768 
3769     for(i=0; i<nsems; i++) {
3770         __put_user((*host_array)[i], &array[i]);
3771     }
3772     g_free(*host_array);
3773     unlock_user(array, target_addr, 1);
3774 
3775     return 0;
3776 }
3777 
3778 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3779                                  abi_ulong target_arg)
3780 {
3781     union target_semun target_su = { .buf = target_arg };
3782     union semun arg;
3783     struct semid_ds dsarg;
3784     unsigned short *array = NULL;
3785     struct seminfo seminfo;
3786     abi_long ret = -TARGET_EINVAL;
3787     abi_long err;
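         /* The target may OR flag bits such as IPC_64 into the command word;
          * only the low byte selects the semctl operation.
          */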
3788     cmd &= 0xff;
3789 
3790     switch( cmd ) {
3791 	case GETVAL:
3792 	case SETVAL:
3793             /* In 64-bit cross-endian situations, we will erroneously pick up
3794              * the wrong half of the union for the "val" element.  To rectify
3795              * this, the entire 8-byte structure is byteswapped, followed by
3796              * a swap of the 4-byte val field. In other cases, the data is
3797              * already in proper host byte order. */
3798 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3799 		target_su.buf = tswapal(target_su.buf);
3800 		arg.val = tswap32(target_su.val);
3801 	    } else {
3802 		arg.val = target_su.val;
3803 	    }
3804             ret = get_errno(semctl(semid, semnum, cmd, arg));
3805             break;
3806 	case GETALL:
3807 	case SETALL:
3808             err = target_to_host_semarray(semid, &array, target_su.array);
3809             if (err)
3810                 return err;
3811             arg.array = array;
3812             ret = get_errno(semctl(semid, semnum, cmd, arg));
3813             err = host_to_target_semarray(semid, target_su.array, &array);
3814             if (err)
3815                 return err;
3816             break;
3817 	case IPC_STAT:
3818 	case IPC_SET:
3819 	case SEM_STAT:
3820             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3821             if (err)
3822                 return err;
3823             arg.buf = &dsarg;
3824             ret = get_errno(semctl(semid, semnum, cmd, arg));
3825             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3826             if (err)
3827                 return err;
3828             break;
3829 	case IPC_INFO:
3830 	case SEM_INFO:
3831             arg.__buf = &seminfo;
3832             ret = get_errno(semctl(semid, semnum, cmd, arg));
3833             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3834             if (err)
3835                 return err;
3836             break;
3837 	case IPC_RMID:
3838 	case GETPID:
3839 	case GETNCNT:
3840 	case GETZCNT:
3841             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3842             break;
3843     }
3844 
3845     return ret;
3846 }
3847 
3848 struct target_sembuf {
3849     unsigned short sem_num;
3850     short sem_op;
3851     short sem_flg;
3852 };
3853 
3854 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3855                                              abi_ulong target_addr,
3856                                              unsigned nsops)
3857 {
3858     struct target_sembuf *target_sembuf;
3859     int i;
3860 
3861     target_sembuf = lock_user(VERIFY_READ, target_addr,
3862                               nsops*sizeof(struct target_sembuf), 1);
3863     if (!target_sembuf)
3864         return -TARGET_EFAULT;
3865 
3866     for(i=0; i<nsops; i++) {
3867         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3868         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3869         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3870     }
3871 
3872     unlock_user(target_sembuf, target_addr, 0);
3873 
3874     return 0;
3875 }
3876 
3877 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3878 {
3879     struct sembuf sops[nsops];
3880     abi_long ret;
3881 
3882     if (target_to_host_sembuf(sops, ptr, nsops))
3883         return -TARGET_EFAULT;
3884 
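         /* Prefer the direct semtimedop syscall; hosts that only provide the
          * multiplexed ipc syscall fall back to that path below.
          */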
3885     ret = -TARGET_ENOSYS;
3886 #ifdef __NR_semtimedop
3887     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3888 #endif
3889 #ifdef __NR_ipc
3890     if (ret == -TARGET_ENOSYS) {
3891         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3892     }
3893 #endif
3894     return ret;
3895 }
3896 
3897 struct target_msqid_ds
3898 {
3899     struct target_ipc_perm msg_perm;
3900     abi_ulong msg_stime;
3901 #if TARGET_ABI_BITS == 32
3902     abi_ulong __unused1;
3903 #endif
3904     abi_ulong msg_rtime;
3905 #if TARGET_ABI_BITS == 32
3906     abi_ulong __unused2;
3907 #endif
3908     abi_ulong msg_ctime;
3909 #if TARGET_ABI_BITS == 32
3910     abi_ulong __unused3;
3911 #endif
3912     abi_ulong __msg_cbytes;
3913     abi_ulong msg_qnum;
3914     abi_ulong msg_qbytes;
3915     abi_ulong msg_lspid;
3916     abi_ulong msg_lrpid;
3917     abi_ulong __unused4;
3918     abi_ulong __unused5;
3919 };
3920 
3921 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3922                                                abi_ulong target_addr)
3923 {
3924     struct target_msqid_ds *target_md;
3925 
3926     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3927         return -TARGET_EFAULT;
3928     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3929         return -TARGET_EFAULT;
3930     host_md->msg_stime = tswapal(target_md->msg_stime);
3931     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3932     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3933     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3934     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3935     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3936     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3937     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3938     unlock_user_struct(target_md, target_addr, 0);
3939     return 0;
3940 }
3941 
3942 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3943                                                struct msqid_ds *host_md)
3944 {
3945     struct target_msqid_ds *target_md;
3946 
3947     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3948         return -TARGET_EFAULT;
3949     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3950         return -TARGET_EFAULT;
3951     target_md->msg_stime = tswapal(host_md->msg_stime);
3952     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3953     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3954     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3955     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3956     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3957     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3958     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3959     unlock_user_struct(target_md, target_addr, 1);
3960     return 0;
3961 }
3962 
3963 struct target_msginfo {
3964     int msgpool;
3965     int msgmap;
3966     int msgmax;
3967     int msgmnb;
3968     int msgmni;
3969     int msgssz;
3970     int msgtql;
3971     unsigned short int msgseg;
3972 };
3973 
3974 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3975                                               struct msginfo *host_msginfo)
3976 {
3977     struct target_msginfo *target_msginfo;
3978     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3979         return -TARGET_EFAULT;
3980     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3981     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3982     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3983     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3984     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3985     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3986     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3987     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3988     unlock_user_struct(target_msginfo, target_addr, 1);
3989     return 0;
3990 }
3991 
3992 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3993 {
3994     struct msqid_ds dsarg;
3995     struct msginfo msginfo;
3996     abi_long ret = -TARGET_EINVAL;
3997 
3998     cmd &= 0xff;
3999 
4000     switch (cmd) {
4001     case IPC_STAT:
4002     case IPC_SET:
4003     case MSG_STAT:
4004         if (target_to_host_msqid_ds(&dsarg,ptr))
4005             return -TARGET_EFAULT;
4006         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4007         if (host_to_target_msqid_ds(ptr,&dsarg))
4008             return -TARGET_EFAULT;
4009         break;
4010     case IPC_RMID:
4011         ret = get_errno(msgctl(msgid, cmd, NULL));
4012         break;
4013     case IPC_INFO:
4014     case MSG_INFO:
4015         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4016         if (host_to_target_msginfo(ptr, &msginfo))
4017             return -TARGET_EFAULT;
4018         break;
4019     }
4020 
4021     return ret;
4022 }
4023 
4024 struct target_msgbuf {
4025     abi_long mtype;
4026     char	mtext[1];
4027 };
4028 
4029 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4030                                  ssize_t msgsz, int msgflg)
4031 {
4032     struct target_msgbuf *target_mb;
4033     struct msgbuf *host_mb;
4034     abi_long ret = 0;
4035 
4036     if (msgsz < 0) {
4037         return -TARGET_EINVAL;
4038     }
4039 
4040     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4041         return -TARGET_EFAULT;
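         /* The host msgbuf starts with a native 'long' mtype field followed
          * by the message text, hence the extra sizeof(long) of space.
          */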
4042     host_mb = g_try_malloc(msgsz + sizeof(long));
4043     if (!host_mb) {
4044         unlock_user_struct(target_mb, msgp, 0);
4045         return -TARGET_ENOMEM;
4046     }
4047     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4048     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4049     ret = -TARGET_ENOSYS;
4050 #ifdef __NR_msgsnd
4051     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4052 #endif
4053 #ifdef __NR_ipc
4054     if (ret == -TARGET_ENOSYS) {
4055         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4056                                  host_mb, 0));
4057     }
4058 #endif
4059     g_free(host_mb);
4060     unlock_user_struct(target_mb, msgp, 0);
4061 
4062     return ret;
4063 }
4064 
4065 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4066                                  ssize_t msgsz, abi_long msgtyp,
4067                                  int msgflg)
4068 {
4069     struct target_msgbuf *target_mb;
4070     char *target_mtext;
4071     struct msgbuf *host_mb;
4072     abi_long ret = 0;
4073 
4074     if (msgsz < 0) {
4075         return -TARGET_EINVAL;
4076     }
4077 
4078     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4079         return -TARGET_EFAULT;
4080 
4081     host_mb = g_try_malloc(msgsz + sizeof(long));
4082     if (!host_mb) {
4083         ret = -TARGET_ENOMEM;
4084         goto end;
4085     }
4086     ret = -TARGET_ENOSYS;
4087 #ifdef __NR_msgrcv
4088     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4089 #endif
4090 #ifdef __NR_ipc
4091     if (ret == -TARGET_ENOSYS) {
4092         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4093                         msgflg, host_mb, msgtyp));
4094     }
4095 #endif
4096 
4097     if (ret > 0) {
4098         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4099         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4100         if (!target_mtext) {
4101             ret = -TARGET_EFAULT;
4102             goto end;
4103         }
4104         memcpy(target_mb->mtext, host_mb->mtext, ret);
4105         unlock_user(target_mtext, target_mtext_addr, ret);
4106     }
4107 
4108     target_mb->mtype = tswapal(host_mb->mtype);
4109 
4110 end:
4111     if (target_mb)
4112         unlock_user_struct(target_mb, msgp, 1);
4113     g_free(host_mb);
4114     return ret;
4115 }
4116 
4117 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4118                                                abi_ulong target_addr)
4119 {
4120     struct target_shmid_ds *target_sd;
4121 
4122     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4123         return -TARGET_EFAULT;
4124     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4125         return -TARGET_EFAULT;
4126     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4127     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4128     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4129     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4130     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4131     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4132     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4133     unlock_user_struct(target_sd, target_addr, 0);
4134     return 0;
4135 }
4136 
4137 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4138                                                struct shmid_ds *host_sd)
4139 {
4140     struct target_shmid_ds *target_sd;
4141 
4142     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4143         return -TARGET_EFAULT;
4144     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4145         return -TARGET_EFAULT;
4146     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4147     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4148     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4149     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4150     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4151     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4152     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4153     unlock_user_struct(target_sd, target_addr, 1);
4154     return 0;
4155 }
4156 
4157 struct  target_shminfo {
4158     abi_ulong shmmax;
4159     abi_ulong shmmin;
4160     abi_ulong shmmni;
4161     abi_ulong shmseg;
4162     abi_ulong shmall;
4163 };
4164 
4165 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4166                                               struct shminfo *host_shminfo)
4167 {
4168     struct target_shminfo *target_shminfo;
4169     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4170         return -TARGET_EFAULT;
4171     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4172     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4173     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4174     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4175     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4176     unlock_user_struct(target_shminfo, target_addr, 1);
4177     return 0;
4178 }
4179 
4180 struct target_shm_info {
4181     int used_ids;
4182     abi_ulong shm_tot;
4183     abi_ulong shm_rss;
4184     abi_ulong shm_swp;
4185     abi_ulong swap_attempts;
4186     abi_ulong swap_successes;
4187 };
4188 
4189 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4190                                                struct shm_info *host_shm_info)
4191 {
4192     struct target_shm_info *target_shm_info;
4193     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4194         return -TARGET_EFAULT;
4195     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4196     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4197     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4198     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4199     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4200     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4201     unlock_user_struct(target_shm_info, target_addr, 1);
4202     return 0;
4203 }
4204 
4205 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4206 {
4207     struct shmid_ds dsarg;
4208     struct shminfo shminfo;
4209     struct shm_info shm_info;
4210     abi_long ret = -TARGET_EINVAL;
4211 
4212     cmd &= 0xff;
4213 
4214     switch(cmd) {
4215     case IPC_STAT:
4216     case IPC_SET:
4217     case SHM_STAT:
4218         if (target_to_host_shmid_ds(&dsarg, buf))
4219             return -TARGET_EFAULT;
4220         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4221         if (host_to_target_shmid_ds(buf, &dsarg))
4222             return -TARGET_EFAULT;
4223         break;
4224     case IPC_INFO:
4225         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4226         if (host_to_target_shminfo(buf, &shminfo))
4227             return -TARGET_EFAULT;
4228         break;
4229     case SHM_INFO:
4230         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4231         if (host_to_target_shm_info(buf, &shm_info))
4232             return -TARGET_EFAULT;
4233         break;
4234     case IPC_RMID:
4235     case SHM_LOCK:
4236     case SHM_UNLOCK:
4237         ret = get_errno(shmctl(shmid, cmd, NULL));
4238         break;
4239     }
4240 
4241     return ret;
4242 }
4243 
4244 #ifndef TARGET_FORCE_SHMLBA
4245 /* For most architectures, SHMLBA is the same as the page size;
4246  * some architectures have larger values, in which case they should
4247  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4248  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4249  * and defining its own value for SHMLBA.
4250  *
4251  * The kernel also permits SHMLBA to be set by the architecture to a
4252  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4253  * this means that addresses are rounded to the large size if
4254  * SHM_RND is set but addresses not aligned to that size are not rejected
4255  * as long as they are at least page-aligned. Since the only architecture
4256  * which uses this is ia64, this code doesn't provide for that oddity.
4257  */
4258 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4259 {
4260     return TARGET_PAGE_SIZE;
4261 }
4262 #endif
4263 
4264 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4265                                  int shmid, abi_ulong shmaddr, int shmflg)
4266 {
4267     abi_long raddr;
4268     void *host_raddr;
4269     struct shmid_ds shm_info;
4270     int i,ret;
4271     abi_ulong shmlba;
4272 
4273     /* find out the length of the shared memory segment */
4274     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4275     if (is_error(ret)) {
4276         /* can't get length, bail out */
4277         return ret;
4278     }
4279 
4280     shmlba = target_shmlba(cpu_env);
4281 
4282     if (shmaddr & (shmlba - 1)) {
4283         if (shmflg & SHM_RND) {
4284             shmaddr &= ~(shmlba - 1);
4285         } else {
4286             return -TARGET_EINVAL;
4287         }
4288     }
4289     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4290         return -TARGET_EINVAL;
4291     }
4292 
4293     mmap_lock();
4294 
4295     if (shmaddr)
4296         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4297     else {
4298         abi_ulong mmap_start;
4299 
4300         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4301         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4302 
4303         if (mmap_start == -1) {
4304             errno = ENOMEM;
4305             host_raddr = (void *)-1;
4306         } else
4307             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4308     }
4309 
4310     if (host_raddr == (void *)-1) {
4311         mmap_unlock();
4312         return get_errno((long)host_raddr);
4313     }
4314     raddr=h2g((unsigned long)host_raddr);
4315 
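         /* Mark the attached range as valid guest memory, writable unless
          * SHM_RDONLY was requested.
          */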
4316     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4317                    PAGE_VALID | PAGE_READ |
4318                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4319 
4320     for (i = 0; i < N_SHM_REGIONS; i++) {
4321         if (!shm_regions[i].in_use) {
4322             shm_regions[i].in_use = true;
4323             shm_regions[i].start = raddr;
4324             shm_regions[i].size = shm_info.shm_segsz;
4325             break;
4326         }
4327     }
4328 
4329     mmap_unlock();
4330     return raddr;
4331 
4332 }
4333 
4334 static inline abi_long do_shmdt(abi_ulong shmaddr)
4335 {
4336     int i;
4337     abi_long rv;
4338 
4339     mmap_lock();
4340 
4341     for (i = 0; i < N_SHM_REGIONS; ++i) {
4342         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4343             shm_regions[i].in_use = false;
4344             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4345             break;
4346         }
4347     }
4348     rv = get_errno(shmdt(g2h(shmaddr)));
4349 
4350     mmap_unlock();
4351 
4352     return rv;
4353 }
4354 
4355 #ifdef TARGET_NR_ipc
4356 /* ??? This only works with linear mappings.  */
4357 /* do_ipc() must return target values and target errnos. */
4358 static abi_long do_ipc(CPUArchState *cpu_env,
4359                        unsigned int call, abi_long first,
4360                        abi_long second, abi_long third,
4361                        abi_long ptr, abi_long fifth)
4362 {
4363     int version;
4364     abi_long ret = 0;
4365 
4366     version = call >> 16;
4367     call &= 0xffff;
4368 
4369     switch (call) {
4370     case IPCOP_semop:
4371         ret = do_semop(first, ptr, second);
4372         break;
4373 
4374     case IPCOP_semget:
4375         ret = get_errno(semget(first, second, third));
4376         break;
4377 
4378     case IPCOP_semctl: {
4379         /* The semun argument to semctl is passed by value, so dereference the
4380          * ptr argument. */
4381         abi_ulong atptr;
4382         get_user_ual(atptr, ptr);
4383         ret = do_semctl(first, second, third, atptr);
4384         break;
4385     }
4386 
4387     case IPCOP_msgget:
4388         ret = get_errno(msgget(first, second));
4389         break;
4390 
4391     case IPCOP_msgsnd:
4392         ret = do_msgsnd(first, ptr, second, third);
4393         break;
4394 
4395     case IPCOP_msgctl:
4396         ret = do_msgctl(first, second, ptr);
4397         break;
4398 
4399     case IPCOP_msgrcv:
4400         switch (version) {
4401         case 0:
4402             {
4403                 struct target_ipc_kludge {
4404                     abi_long msgp;
4405                     abi_long msgtyp;
4406                 } *tmp;
4407 
4408                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4409                     ret = -TARGET_EFAULT;
4410                     break;
4411                 }
4412 
4413                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4414 
4415                 unlock_user_struct(tmp, ptr, 0);
4416                 break;
4417             }
4418         default:
4419             ret = do_msgrcv(first, ptr, second, fifth, third);
4420         }
4421         break;
4422 
4423     case IPCOP_shmat:
4424         switch (version) {
4425         default:
4426         {
4427             abi_ulong raddr;
4428             raddr = do_shmat(cpu_env, first, ptr, second);
4429             if (is_error(raddr))
4430                 return get_errno(raddr);
4431             if (put_user_ual(raddr, third))
4432                 return -TARGET_EFAULT;
4433             break;
4434         }
4435         case 1:
4436             ret = -TARGET_EINVAL;
4437             break;
4438         }
4439 	break;
4440     case IPCOP_shmdt:
4441         ret = do_shmdt(ptr);
4442 	break;
4443 
4444     case IPCOP_shmget:
4445 	/* IPC_* flag values are the same on all linux platforms */
4446 	ret = get_errno(shmget(first, second, third));
4447 	break;
4448 
4449 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4450     case IPCOP_shmctl:
4451         ret = do_shmctl(first, second, ptr);
4452         break;
4453     default:
4454         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4455                       call, version);
4456 	ret = -TARGET_ENOSYS;
4457 	break;
4458     }
4459     return ret;
4460 }
4461 #endif
4462 
4463 /* kernel structure types definitions */
4464 
4465 #define STRUCT(name, ...) STRUCT_ ## name,
4466 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4467 enum {
4468 #include "syscall_types.h"
4469 STRUCT_MAX
4470 };
4471 #undef STRUCT
4472 #undef STRUCT_SPECIAL
4473 
4474 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4475 #define STRUCT_SPECIAL(name)
4476 #include "syscall_types.h"
4477 #undef STRUCT
4478 #undef STRUCT_SPECIAL
4479 
4480 typedef struct IOCTLEntry IOCTLEntry;
4481 
4482 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4483                              int fd, int cmd, abi_long arg);
4484 
4485 struct IOCTLEntry {
4486     int target_cmd;
4487     unsigned int host_cmd;
4488     const char *name;
4489     int access;
4490     do_ioctl_fn *do_ioctl;
4491     const argtype arg_type[5];
4492 };
4493 
4494 #define IOC_R 0x0001
4495 #define IOC_W 0x0002
4496 #define IOC_RW (IOC_R | IOC_W)
4497 
4498 #define MAX_STRUCT_SIZE 4096
4499 
4500 #ifdef CONFIG_FIEMAP
4501 /* So fiemap access checks don't overflow on 32 bit systems.
4502  * This is very slightly smaller than the limit imposed by
4503  * the underlying kernel.
4504  */
4505 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4506                             / sizeof(struct fiemap_extent))
4507 
4508 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4509                                        int fd, int cmd, abi_long arg)
4510 {
4511     /* The parameter for this ioctl is a struct fiemap followed
4512      * by an array of struct fiemap_extent whose size is set
4513      * in fiemap->fm_extent_count. The array is filled in by the
4514      * ioctl.
4515      */
4516     int target_size_in, target_size_out;
4517     struct fiemap *fm;
4518     const argtype *arg_type = ie->arg_type;
4519     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4520     void *argptr, *p;
4521     abi_long ret;
4522     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4523     uint32_t outbufsz;
4524     int free_fm = 0;
4525 
4526     assert(arg_type[0] == TYPE_PTR);
4527     assert(ie->access == IOC_RW);
4528     arg_type++;
4529     target_size_in = thunk_type_size(arg_type, 0);
4530     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4531     if (!argptr) {
4532         return -TARGET_EFAULT;
4533     }
4534     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4535     unlock_user(argptr, arg, 0);
4536     fm = (struct fiemap *)buf_temp;
4537     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4538         return -TARGET_EINVAL;
4539     }
4540 
4541     outbufsz = sizeof (*fm) +
4542         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4543 
4544     if (outbufsz > MAX_STRUCT_SIZE) {
4545         /* We can't fit all the extents into the fixed size buffer.
4546          * Allocate one that is large enough and use it instead.
4547          */
4548         fm = g_try_malloc(outbufsz);
4549         if (!fm) {
4550             return -TARGET_ENOMEM;
4551         }
4552         memcpy(fm, buf_temp, sizeof(struct fiemap));
4553         free_fm = 1;
4554     }
4555     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4556     if (!is_error(ret)) {
4557         target_size_out = target_size_in;
4558         /* An extent_count of 0 means we were only counting the extents
4559          * so there are no structs to copy
4560          */
4561         if (fm->fm_extent_count != 0) {
4562             target_size_out += fm->fm_mapped_extents * extent_size;
4563         }
4564         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4565         if (!argptr) {
4566             ret = -TARGET_EFAULT;
4567         } else {
4568             /* Convert the struct fiemap */
4569             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4570             if (fm->fm_extent_count != 0) {
4571                 p = argptr + target_size_in;
4572                 /* ...and then all the struct fiemap_extents */
4573                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4574                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4575                                   THUNK_TARGET);
4576                     p += extent_size;
4577                 }
4578             }
4579             unlock_user(argptr, arg, target_size_out);
4580         }
4581     }
4582     if (free_fm) {
4583         g_free(fm);
4584     }
4585     return ret;
4586 }
4587 #endif
4588 
4589 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4590                                 int fd, int cmd, abi_long arg)
4591 {
4592     const argtype *arg_type = ie->arg_type;
4593     int target_size;
4594     void *argptr;
4595     int ret;
4596     struct ifconf *host_ifconf;
4597     uint32_t outbufsz;
4598     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4599     int target_ifreq_size;
4600     int nb_ifreq;
4601     int free_buf = 0;
4602     int i;
4603     int target_ifc_len;
4604     abi_long target_ifc_buf;
4605     int host_ifc_len;
4606     char *host_ifc_buf;
4607 
4608     assert(arg_type[0] == TYPE_PTR);
4609     assert(ie->access == IOC_RW);
4610 
4611     arg_type++;
4612     target_size = thunk_type_size(arg_type, 0);
4613 
4614     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4615     if (!argptr)
4616         return -TARGET_EFAULT;
4617     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4618     unlock_user(argptr, arg, 0);
4619 
4620     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4621     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4622     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4623 
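         /* The guest's ifc_len counts target-sized ifreq structures; rescale
          * it to the host ifreq size so the kernel fills a correctly sized
          * host buffer.
          */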
4624     if (target_ifc_buf != 0) {
4625         target_ifc_len = host_ifconf->ifc_len;
4626         nb_ifreq = target_ifc_len / target_ifreq_size;
4627         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4628 
4629         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4630         if (outbufsz > MAX_STRUCT_SIZE) {
4631             /*
4632              * We can't fit all the ifreq entries into the fixed size buffer.
4633              * Allocate one that is large enough and use it instead.
4634              */
4635             host_ifconf = malloc(outbufsz);
4636             if (!host_ifconf) {
4637                 return -TARGET_ENOMEM;
4638             }
4639             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4640             free_buf = 1;
4641         }
4642         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4643 
4644         host_ifconf->ifc_len = host_ifc_len;
4645     } else {
4646       host_ifc_buf = NULL;
4647     }
4648     host_ifconf->ifc_buf = host_ifc_buf;
4649 
4650     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4651     if (!is_error(ret)) {
4652         /* convert host ifc_len to target ifc_len */
4653 
4654         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4655         target_ifc_len = nb_ifreq * target_ifreq_size;
4656         host_ifconf->ifc_len = target_ifc_len;
4657 
4658         /* restore target ifc_buf */
4659 
4660         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4661 
4662         /* copy struct ifconf to target user */
4663 
4664         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4665         if (!argptr)
4666             return -TARGET_EFAULT;
4667         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4668         unlock_user(argptr, arg, target_size);
4669 
4670         if (target_ifc_buf != 0) {
4671             /* copy ifreq[] to target user */
4672             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            if (!argptr) {
                if (free_buf) {
                    free(host_ifconf);
                }
                return -TARGET_EFAULT;
            }
4673             for (i = 0; i < nb_ifreq ; i++) {
4674                 thunk_convert(argptr + i * target_ifreq_size,
4675                               host_ifc_buf + i * sizeof(struct ifreq),
4676                               ifreq_arg_type, THUNK_TARGET);
4677             }
4678             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4679         }
4680     }
4681 
4682     if (free_buf) {
4683         free(host_ifconf);
4684     }
4685 
4686     return ret;
4687 }
4688 
4689 #if defined(CONFIG_USBFS)
4690 #if HOST_LONG_BITS > 64
4691 #error USBDEVFS thunks do not support >64 bit hosts yet.
4692 #endif
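/*
 * Book-keeping for an asynchronous USB request submitted on behalf of the
 * guest: the guest addresses of the URB and of its data buffer, the locked
 * host pointer for that buffer, and the host-format URB actually handed to
 * the kernel.
 */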
4693 struct live_urb {
4694     uint64_t target_urb_adr;
4695     uint64_t target_buf_adr;
4696     char *target_buf_ptr;
4697     struct usbdevfs_urb host_urb;
4698 };
4699 
4700 static GHashTable *usbdevfs_urb_hashtable(void)
4701 {
4702     static GHashTable *urb_hashtable;
4703 
4704     if (!urb_hashtable) {
4705         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4706     }
4707     return urb_hashtable;
4708 }
4709 
4710 static void urb_hashtable_insert(struct live_urb *urb)
4711 {
4712     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4713     g_hash_table_insert(urb_hashtable, urb, urb);
4714 }
4715 
4716 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4717 {
4718     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4719     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4720 }
4721 
4722 static void urb_hashtable_remove(struct live_urb *urb)
4723 {
4724     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4725     g_hash_table_remove(urb_hashtable, urb);
4726 }
4727 
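/*
 * USBDEVFS_REAPURB{,NDELAY}: the kernel hands back a pointer to the host
 * URB we submitted earlier.  Recover the associated live_urb metadata,
 * copy the completed URB and its buffer back to the guest, and store the
 * guest's original URB pointer in the reap result.
 */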
4728 static abi_long
4729 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4730                           int fd, int cmd, abi_long arg)
4731 {
4732     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4733     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4734     struct live_urb *lurb;
4735     void *argptr;
4736     uint64_t hurb;
4737     int target_size;
4738     uintptr_t target_urb_adr;
4739     abi_long ret;
4740 
4741     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4742 
4743     memset(buf_temp, 0, sizeof(uint64_t));
4744     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4745     if (is_error(ret)) {
4746         return ret;
4747     }
4748 
4749     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4750     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4751     if (!lurb->target_urb_adr) {
4752         return -TARGET_EFAULT;
4753     }
4754     urb_hashtable_remove(lurb);
4755     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4756         lurb->host_urb.buffer_length);
4757     lurb->target_buf_ptr = NULL;
4758 
4759     /* restore the guest buffer pointer */
4760     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4761 
4762     /* update the guest urb struct */
4763     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4764     if (!argptr) {
4765         g_free(lurb);
4766         return -TARGET_EFAULT;
4767     }
4768     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4769     unlock_user(argptr, lurb->target_urb_adr, target_size);
4770 
4771     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4772     /* write back the urb handle */
4773     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4774     if (!argptr) {
4775         g_free(lurb);
4776         return -TARGET_EFAULT;
4777     }
4778 
4779     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4780     target_urb_adr = lurb->target_urb_adr;
4781     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4782     unlock_user(argptr, arg, target_size);
4783 
4784     g_free(lurb);
4785     return ret;
4786 }
4787 
4788 static abi_long
4789 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4790                              uint8_t *buf_temp __attribute__((unused)),
4791                              int fd, int cmd, abi_long arg)
4792 {
4793     struct live_urb *lurb;
4794 
4795     /* map target address back to host URB with metadata. */
4796     lurb = urb_hashtable_lookup(arg);
4797     if (!lurb) {
4798         return -TARGET_EFAULT;
4799     }
4800     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4801 }
4802 
4803 static abi_long
4804 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4805                             int fd, int cmd, abi_long arg)
4806 {
4807     const argtype *arg_type = ie->arg_type;
4808     int target_size;
4809     abi_long ret;
4810     void *argptr;
4811     int rw_dir;
4812     struct live_urb *lurb;
4813 
4814     /*
4815      * each submitted URB needs to map to a unique ID for the
4816      * kernel, and that unique ID needs to be a pointer to
4817      * host memory.  hence, we need to malloc for each URB.
4818      * isochronous transfers have a variable length struct.
4819      */
4820     arg_type++;
4821     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4822 
4823     /* construct host copy of urb and metadata */
4824     lurb = g_try_malloc0(sizeof(struct live_urb));
4825     if (!lurb) {
4826         return -TARGET_ENOMEM;
4827     }
4828 
4829     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4830     if (!argptr) {
4831         g_free(lurb);
4832         return -TARGET_EFAULT;
4833     }
4834     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4835     unlock_user(argptr, arg, 0);
4836 
4837     lurb->target_urb_adr = arg;
4838     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4839 
4840     /* buffer space used depends on endpoint type so lock the entire buffer */
4841     /* control type urbs should check the buffer contents for true direction */
4842     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4843     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4844         lurb->host_urb.buffer_length, 1);
4845     if (lurb->target_buf_ptr == NULL) {
4846         g_free(lurb);
4847         return -TARGET_EFAULT;
4848     }
4849 
4850     /* update buffer pointer in host copy */
4851     lurb->host_urb.buffer = lurb->target_buf_ptr;
4852 
4853     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4854     if (is_error(ret)) {
4855         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4856         g_free(lurb);
4857     } else {
4858         urb_hashtable_insert(lurb);
4859     }
4860 
4861     return ret;
4862 }
4863 #endif /* CONFIG_USBFS */
4864 
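/*
 * Device-mapper ioctls carry a struct dm_ioctl header followed by a
 * variable-sized, command-specific payload.  Convert the header and any
 * input payload to host format, run the ioctl, then convert the output
 * payload back according to the particular DM command.
 */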
4865 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4866                             int cmd, abi_long arg)
4867 {
4868     void *argptr;
4869     struct dm_ioctl *host_dm;
4870     abi_long guest_data;
4871     uint32_t guest_data_size;
4872     int target_size;
4873     const argtype *arg_type = ie->arg_type;
4874     abi_long ret;
4875     void *big_buf = NULL;
4876     char *host_data;
4877 
4878     arg_type++;
4879     target_size = thunk_type_size(arg_type, 0);
4880     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4881     if (!argptr) {
4882         ret = -TARGET_EFAULT;
4883         goto out;
4884     }
4885     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4886     unlock_user(argptr, arg, 0);
4887 
4888     /* buf_temp is too small, so fetch things into a bigger buffer */
4889     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4890     memcpy(big_buf, buf_temp, target_size);
4891     buf_temp = big_buf;
4892     host_dm = big_buf;
4893 
4894     guest_data = arg + host_dm->data_start;
4895     if ((guest_data - arg) < 0) {
4896         ret = -TARGET_EINVAL;
4897         goto out;
4898     }
4899     guest_data_size = host_dm->data_size - host_dm->data_start;
4900     host_data = (char*)host_dm + host_dm->data_start;
4901 
4902     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4903     if (!argptr) {
4904         ret = -TARGET_EFAULT;
4905         goto out;
4906     }
4907 
4908     switch (ie->host_cmd) {
4909     case DM_REMOVE_ALL:
4910     case DM_LIST_DEVICES:
4911     case DM_DEV_CREATE:
4912     case DM_DEV_REMOVE:
4913     case DM_DEV_SUSPEND:
4914     case DM_DEV_STATUS:
4915     case DM_DEV_WAIT:
4916     case DM_TABLE_STATUS:
4917     case DM_TABLE_CLEAR:
4918     case DM_TABLE_DEPS:
4919     case DM_LIST_VERSIONS:
4920         /* no input data */
4921         break;
4922     case DM_DEV_RENAME:
4923     case DM_DEV_SET_GEOMETRY:
4924         /* data contains only strings */
4925         memcpy(host_data, argptr, guest_data_size);
4926         break;
4927     case DM_TARGET_MSG:
4928         memcpy(host_data, argptr, guest_data_size);
4929         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4930         break;
4931     case DM_TABLE_LOAD:
4932     {
4933         void *gspec = argptr;
4934         void *cur_data = host_data;
4935         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4936         int spec_size = thunk_type_size(arg_type, 0);
4937         int i;
4938 
4939         for (i = 0; i < host_dm->target_count; i++) {
4940             struct dm_target_spec *spec = cur_data;
4941             uint32_t next;
4942             int slen;
4943 
4944             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4945             slen = strlen((char*)gspec + spec_size) + 1;
4946             next = spec->next;
4947             spec->next = sizeof(*spec) + slen;
4948             strcpy((char*)&spec[1], gspec + spec_size);
4949             gspec += next;
4950             cur_data += spec->next;
4951         }
4952         break;
4953     }
4954     default:
4955         ret = -TARGET_EINVAL;
4956         unlock_user(argptr, guest_data, 0);
4957         goto out;
4958     }
4959     unlock_user(argptr, guest_data, 0);
4960 
4961     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4962     if (!is_error(ret)) {
4963         guest_data = arg + host_dm->data_start;
4964         guest_data_size = host_dm->data_size - host_dm->data_start;
4965         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4966         switch (ie->host_cmd) {
4967         case DM_REMOVE_ALL:
4968         case DM_DEV_CREATE:
4969         case DM_DEV_REMOVE:
4970         case DM_DEV_RENAME:
4971         case DM_DEV_SUSPEND:
4972         case DM_DEV_STATUS:
4973         case DM_TABLE_LOAD:
4974         case DM_TABLE_CLEAR:
4975         case DM_TARGET_MSG:
4976         case DM_DEV_SET_GEOMETRY:
4977             /* no return data */
4978             break;
4979         case DM_LIST_DEVICES:
4980         {
4981             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4982             uint32_t remaining_data = guest_data_size;
4983             void *cur_data = argptr;
4984             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4985             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
4986 
4987             while (1) {
4988                 uint32_t next = nl->next;
4989                 if (next) {
4990                     nl->next = nl_size + (strlen(nl->name) + 1);
4991                 }
4992                 if (remaining_data < nl->next) {
4993                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4994                     break;
4995                 }
4996                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4997                 strcpy(cur_data + nl_size, nl->name);
4998                 cur_data += nl->next;
4999                 remaining_data -= nl->next;
5000                 if (!next) {
5001                     break;
5002                 }
5003                 nl = (void*)nl + next;
5004             }
5005             break;
5006         }
5007         case DM_DEV_WAIT:
5008         case DM_TABLE_STATUS:
5009         {
5010             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5011             void *cur_data = argptr;
5012             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5013             int spec_size = thunk_type_size(arg_type, 0);
5014             int i;
5015 
5016             for (i = 0; i < host_dm->target_count; i++) {
5017                 uint32_t next = spec->next;
5018                 int slen = strlen((char*)&spec[1]) + 1;
5019                 spec->next = (cur_data - argptr) + spec_size + slen;
5020                 if (guest_data_size < spec->next) {
5021                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5022                     break;
5023                 }
5024                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5025                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5026                 cur_data = argptr + spec->next;
5027                 spec = (void*)host_dm + host_dm->data_start + next;
5028             }
5029             break;
5030         }
5031         case DM_TABLE_DEPS:
5032         {
5033             void *hdata = (void*)host_dm + host_dm->data_start;
5034             int count = *(uint32_t*)hdata;
5035             uint64_t *hdev = hdata + 8;
5036             uint64_t *gdev = argptr + 8;
5037             int i;
5038 
5039             *(uint32_t*)argptr = tswap32(count);
5040             for (i = 0; i < count; i++) {
5041                 *gdev = tswap64(*hdev);
5042                 gdev++;
5043                 hdev++;
5044             }
5045             break;
5046         }
5047         case DM_LIST_VERSIONS:
5048         {
5049             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5050             uint32_t remaining_data = guest_data_size;
5051             void *cur_data = argptr;
5052             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5053             int vers_size = thunk_type_size(arg_type, 0);
5054 
5055             while (1) {
5056                 uint32_t next = vers->next;
5057                 if (next) {
5058                     vers->next = vers_size + (strlen(vers->name) + 1);
5059                 }
5060                 if (remaining_data < vers->next) {
5061                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5062                     break;
5063                 }
5064                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5065                 strcpy(cur_data + vers_size, vers->name);
5066                 cur_data += vers->next;
5067                 remaining_data -= vers->next;
5068                 if (!next) {
5069                     break;
5070                 }
5071                 vers = (void*)vers + next;
5072             }
5073             break;
5074         }
5075         default:
5076             unlock_user(argptr, guest_data, 0);
5077             ret = -TARGET_EINVAL;
5078             goto out;
5079         }
5080         unlock_user(argptr, guest_data, guest_data_size);
5081 
5082         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5083         if (!argptr) {
5084             ret = -TARGET_EFAULT;
5085             goto out;
5086         }
5087         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5088         unlock_user(argptr, arg, target_size);
5089     }
5090 out:
5091     g_free(big_buf);
5092     return ret;
5093 }
5094 
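/*
 * BLKPG: struct blkpg_ioctl_arg embeds a pointer to a struct blkpg_partition
 * payload, so both levels must be converted; the payload pointer is then
 * redirected to a local host copy before calling the host ioctl.
 */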
5095 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5096                                int cmd, abi_long arg)
5097 {
5098     void *argptr;
5099     int target_size;
5100     const argtype *arg_type = ie->arg_type;
5101     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5102     abi_long ret;
5103 
5104     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5105     struct blkpg_partition host_part;
5106 
5107     /* Read and convert blkpg */
5108     arg_type++;
5109     target_size = thunk_type_size(arg_type, 0);
5110     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5111     if (!argptr) {
5112         ret = -TARGET_EFAULT;
5113         goto out;
5114     }
5115     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5116     unlock_user(argptr, arg, 0);
5117 
5118     switch (host_blkpg->op) {
5119     case BLKPG_ADD_PARTITION:
5120     case BLKPG_DEL_PARTITION:
5121         /* payload is struct blkpg_partition */
5122         break;
5123     default:
5124         /* Unknown opcode */
5125         ret = -TARGET_EINVAL;
5126         goto out;
5127     }
5128 
5129     /* Read and convert blkpg->data */
5130     arg = (abi_long)(uintptr_t)host_blkpg->data;
5131     target_size = thunk_type_size(part_arg_type, 0);
5132     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5133     if (!argptr) {
5134         ret = -TARGET_EFAULT;
5135         goto out;
5136     }
5137     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5138     unlock_user(argptr, arg, 0);
5139 
5140     /* Swizzle the data pointer to our local copy and call! */
5141     host_blkpg->data = &host_part;
5142     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5143 
5144 out:
5145     return ret;
5146 }
5147 
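/*
 * Route ioctls (SIOCADDRT/SIOCDELRT) pass a struct rtentry whose rt_dev
 * member is a string pointer.  The struct is therefore converted field by
 * field here so the device name can be locked in guest memory for the
 * duration of the ioctl.
 */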
5148 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5149                                 int fd, int cmd, abi_long arg)
5150 {
5151     const argtype *arg_type = ie->arg_type;
5152     const StructEntry *se;
5153     const argtype *field_types;
5154     const int *dst_offsets, *src_offsets;
5155     int target_size;
5156     void *argptr;
5157     abi_ulong *target_rt_dev_ptr = NULL;
5158     unsigned long *host_rt_dev_ptr = NULL;
5159     abi_long ret;
5160     int i;
5161 
5162     assert(ie->access == IOC_W);
5163     assert(*arg_type == TYPE_PTR);
5164     arg_type++;
5165     assert(*arg_type == TYPE_STRUCT);
5166     target_size = thunk_type_size(arg_type, 0);
5167     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5168     if (!argptr) {
5169         return -TARGET_EFAULT;
5170     }
5171     arg_type++;
5172     assert(*arg_type == (int)STRUCT_rtentry);
5173     se = struct_entries + *arg_type++;
5174     assert(se->convert[0] == NULL);
5175     /* convert struct here to be able to catch rt_dev string */
5176     field_types = se->field_types;
5177     dst_offsets = se->field_offsets[THUNK_HOST];
5178     src_offsets = se->field_offsets[THUNK_TARGET];
5179     for (i = 0; i < se->nb_fields; i++) {
5180         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5181             assert(*field_types == TYPE_PTRVOID);
5182             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5183             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5184             if (*target_rt_dev_ptr != 0) {
5185                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5186                                                   tswapal(*target_rt_dev_ptr));
5187                 if (!*host_rt_dev_ptr) {
5188                     unlock_user(argptr, arg, 0);
5189                     return -TARGET_EFAULT;
5190                 }
5191             } else {
5192                 *host_rt_dev_ptr = 0;
5193             }
5194             field_types++;
5195             continue;
5196         }
5197         field_types = thunk_convert(buf_temp + dst_offsets[i],
5198                                     argptr + src_offsets[i],
5199                                     field_types, THUNK_HOST);
5200     }
5201     unlock_user(argptr, arg, 0);
5202 
5203     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5204 
5205     assert(host_rt_dev_ptr != NULL);
5206     assert(target_rt_dev_ptr != NULL);
5207     if (*host_rt_dev_ptr != 0) {
5208         unlock_user((void *)*host_rt_dev_ptr,
5209                     *target_rt_dev_ptr, 0);
5210     }
5211     return ret;
5212 }
5213 
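/* KDSIGACCEPT passes a signal number, which must be mapped to the host's. */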
5214 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5215                                      int fd, int cmd, abi_long arg)
5216 {
5217     int sig = target_to_host_signal(arg);
5218     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5219 }
5220 
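/*
 * SIOCGSTAMP: fetch the last-packet timestamp from the host, then copy it
 * out in either the old timeval layout or the 64-bit layout, depending on
 * which variant of the ioctl the guest issued.
 */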
5221 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5222                                     int fd, int cmd, abi_long arg)
5223 {
5224     struct timeval tv;
5225     abi_long ret;
5226 
5227     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5228     if (is_error(ret)) {
5229         return ret;
5230     }
5231 
5232     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5233         if (copy_to_user_timeval(arg, &tv)) {
5234             return -TARGET_EFAULT;
5235         }
5236     } else {
5237         if (copy_to_user_timeval64(arg, &tv)) {
5238             return -TARGET_EFAULT;
5239         }
5240     }
5241 
5242     return ret;
5243 }
5244 
5245 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5246                                       int fd, int cmd, abi_long arg)
5247 {
5248     struct timespec ts;
5249     abi_long ret;
5250 
5251     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5252     if (is_error(ret)) {
5253         return ret;
5254     }
5255 
5256     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5257         if (host_to_target_timespec(arg, &ts)) {
5258             return -TARGET_EFAULT;
5259         }
5260     } else {
5261         if (host_to_target_timespec64(arg, &ts)) {
5262             return -TARGET_EFAULT;
5263         }
5264     }
5265 
5266     return ret;
5267 }
5268 
5269 #ifdef TIOCGPTPEER
5270 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5271                                      int fd, int cmd, abi_long arg)
5272 {
5273     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5274     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5275 }
5276 #endif
5277 
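/*
 * Table of supported ioctls, generated from ioctls.h.  Each entry either
 * maps a target command to a host command with a thunk-described argument,
 * or names a special-case conversion handler.
 */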
5278 static IOCTLEntry ioctl_entries[] = {
5279 #define IOCTL(cmd, access, ...) \
5280     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5281 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5282     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5283 #define IOCTL_IGNORE(cmd) \
5284     { TARGET_ ## cmd, 0, #cmd },
5285 #include "ioctls.h"
5286     { 0, 0, },
5287 };
5288 
5289 /* ??? Implement proper locking for ioctls.  */
5290 /* do_ioctl() must return target values and target errnos. */
5291 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5292 {
5293     const IOCTLEntry *ie;
5294     const argtype *arg_type;
5295     abi_long ret;
5296     uint8_t buf_temp[MAX_STRUCT_SIZE];
5297     int target_size;
5298     void *argptr;
5299 
5300     ie = ioctl_entries;
5301     for(;;) {
5302         if (ie->target_cmd == 0) {
5303             qemu_log_mask(
5304                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5305             return -TARGET_ENOSYS;
5306         }
5307         if (ie->target_cmd == cmd)
5308             break;
5309         ie++;
5310     }
5311     arg_type = ie->arg_type;
5312     if (ie->do_ioctl) {
5313         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5314     } else if (!ie->host_cmd) {
5315         /* Some architectures define BSD ioctls in their headers
5316            that are not implemented in Linux.  */
5317         return -TARGET_ENOSYS;
5318     }
5319 
5320     switch(arg_type[0]) {
5321     case TYPE_NULL:
5322         /* no argument */
5323         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5324         break;
5325     case TYPE_PTRVOID:
5326     case TYPE_INT:
5327     case TYPE_LONG:
5328     case TYPE_ULONG:
5329         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5330         break;
5331     case TYPE_PTR:
5332         arg_type++;
5333         target_size = thunk_type_size(arg_type, 0);
5334         switch(ie->access) {
5335         case IOC_R:
5336             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5337             if (!is_error(ret)) {
5338                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5339                 if (!argptr)
5340                     return -TARGET_EFAULT;
5341                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5342                 unlock_user(argptr, arg, target_size);
5343             }
5344             break;
5345         case IOC_W:
5346             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5347             if (!argptr)
5348                 return -TARGET_EFAULT;
5349             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5350             unlock_user(argptr, arg, 0);
5351             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5352             break;
5353         default:
5354         case IOC_RW:
5355             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5356             if (!argptr)
5357                 return -TARGET_EFAULT;
5358             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5359             unlock_user(argptr, arg, 0);
5360             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5361             if (!is_error(ret)) {
5362                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5363                 if (!argptr)
5364                     return -TARGET_EFAULT;
5365                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5366                 unlock_user(argptr, arg, target_size);
5367             }
5368             break;
5369         }
5370         break;
5371     default:
5372         qemu_log_mask(LOG_UNIMP,
5373                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5374                       (long)cmd, arg_type[0]);
5375         ret = -TARGET_ENOSYS;
5376         break;
5377     }
5378     return ret;
5379 }
5380 
5381 static const bitmask_transtbl iflag_tbl[] = {
5382         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5383         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5384         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5385         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5386         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5387         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5388         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5389         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5390         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5391         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5392         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5393         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5394         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5395         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5396         { 0, 0, 0, 0 }
5397 };
5398 
5399 static const bitmask_transtbl oflag_tbl[] = {
5400 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5401 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5402 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5403 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5404 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5405 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5406 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5407 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5408 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5409 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5410 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5411 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5412 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5413 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5414 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5415 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5416 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5417 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5418 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5419 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5420 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5421 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5422 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5423 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5424 	{ 0, 0, 0, 0 }
5425 };
5426 
5427 static const bitmask_transtbl cflag_tbl[] = {
5428 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5429 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5430 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5431 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5432 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5433 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5434 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5435 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5436 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5437 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5438 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5439 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5440 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5441 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5442 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5443 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5444 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5445 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5446 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5447 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5448 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5449 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5450 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5451 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5452 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5453 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5454 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5455 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5456 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5457 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5458 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5459 	{ 0, 0, 0, 0 }
5460 };
5461 
5462 static const bitmask_transtbl lflag_tbl[] = {
5463 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5464 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5465 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5466 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5467 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5468 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5469 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5470 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5471 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5472 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5473 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5474 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5475 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5476 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5477 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5478 	{ 0, 0, 0, 0 }
5479 };
5480 
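/* Convert a guest struct termios to the host layout, translating the flag
   bitmasks and the control-character array. */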
5481 static void target_to_host_termios (void *dst, const void *src)
5482 {
5483     struct host_termios *host = dst;
5484     const struct target_termios *target = src;
5485 
5486     host->c_iflag =
5487         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5488     host->c_oflag =
5489         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5490     host->c_cflag =
5491         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5492     host->c_lflag =
5493         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5494     host->c_line = target->c_line;
5495 
5496     memset(host->c_cc, 0, sizeof(host->c_cc));
5497     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5498     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5499     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5500     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5501     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5502     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5503     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5504     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5505     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5506     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5507     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5508     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5509     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5510     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5511     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5512     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5513     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5514 }
5515 
5516 static void host_to_target_termios (void *dst, const void *src)
5517 {
5518     struct target_termios *target = dst;
5519     const struct host_termios *host = src;
5520 
5521     target->c_iflag =
5522         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5523     target->c_oflag =
5524         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5525     target->c_cflag =
5526         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5527     target->c_lflag =
5528         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5529     target->c_line = host->c_line;
5530 
5531     memset(target->c_cc, 0, sizeof(target->c_cc));
5532     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5533     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5534     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5535     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5536     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5537     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5538     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5539     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5540     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5541     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5542     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5543     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5544     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5545     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5546     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5547     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5548     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5549 }
5550 
5551 static const StructEntry struct_termios_def = {
5552     .convert = { host_to_target_termios, target_to_host_termios },
5553     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5554     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5555 };
5556 
5557 static bitmask_transtbl mmap_flags_tbl[] = {
5558     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5559     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5560     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5561     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5562       MAP_ANONYMOUS, MAP_ANONYMOUS },
5563     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5564       MAP_GROWSDOWN, MAP_GROWSDOWN },
5565     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5566       MAP_DENYWRITE, MAP_DENYWRITE },
5567     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5568       MAP_EXECUTABLE, MAP_EXECUTABLE },
5569     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5570     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5571       MAP_NORESERVE, MAP_NORESERVE },
5572     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5573     /* MAP_STACK had been ignored by the kernel for quite some time.
5574        Recognize it for the target insofar as we do not want to pass
5575        it through to the host.  */
5576     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5577     { 0, 0, 0, 0 }
5578 };
5579 
5580 /*
5581  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
5582  *       TARGET_I386 is also defined when TARGET_X86_64 is defined.
5583  */
5584 #if defined(TARGET_I386)
5585 
5586 /* NOTE: there is really one LDT for all the threads */
5587 static uint8_t *ldt_table;
5588 
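/* modify_ldt(2), func 0: copy the emulated LDT table back to the guest. */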
5589 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5590 {
5591     int size;
5592     void *p;
5593 
5594     if (!ldt_table)
5595         return 0;
5596     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5597     if (size > bytecount)
5598         size = bytecount;
5599     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5600     if (!p)
5601         return -TARGET_EFAULT;
5602     /* ??? Should this be byteswapped?  */
5603     memcpy(p, ldt_table, size);
5604     unlock_user(p, ptr, size);
5605     return size;
5606 }
5607 
5608 /* XXX: add locking support */
5609 static abi_long write_ldt(CPUX86State *env,
5610                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5611 {
5612     struct target_modify_ldt_ldt_s ldt_info;
5613     struct target_modify_ldt_ldt_s *target_ldt_info;
5614     int seg_32bit, contents, read_exec_only, limit_in_pages;
5615     int seg_not_present, useable, lm;
5616     uint32_t *lp, entry_1, entry_2;
5617 
5618     if (bytecount != sizeof(ldt_info))
5619         return -TARGET_EINVAL;
5620     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5621         return -TARGET_EFAULT;
5622     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5623     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5624     ldt_info.limit = tswap32(target_ldt_info->limit);
5625     ldt_info.flags = tswap32(target_ldt_info->flags);
5626     unlock_user_struct(target_ldt_info, ptr, 0);
5627 
5628     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5629         return -TARGET_EINVAL;
5630     seg_32bit = ldt_info.flags & 1;
5631     contents = (ldt_info.flags >> 1) & 3;
5632     read_exec_only = (ldt_info.flags >> 3) & 1;
5633     limit_in_pages = (ldt_info.flags >> 4) & 1;
5634     seg_not_present = (ldt_info.flags >> 5) & 1;
5635     useable = (ldt_info.flags >> 6) & 1;
5636 #ifdef TARGET_ABI32
5637     lm = 0;
5638 #else
5639     lm = (ldt_info.flags >> 7) & 1;
5640 #endif
5641     if (contents == 3) {
5642         if (oldmode)
5643             return -TARGET_EINVAL;
5644         if (seg_not_present == 0)
5645             return -TARGET_EINVAL;
5646     }
5647     /* allocate the LDT */
5648     if (!ldt_table) {
5649         env->ldt.base = target_mmap(0,
5650                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5651                                     PROT_READ|PROT_WRITE,
5652                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5653         if (env->ldt.base == -1)
5654             return -TARGET_ENOMEM;
5655         memset(g2h(env->ldt.base), 0,
5656                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5657         env->ldt.limit = 0xffff;
5658         ldt_table = g2h(env->ldt.base);
5659     }
5660 
5661     /* NOTE: same code as Linux kernel */
5662     /* Allow LDTs to be cleared by the user. */
5663     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5664         if (oldmode ||
5665             (contents == 0		&&
5666              read_exec_only == 1	&&
5667              seg_32bit == 0		&&
5668              limit_in_pages == 0	&&
5669              seg_not_present == 1	&&
5670              useable == 0 )) {
5671             entry_1 = 0;
5672             entry_2 = 0;
5673             goto install;
5674         }
5675     }
5676 
5677     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5678         (ldt_info.limit & 0x0ffff);
5679     entry_2 = (ldt_info.base_addr & 0xff000000) |
5680         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5681         (ldt_info.limit & 0xf0000) |
5682         ((read_exec_only ^ 1) << 9) |
5683         (contents << 10) |
5684         ((seg_not_present ^ 1) << 15) |
5685         (seg_32bit << 22) |
5686         (limit_in_pages << 23) |
5687         (lm << 21) |
5688         0x7000;
5689     if (!oldmode)
5690         entry_2 |= (useable << 20);
5691 
5692     /* Install the new entry ...  */
5693 install:
5694     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5695     lp[0] = tswap32(entry_1);
5696     lp[1] = tswap32(entry_2);
5697     return 0;
5698 }
5699 
5700 /* specific and weird i386 syscalls */
5701 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5702                               unsigned long bytecount)
5703 {
5704     abi_long ret;
5705 
5706     switch (func) {
5707     case 0:
5708         ret = read_ldt(ptr, bytecount);
5709         break;
5710     case 1:
5711         ret = write_ldt(env, ptr, bytecount, 1);
5712         break;
5713     case 0x11:
5714         ret = write_ldt(env, ptr, bytecount, 0);
5715         break;
5716     default:
5717         ret = -TARGET_ENOSYS;
5718         break;
5719     }
5720     return ret;
5721 }
5722 
5723 #if defined(TARGET_ABI32)
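/* set_thread_area: install a TLS descriptor in the emulated GDT, picking a
   free TLS slot when the guest passes entry_number == -1. */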
5724 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5725 {
5726     uint64_t *gdt_table = g2h(env->gdt.base);
5727     struct target_modify_ldt_ldt_s ldt_info;
5728     struct target_modify_ldt_ldt_s *target_ldt_info;
5729     int seg_32bit, contents, read_exec_only, limit_in_pages;
5730     int seg_not_present, useable, lm;
5731     uint32_t *lp, entry_1, entry_2;
5732     int i;
5733 
5734     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5735     if (!target_ldt_info)
5736         return -TARGET_EFAULT;
5737     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5738     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5739     ldt_info.limit = tswap32(target_ldt_info->limit);
5740     ldt_info.flags = tswap32(target_ldt_info->flags);
5741     if (ldt_info.entry_number == -1) {
5742         for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
5743             if (gdt_table[i] == 0) {
5744                 ldt_info.entry_number = i;
5745                 target_ldt_info->entry_number = tswap32(i);
5746                 break;
5747             }
5748         }
5749     }
5750     unlock_user_struct(target_ldt_info, ptr, 1);
5751 
5752     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5753         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5754            return -TARGET_EINVAL;
5755     seg_32bit = ldt_info.flags & 1;
5756     contents = (ldt_info.flags >> 1) & 3;
5757     read_exec_only = (ldt_info.flags >> 3) & 1;
5758     limit_in_pages = (ldt_info.flags >> 4) & 1;
5759     seg_not_present = (ldt_info.flags >> 5) & 1;
5760     useable = (ldt_info.flags >> 6) & 1;
5761 #ifdef TARGET_ABI32
5762     lm = 0;
5763 #else
5764     lm = (ldt_info.flags >> 7) & 1;
5765 #endif
5766 
5767     if (contents == 3) {
5768         if (seg_not_present == 0)
5769             return -TARGET_EINVAL;
5770     }
5771 
5772     /* NOTE: same code as Linux kernel */
5773     /* Allow LDTs to be cleared by the user. */
5774     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5775         if ((contents == 0             &&
5776              read_exec_only == 1       &&
5777              seg_32bit == 0            &&
5778              limit_in_pages == 0       &&
5779              seg_not_present == 1      &&
5780              useable == 0 )) {
5781             entry_1 = 0;
5782             entry_2 = 0;
5783             goto install;
5784         }
5785     }
5786 
5787     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5788         (ldt_info.limit & 0x0ffff);
5789     entry_2 = (ldt_info.base_addr & 0xff000000) |
5790         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5791         (ldt_info.limit & 0xf0000) |
5792         ((read_exec_only ^ 1) << 9) |
5793         (contents << 10) |
5794         ((seg_not_present ^ 1) << 15) |
5795         (seg_32bit << 22) |
5796         (limit_in_pages << 23) |
5797         (useable << 20) |
5798         (lm << 21) |
5799         0x7000;
5800 
5801     /* Install the new entry ...  */
5802 install:
5803     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5804     lp[0] = tswap32(entry_1);
5805     lp[1] = tswap32(entry_2);
5806     return 0;
5807 }
5808 
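/* get_thread_area: decode a TLS descriptor from the emulated GDT back into
   the guest's layout. */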
5809 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5810 {
5811     struct target_modify_ldt_ldt_s *target_ldt_info;
5812     uint64_t *gdt_table = g2h(env->gdt.base);
5813     uint32_t base_addr, limit, flags;
5814     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5815     int seg_not_present, useable, lm;
5816     uint32_t *lp, entry_1, entry_2;
5817 
5818     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5819     if (!target_ldt_info)
5820         return -TARGET_EFAULT;
5821     idx = tswap32(target_ldt_info->entry_number);
5822     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5823         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5824         unlock_user_struct(target_ldt_info, ptr, 1);
5825         return -TARGET_EINVAL;
5826     }
5827     lp = (uint32_t *)(gdt_table + idx);
5828     entry_1 = tswap32(lp[0]);
5829     entry_2 = tswap32(lp[1]);
5830 
5831     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5832     contents = (entry_2 >> 10) & 3;
5833     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5834     seg_32bit = (entry_2 >> 22) & 1;
5835     limit_in_pages = (entry_2 >> 23) & 1;
5836     useable = (entry_2 >> 20) & 1;
5837 #ifdef TARGET_ABI32
5838     lm = 0;
5839 #else
5840     lm = (entry_2 >> 21) & 1;
5841 #endif
5842     flags = (seg_32bit << 0) | (contents << 1) |
5843         (read_exec_only << 3) | (limit_in_pages << 4) |
5844         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5845     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5846     base_addr = (entry_1 >> 16) |
5847         (entry_2 & 0xff000000) |
5848         ((entry_2 & 0xff) << 16);
5849     target_ldt_info->base_addr = tswapal(base_addr);
5850     target_ldt_info->limit = tswap32(limit);
5851     target_ldt_info->flags = tswap32(flags);
5852     unlock_user_struct(target_ldt_info, ptr, 1);
5853     return 0;
5854 }
5855 
5856 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5857 {
5858     return -TARGET_ENOSYS;
5859 }
5860 #else
5861 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5862 {
5863     abi_long ret = 0;
5864     abi_ulong val;
5865     int idx;
5866 
5867     switch(code) {
5868     case TARGET_ARCH_SET_GS:
5869     case TARGET_ARCH_SET_FS:
5870         if (code == TARGET_ARCH_SET_GS)
5871             idx = R_GS;
5872         else
5873             idx = R_FS;
5874         cpu_x86_load_seg(env, idx, 0);
5875         env->segs[idx].base = addr;
5876         break;
5877     case TARGET_ARCH_GET_GS:
5878     case TARGET_ARCH_GET_FS:
5879         if (code == TARGET_ARCH_GET_GS)
5880             idx = R_GS;
5881         else
5882             idx = R_FS;
5883         val = env->segs[idx].base;
5884         if (put_user(val, addr, abi_ulong))
5885             ret = -TARGET_EFAULT;
5886         break;
5887     default:
5888         ret = -TARGET_EINVAL;
5889         break;
5890     }
5891     return ret;
5892 }
5893 #endif /* defined(TARGET_ABI32) */
5894 
5895 #endif /* defined(TARGET_I386) */
5896 
5897 #define NEW_STACK_SIZE 0x40000
5898 
5899 
5900 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5901 typedef struct {
5902     CPUArchState *env;
5903     pthread_mutex_t mutex;
5904     pthread_cond_t cond;
5905     pthread_t thread;
5906     uint32_t tid;
5907     abi_ulong child_tidptr;
5908     abi_ulong parent_tidptr;
5909     sigset_t sigmask;
5910 } new_thread_info;
5911 
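/*
 * Start routine for threads created via clone() with CLONE_VM: register the
 * new thread with RCU and TCG, publish its TID, restore the signal mask,
 * signal the parent that setup is complete, and enter the CPU loop.
 */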
5912 static void *clone_func(void *arg)
5913 {
5914     new_thread_info *info = arg;
5915     CPUArchState *env;
5916     CPUState *cpu;
5917     TaskState *ts;
5918 
5919     rcu_register_thread();
5920     tcg_register_thread();
5921     env = info->env;
5922     cpu = env_cpu(env);
5923     thread_cpu = cpu;
5924     ts = (TaskState *)cpu->opaque;
5925     info->tid = sys_gettid();
5926     task_settid(ts);
5927     if (info->child_tidptr)
5928         put_user_u32(info->tid, info->child_tidptr);
5929     if (info->parent_tidptr)
5930         put_user_u32(info->tid, info->parent_tidptr);
5931     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5932     /* Enable signals.  */
5933     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5934     /* Signal to the parent that we're ready.  */
5935     pthread_mutex_lock(&info->mutex);
5936     pthread_cond_broadcast(&info->cond);
5937     pthread_mutex_unlock(&info->mutex);
5938     /* Wait until the parent has finished initializing the tls state.  */
5939     pthread_mutex_lock(&clone_lock);
5940     pthread_mutex_unlock(&clone_lock);
5941     cpu_loop(env);
5942     /* never exits */
5943     return NULL;
5944 }
5945 
5946 /* do_fork() must return host values and target errnos (unlike most
5947    do_*() functions). */
5948 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5949                    abi_ulong parent_tidptr, target_ulong newtls,
5950                    abi_ulong child_tidptr)
5951 {
5952     CPUState *cpu = env_cpu(env);
5953     int ret;
5954     TaskState *ts;
5955     CPUState *new_cpu;
5956     CPUArchState *new_env;
5957     sigset_t sigmask;
5958 
5959     flags &= ~CLONE_IGNORED_FLAGS;
5960 
5961     /* Emulate vfork() with fork() */
5962     if (flags & CLONE_VFORK)
5963         flags &= ~(CLONE_VFORK | CLONE_VM);
5964 
5965     if (flags & CLONE_VM) {
5966         TaskState *parent_ts = (TaskState *)cpu->opaque;
5967         new_thread_info info;
5968         pthread_attr_t attr;
5969 
5970         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5971             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5972             return -TARGET_EINVAL;
5973         }
5974 
5975         ts = g_new0(TaskState, 1);
5976         init_task_state(ts);
5977 
5978         /* Grab a mutex so that thread setup appears atomic.  */
5979         pthread_mutex_lock(&clone_lock);
5980 
5981         /* we create a new CPU instance. */
5982         new_env = cpu_copy(env);
5983         /* Init regs that differ from the parent.  */
5984         cpu_clone_regs_child(new_env, newsp, flags);
5985         cpu_clone_regs_parent(env, flags);
5986         new_cpu = env_cpu(new_env);
5987         new_cpu->opaque = ts;
5988         ts->bprm = parent_ts->bprm;
5989         ts->info = parent_ts->info;
5990         ts->signal_mask = parent_ts->signal_mask;
5991 
5992         if (flags & CLONE_CHILD_CLEARTID) {
5993             ts->child_tidptr = child_tidptr;
5994         }
5995 
5996         if (flags & CLONE_SETTLS) {
5997             cpu_set_tls (new_env, newtls);
5998         }
5999 
6000         memset(&info, 0, sizeof(info));
6001         pthread_mutex_init(&info.mutex, NULL);
6002         pthread_mutex_lock(&info.mutex);
6003         pthread_cond_init(&info.cond, NULL);
6004         info.env = new_env;
6005         if (flags & CLONE_CHILD_SETTID) {
6006             info.child_tidptr = child_tidptr;
6007         }
6008         if (flags & CLONE_PARENT_SETTID) {
6009             info.parent_tidptr = parent_tidptr;
6010         }
6011 
6012         ret = pthread_attr_init(&attr);
6013         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6014         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6015         /* It is not safe to deliver signals until the child has finished
6016            initializing, so temporarily block all signals.  */
6017         sigfillset(&sigmask);
6018         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6019         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6020 
6021         /* If this is our first additional thread, we need to ensure we
6022          * generate code for parallel execution and flush old translations.
6023          */
6024         if (!parallel_cpus) {
6025             parallel_cpus = true;
6026             tb_flush(cpu);
6027         }
6028 
6029         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6030         /* TODO: Free new CPU state if thread creation failed.  */
6031 
6032         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6033         pthread_attr_destroy(&attr);
6034         if (ret == 0) {
6035             /* Wait for the child to initialize.  */
6036             pthread_cond_wait(&info.cond, &info.mutex);
6037             ret = info.tid;
6038         } else {
6039             ret = -1;
6040         }
6041         pthread_mutex_unlock(&info.mutex);
6042         pthread_cond_destroy(&info.cond);
6043         pthread_mutex_destroy(&info.mutex);
6044         pthread_mutex_unlock(&clone_lock);
6045     } else {
6046         /* if there is no CLONE_VM, we consider it a fork */
6047         if (flags & CLONE_INVALID_FORK_FLAGS) {
6048             return -TARGET_EINVAL;
6049         }
6050 
6051         /* We can't support custom termination signals */
6052         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6053             return -TARGET_EINVAL;
6054         }
6055 
6056         if (block_signals()) {
6057             return -TARGET_ERESTARTSYS;
6058         }
6059 
6060         fork_start();
6061         ret = fork();
6062         if (ret == 0) {
6063             /* Child Process.  */
6064             cpu_clone_regs_child(env, newsp, flags);
6065             fork_end(1);
6066             /* There is a race condition here.  The parent process could
6067                theoretically read the TID in the child process before the child
6068                tid is set.  This would require using either ptrace
6069                (not implemented) or having *_tidptr to point at a shared memory
6070                mapping.  We can't repeat the spinlock hack used above because
6071                the child process gets its own copy of the lock.  */
6072             if (flags & CLONE_CHILD_SETTID)
6073                 put_user_u32(sys_gettid(), child_tidptr);
6074             if (flags & CLONE_PARENT_SETTID)
6075                 put_user_u32(sys_gettid(), parent_tidptr);
6076             ts = (TaskState *)cpu->opaque;
6077             if (flags & CLONE_SETTLS)
6078                 cpu_set_tls (env, newtls);
6079             if (flags & CLONE_CHILD_CLEARTID)
6080                 ts->child_tidptr = child_tidptr;
6081         } else {
6082             cpu_clone_regs_parent(env, flags);
6083             fork_end(0);
6084         }
6085     }
6086     return ret;
6087 }
6088 
6089 /* warning: doesn't handle Linux-specific flags... */
6090 static int target_to_host_fcntl_cmd(int cmd)
6091 {
6092     int ret;
6093 
6094     switch(cmd) {
6095     case TARGET_F_DUPFD:
6096     case TARGET_F_GETFD:
6097     case TARGET_F_SETFD:
6098     case TARGET_F_GETFL:
6099     case TARGET_F_SETFL:
6100         ret = cmd;
6101         break;
6102     case TARGET_F_GETLK:
6103         ret = F_GETLK64;
6104         break;
6105     case TARGET_F_SETLK:
6106         ret = F_SETLK64;
6107         break;
6108     case TARGET_F_SETLKW:
6109         ret = F_SETLKW64;
6110         break;
6111     case TARGET_F_GETOWN:
6112         ret = F_GETOWN;
6113         break;
6114     case TARGET_F_SETOWN:
6115         ret = F_SETOWN;
6116         break;
6117     case TARGET_F_GETSIG:
6118         ret = F_GETSIG;
6119         break;
6120     case TARGET_F_SETSIG:
6121         ret = F_SETSIG;
6122         break;
6123 #if TARGET_ABI_BITS == 32
6124     case TARGET_F_GETLK64:
6125         ret = F_GETLK64;
6126         break;
6127     case TARGET_F_SETLK64:
6128         ret = F_SETLK64;
6129         break;
6130     case TARGET_F_SETLKW64:
6131         ret = F_SETLKW64;
6132         break;
6133 #endif
6134     case TARGET_F_SETLEASE:
6135         ret = F_SETLEASE;
6136         break;
6137     case TARGET_F_GETLEASE:
6138         ret = F_GETLEASE;
6139         break;
6140 #ifdef F_DUPFD_CLOEXEC
6141     case TARGET_F_DUPFD_CLOEXEC:
6142         ret = F_DUPFD_CLOEXEC;
6143         break;
6144 #endif
6145     case TARGET_F_NOTIFY:
6146         ret = F_NOTIFY;
6147         break;
6148 #ifdef F_GETOWN_EX
6149     case TARGET_F_GETOWN_EX:
6150         ret = F_GETOWN_EX;
6151         break;
6152 #endif
6153 #ifdef F_SETOWN_EX
6154     case TARGET_F_SETOWN_EX:
6155         ret = F_SETOWN_EX;
6156         break;
6157 #endif
6158 #ifdef F_SETPIPE_SZ
6159     case TARGET_F_SETPIPE_SZ:
6160         ret = F_SETPIPE_SZ;
6161         break;
6162     case TARGET_F_GETPIPE_SZ:
6163         ret = F_GETPIPE_SZ;
6164         break;
6165 #endif
6166     default:
6167         ret = -TARGET_EINVAL;
6168         break;
6169     }
6170 
6171 #if defined(__powerpc64__)
6172     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6173      * the kernel does not support. The glibc fcntl wrapper adjusts them
6174      * to 5, 6 and 7 before making the syscall(). Since we issue the
6175      * syscall directly, adjust to what the kernel supports.
6176      */
6177     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6178         ret -= F_GETLK64 - 5;
6179     }
6180 #endif
6181 
6182     return ret;
6183 }
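/*
 * Illustrative sketch, not part of the original source: on a ppc64 host,
 * glibc's headers give F_GETLK64/F_SETLK64/F_SETLKW64 the values 12/13/14
 * while the raw kernel ABI expects 5/6/7, so the rebase above maps e.g.
 * F_SETLK64 (13) to 13 - (12 - 5) == 6 (the function name is hypothetical).
 */
#if 0
static int ppc64_fcntl_rebase_example(void)
{
    int ret = F_SETLK64;              /* 13 with ppc64 glibc headers */

    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;         /* 13 - 7 == 6, the kernel's value */
    }
    return ret;
}
#endif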
6184 
6185 #define FLOCK_TRANSTBL \
6186     switch (type) { \
6187     TRANSTBL_CONVERT(F_RDLCK); \
6188     TRANSTBL_CONVERT(F_WRLCK); \
6189     TRANSTBL_CONVERT(F_UNLCK); \
6190     TRANSTBL_CONVERT(F_EXLCK); \
6191     TRANSTBL_CONVERT(F_SHLCK); \
6192     }
6193 
6194 static int target_to_host_flock(int type)
6195 {
6196 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6197     FLOCK_TRANSTBL
6198 #undef  TRANSTBL_CONVERT
6199     return -TARGET_EINVAL;
6200 }
6201 
6202 static int host_to_target_flock(int type)
6203 {
6204 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6205     FLOCK_TRANSTBL
6206 #undef  TRANSTBL_CONVERT
6207     /* If we don't know how to convert the value coming
6208      * from the host, we copy it to the target field as-is.
6209      */
6210     return type;
6211 }
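/*
 * Illustrative sketch, not part of the original source: with the
 * TRANSTBL_CONVERT definition used in target_to_host_flock(), the
 * FLOCK_TRANSTBL X-macro above expands to a plain switch, roughly the
 * hand-written form below (the function name is hypothetical).
 */
#if 0
static int target_to_host_flock_expanded(int type)
{
    switch (type) {
    case TARGET_F_RDLCK: return F_RDLCK;
    case TARGET_F_WRLCK: return F_WRLCK;
    case TARGET_F_UNLCK: return F_UNLCK;
    case TARGET_F_EXLCK: return F_EXLCK;
    case TARGET_F_SHLCK: return F_SHLCK;
    }
    return -TARGET_EINVAL;
}
#endif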
6212 
6213 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6214                                             abi_ulong target_flock_addr)
6215 {
6216     struct target_flock *target_fl;
6217     int l_type;
6218 
6219     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6220         return -TARGET_EFAULT;
6221     }
6222 
6223     __get_user(l_type, &target_fl->l_type);
6224     l_type = target_to_host_flock(l_type);
6225     if (l_type < 0) {
6226         return l_type;
6227     }
6228     fl->l_type = l_type;
6229     __get_user(fl->l_whence, &target_fl->l_whence);
6230     __get_user(fl->l_start, &target_fl->l_start);
6231     __get_user(fl->l_len, &target_fl->l_len);
6232     __get_user(fl->l_pid, &target_fl->l_pid);
6233     unlock_user_struct(target_fl, target_flock_addr, 0);
6234     return 0;
6235 }
6236 
6237 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6238                                           const struct flock64 *fl)
6239 {
6240     struct target_flock *target_fl;
6241     short l_type;
6242 
6243     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6244         return -TARGET_EFAULT;
6245     }
6246 
6247     l_type = host_to_target_flock(fl->l_type);
6248     __put_user(l_type, &target_fl->l_type);
6249     __put_user(fl->l_whence, &target_fl->l_whence);
6250     __put_user(fl->l_start, &target_fl->l_start);
6251     __put_user(fl->l_len, &target_fl->l_len);
6252     __put_user(fl->l_pid, &target_fl->l_pid);
6253     unlock_user_struct(target_fl, target_flock_addr, 1);
6254     return 0;
6255 }
6256 
6257 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6258 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6259 
6260 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6261 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6262                                                    abi_ulong target_flock_addr)
6263 {
6264     struct target_oabi_flock64 *target_fl;
6265     int l_type;
6266 
6267     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6268         return -TARGET_EFAULT;
6269     }
6270 
6271     __get_user(l_type, &target_fl->l_type);
6272     l_type = target_to_host_flock(l_type);
6273     if (l_type < 0) {
6274         return l_type;
6275     }
6276     fl->l_type = l_type;
6277     __get_user(fl->l_whence, &target_fl->l_whence);
6278     __get_user(fl->l_start, &target_fl->l_start);
6279     __get_user(fl->l_len, &target_fl->l_len);
6280     __get_user(fl->l_pid, &target_fl->l_pid);
6281     unlock_user_struct(target_fl, target_flock_addr, 0);
6282     return 0;
6283 }
6284 
6285 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6286                                                  const struct flock64 *fl)
6287 {
6288     struct target_oabi_flock64 *target_fl;
6289     short l_type;
6290 
6291     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6292         return -TARGET_EFAULT;
6293     }
6294 
6295     l_type = host_to_target_flock(fl->l_type);
6296     __put_user(l_type, &target_fl->l_type);
6297     __put_user(fl->l_whence, &target_fl->l_whence);
6298     __put_user(fl->l_start, &target_fl->l_start);
6299     __put_user(fl->l_len, &target_fl->l_len);
6300     __put_user(fl->l_pid, &target_fl->l_pid);
6301     unlock_user_struct(target_fl, target_flock_addr, 1);
6302     return 0;
6303 }
6304 #endif
6305 
6306 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6307                                               abi_ulong target_flock_addr)
6308 {
6309     struct target_flock64 *target_fl;
6310     int l_type;
6311 
6312     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6313         return -TARGET_EFAULT;
6314     }
6315 
6316     __get_user(l_type, &target_fl->l_type);
6317     l_type = target_to_host_flock(l_type);
6318     if (l_type < 0) {
6319         return l_type;
6320     }
6321     fl->l_type = l_type;
6322     __get_user(fl->l_whence, &target_fl->l_whence);
6323     __get_user(fl->l_start, &target_fl->l_start);
6324     __get_user(fl->l_len, &target_fl->l_len);
6325     __get_user(fl->l_pid, &target_fl->l_pid);
6326     unlock_user_struct(target_fl, target_flock_addr, 0);
6327     return 0;
6328 }
6329 
6330 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6331                                             const struct flock64 *fl)
6332 {
6333     struct target_flock64 *target_fl;
6334     short l_type;
6335 
6336     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6337         return -TARGET_EFAULT;
6338     }
6339 
6340     l_type = host_to_target_flock(fl->l_type);
6341     __put_user(l_type, &target_fl->l_type);
6342     __put_user(fl->l_whence, &target_fl->l_whence);
6343     __put_user(fl->l_start, &target_fl->l_start);
6344     __put_user(fl->l_len, &target_fl->l_len);
6345     __put_user(fl->l_pid, &target_fl->l_pid);
6346     unlock_user_struct(target_fl, target_flock_addr, 1);
6347     return 0;
6348 }
6349 
6350 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6351 {
6352     struct flock64 fl64;
6353 #ifdef F_GETOWN_EX
6354     struct f_owner_ex fox;
6355     struct target_f_owner_ex *target_fox;
6356 #endif
6357     abi_long ret;
6358     int host_cmd = target_to_host_fcntl_cmd(cmd);
6359 
6360     if (host_cmd == -TARGET_EINVAL)
6361         return host_cmd;
6362 
6363     switch(cmd) {
6364     case TARGET_F_GETLK:
6365         ret = copy_from_user_flock(&fl64, arg);
6366         if (ret) {
6367             return ret;
6368         }
6369         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6370         if (ret == 0) {
6371             ret = copy_to_user_flock(arg, &fl64);
6372         }
6373         break;
6374 
6375     case TARGET_F_SETLK:
6376     case TARGET_F_SETLKW:
6377         ret = copy_from_user_flock(&fl64, arg);
6378         if (ret) {
6379             return ret;
6380         }
6381         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6382         break;
6383 
6384     case TARGET_F_GETLK64:
6385         ret = copy_from_user_flock64(&fl64, arg);
6386         if (ret) {
6387             return ret;
6388         }
6389         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6390         if (ret == 0) {
6391             ret = copy_to_user_flock64(arg, &fl64);
6392         }
6393         break;
6394     case TARGET_F_SETLK64:
6395     case TARGET_F_SETLKW64:
6396         ret = copy_from_user_flock64(&fl64, arg);
6397         if (ret) {
6398             return ret;
6399         }
6400         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6401         break;
6402 
6403     case TARGET_F_GETFL:
6404         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6405         if (ret >= 0) {
6406             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6407         }
6408         break;
6409 
6410     case TARGET_F_SETFL:
6411         ret = get_errno(safe_fcntl(fd, host_cmd,
6412                                    target_to_host_bitmask(arg,
6413                                                           fcntl_flags_tbl)));
6414         break;
6415 
6416 #ifdef F_GETOWN_EX
6417     case TARGET_F_GETOWN_EX:
6418         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6419         if (ret >= 0) {
6420             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6421                 return -TARGET_EFAULT;
6422             target_fox->type = tswap32(fox.type);
6423             target_fox->pid = tswap32(fox.pid);
6424             unlock_user_struct(target_fox, arg, 1);
6425         }
6426         break;
6427 #endif
6428 
6429 #ifdef F_SETOWN_EX
6430     case TARGET_F_SETOWN_EX:
6431         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6432             return -TARGET_EFAULT;
6433         fox.type = tswap32(target_fox->type);
6434         fox.pid = tswap32(target_fox->pid);
6435         unlock_user_struct(target_fox, arg, 0);
6436         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6437         break;
6438 #endif
6439 
6440     case TARGET_F_SETOWN:
6441     case TARGET_F_GETOWN:
6442     case TARGET_F_SETSIG:
6443     case TARGET_F_GETSIG:
6444     case TARGET_F_SETLEASE:
6445     case TARGET_F_GETLEASE:
6446     case TARGET_F_SETPIPE_SZ:
6447     case TARGET_F_GETPIPE_SZ:
6448         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6449         break;
6450 
6451     default:
6452         ret = get_errno(safe_fcntl(fd, cmd, arg));
6453         break;
6454     }
6455     return ret;
6456 }
6457 
6458 #ifdef USE_UID16
6459 
6460 static inline int high2lowuid(int uid)
6461 {
6462     if (uid > 65535)
6463         return 65534;
6464     else
6465         return uid;
6466 }
6467 
6468 static inline int high2lowgid(int gid)
6469 {
6470     if (gid > 65535)
6471         return 65534;
6472     else
6473         return gid;
6474 }
6475 
6476 static inline int low2highuid(int uid)
6477 {
6478     if ((int16_t)uid == -1)
6479         return -1;
6480     else
6481         return uid;
6482 }
6483 
6484 static inline int low2highgid(int gid)
6485 {
6486     if ((int16_t)gid == -1)
6487         return -1;
6488     else
6489         return gid;
6490 }
6491 static inline int tswapid(int id)
6492 {
6493     return tswap16(id);
6494 }
6495 
6496 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6497 
6498 #else /* !USE_UID16 */
6499 static inline int high2lowuid(int uid)
6500 {
6501     return uid;
6502 }
6503 static inline int high2lowgid(int gid)
6504 {
6505     return gid;
6506 }
6507 static inline int low2highuid(int uid)
6508 {
6509     return uid;
6510 }
6511 static inline int low2highgid(int gid)
6512 {
6513     return gid;
6514 }
6515 static inline int tswapid(int id)
6516 {
6517     return tswap32(id);
6518 }
6519 
6520 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6521 
6522 #endif /* USE_UID16 */
6523 
6524 /* We must do direct syscalls for setting UID/GID, because we want to
6525  * implement the Linux system call semantics of "change only for this thread",
6526  * not the libc/POSIX semantics of "change for all threads in process".
6527  * (See http://ewontfix.com/17/ for more details.)
6528  * We use the 32-bit version of the syscalls if present; if it is not
6529  * then either the host architecture supports 32-bit UIDs natively with
6530  * the standard syscall, or the 16-bit UID is the best we can do.
6531  */
6532 #ifdef __NR_setuid32
6533 #define __NR_sys_setuid __NR_setuid32
6534 #else
6535 #define __NR_sys_setuid __NR_setuid
6536 #endif
6537 #ifdef __NR_setgid32
6538 #define __NR_sys_setgid __NR_setgid32
6539 #else
6540 #define __NR_sys_setgid __NR_setgid
6541 #endif
6542 #ifdef __NR_setresuid32
6543 #define __NR_sys_setresuid __NR_setresuid32
6544 #else
6545 #define __NR_sys_setresuid __NR_setresuid
6546 #endif
6547 #ifdef __NR_setresgid32
6548 #define __NR_sys_setresgid __NR_setresgid32
6549 #else
6550 #define __NR_sys_setresgid __NR_setresgid
6551 #endif
6552 
6553 _syscall1(int, sys_setuid, uid_t, uid)
6554 _syscall1(int, sys_setgid, gid_t, gid)
6555 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6556 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
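/*
 * Illustrative sketch, not part of the original source: the point of the
 * raw syscalls above.  The libc setuid() wrapper applies the change to
 * every thread in the process, whereas sys_setuid() changes only the
 * calling thread, which is what the guest's Linux syscall ABI expects
 * (the function name is hypothetical).
 */
#if 0
static void setuid_semantics_example(uid_t uid)
{
    setuid(uid);        /* libc wrapper: credentials change in all threads */
    sys_setuid(uid);    /* raw syscall: credentials change in this thread only */
}
#endif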
6557 
6558 void syscall_init(void)
6559 {
6560     IOCTLEntry *ie;
6561     const argtype *arg_type;
6562     int size;
6563     int i;
6564 
6565     thunk_init(STRUCT_MAX);
6566 
6567 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6568 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6569 #include "syscall_types.h"
6570 #undef STRUCT
6571 #undef STRUCT_SPECIAL
6572 
6573     /* Build the target_to_host_errno_table[] from
6574      * host_to_target_errno_table[]. */
6575     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6576         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6577     }
6578 
6579     /* We patch the ioctl size if necessary. We rely on the fact that
6580        no ioctl has all bits set to '1' in the size field. */
6581     ie = ioctl_entries;
6582     while (ie->target_cmd != 0) {
6583         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6584             TARGET_IOC_SIZEMASK) {
6585             arg_type = ie->arg_type;
6586             if (arg_type[0] != TYPE_PTR) {
6587                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6588                         ie->target_cmd);
6589                 exit(1);
6590             }
6591             arg_type++;
6592             size = thunk_type_size(arg_type, 0);
6593             ie->target_cmd = (ie->target_cmd &
6594                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6595                 (size << TARGET_IOC_SIZESHIFT);
6596         }
6597 
6598         /* automatic consistency check if same arch */
6599 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6600     (defined(__x86_64__) && defined(TARGET_X86_64))
6601         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6602             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6603                     ie->name, ie->target_cmd, ie->host_cmd);
6604         }
6605 #endif
6606         ie++;
6607     }
6608 }
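/*
 * Illustrative sketch, not part of the original source: the effect of the
 * size patching in syscall_init() on one table entry.  An entry generated
 * with the all-ones size placeholder gets the thunk-computed struct size
 * spliced into the same bit field (the function name is hypothetical).
 */
#if 0
static unsigned int patch_ioctl_size_example(unsigned int target_cmd, int size)
{
    target_cmd &= ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT);
    target_cmd |= (unsigned int)size << TARGET_IOC_SIZESHIFT;
    return target_cmd;
}
#endif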
6609 
6610 #if TARGET_ABI_BITS == 32
6611 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6612 {
6613 #ifdef TARGET_WORDS_BIGENDIAN
6614     return ((uint64_t)word0 << 32) | word1;
6615 #else
6616     return ((uint64_t)word1 << 32) | word0;
6617 #endif
6618 }
6619 #else /* TARGET_ABI_BITS == 32 */
6620 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6621 {
6622     return word0;
6623 }
6624 #endif /* TARGET_ABI_BITS != 32 */
6625 
6626 #ifdef TARGET_NR_truncate64
6627 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6628                                          abi_long arg2,
6629                                          abi_long arg3,
6630                                          abi_long arg4)
6631 {
6632     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6633         arg2 = arg3;
6634         arg3 = arg4;
6635     }
6636     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6637 }
6638 #endif
6639 
6640 #ifdef TARGET_NR_ftruncate64
6641 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6642                                           abi_long arg2,
6643                                           abi_long arg3,
6644                                           abi_long arg4)
6645 {
6646     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6647         arg2 = arg3;
6648         arg3 = arg4;
6649     }
6650     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6651 }
6652 #endif
6653 
6654 #if defined(TARGET_NR_timer_settime) || \
6655     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6656 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6657                                                  abi_ulong target_addr)
6658 {
6659     struct target_itimerspec *target_itspec;
6660 
6661     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6662         return -TARGET_EFAULT;
6663     }
6664 
6665     host_itspec->it_interval.tv_sec =
6666                             tswapal(target_itspec->it_interval.tv_sec);
6667     host_itspec->it_interval.tv_nsec =
6668                             tswapal(target_itspec->it_interval.tv_nsec);
6669     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6670     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6671 
6672     unlock_user_struct(target_itspec, target_addr, 1);
6673     return 0;
6674 }
6675 #endif
6676 
6677 #if ((defined(TARGET_NR_timerfd_gettime) || \
6678       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6679     defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6680 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6681                                                struct itimerspec *host_its)
6682 {
6683     struct target_itimerspec *target_itspec;
6684 
6685     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6686         return -TARGET_EFAULT;
6687     }
6688 
6689     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6690     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6691 
6692     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6693     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6694 
6695     unlock_user_struct(target_itspec, target_addr, 0);
6696     return 0;
6697 }
6698 #endif
6699 
6700 #if defined(TARGET_NR_adjtimex) || \
6701     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6702 static inline abi_long target_to_host_timex(struct timex *host_tx,
6703                                             abi_long target_addr)
6704 {
6705     struct target_timex *target_tx;
6706 
6707     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6708         return -TARGET_EFAULT;
6709     }
6710 
6711     __get_user(host_tx->modes, &target_tx->modes);
6712     __get_user(host_tx->offset, &target_tx->offset);
6713     __get_user(host_tx->freq, &target_tx->freq);
6714     __get_user(host_tx->maxerror, &target_tx->maxerror);
6715     __get_user(host_tx->esterror, &target_tx->esterror);
6716     __get_user(host_tx->status, &target_tx->status);
6717     __get_user(host_tx->constant, &target_tx->constant);
6718     __get_user(host_tx->precision, &target_tx->precision);
6719     __get_user(host_tx->tolerance, &target_tx->tolerance);
6720     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6721     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6722     __get_user(host_tx->tick, &target_tx->tick);
6723     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6724     __get_user(host_tx->jitter, &target_tx->jitter);
6725     __get_user(host_tx->shift, &target_tx->shift);
6726     __get_user(host_tx->stabil, &target_tx->stabil);
6727     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6728     __get_user(host_tx->calcnt, &target_tx->calcnt);
6729     __get_user(host_tx->errcnt, &target_tx->errcnt);
6730     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6731     __get_user(host_tx->tai, &target_tx->tai);
6732 
6733     unlock_user_struct(target_tx, target_addr, 0);
6734     return 0;
6735 }
6736 
6737 static inline abi_long host_to_target_timex(abi_long target_addr,
6738                                             struct timex *host_tx)
6739 {
6740     struct target_timex *target_tx;
6741 
6742     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6743         return -TARGET_EFAULT;
6744     }
6745 
6746     __put_user(host_tx->modes, &target_tx->modes);
6747     __put_user(host_tx->offset, &target_tx->offset);
6748     __put_user(host_tx->freq, &target_tx->freq);
6749     __put_user(host_tx->maxerror, &target_tx->maxerror);
6750     __put_user(host_tx->esterror, &target_tx->esterror);
6751     __put_user(host_tx->status, &target_tx->status);
6752     __put_user(host_tx->constant, &target_tx->constant);
6753     __put_user(host_tx->precision, &target_tx->precision);
6754     __put_user(host_tx->tolerance, &target_tx->tolerance);
6755     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6756     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6757     __put_user(host_tx->tick, &target_tx->tick);
6758     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6759     __put_user(host_tx->jitter, &target_tx->jitter);
6760     __put_user(host_tx->shift, &target_tx->shift);
6761     __put_user(host_tx->stabil, &target_tx->stabil);
6762     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6763     __put_user(host_tx->calcnt, &target_tx->calcnt);
6764     __put_user(host_tx->errcnt, &target_tx->errcnt);
6765     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6766     __put_user(host_tx->tai, &target_tx->tai);
6767 
6768     unlock_user_struct(target_tx, target_addr, 1);
6769     return 0;
6770 }
6771 #endif
6772 
6773 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6774                                                abi_ulong target_addr)
6775 {
6776     struct target_sigevent *target_sevp;
6777 
6778     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6779         return -TARGET_EFAULT;
6780     }
6781 
6782     /* This union is awkward on 64-bit systems because it has a 32-bit
6783      * integer and a pointer in it; we follow the conversion approach
6784      * used for handling sigval types in signal.c so the guest should get
6785      * the correct value back even if we did a 64-bit byteswap and it's
6786      * using the 32-bit integer.
6787      */
6788     host_sevp->sigev_value.sival_ptr =
6789         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6790     host_sevp->sigev_signo =
6791         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6792     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6793     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6794 
6795     unlock_user_struct(target_sevp, target_addr, 1);
6796     return 0;
6797 }
6798 
6799 #if defined(TARGET_NR_mlockall)
6800 static inline int target_to_host_mlockall_arg(int arg)
6801 {
6802     int result = 0;
6803 
6804     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6805         result |= MCL_CURRENT;
6806     }
6807     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6808         result |= MCL_FUTURE;
6809     }
6810     return result;
6811 }
6812 #endif
6813 
6814 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6815      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6816      defined(TARGET_NR_newfstatat))
6817 static inline abi_long host_to_target_stat64(void *cpu_env,
6818                                              abi_ulong target_addr,
6819                                              struct stat *host_st)
6820 {
6821 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6822     if (((CPUARMState *)cpu_env)->eabi) {
6823         struct target_eabi_stat64 *target_st;
6824 
6825         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6826             return -TARGET_EFAULT;
6827         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6828         __put_user(host_st->st_dev, &target_st->st_dev);
6829         __put_user(host_st->st_ino, &target_st->st_ino);
6830 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6831         __put_user(host_st->st_ino, &target_st->__st_ino);
6832 #endif
6833         __put_user(host_st->st_mode, &target_st->st_mode);
6834         __put_user(host_st->st_nlink, &target_st->st_nlink);
6835         __put_user(host_st->st_uid, &target_st->st_uid);
6836         __put_user(host_st->st_gid, &target_st->st_gid);
6837         __put_user(host_st->st_rdev, &target_st->st_rdev);
6838         __put_user(host_st->st_size, &target_st->st_size);
6839         __put_user(host_st->st_blksize, &target_st->st_blksize);
6840         __put_user(host_st->st_blocks, &target_st->st_blocks);
6841         __put_user(host_st->st_atime, &target_st->target_st_atime);
6842         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6843         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6844 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6845         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6846         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6847         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6848 #endif
6849         unlock_user_struct(target_st, target_addr, 1);
6850     } else
6851 #endif
6852     {
6853 #if defined(TARGET_HAS_STRUCT_STAT64)
6854         struct target_stat64 *target_st;
6855 #else
6856         struct target_stat *target_st;
6857 #endif
6858 
6859         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6860             return -TARGET_EFAULT;
6861         memset(target_st, 0, sizeof(*target_st));
6862         __put_user(host_st->st_dev, &target_st->st_dev);
6863         __put_user(host_st->st_ino, &target_st->st_ino);
6864 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6865         __put_user(host_st->st_ino, &target_st->__st_ino);
6866 #endif
6867         __put_user(host_st->st_mode, &target_st->st_mode);
6868         __put_user(host_st->st_nlink, &target_st->st_nlink);
6869         __put_user(host_st->st_uid, &target_st->st_uid);
6870         __put_user(host_st->st_gid, &target_st->st_gid);
6871         __put_user(host_st->st_rdev, &target_st->st_rdev);
6872         /* XXX: better use of kernel struct */
6873         __put_user(host_st->st_size, &target_st->st_size);
6874         __put_user(host_st->st_blksize, &target_st->st_blksize);
6875         __put_user(host_st->st_blocks, &target_st->st_blocks);
6876         __put_user(host_st->st_atime, &target_st->target_st_atime);
6877         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6878         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6879 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6880         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6881         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6882         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6883 #endif
6884         unlock_user_struct(target_st, target_addr, 1);
6885     }
6886 
6887     return 0;
6888 }
6889 #endif
6890 
6891 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6892 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6893                                             abi_ulong target_addr)
6894 {
6895     struct target_statx *target_stx;
6896 
6897     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6898         return -TARGET_EFAULT;
6899     }
6900     memset(target_stx, 0, sizeof(*target_stx));
6901 
6902     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6903     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6904     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6905     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6906     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6907     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6908     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6909     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6910     __put_user(host_stx->stx_size, &target_stx->stx_size);
6911     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6912     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6913     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6914     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6915     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6916     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6917     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6918     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6919     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6920     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6921     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6922     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6923     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6924     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6925 
6926     unlock_user_struct(target_stx, target_addr, 1);
6927 
6928     return 0;
6929 }
6930 #endif
6931 
6932 static int do_sys_futex(int *uaddr, int op, int val,
6933                          const struct timespec *timeout, int *uaddr2,
6934                          int val3)
6935 {
6936 #if HOST_LONG_BITS == 64
6937 #if defined(__NR_futex)
6938     /* the host always has a 64-bit time_t and defines no _time64 variant */
6939     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
6940 
6941 #endif
6942 #else /* HOST_LONG_BITS == 64 */
6943 #if defined(__NR_futex_time64)
6944     if (sizeof(timeout->tv_sec) == 8) {
6945         /* _time64 function on 32bit arch */
6946         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
6947     }
6948 #endif
6949 #if defined(__NR_futex)
6950     /* old function on 32bit arch */
6951     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
6952 #endif
6953 #endif /* HOST_LONG_BITS == 64 */
6954     g_assert_not_reached();
6955 }
6956 
6957 static int do_safe_futex(int *uaddr, int op, int val,
6958                          const struct timespec *timeout, int *uaddr2,
6959                          int val3)
6960 {
6961 #if HOST_LONG_BITS == 64
6962 #if defined(__NR_futex)
6963     /* the host always has a 64-bit time_t and defines no _time64 variant */
6964     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
6965 #endif
6966 #else /* HOST_LONG_BITS == 64 */
6967 #if defined(__NR_futex_time64)
6968     if (sizeof(timeout->tv_sec) == 8) {
6969         /* _time64 function on 32bit arch */
6970         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
6971                                            val3));
6972     }
6973 #endif
6974 #if defined(__NR_futex)
6975     /* old function on 32bit arch */
6976     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
6977 #endif
6978 #endif /* HOST_LONG_BITS == 64 */
6979     return -TARGET_ENOSYS;
6980 }
6981 
6982 /* ??? Using host futex calls even when target atomic operations
6983    are not really atomic probably breaks things.  However, implementing
6984    futexes locally would make futexes shared between multiple processes
6985    tricky; then again, they are probably useless anyway because guest
6986    atomic operations won't work either.  */
6987 #if defined(TARGET_NR_futex)
6988 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6989                     target_ulong uaddr2, int val3)
6990 {
6991     struct timespec ts, *pts;
6992     int base_op;
6993 
6994     /* ??? We assume FUTEX_* constants are the same on both host
6995        and target.  */
6996 #ifdef FUTEX_CMD_MASK
6997     base_op = op & FUTEX_CMD_MASK;
6998 #else
6999     base_op = op;
7000 #endif
7001     switch (base_op) {
7002     case FUTEX_WAIT:
7003     case FUTEX_WAIT_BITSET:
7004         if (timeout) {
7005             pts = &ts;
7006             target_to_host_timespec(pts, timeout);
7007         } else {
7008             pts = NULL;
7009         }
7010         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7011     case FUTEX_WAKE:
7012         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7013     case FUTEX_FD:
7014         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7015     case FUTEX_REQUEUE:
7016     case FUTEX_CMP_REQUEUE:
7017     case FUTEX_WAKE_OP:
7018         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7019            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7020            But the prototype takes a `struct timespec *'; insert casts
7021            to satisfy the compiler.  We do not need to tswap TIMEOUT
7022            since it's not compared to guest memory.  */
7023         pts = (struct timespec *)(uintptr_t) timeout;
7024         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7025                              (base_op == FUTEX_CMP_REQUEUE
7026                                       ? tswap32(val3)
7027                                       : val3));
7028     default:
7029         return -TARGET_ENOSYS;
7030     }
7031 }
7032 #endif
7033 
7034 #if defined(TARGET_NR_futex_time64)
7035 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7036                            target_ulong uaddr2, int val3)
7037 {
7038     struct timespec ts, *pts;
7039     int base_op;
7040 
7041     /* ??? We assume FUTEX_* constants are the same on both host
7042        and target.  */
7043 #ifdef FUTEX_CMD_MASK
7044     base_op = op & FUTEX_CMD_MASK;
7045 #else
7046     base_op = op;
7047 #endif
7048     switch (base_op) {
7049     case FUTEX_WAIT:
7050     case FUTEX_WAIT_BITSET:
7051         if (timeout) {
7052             pts = &ts;
7053             target_to_host_timespec64(pts, timeout);
7054         } else {
7055             pts = NULL;
7056         }
7057         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7058     case FUTEX_WAKE:
7059         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7060     case FUTEX_FD:
7061         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7062     case FUTEX_REQUEUE:
7063     case FUTEX_CMP_REQUEUE:
7064     case FUTEX_WAKE_OP:
7065         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7066            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7067            But the prototype takes a `struct timespec *'; insert casts
7068            to satisfy the compiler.  We do not need to tswap TIMEOUT
7069            since it's not compared to guest memory.  */
7070         pts = (struct timespec *)(uintptr_t) timeout;
7071         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7072                              (base_op == FUTEX_CMP_REQUEUE
7073                                       ? tswap32(val3)
7074                                       : val3));
7075     default:
7076         return -TARGET_ENOSYS;
7077     }
7078 }
7079 #endif
7080 
7081 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7082 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7083                                      abi_long handle, abi_long mount_id,
7084                                      abi_long flags)
7085 {
7086     struct file_handle *target_fh;
7087     struct file_handle *fh;
7088     int mid = 0;
7089     abi_long ret;
7090     char *name;
7091     unsigned int size, total_size;
7092 
7093     if (get_user_s32(size, handle)) {
7094         return -TARGET_EFAULT;
7095     }
7096 
7097     name = lock_user_string(pathname);
7098     if (!name) {
7099         return -TARGET_EFAULT;
7100     }
7101 
7102     total_size = sizeof(struct file_handle) + size;
7103     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7104     if (!target_fh) {
7105         unlock_user(name, pathname, 0);
7106         return -TARGET_EFAULT;
7107     }
7108 
7109     fh = g_malloc0(total_size);
7110     fh->handle_bytes = size;
7111 
7112     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7113     unlock_user(name, pathname, 0);
7114 
7115     /* man name_to_handle_at(2):
7116      * Other than the use of the handle_bytes field, the caller should treat
7117      * the file_handle structure as an opaque data type
7118      */
7119 
7120     memcpy(target_fh, fh, total_size);
7121     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7122     target_fh->handle_type = tswap32(fh->handle_type);
7123     g_free(fh);
7124     unlock_user(target_fh, handle, total_size);
7125 
7126     if (put_user_s32(mid, mount_id)) {
7127         return -TARGET_EFAULT;
7128     }
7129 
7130     return ret;
7131 
7132 }
7133 #endif
7134 
7135 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7136 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7137                                      abi_long flags)
7138 {
7139     struct file_handle *target_fh;
7140     struct file_handle *fh;
7141     unsigned int size, total_size;
7142     abi_long ret;
7143 
7144     if (get_user_s32(size, handle)) {
7145         return -TARGET_EFAULT;
7146     }
7147 
7148     total_size = sizeof(struct file_handle) + size;
7149     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7150     if (!target_fh) {
7151         return -TARGET_EFAULT;
7152     }
7153 
7154     fh = g_memdup(target_fh, total_size);
7155     fh->handle_bytes = size;
7156     fh->handle_type = tswap32(target_fh->handle_type);
7157 
7158     ret = get_errno(open_by_handle_at(mount_fd, fh,
7159                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7160 
7161     g_free(fh);
7162 
7163     unlock_user(target_fh, handle, total_size);
7164 
7165     return ret;
7166 }
7167 #endif
7168 
7169 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7170 
7171 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7172 {
7173     int host_flags;
7174     target_sigset_t *target_mask;
7175     sigset_t host_mask;
7176     abi_long ret;
7177 
7178     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7179         return -TARGET_EINVAL;
7180     }
7181     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7182         return -TARGET_EFAULT;
7183     }
7184 
7185     target_to_host_sigset(&host_mask, target_mask);
7186 
7187     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7188 
7189     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7190     if (ret >= 0) {
7191         fd_trans_register(ret, &target_signalfd_trans);
7192     }
7193 
7194     unlock_user_struct(target_mask, mask, 0);
7195 
7196     return ret;
7197 }
7198 #endif
7199 
7200 /* Map host to target signal numbers for the wait family of syscalls.
7201    Assume all other status bits are the same.  */
7202 int host_to_target_waitstatus(int status)
7203 {
7204     if (WIFSIGNALED(status)) {
7205         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7206     }
7207     if (WIFSTOPPED(status)) {
7208         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7209                | (status & 0xff);
7210     }
7211     return status;
7212 }
7213 
7214 static int open_self_cmdline(void *cpu_env, int fd)
7215 {
7216     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7217     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7218     int i;
7219 
7220     for (i = 0; i < bprm->argc; i++) {
7221         size_t len = strlen(bprm->argv[i]) + 1;
7222 
7223         if (write(fd, bprm->argv[i], len) != len) {
7224             return -1;
7225         }
7226     }
7227 
7228     return 0;
7229 }
7230 
7231 static int open_self_maps(void *cpu_env, int fd)
7232 {
7233     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7234     TaskState *ts = cpu->opaque;
7235     FILE *fp;
7236     char *line = NULL;
7237     size_t len = 0;
7238     ssize_t read;
7239 
7240     fp = fopen("/proc/self/maps", "r");
7241     if (fp == NULL) {
7242         return -1;
7243     }
7244 
7245     while ((read = getline(&line, &len, fp)) != -1) {
7246         int fields, dev_maj, dev_min, inode;
7247         uint64_t min, max, offset;
7248         char flag_r, flag_w, flag_x, flag_p;
7249         char path[512] = "";
7250         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7251                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7252                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7253 
7254         if ((fields < 10) || (fields > 11)) {
7255             continue;
7256         }
7257         if (h2g_valid(min)) {
7258             int flags = page_get_flags(h2g(min));
7259             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7260             if (page_check_range(h2g(min), max - min, flags) == -1) {
7261                 continue;
7262             }
7263             if (h2g(min) == ts->info->stack_limit) {
7264                 pstrcpy(path, sizeof(path), "      [stack]");
7265             }
7266             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7267                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7268                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7269                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7270                     path[0] ? "         " : "", path);
7271         }
7272     }
7273 
7274 #ifdef TARGET_VSYSCALL_PAGE
7275     /*
7276      * We only support execution from the vsyscall page.
7277      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7278      */
7279     dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7280             " --xp 00000000 00:00 0 [vsyscall]\n",
7281             TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7282 #endif
7283 
7284     free(line);
7285     fclose(fp);
7286 
7287     return 0;
7288 }
7289 
7290 static int open_self_stat(void *cpu_env, int fd)
7291 {
7292     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7293     TaskState *ts = cpu->opaque;
7294     abi_ulong start_stack = ts->info->start_stack;
7295     int i;
7296 
7297     for (i = 0; i < 44; i++) {
7298       char buf[128];
7299       int len;
7300       uint64_t val = 0;
7301 
7302       if (i == 0) {
7303         /* pid */
7304         val = getpid();
7305         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7306       } else if (i == 1) {
7307         /* app name */
7308         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7309       } else if (i == 27) {
7310         /* stack bottom */
7311         val = start_stack;
7312         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7313       } else {
7314         /* for the rest, there is MasterCard (i.e. just report 0) */
7315         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7316       }
7317 
7318       len = strlen(buf);
7319       if (write(fd, buf, len) != len) {
7320           return -1;
7321       }
7322     }
7323 
7324     return 0;
7325 }
7326 
7327 static int open_self_auxv(void *cpu_env, int fd)
7328 {
7329     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7330     TaskState *ts = cpu->opaque;
7331     abi_ulong auxv = ts->info->saved_auxv;
7332     abi_ulong len = ts->info->auxv_len;
7333     char *ptr;
7334 
7335     /*
7336      * The auxiliary vector is stored on the target process stack.
7337      * Read the whole auxv vector and copy it to the file.
7338      */
7339     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7340     if (ptr != NULL) {
7341         while (len > 0) {
7342             ssize_t r;
7343             r = write(fd, ptr, len);
7344             if (r <= 0) {
7345                 break;
7346             }
7347             len -= r;
7348             ptr += r;
7349         }
7350         lseek(fd, 0, SEEK_SET);
7351         unlock_user(ptr, auxv, len);
7352     }
7353 
7354     return 0;
7355 }
7356 
7357 static int is_proc_myself(const char *filename, const char *entry)
7358 {
7359     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7360         filename += strlen("/proc/");
7361         if (!strncmp(filename, "self/", strlen("self/"))) {
7362             filename += strlen("self/");
7363         } else if (*filename >= '1' && *filename <= '9') {
7364             char myself[80];
7365             snprintf(myself, sizeof(myself), "%d/", getpid());
7366             if (!strncmp(filename, myself, strlen(myself))) {
7367                 filename += strlen(myself);
7368             } else {
7369                 return 0;
7370             }
7371         } else {
7372             return 0;
7373         }
7374         if (!strcmp(filename, entry)) {
7375             return 1;
7376         }
7377     }
7378     return 0;
7379 }
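/*
 * Illustrative sketch, not part of the original source: inputs accepted by
 * is_proc_myself(), assuming the emulated process has PID 1234 (the
 * function name is hypothetical).
 */
#if 0
static void is_proc_myself_example(void)
{
    assert(is_proc_myself("/proc/self/maps", "maps"));    /* self entry */
    assert(is_proc_myself("/proc/1234/maps", "maps"));    /* own PID */
    assert(!is_proc_myself("/proc/4321/maps", "maps"));   /* another PID */
    assert(!is_proc_myself("/proc/self/status", "maps")); /* other entry */
}
#endif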
7380 
7381 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7382     defined(TARGET_SPARC) || defined(TARGET_M68K)
7383 static int is_proc(const char *filename, const char *entry)
7384 {
7385     return strcmp(filename, entry) == 0;
7386 }
7387 #endif
7388 
7389 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7390 static int open_net_route(void *cpu_env, int fd)
7391 {
7392     FILE *fp;
7393     char *line = NULL;
7394     size_t len = 0;
7395     ssize_t read;
7396 
7397     fp = fopen("/proc/net/route", "r");
7398     if (fp == NULL) {
7399         return -1;
7400     }
7401 
7402     /* read header */
7403 
7404     read = getline(&line, &len, fp);
7405     dprintf(fd, "%s", line);
7406 
7407     /* read routes */
7408 
7409     while ((read = getline(&line, &len, fp)) != -1) {
7410         char iface[16];
7411         uint32_t dest, gw, mask;
7412         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7413         int fields;
7414 
7415         fields = sscanf(line,
7416                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7417                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7418                         &mask, &mtu, &window, &irtt);
7419         if (fields != 11) {
7420             continue;
7421         }
7422         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7423                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7424                 metric, tswap32(mask), mtu, window, irtt);
7425     }
7426 
7427     free(line);
7428     fclose(fp);
7429 
7430     return 0;
7431 }
7432 #endif
7433 
7434 #if defined(TARGET_SPARC)
7435 static int open_cpuinfo(void *cpu_env, int fd)
7436 {
7437     dprintf(fd, "type\t\t: sun4u\n");
7438     return 0;
7439 }
7440 #endif
7441 
7442 #if defined(TARGET_M68K)
7443 static int open_hardware(void *cpu_env, int fd)
7444 {
7445     dprintf(fd, "Model:\t\tqemu-m68k\n");
7446     return 0;
7447 }
7448 #endif
7449 
7450 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7451 {
7452     struct fake_open {
7453         const char *filename;
7454         int (*fill)(void *cpu_env, int fd);
7455         int (*cmp)(const char *s1, const char *s2);
7456     };
7457     const struct fake_open *fake_open;
7458     static const struct fake_open fakes[] = {
7459         { "maps", open_self_maps, is_proc_myself },
7460         { "stat", open_self_stat, is_proc_myself },
7461         { "auxv", open_self_auxv, is_proc_myself },
7462         { "cmdline", open_self_cmdline, is_proc_myself },
7463 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7464         { "/proc/net/route", open_net_route, is_proc },
7465 #endif
7466 #if defined(TARGET_SPARC)
7467         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7468 #endif
7469 #if defined(TARGET_M68K)
7470         { "/proc/hardware", open_hardware, is_proc },
7471 #endif
7472         { NULL, NULL, NULL }
7473     };
7474 
7475     if (is_proc_myself(pathname, "exe")) {
7476         int execfd = qemu_getauxval(AT_EXECFD);
7477         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7478     }
7479 
7480     for (fake_open = fakes; fake_open->filename; fake_open++) {
7481         if (fake_open->cmp(pathname, fake_open->filename)) {
7482             break;
7483         }
7484     }
7485 
7486     if (fake_open->filename) {
7487         const char *tmpdir;
7488         char filename[PATH_MAX];
7489         int fd, r;
7490 
7491         /* create a temporary file to hold the synthesized /proc contents */
7492         tmpdir = getenv("TMPDIR");
7493         if (!tmpdir)
7494             tmpdir = "/tmp";
7495         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7496         fd = mkstemp(filename);
7497         if (fd < 0) {
7498             return fd;
7499         }
7500         unlink(filename);
7501 
7502         if ((r = fake_open->fill(cpu_env, fd))) {
7503             int e = errno;
7504             close(fd);
7505             errno = e;
7506             return r;
7507         }
7508         lseek(fd, 0, SEEK_SET);
7509 
7510         return fd;
7511     }
7512 
7513     return safe_openat(dirfd, path(pathname), flags, mode);
7514 }
7515 
7516 #define TIMER_MAGIC 0x0caf0000
7517 #define TIMER_MAGIC_MASK 0xffff0000
7518 
7519 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7520 static target_timer_t get_timer_id(abi_long arg)
7521 {
7522     target_timer_t timerid = arg;
7523 
7524     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7525         return -TARGET_EINVAL;
7526     }
7527 
7528     timerid &= 0xffff;
7529 
7530     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7531         return -TARGET_EINVAL;
7532     }
7533 
7534     return timerid;
7535 }
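/*
 * Illustrative sketch, not part of the original source: the guest-visible
 * ID of internal timer slot 3 is TIMER_MAGIC | 3 == 0x0caf0003, and
 * get_timer_id() strips the magic again, rejecting IDs without it
 * (assumes slot 3 fits within g_posix_timers; the function name is
 * hypothetical).
 */
#if 0
static void timer_id_example(void)
{
    abi_long guest_id = TIMER_MAGIC | 3;

    assert(get_timer_id(guest_id) == 3);         /* slot index recovered */
    assert(get_timer_id(3) == -TARGET_EINVAL);   /* magic bits missing */
}
#endif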
7536 
7537 static int target_to_host_cpu_mask(unsigned long *host_mask,
7538                                    size_t host_size,
7539                                    abi_ulong target_addr,
7540                                    size_t target_size)
7541 {
7542     unsigned target_bits = sizeof(abi_ulong) * 8;
7543     unsigned host_bits = sizeof(*host_mask) * 8;
7544     abi_ulong *target_mask;
7545     unsigned i, j;
7546 
7547     assert(host_size >= target_size);
7548 
7549     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7550     if (!target_mask) {
7551         return -TARGET_EFAULT;
7552     }
7553     memset(host_mask, 0, host_size);
7554 
7555     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7556         unsigned bit = i * target_bits;
7557         abi_ulong val;
7558 
7559         __get_user(val, &target_mask[i]);
7560         for (j = 0; j < target_bits; j++, bit++) {
7561             if (val & (1UL << j)) {
7562                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7563             }
7564         }
7565     }
7566 
7567     unlock_user(target_mask, target_addr, 0);
7568     return 0;
7569 }
7570 
7571 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7572                                    size_t host_size,
7573                                    abi_ulong target_addr,
7574                                    size_t target_size)
7575 {
7576     unsigned target_bits = sizeof(abi_ulong) * 8;
7577     unsigned host_bits = sizeof(*host_mask) * 8;
7578     abi_ulong *target_mask;
7579     unsigned i, j;
7580 
7581     assert(host_size >= target_size);
7582 
7583     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7584     if (!target_mask) {
7585         return -TARGET_EFAULT;
7586     }
7587 
7588     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7589         unsigned bit = i * target_bits;
7590         abi_ulong val = 0;
7591 
7592         for (j = 0; j < target_bits; j++, bit++) {
7593             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7594                 val |= 1UL << j;
7595             }
7596         }
7597         __put_user(val, &target_mask[i]);
7598     }
7599 
7600     unlock_user(target_mask, target_addr, target_size);
7601     return 0;
7602 }
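/*
 * Illustrative worked example, not part of the original source: with a
 * 32-bit guest ABI (abi_ulong == uint32_t) on a 64-bit host, the guest
 * mask words { 0x00000001, 0x00000002 } select CPUs 0 and 33, and
 * target_to_host_cpu_mask() packs both into a single host word:
 *
 *   host_mask[0] == (1UL << 0) | (1UL << 33) == 0x0000000200000001
 *
 * host_to_target_cpu_mask() performs the inverse split.
 */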
7603 
7604 /* This is an internal helper for do_syscall so that it is easier
7605  * to have a single return point, allowing actions such as logging
7606  * of syscall results to be performed.
7607  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7608  */
7609 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7610                             abi_long arg2, abi_long arg3, abi_long arg4,
7611                             abi_long arg5, abi_long arg6, abi_long arg7,
7612                             abi_long arg8)
7613 {
7614     CPUState *cpu = env_cpu(cpu_env);
7615     abi_long ret;
7616 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7617     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7618     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7619     || defined(TARGET_NR_statx)
7620     struct stat st;
7621 #endif
7622 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7623     || defined(TARGET_NR_fstatfs)
7624     struct statfs stfs;
7625 #endif
7626     void *p;
7627 
7628     switch(num) {
7629     case TARGET_NR_exit:
7630         /* In old applications this may be used to implement _exit(2).
7631            However, in threaded applications it is used for thread termination,
7632            and _exit_group is used for application termination.
7633            Do thread termination if we have more than one thread.  */
7634 
7635         if (block_signals()) {
7636             return -TARGET_ERESTARTSYS;
7637         }
7638 
7639         cpu_list_lock();
7640 
7641         if (CPU_NEXT(first_cpu)) {
7642             TaskState *ts;
7643 
7644             /* Remove the CPU from the list.  */
7645             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7646 
7647             cpu_list_unlock();
7648 
7649             ts = cpu->opaque;
7650             if (ts->child_tidptr) {
7651                 put_user_u32(0, ts->child_tidptr);
7652                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7653                           NULL, NULL, 0);
7654             }
7655             thread_cpu = NULL;
7656             object_unref(OBJECT(cpu));
7657             g_free(ts);
7658             rcu_unregister_thread();
7659             pthread_exit(NULL);
7660         }
7661 
7662         cpu_list_unlock();
7663         preexit_cleanup(cpu_env, arg1);
7664         _exit(arg1);
7665         return 0; /* avoid warning */
7666     case TARGET_NR_read:
7667         if (arg2 == 0 && arg3 == 0) {
7668             return get_errno(safe_read(arg1, 0, 0));
7669         } else {
7670             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7671                 return -TARGET_EFAULT;
7672             ret = get_errno(safe_read(arg1, p, arg3));
7673             if (ret >= 0 &&
7674                 fd_trans_host_to_target_data(arg1)) {
7675                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7676             }
7677             unlock_user(p, arg2, ret);
7678         }
7679         return ret;
7680     case TARGET_NR_write:
7681         if (arg2 == 0 && arg3 == 0) {
7682             return get_errno(safe_write(arg1, 0, 0));
7683         }
7684         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7685             return -TARGET_EFAULT;
7686         if (fd_trans_target_to_host_data(arg1)) {
7687             void *copy = g_malloc(arg3);
7688             memcpy(copy, p, arg3);
7689             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7690             if (ret >= 0) {
7691                 ret = get_errno(safe_write(arg1, copy, ret));
7692             }
7693             g_free(copy);
7694         } else {
7695             ret = get_errno(safe_write(arg1, p, arg3));
7696         }
7697         unlock_user(p, arg2, 0);
7698         return ret;
7699 
7700 #ifdef TARGET_NR_open
7701     case TARGET_NR_open:
7702         if (!(p = lock_user_string(arg1)))
7703             return -TARGET_EFAULT;
7704         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7705                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7706                                   arg3));
7707         fd_trans_unregister(ret);
7708         unlock_user(p, arg1, 0);
7709         return ret;
7710 #endif
7711     case TARGET_NR_openat:
7712         if (!(p = lock_user_string(arg2)))
7713             return -TARGET_EFAULT;
7714         ret = get_errno(do_openat(cpu_env, arg1, p,
7715                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7716                                   arg4));
7717         fd_trans_unregister(ret);
7718         unlock_user(p, arg2, 0);
7719         return ret;
7720 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7721     case TARGET_NR_name_to_handle_at:
7722         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7723         return ret;
7724 #endif
7725 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7726     case TARGET_NR_open_by_handle_at:
7727         ret = do_open_by_handle_at(arg1, arg2, arg3);
7728         fd_trans_unregister(ret);
7729         return ret;
7730 #endif
7731     case TARGET_NR_close:
7732         fd_trans_unregister(arg1);
7733         return get_errno(close(arg1));
7734 
7735     case TARGET_NR_brk:
7736         return do_brk(arg1);
7737 #ifdef TARGET_NR_fork
7738     case TARGET_NR_fork:
7739         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7740 #endif
7741 #ifdef TARGET_NR_waitpid
7742     case TARGET_NR_waitpid:
7743         {
7744             int status;
7745             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7746             if (!is_error(ret) && arg2 && ret
7747                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7748                 return -TARGET_EFAULT;
7749         }
7750         return ret;
7751 #endif
7752 #ifdef TARGET_NR_waitid
7753     case TARGET_NR_waitid:
7754         {
7755             siginfo_t info;
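            /* si_pid is pre-cleared so that a successful waitid() that did
               not actually reap a child (e.g. with WNOHANG) can be detected
               below and the siginfo copy-out skipped. */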
7756             info.si_pid = 0;
7757             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7758             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7759                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7760                     return -TARGET_EFAULT;
7761                 host_to_target_siginfo(p, &info);
7762                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7763             }
7764         }
7765         return ret;
7766 #endif
7767 #ifdef TARGET_NR_creat /* not on alpha */
7768     case TARGET_NR_creat:
7769         if (!(p = lock_user_string(arg1)))
7770             return -TARGET_EFAULT;
7771         ret = get_errno(creat(p, arg2));
7772         fd_trans_unregister(ret);
7773         unlock_user(p, arg1, 0);
7774         return ret;
7775 #endif
7776 #ifdef TARGET_NR_link
7777     case TARGET_NR_link:
7778         {
7779             void * p2;
7780             p = lock_user_string(arg1);
7781             p2 = lock_user_string(arg2);
7782             if (!p || !p2)
7783                 ret = -TARGET_EFAULT;
7784             else
7785                 ret = get_errno(link(p, p2));
7786             unlock_user(p2, arg2, 0);
7787             unlock_user(p, arg1, 0);
7788         }
7789         return ret;
7790 #endif
7791 #if defined(TARGET_NR_linkat)
7792     case TARGET_NR_linkat:
7793         {
7794             void * p2 = NULL;
7795             if (!arg2 || !arg4)
7796                 return -TARGET_EFAULT;
7797             p  = lock_user_string(arg2);
7798             p2 = lock_user_string(arg4);
7799             if (!p || !p2)
7800                 ret = -TARGET_EFAULT;
7801             else
7802                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7803             unlock_user(p, arg2, 0);
7804             unlock_user(p2, arg4, 0);
7805         }
7806         return ret;
7807 #endif
7808 #ifdef TARGET_NR_unlink
7809     case TARGET_NR_unlink:
7810         if (!(p = lock_user_string(arg1)))
7811             return -TARGET_EFAULT;
7812         ret = get_errno(unlink(p));
7813         unlock_user(p, arg1, 0);
7814         return ret;
7815 #endif
7816 #if defined(TARGET_NR_unlinkat)
7817     case TARGET_NR_unlinkat:
7818         if (!(p = lock_user_string(arg2)))
7819             return -TARGET_EFAULT;
7820         ret = get_errno(unlinkat(arg1, p, arg3));
7821         unlock_user(p, arg2, 0);
7822         return ret;
7823 #endif
7824     case TARGET_NR_execve:
7825         {
7826             char **argp, **envp;
7827             int argc, envc;
7828             abi_ulong gp;
7829             abi_ulong guest_argp;
7830             abi_ulong guest_envp;
7831             abi_ulong addr;
7832             char **q;
7833             int total_size = 0;
7834 
7835             argc = 0;
7836             guest_argp = arg2;
7837             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7838                 if (get_user_ual(addr, gp))
7839                     return -TARGET_EFAULT;
7840                 if (!addr)
7841                     break;
7842                 argc++;
7843             }
7844             envc = 0;
7845             guest_envp = arg3;
7846             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7847                 if (get_user_ual(addr, gp))
7848                     return -TARGET_EFAULT;
7849                 if (!addr)
7850                     break;
7851                 envc++;
7852             }
7853 
7854             argp = g_new0(char *, argc + 1);
7855             envp = g_new0(char *, envc + 1);
7856 
7857             for (gp = guest_argp, q = argp; gp;
7858                   gp += sizeof(abi_ulong), q++) {
7859                 if (get_user_ual(addr, gp))
7860                     goto execve_efault;
7861                 if (!addr)
7862                     break;
7863                 if (!(*q = lock_user_string(addr)))
7864                     goto execve_efault;
7865                 total_size += strlen(*q) + 1;
7866             }
7867             *q = NULL;
7868 
7869             for (gp = guest_envp, q = envp; gp;
7870                   gp += sizeof(abi_ulong), q++) {
7871                 if (get_user_ual(addr, gp))
7872                     goto execve_efault;
7873                 if (!addr)
7874                     break;
7875                 if (!(*q = lock_user_string(addr)))
7876                     goto execve_efault;
7877                 total_size += strlen(*q) + 1;
7878             }
7879             *q = NULL;
7880 
7881             if (!(p = lock_user_string(arg1)))
7882                 goto execve_efault;
7883             /* Although execve() is not an interruptible syscall it is
7884              * a special case where we must use the safe_syscall wrapper:
7885              * if we allow a signal to happen before we make the host
7886              * syscall then we will 'lose' it, because at the point of
7887              * execve the process leaves QEMU's control. So we use the
7888              * safe syscall wrapper to ensure that we either take the
7889              * signal as a guest signal, or else it does not happen
7890              * before the execve completes and makes it the other
7891              * program's problem.
7892              */
7893             ret = get_errno(safe_execve(p, argp, envp));
7894             unlock_user(p, arg1, 0);
7895 
7896             goto execve_end;
7897 
7898         execve_efault:
7899             ret = -TARGET_EFAULT;
7900 
7901         execve_end:
7902             for (gp = guest_argp, q = argp; *q;
7903                   gp += sizeof(abi_ulong), q++) {
7904                 if (get_user_ual(addr, gp)
7905                     || !addr)
7906                     break;
7907                 unlock_user(*q, addr, 0);
7908             }
7909             for (gp = guest_envp, q = envp; *q;
7910                   gp += sizeof(abi_ulong), q++) {
7911                 if (get_user_ual(addr, gp)
7912                     || !addr)
7913                     break;
7914                 unlock_user(*q, addr, 0);
7915             }
7916 
7917             g_free(argp);
7918             g_free(envp);
7919         }
7920         return ret;
7921     case TARGET_NR_chdir:
7922         if (!(p = lock_user_string(arg1)))
7923             return -TARGET_EFAULT;
7924         ret = get_errno(chdir(p));
7925         unlock_user(p, arg1, 0);
7926         return ret;
7927 #ifdef TARGET_NR_time
7928     case TARGET_NR_time:
7929         {
7930             time_t host_time;
7931             ret = get_errno(time(&host_time));
7932             if (!is_error(ret)
7933                 && arg1
7934                 && put_user_sal(host_time, arg1))
7935                 return -TARGET_EFAULT;
7936         }
7937         return ret;
7938 #endif
7939 #ifdef TARGET_NR_mknod
7940     case TARGET_NR_mknod:
7941         if (!(p = lock_user_string(arg1)))
7942             return -TARGET_EFAULT;
7943         ret = get_errno(mknod(p, arg2, arg3));
7944         unlock_user(p, arg1, 0);
7945         return ret;
7946 #endif
7947 #if defined(TARGET_NR_mknodat)
7948     case TARGET_NR_mknodat:
7949         if (!(p = lock_user_string(arg2)))
7950             return -TARGET_EFAULT;
7951         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7952         unlock_user(p, arg2, 0);
7953         return ret;
7954 #endif
7955 #ifdef TARGET_NR_chmod
7956     case TARGET_NR_chmod:
7957         if (!(p = lock_user_string(arg1)))
7958             return -TARGET_EFAULT;
7959         ret = get_errno(chmod(p, arg2));
7960         unlock_user(p, arg1, 0);
7961         return ret;
7962 #endif
7963 #ifdef TARGET_NR_lseek
7964     case TARGET_NR_lseek:
7965         return get_errno(lseek(arg1, arg2, arg3));
7966 #endif
7967 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7968     /* Alpha specific */
7969     case TARGET_NR_getxpid:
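        /* getxpid returns the pid as the syscall result and the parent
           pid in the a4 register. */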
7970         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7971         return get_errno(getpid());
7972 #endif
7973 #ifdef TARGET_NR_getpid
7974     case TARGET_NR_getpid:
7975         return get_errno(getpid());
7976 #endif
7977     case TARGET_NR_mount:
7978         {
7979             /* need to look at the data field */
7980             void *p2, *p3;
7981 
7982             if (arg1) {
7983                 p = lock_user_string(arg1);
7984                 if (!p) {
7985                     return -TARGET_EFAULT;
7986                 }
7987             } else {
7988                 p = NULL;
7989             }
7990 
7991             p2 = lock_user_string(arg2);
7992             if (!p2) {
7993                 if (arg1) {
7994                     unlock_user(p, arg1, 0);
7995                 }
7996                 return -TARGET_EFAULT;
7997             }
7998 
7999             if (arg3) {
8000                 p3 = lock_user_string(arg3);
8001                 if (!p3) {
8002                     if (arg1) {
8003                         unlock_user(p, arg1, 0);
8004                     }
8005                     unlock_user(p2, arg2, 0);
8006                     return -TARGET_EFAULT;
8007                 }
8008             } else {
8009                 p3 = NULL;
8010             }
8011 
8012             /* FIXME - arg5 should be locked, but it isn't clear how to
8013              * do that since it's not guaranteed to be a NULL-terminated
8014              * string.
8015              */
8016             if (!arg5) {
8017                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8018             } else {
8019                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8020             }
8021             ret = get_errno(ret);
8022 
8023             if (arg1) {
8024                 unlock_user(p, arg1, 0);
8025             }
8026             unlock_user(p2, arg2, 0);
8027             if (arg3) {
8028                 unlock_user(p3, arg3, 0);
8029             }
8030         }
8031         return ret;
8032 #ifdef TARGET_NR_umount
8033     case TARGET_NR_umount:
8034         if (!(p = lock_user_string(arg1)))
8035             return -TARGET_EFAULT;
8036         ret = get_errno(umount(p));
8037         unlock_user(p, arg1, 0);
8038         return ret;
8039 #endif
8040 #ifdef TARGET_NR_stime /* not on alpha */
8041     case TARGET_NR_stime:
8042         {
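            /* Editor's note (not part of the original source): stime() is
               emulated with clock_settime(CLOCK_REALTIME) rather than
               relying on the host's stime() call, which is deprecated or
               absent in modern glibc. */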
8043             struct timespec ts;
8044             ts.tv_nsec = 0;
8045             if (get_user_sal(ts.tv_sec, arg1)) {
8046                 return -TARGET_EFAULT;
8047             }
8048             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8049         }
8050 #endif
8051 #ifdef TARGET_NR_alarm /* not on alpha */
8052     case TARGET_NR_alarm:
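        /* alarm(2) cannot fail: the return value is the number of seconds
           remaining on any previously scheduled alarm, so it is passed to
           the guest without errno conversion. */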
8053         return alarm(arg1);
8054 #endif
8055 #ifdef TARGET_NR_pause /* not on alpha */
8056     case TARGET_NR_pause:
8057         if (!block_signals()) {
8058             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8059         }
8060         return -TARGET_EINTR;
8061 #endif
8062 #ifdef TARGET_NR_utime
8063     case TARGET_NR_utime:
8064         {
8065             struct utimbuf tbuf, *host_tbuf;
8066             struct target_utimbuf *target_tbuf;
8067             if (arg2) {
8068                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8069                     return -TARGET_EFAULT;
8070                 tbuf.actime = tswapal(target_tbuf->actime);
8071                 tbuf.modtime = tswapal(target_tbuf->modtime);
8072                 unlock_user_struct(target_tbuf, arg2, 0);
8073                 host_tbuf = &tbuf;
8074             } else {
8075                 host_tbuf = NULL;
8076             }
8077             if (!(p = lock_user_string(arg1)))
8078                 return -TARGET_EFAULT;
8079             ret = get_errno(utime(p, host_tbuf));
8080             unlock_user(p, arg1, 0);
8081         }
8082         return ret;
8083 #endif
8084 #ifdef TARGET_NR_utimes
8085     case TARGET_NR_utimes:
8086         {
8087             struct timeval *tvp, tv[2];
8088             if (arg2) {
8089                 if (copy_from_user_timeval(&tv[0], arg2)
8090                     || copy_from_user_timeval(&tv[1],
8091                                               arg2 + sizeof(struct target_timeval)))
8092                     return -TARGET_EFAULT;
8093                 tvp = tv;
8094             } else {
8095                 tvp = NULL;
8096             }
8097             if (!(p = lock_user_string(arg1)))
8098                 return -TARGET_EFAULT;
8099             ret = get_errno(utimes(p, tvp));
8100             unlock_user(p, arg1, 0);
8101         }
8102         return ret;
8103 #endif
8104 #if defined(TARGET_NR_futimesat)
8105     case TARGET_NR_futimesat:
8106         {
8107             struct timeval *tvp, tv[2];
8108             if (arg3) {
8109                 if (copy_from_user_timeval(&tv[0], arg3)
8110                     || copy_from_user_timeval(&tv[1],
8111                                               arg3 + sizeof(struct target_timeval)))
8112                     return -TARGET_EFAULT;
8113                 tvp = tv;
8114             } else {
8115                 tvp = NULL;
8116             }
8117             if (!(p = lock_user_string(arg2))) {
8118                 return -TARGET_EFAULT;
8119             }
8120             ret = get_errno(futimesat(arg1, path(p), tvp));
8121             unlock_user(p, arg2, 0);
8122         }
8123         return ret;
8124 #endif
8125 #ifdef TARGET_NR_access
8126     case TARGET_NR_access:
8127         if (!(p = lock_user_string(arg1))) {
8128             return -TARGET_EFAULT;
8129         }
8130         ret = get_errno(access(path(p), arg2));
8131         unlock_user(p, arg1, 0);
8132         return ret;
8133 #endif
8134 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8135     case TARGET_NR_faccessat:
8136         if (!(p = lock_user_string(arg2))) {
8137             return -TARGET_EFAULT;
8138         }
8139         ret = get_errno(faccessat(arg1, p, arg3, 0));
8140         unlock_user(p, arg2, 0);
8141         return ret;
8142 #endif
8143 #ifdef TARGET_NR_nice /* not on alpha */
8144     case TARGET_NR_nice:
8145         return get_errno(nice(arg1));
8146 #endif
8147     case TARGET_NR_sync:
8148         sync();
8149         return 0;
8150 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8151     case TARGET_NR_syncfs:
8152         return get_errno(syncfs(arg1));
8153 #endif
8154     case TARGET_NR_kill:
8155         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8156 #ifdef TARGET_NR_rename
8157     case TARGET_NR_rename:
8158         {
8159             void *p2;
8160             p = lock_user_string(arg1);
8161             p2 = lock_user_string(arg2);
8162             if (!p || !p2)
8163                 ret = -TARGET_EFAULT;
8164             else
8165                 ret = get_errno(rename(p, p2));
8166             unlock_user(p2, arg2, 0);
8167             unlock_user(p, arg1, 0);
8168         }
8169         return ret;
8170 #endif
8171 #if defined(TARGET_NR_renameat)
8172     case TARGET_NR_renameat:
8173         {
8174             void *p2;
8175             p  = lock_user_string(arg2);
8176             p2 = lock_user_string(arg4);
8177             if (!p || !p2)
8178                 ret = -TARGET_EFAULT;
8179             else
8180                 ret = get_errno(renameat(arg1, p, arg3, p2));
8181             unlock_user(p2, arg4, 0);
8182             unlock_user(p, arg2, 0);
8183         }
8184         return ret;
8185 #endif
8186 #if defined(TARGET_NR_renameat2)
8187     case TARGET_NR_renameat2:
8188         {
8189             void *p2;
8190             p  = lock_user_string(arg2);
8191             p2 = lock_user_string(arg4);
8192             if (!p || !p2) {
8193                 ret = -TARGET_EFAULT;
8194             } else {
8195                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8196             }
8197             unlock_user(p2, arg4, 0);
8198             unlock_user(p, arg2, 0);
8199         }
8200         return ret;
8201 #endif
8202 #ifdef TARGET_NR_mkdir
8203     case TARGET_NR_mkdir:
8204         if (!(p = lock_user_string(arg1)))
8205             return -TARGET_EFAULT;
8206         ret = get_errno(mkdir(p, arg2));
8207         unlock_user(p, arg1, 0);
8208         return ret;
8209 #endif
8210 #if defined(TARGET_NR_mkdirat)
8211     case TARGET_NR_mkdirat:
8212         if (!(p = lock_user_string(arg2)))
8213             return -TARGET_EFAULT;
8214         ret = get_errno(mkdirat(arg1, p, arg3));
8215         unlock_user(p, arg2, 0);
8216         return ret;
8217 #endif
8218 #ifdef TARGET_NR_rmdir
8219     case TARGET_NR_rmdir:
8220         if (!(p = lock_user_string(arg1)))
8221             return -TARGET_EFAULT;
8222         ret = get_errno(rmdir(p));
8223         unlock_user(p, arg1, 0);
8224         return ret;
8225 #endif
8226     case TARGET_NR_dup:
8227         ret = get_errno(dup(arg1));
8228         if (ret >= 0) {
8229             fd_trans_dup(arg1, ret);
8230         }
8231         return ret;
8232 #ifdef TARGET_NR_pipe
8233     case TARGET_NR_pipe:
8234         return do_pipe(cpu_env, arg1, 0, 0);
8235 #endif
8236 #ifdef TARGET_NR_pipe2
8237     case TARGET_NR_pipe2:
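        /* Editor's note (not part of the original source): the final
           argument marks this as pipe2(), so do_pipe() always writes the
           fd pair to guest memory instead of using the register-return
           convention some targets have for plain pipe(). */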
8238         return do_pipe(cpu_env, arg1,
8239                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8240 #endif
8241     case TARGET_NR_times:
8242         {
8243             struct target_tms *tmsp;
8244             struct tms tms;
8245             ret = get_errno(times(&tms));
8246             if (arg1) {
8247                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8248                 if (!tmsp)
8249                     return -TARGET_EFAULT;
8250                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8251                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8252                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8253                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8254             }
8255             if (!is_error(ret))
8256                 ret = host_to_target_clock_t(ret);
8257         }
8258         return ret;
8259     case TARGET_NR_acct:
8260         if (arg1 == 0) {
8261             ret = get_errno(acct(NULL));
8262         } else {
8263             if (!(p = lock_user_string(arg1))) {
8264                 return -TARGET_EFAULT;
8265             }
8266             ret = get_errno(acct(path(p)));
8267             unlock_user(p, arg1, 0);
8268         }
8269         return ret;
8270 #ifdef TARGET_NR_umount2
8271     case TARGET_NR_umount2:
8272         if (!(p = lock_user_string(arg1)))
8273             return -TARGET_EFAULT;
8274         ret = get_errno(umount2(p, arg2));
8275         unlock_user(p, arg1, 0);
8276         return ret;
8277 #endif
8278     case TARGET_NR_ioctl:
8279         return do_ioctl(arg1, arg2, arg3);
8280 #ifdef TARGET_NR_fcntl
8281     case TARGET_NR_fcntl:
8282         return do_fcntl(arg1, arg2, arg3);
8283 #endif
8284     case TARGET_NR_setpgid:
8285         return get_errno(setpgid(arg1, arg2));
8286     case TARGET_NR_umask:
8287         return get_errno(umask(arg1));
8288     case TARGET_NR_chroot:
8289         if (!(p = lock_user_string(arg1)))
8290             return -TARGET_EFAULT;
8291         ret = get_errno(chroot(p));
8292         unlock_user(p, arg1, 0);
8293         return ret;
8294 #ifdef TARGET_NR_dup2
8295     case TARGET_NR_dup2:
8296         ret = get_errno(dup2(arg1, arg2));
8297         if (ret >= 0) {
8298             fd_trans_dup(arg1, arg2);
8299         }
8300         return ret;
8301 #endif
8302 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8303     case TARGET_NR_dup3:
8304     {
8305         int host_flags;
8306 
8307         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8308             return -TARGET_EINVAL;
8309         }
8310         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8311         ret = get_errno(dup3(arg1, arg2, host_flags));
8312         if (ret >= 0) {
8313             fd_trans_dup(arg1, arg2);
8314         }
8315         return ret;
8316     }
8317 #endif
8318 #ifdef TARGET_NR_getppid /* not on alpha */
8319     case TARGET_NR_getppid:
8320         return get_errno(getppid());
8321 #endif
8322 #ifdef TARGET_NR_getpgrp
8323     case TARGET_NR_getpgrp:
8324         return get_errno(getpgrp());
8325 #endif
8326     case TARGET_NR_setsid:
8327         return get_errno(setsid());
8328 #ifdef TARGET_NR_sigaction
8329     case TARGET_NR_sigaction:
8330         {
8331 #if defined(TARGET_ALPHA)
8332             struct target_sigaction act, oact, *pact = 0;
8333             struct target_old_sigaction *old_act;
8334             if (arg2) {
8335                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8336                     return -TARGET_EFAULT;
8337                 act._sa_handler = old_act->_sa_handler;
8338                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8339                 act.sa_flags = old_act->sa_flags;
8340                 act.sa_restorer = 0;
8341                 unlock_user_struct(old_act, arg2, 0);
8342                 pact = &act;
8343             }
8344             ret = get_errno(do_sigaction(arg1, pact, &oact));
8345             if (!is_error(ret) && arg3) {
8346                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8347                     return -TARGET_EFAULT;
8348                 old_act->_sa_handler = oact._sa_handler;
8349                 old_act->sa_mask = oact.sa_mask.sig[0];
8350                 old_act->sa_flags = oact.sa_flags;
8351                 unlock_user_struct(old_act, arg3, 1);
8352             }
8353 #elif defined(TARGET_MIPS)
8354 	    struct target_sigaction act, oact, *pact, *old_act;
8355 
8356 	    if (arg2) {
8357                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8358                     return -TARGET_EFAULT;
8359 		act._sa_handler = old_act->_sa_handler;
8360 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8361 		act.sa_flags = old_act->sa_flags;
8362 		unlock_user_struct(old_act, arg2, 0);
8363 		pact = &act;
8364 	    } else {
8365 		pact = NULL;
8366 	    }
8367 
8368 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8369 
8370 	    if (!is_error(ret) && arg3) {
8371                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8372                     return -TARGET_EFAULT;
8373 		old_act->_sa_handler = oact._sa_handler;
8374 		old_act->sa_flags = oact.sa_flags;
8375 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8376 		old_act->sa_mask.sig[1] = 0;
8377 		old_act->sa_mask.sig[2] = 0;
8378 		old_act->sa_mask.sig[3] = 0;
8379 		unlock_user_struct(old_act, arg3, 1);
8380 	    }
8381 #else
8382             struct target_old_sigaction *old_act;
8383             struct target_sigaction act, oact, *pact;
8384             if (arg2) {
8385                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8386                     return -TARGET_EFAULT;
8387                 act._sa_handler = old_act->_sa_handler;
8388                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8389                 act.sa_flags = old_act->sa_flags;
8390                 act.sa_restorer = old_act->sa_restorer;
8391 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8392                 act.ka_restorer = 0;
8393 #endif
8394                 unlock_user_struct(old_act, arg2, 0);
8395                 pact = &act;
8396             } else {
8397                 pact = NULL;
8398             }
8399             ret = get_errno(do_sigaction(arg1, pact, &oact));
8400             if (!is_error(ret) && arg3) {
8401                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8402                     return -TARGET_EFAULT;
8403                 old_act->_sa_handler = oact._sa_handler;
8404                 old_act->sa_mask = oact.sa_mask.sig[0];
8405                 old_act->sa_flags = oact.sa_flags;
8406                 old_act->sa_restorer = oact.sa_restorer;
8407                 unlock_user_struct(old_act, arg3, 1);
8408             }
8409 #endif
8410         }
8411         return ret;
8412 #endif
8413     case TARGET_NR_rt_sigaction:
8414         {
8415 #if defined(TARGET_ALPHA)
8416             /* For Alpha and SPARC this is a 5 argument syscall, with
8417              * a 'restorer' parameter which must be copied into the
8418              * sa_restorer field of the sigaction struct.
8419              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8420              * and arg5 is the sigsetsize.
8421              * Alpha also has a separate rt_sigaction struct that it uses
8422              * here; SPARC uses the usual sigaction struct.
8423              */
8424             struct target_rt_sigaction *rt_act;
8425             struct target_sigaction act, oact, *pact = 0;
8426 
8427             if (arg4 != sizeof(target_sigset_t)) {
8428                 return -TARGET_EINVAL;
8429             }
8430             if (arg2) {
8431                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8432                     return -TARGET_EFAULT;
8433                 act._sa_handler = rt_act->_sa_handler;
8434                 act.sa_mask = rt_act->sa_mask;
8435                 act.sa_flags = rt_act->sa_flags;
8436                 act.sa_restorer = arg5;
8437                 unlock_user_struct(rt_act, arg2, 0);
8438                 pact = &act;
8439             }
8440             ret = get_errno(do_sigaction(arg1, pact, &oact));
8441             if (!is_error(ret) && arg3) {
8442                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8443                     return -TARGET_EFAULT;
8444                 rt_act->_sa_handler = oact._sa_handler;
8445                 rt_act->sa_mask = oact.sa_mask;
8446                 rt_act->sa_flags = oact.sa_flags;
8447                 unlock_user_struct(rt_act, arg3, 1);
8448             }
8449 #else
8450 #ifdef TARGET_SPARC
8451             target_ulong restorer = arg4;
8452             target_ulong sigsetsize = arg5;
8453 #else
8454             target_ulong sigsetsize = arg4;
8455 #endif
8456             struct target_sigaction *act;
8457             struct target_sigaction *oact;
8458 
8459             if (sigsetsize != sizeof(target_sigset_t)) {
8460                 return -TARGET_EINVAL;
8461             }
8462             if (arg2) {
8463                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8464                     return -TARGET_EFAULT;
8465                 }
8466 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8467                 act->ka_restorer = restorer;
8468 #endif
8469             } else {
8470                 act = NULL;
8471             }
8472             if (arg3) {
8473                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8474                     ret = -TARGET_EFAULT;
8475                     goto rt_sigaction_fail;
8476                 }
8477             } else
8478                 oact = NULL;
8479             ret = get_errno(do_sigaction(arg1, act, oact));
8480 	rt_sigaction_fail:
8481             if (act)
8482                 unlock_user_struct(act, arg2, 0);
8483             if (oact)
8484                 unlock_user_struct(oact, arg3, 1);
8485 #endif
8486         }
8487         return ret;
8488 #ifdef TARGET_NR_sgetmask /* not on alpha */
8489     case TARGET_NR_sgetmask:
8490         {
8491             sigset_t cur_set;
8492             abi_ulong target_set;
8493             ret = do_sigprocmask(0, NULL, &cur_set);
8494             if (!ret) {
8495                 host_to_target_old_sigset(&target_set, &cur_set);
8496                 ret = target_set;
8497             }
8498         }
8499         return ret;
8500 #endif
8501 #ifdef TARGET_NR_ssetmask /* not on alpha */
8502     case TARGET_NR_ssetmask:
8503         {
8504             sigset_t set, oset;
8505             abi_ulong target_set = arg1;
8506             target_to_host_old_sigset(&set, &target_set);
8507             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8508             if (!ret) {
8509                 host_to_target_old_sigset(&target_set, &oset);
8510                 ret = target_set;
8511             }
8512         }
8513         return ret;
8514 #endif
8515 #ifdef TARGET_NR_sigprocmask
8516     case TARGET_NR_sigprocmask:
8517         {
8518 #if defined(TARGET_ALPHA)
8519             sigset_t set, oldset;
8520             abi_ulong mask;
8521             int how;
8522 
8523             switch (arg1) {
8524             case TARGET_SIG_BLOCK:
8525                 how = SIG_BLOCK;
8526                 break;
8527             case TARGET_SIG_UNBLOCK:
8528                 how = SIG_UNBLOCK;
8529                 break;
8530             case TARGET_SIG_SETMASK:
8531                 how = SIG_SETMASK;
8532                 break;
8533             default:
8534                 return -TARGET_EINVAL;
8535             }
8536             mask = arg2;
8537             target_to_host_old_sigset(&set, &mask);
8538 
8539             ret = do_sigprocmask(how, &set, &oldset);
8540             if (!is_error(ret)) {
8541                 host_to_target_old_sigset(&mask, &oldset);
8542                 ret = mask;
8543                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8544             }
8545 #else
8546             sigset_t set, oldset, *set_ptr;
8547             int how;
8548 
8549             if (arg2) {
8550                 switch (arg1) {
8551                 case TARGET_SIG_BLOCK:
8552                     how = SIG_BLOCK;
8553                     break;
8554                 case TARGET_SIG_UNBLOCK:
8555                     how = SIG_UNBLOCK;
8556                     break;
8557                 case TARGET_SIG_SETMASK:
8558                     how = SIG_SETMASK;
8559                     break;
8560                 default:
8561                     return -TARGET_EINVAL;
8562                 }
8563                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8564                     return -TARGET_EFAULT;
8565                 target_to_host_old_sigset(&set, p);
8566                 unlock_user(p, arg2, 0);
8567                 set_ptr = &set;
8568             } else {
8569                 how = 0;
8570                 set_ptr = NULL;
8571             }
8572             ret = do_sigprocmask(how, set_ptr, &oldset);
8573             if (!is_error(ret) && arg3) {
8574                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8575                     return -TARGET_EFAULT;
8576                 host_to_target_old_sigset(p, &oldset);
8577                 unlock_user(p, arg3, sizeof(target_sigset_t));
8578             }
8579 #endif
8580         }
8581         return ret;
8582 #endif
8583     case TARGET_NR_rt_sigprocmask:
8584         {
8585             int how = arg1;
8586             sigset_t set, oldset, *set_ptr;
8587 
8588             if (arg4 != sizeof(target_sigset_t)) {
8589                 return -TARGET_EINVAL;
8590             }
8591 
8592             if (arg2) {
8593                 switch(how) {
8594                 case TARGET_SIG_BLOCK:
8595                     how = SIG_BLOCK;
8596                     break;
8597                 case TARGET_SIG_UNBLOCK:
8598                     how = SIG_UNBLOCK;
8599                     break;
8600                 case TARGET_SIG_SETMASK:
8601                     how = SIG_SETMASK;
8602                     break;
8603                 default:
8604                     return -TARGET_EINVAL;
8605                 }
8606                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8607                     return -TARGET_EFAULT;
8608                 target_to_host_sigset(&set, p);
8609                 unlock_user(p, arg2, 0);
8610                 set_ptr = &set;
8611             } else {
8612                 how = 0;
8613                 set_ptr = NULL;
8614             }
8615             ret = do_sigprocmask(how, set_ptr, &oldset);
8616             if (!is_error(ret) && arg3) {
8617                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8618                     return -TARGET_EFAULT;
8619                 host_to_target_sigset(p, &oldset);
8620                 unlock_user(p, arg3, sizeof(target_sigset_t));
8621             }
8622         }
8623         return ret;
8624 #ifdef TARGET_NR_sigpending
8625     case TARGET_NR_sigpending:
8626         {
8627             sigset_t set;
8628             ret = get_errno(sigpending(&set));
8629             if (!is_error(ret)) {
8630                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8631                     return -TARGET_EFAULT;
8632                 host_to_target_old_sigset(p, &set);
8633                 unlock_user(p, arg1, sizeof(target_sigset_t));
8634             }
8635         }
8636         return ret;
8637 #endif
8638     case TARGET_NR_rt_sigpending:
8639         {
8640             sigset_t set;
8641 
8642             /* Yes, this check is >, not != like most. We follow the kernel's
8643              * logic here: the kernel checks this way because it implements
8644              * NR_sigpending through the same code path, and in that case
8645              * the old_sigset_t is smaller in size.
8646              */
8647             if (arg2 > sizeof(target_sigset_t)) {
8648                 return -TARGET_EINVAL;
8649             }
8650 
8651             ret = get_errno(sigpending(&set));
8652             if (!is_error(ret)) {
8653                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8654                     return -TARGET_EFAULT;
8655                 host_to_target_sigset(p, &set);
8656                 unlock_user(p, arg1, sizeof(target_sigset_t));
8657             }
8658         }
8659         return ret;
8660 #ifdef TARGET_NR_sigsuspend
8661     case TARGET_NR_sigsuspend:
8662         {
8663             TaskState *ts = cpu->opaque;
8664 #if defined(TARGET_ALPHA)
8665             abi_ulong mask = arg1;
8666             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8667 #else
8668             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8669                 return -TARGET_EFAULT;
8670             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8671             unlock_user(p, arg1, 0);
8672 #endif
8673             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8674                                                SIGSET_T_SIZE));
8675             if (ret != -TARGET_ERESTARTSYS) {
8676                 ts->in_sigsuspend = 1;
8677             }
8678         }
8679         return ret;
8680 #endif
8681     case TARGET_NR_rt_sigsuspend:
8682         {
8683             TaskState *ts = cpu->opaque;
8684 
8685             if (arg2 != sizeof(target_sigset_t)) {
8686                 return -TARGET_EINVAL;
8687             }
8688             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8689                 return -TARGET_EFAULT;
8690             target_to_host_sigset(&ts->sigsuspend_mask, p);
8691             unlock_user(p, arg1, 0);
8692             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8693                                                SIGSET_T_SIZE));
8694             if (ret != -TARGET_ERESTARTSYS) {
8695                 ts->in_sigsuspend = 1;
8696             }
8697         }
8698         return ret;
8699 #ifdef TARGET_NR_rt_sigtimedwait
8700     case TARGET_NR_rt_sigtimedwait:
8701         {
8702             sigset_t set;
8703             struct timespec uts, *puts;
8704             siginfo_t uinfo;
8705 
8706             if (arg4 != sizeof(target_sigset_t)) {
8707                 return -TARGET_EINVAL;
8708             }
8709 
8710             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8711                 return -TARGET_EFAULT;
8712             target_to_host_sigset(&set, p);
8713             unlock_user(p, arg1, 0);
8714             if (arg3) {
8715                 puts = &uts;
8716                 target_to_host_timespec(puts, arg3);
8717             } else {
8718                 puts = NULL;
8719             }
8720             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8721                                                  SIGSET_T_SIZE));
8722             if (!is_error(ret)) {
8723                 if (arg2) {
8724                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8725                                   0);
8726                     if (!p) {
8727                         return -TARGET_EFAULT;
8728                     }
8729                     host_to_target_siginfo(p, &uinfo);
8730                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8731                 }
8732                 ret = host_to_target_signal(ret);
8733             }
8734         }
8735         return ret;
8736 #endif
8737     case TARGET_NR_rt_sigqueueinfo:
8738         {
8739             siginfo_t uinfo;
8740 
8741             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8742             if (!p) {
8743                 return -TARGET_EFAULT;
8744             }
8745             target_to_host_siginfo(&uinfo, p);
8746             unlock_user(p, arg3, 0);
8747             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8748         }
8749         return ret;
8750     case TARGET_NR_rt_tgsigqueueinfo:
8751         {
8752             siginfo_t uinfo;
8753 
8754             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8755             if (!p) {
8756                 return -TARGET_EFAULT;
8757             }
8758             target_to_host_siginfo(&uinfo, p);
8759             unlock_user(p, arg4, 0);
8760             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8761         }
8762         return ret;
8763 #ifdef TARGET_NR_sigreturn
8764     case TARGET_NR_sigreturn:
8765         if (block_signals()) {
8766             return -TARGET_ERESTARTSYS;
8767         }
8768         return do_sigreturn(cpu_env);
8769 #endif
8770     case TARGET_NR_rt_sigreturn:
8771         if (block_signals()) {
8772             return -TARGET_ERESTARTSYS;
8773         }
8774         return do_rt_sigreturn(cpu_env);
8775     case TARGET_NR_sethostname:
8776         if (!(p = lock_user_string(arg1)))
8777             return -TARGET_EFAULT;
8778         ret = get_errno(sethostname(p, arg2));
8779         unlock_user(p, arg1, 0);
8780         return ret;
8781 #ifdef TARGET_NR_setrlimit
8782     case TARGET_NR_setrlimit:
8783         {
8784             int resource = target_to_host_resource(arg1);
8785             struct target_rlimit *target_rlim;
8786             struct rlimit rlim;
8787             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8788                 return -TARGET_EFAULT;
8789             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8790             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8791             unlock_user_struct(target_rlim, arg2, 0);
8792             /*
8793              * If we just passed through resource limit settings for memory then
8794              * they would also apply to QEMU's own allocations, and QEMU will
8795              * crash or hang or die if its allocations fail. Ideally we would
8796              * track the guest allocations in QEMU and apply the limits ourselves.
8797              * For now, just tell the guest the call succeeded but don't actually
8798              * limit anything.
8799              */
8800             if (resource != RLIMIT_AS &&
8801                 resource != RLIMIT_DATA &&
8802                 resource != RLIMIT_STACK) {
8803                 return get_errno(setrlimit(resource, &rlim));
8804             } else {
8805                 return 0;
8806             }
8807         }
8808 #endif
8809 #ifdef TARGET_NR_getrlimit
8810     case TARGET_NR_getrlimit:
8811         {
8812             int resource = target_to_host_resource(arg1);
8813             struct target_rlimit *target_rlim;
8814             struct rlimit rlim;
8815 
8816             ret = get_errno(getrlimit(resource, &rlim));
8817             if (!is_error(ret)) {
8818                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8819                     return -TARGET_EFAULT;
8820                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8821                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8822                 unlock_user_struct(target_rlim, arg2, 1);
8823             }
8824         }
8825         return ret;
8826 #endif
8827     case TARGET_NR_getrusage:
8828         {
8829             struct rusage rusage;
8830             ret = get_errno(getrusage(arg1, &rusage));
8831             if (!is_error(ret)) {
8832                 ret = host_to_target_rusage(arg2, &rusage);
8833             }
8834         }
8835         return ret;
8836 #if defined(TARGET_NR_gettimeofday)
8837     case TARGET_NR_gettimeofday:
8838         {
8839             struct timeval tv;
8840             struct timezone tz;
8841 
8842             ret = get_errno(gettimeofday(&tv, &tz));
8843             if (!is_error(ret)) {
8844                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
8845                     return -TARGET_EFAULT;
8846                 }
8847                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
8848                     return -TARGET_EFAULT;
8849                 }
8850             }
8851         }
8852         return ret;
8853 #endif
8854 #if defined(TARGET_NR_settimeofday)
8855     case TARGET_NR_settimeofday:
8856         {
8857             struct timeval tv, *ptv = NULL;
8858             struct timezone tz, *ptz = NULL;
8859 
8860             if (arg1) {
8861                 if (copy_from_user_timeval(&tv, arg1)) {
8862                     return -TARGET_EFAULT;
8863                 }
8864                 ptv = &tv;
8865             }
8866 
8867             if (arg2) {
8868                 if (copy_from_user_timezone(&tz, arg2)) {
8869                     return -TARGET_EFAULT;
8870                 }
8871                 ptz = &tz;
8872             }
8873 
8874             return get_errno(settimeofday(ptv, ptz));
8875         }
8876 #endif
8877 #if defined(TARGET_NR_select)
8878     case TARGET_NR_select:
8879 #if defined(TARGET_WANT_NI_OLD_SELECT)
8880         /* some architectures used to have old_select here
8881          * but now return ENOSYS for it.
8882          */
8883         ret = -TARGET_ENOSYS;
8884 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8885         ret = do_old_select(arg1);
8886 #else
8887         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8888 #endif
8889         return ret;
8890 #endif
8891 #ifdef TARGET_NR_pselect6
8892     case TARGET_NR_pselect6:
8893         {
8894             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8895             fd_set rfds, wfds, efds;
8896             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8897             struct timespec ts, *ts_ptr;
8898 
8899             /*
8900              * The 6th arg is actually two args smashed together,
8901              * so we cannot use the C library.
8902              */
8903             sigset_t set;
8904             struct {
8905                 sigset_t *set;
8906                 size_t size;
8907             } sig, *sig_ptr;
8908 
8909             abi_ulong arg_sigset, arg_sigsize, *arg7;
8910             target_sigset_t *target_sigset;
8911 
8912             n = arg1;
8913             rfd_addr = arg2;
8914             wfd_addr = arg3;
8915             efd_addr = arg4;
8916             ts_addr = arg5;
8917 
8918             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8919             if (ret) {
8920                 return ret;
8921             }
8922             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8923             if (ret) {
8924                 return ret;
8925             }
8926             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8927             if (ret) {
8928                 return ret;
8929             }
8930 
8931             /*
8932              * This takes a timespec, and not a timeval, so we cannot
8933              * use the do_select() helper ...
8934              */
8935             if (ts_addr) {
8936                 if (target_to_host_timespec(&ts, ts_addr)) {
8937                     return -TARGET_EFAULT;
8938                 }
8939                 ts_ptr = &ts;
8940             } else {
8941                 ts_ptr = NULL;
8942             }
8943 
8944             /* Extract the two packed args for the sigset */
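            /*
             * Editor's note (illustration, not part of the original source):
             * in guest memory arg6 points at two abi_ulongs laid out like
             * the kernel's sigset argument pack:
             *
             *     { abi_ulong sigset_ptr; abi_ulong sigset_size; }
             *
             * which is what the lock_user()/tswapal() code below unpacks.
             */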
8945             if (arg6) {
8946                 sig_ptr = &sig;
8947                 sig.size = SIGSET_T_SIZE;
8948 
8949                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8950                 if (!arg7) {
8951                     return -TARGET_EFAULT;
8952                 }
8953                 arg_sigset = tswapal(arg7[0]);
8954                 arg_sigsize = tswapal(arg7[1]);
8955                 unlock_user(arg7, arg6, 0);
8956 
8957                 if (arg_sigset) {
8958                     sig.set = &set;
8959                     if (arg_sigsize != sizeof(*target_sigset)) {
8960                         /* Like the kernel, we enforce correct size sigsets */
8961                         return -TARGET_EINVAL;
8962                     }
8963                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8964                                               sizeof(*target_sigset), 1);
8965                     if (!target_sigset) {
8966                         return -TARGET_EFAULT;
8967                     }
8968                     target_to_host_sigset(&set, target_sigset);
8969                     unlock_user(target_sigset, arg_sigset, 0);
8970                 } else {
8971                     sig.set = NULL;
8972                 }
8973             } else {
8974                 sig_ptr = NULL;
8975             }
8976 
8977             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8978                                           ts_ptr, sig_ptr));
8979 
8980             if (!is_error(ret)) {
8981                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8982                     return -TARGET_EFAULT;
8983                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8984                     return -TARGET_EFAULT;
8985                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8986                     return -TARGET_EFAULT;
8987 
8988                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8989                     return -TARGET_EFAULT;
8990             }
8991         }
8992         return ret;
8993 #endif
8994 #ifdef TARGET_NR_symlink
8995     case TARGET_NR_symlink:
8996         {
8997             void *p2;
8998             p = lock_user_string(arg1);
8999             p2 = lock_user_string(arg2);
9000             if (!p || !p2)
9001                 ret = -TARGET_EFAULT;
9002             else
9003                 ret = get_errno(symlink(p, p2));
9004             unlock_user(p2, arg2, 0);
9005             unlock_user(p, arg1, 0);
9006         }
9007         return ret;
9008 #endif
9009 #if defined(TARGET_NR_symlinkat)
9010     case TARGET_NR_symlinkat:
9011         {
9012             void *p2;
9013             p  = lock_user_string(arg1);
9014             p2 = lock_user_string(arg3);
9015             if (!p || !p2)
9016                 ret = -TARGET_EFAULT;
9017             else
9018                 ret = get_errno(symlinkat(p, arg2, p2));
9019             unlock_user(p2, arg3, 0);
9020             unlock_user(p, arg1, 0);
9021         }
9022         return ret;
9023 #endif
9024 #ifdef TARGET_NR_readlink
9025     case TARGET_NR_readlink:
9026         {
9027             void *p2;
9028             p = lock_user_string(arg1);
9029             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9030             if (!p || !p2) {
9031                 ret = -TARGET_EFAULT;
9032             } else if (!arg3) {
9033                 /* Short circuit this for the magic exe check. */
9034                 ret = -TARGET_EINVAL;
9035             } else if (is_proc_myself((const char *)p, "exe")) {
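                /* readlink() on the guest's own /proc/<pid>/exe (or
                   /proc/self/exe) must report the guest binary's path, not
                   the QEMU executable, so it is answered from exec_path
                   rather than the host procfs. */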
9036                 char real[PATH_MAX], *temp;
9037                 temp = realpath(exec_path, real);
9038                 /* Return value is # of bytes that we wrote to the buffer. */
9039                 if (temp == NULL) {
9040                     ret = get_errno(-1);
9041                 } else {
9042                     /* Don't worry about sign mismatch as earlier mapping
9043                      * logic would have thrown a bad address error. */
9044                     ret = MIN(strlen(real), arg3);
9045                     /* We cannot NUL terminate the string. */
9046                     memcpy(p2, real, ret);
9047                 }
9048             } else {
9049                 ret = get_errno(readlink(path(p), p2, arg3));
9050             }
9051             unlock_user(p2, arg2, ret);
9052             unlock_user(p, arg1, 0);
9053         }
9054         return ret;
9055 #endif
9056 #if defined(TARGET_NR_readlinkat)
9057     case TARGET_NR_readlinkat:
9058         {
9059             void *p2;
9060             p  = lock_user_string(arg2);
9061             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9062             if (!p || !p2) {
9063                 ret = -TARGET_EFAULT;
9064             } else if (is_proc_myself((const char *)p, "exe")) {
9065                 char real[PATH_MAX], *temp;
9066                 temp = realpath(exec_path, real);
9067                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9068                 snprintf((char *)p2, arg4, "%s", real);
9069             } else {
9070                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9071             }
9072             unlock_user(p2, arg3, ret);
9073             unlock_user(p, arg2, 0);
9074         }
9075         return ret;
9076 #endif
9077 #ifdef TARGET_NR_swapon
9078     case TARGET_NR_swapon:
9079         if (!(p = lock_user_string(arg1)))
9080             return -TARGET_EFAULT;
9081         ret = get_errno(swapon(p, arg2));
9082         unlock_user(p, arg1, 0);
9083         return ret;
9084 #endif
9085     case TARGET_NR_reboot:
9086         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9087            /* arg4 must be ignored in all cases other than RESTART2 */
9088            p = lock_user_string(arg4);
9089            if (!p) {
9090                return -TARGET_EFAULT;
9091            }
9092            ret = get_errno(reboot(arg1, arg2, arg3, p));
9093            unlock_user(p, arg4, 0);
9094         } else {
9095            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9096         }
9097         return ret;
9098 #ifdef TARGET_NR_mmap
9099     case TARGET_NR_mmap:
9100 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9101     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9102     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9103     || defined(TARGET_S390X)
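        /* Editor's note (not part of the original source): on these ABIs
           the old-style mmap syscall passes a single guest pointer to a
           block of six arguments (addr, len, prot, flags, fd, offset),
           which is unpacked below. */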
9104         {
9105             abi_ulong *v;
9106             abi_ulong v1, v2, v3, v4, v5, v6;
9107             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9108                 return -TARGET_EFAULT;
9109             v1 = tswapal(v[0]);
9110             v2 = tswapal(v[1]);
9111             v3 = tswapal(v[2]);
9112             v4 = tswapal(v[3]);
9113             v5 = tswapal(v[4]);
9114             v6 = tswapal(v[5]);
9115             unlock_user(v, arg1, 0);
9116             ret = get_errno(target_mmap(v1, v2, v3,
9117                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9118                                         v5, v6));
9119         }
9120 #else
9121         ret = get_errno(target_mmap(arg1, arg2, arg3,
9122                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9123                                     arg5,
9124                                     arg6));
9125 #endif
9126         return ret;
9127 #endif
9128 #ifdef TARGET_NR_mmap2
9129     case TARGET_NR_mmap2:
9130 #ifndef MMAP_SHIFT
9131 #define MMAP_SHIFT 12
9132 #endif
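        /* mmap2 passes the file offset in units of (1 << MMAP_SHIFT) bytes
           (4096 by default) rather than in bytes, hence the shift below. */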
9133         ret = target_mmap(arg1, arg2, arg3,
9134                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9135                           arg5, arg6 << MMAP_SHIFT);
9136         return get_errno(ret);
9137 #endif
9138     case TARGET_NR_munmap:
9139         return get_errno(target_munmap(arg1, arg2));
9140     case TARGET_NR_mprotect:
9141         {
9142             TaskState *ts = cpu->opaque;
9143             /* Special hack to detect libc making the stack executable.  */
9144             if ((arg3 & PROT_GROWSDOWN)
9145                 && arg1 >= ts->info->stack_limit
9146                 && arg1 <= ts->info->start_stack) {
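                /* Editor's note (not part of the original source): the
                   guest stack is an ordinary host mapping here, so
                   PROT_GROWSDOWN is dropped and the protection change is
                   widened down to the recorded stack limit instead. */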
9147                 arg3 &= ~PROT_GROWSDOWN;
9148                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9149                 arg1 = ts->info->stack_limit;
9150             }
9151         }
9152         return get_errno(target_mprotect(arg1, arg2, arg3));
9153 #ifdef TARGET_NR_mremap
9154     case TARGET_NR_mremap:
9155         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9156 #endif
9157         /* ??? msync/mlock/munlock are broken for softmmu.  */
9158 #ifdef TARGET_NR_msync
9159     case TARGET_NR_msync:
9160         return get_errno(msync(g2h(arg1), arg2, arg3));
9161 #endif
9162 #ifdef TARGET_NR_mlock
9163     case TARGET_NR_mlock:
9164         return get_errno(mlock(g2h(arg1), arg2));
9165 #endif
9166 #ifdef TARGET_NR_munlock
9167     case TARGET_NR_munlock:
9168         return get_errno(munlock(g2h(arg1), arg2));
9169 #endif
9170 #ifdef TARGET_NR_mlockall
9171     case TARGET_NR_mlockall:
9172         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9173 #endif
9174 #ifdef TARGET_NR_munlockall
9175     case TARGET_NR_munlockall:
9176         return get_errno(munlockall());
9177 #endif
9178 #ifdef TARGET_NR_truncate
9179     case TARGET_NR_truncate:
9180         if (!(p = lock_user_string(arg1)))
9181             return -TARGET_EFAULT;
9182         ret = get_errno(truncate(p, arg2));
9183         unlock_user(p, arg1, 0);
9184         return ret;
9185 #endif
9186 #ifdef TARGET_NR_ftruncate
9187     case TARGET_NR_ftruncate:
9188         return get_errno(ftruncate(arg1, arg2));
9189 #endif
9190     case TARGET_NR_fchmod:
9191         return get_errno(fchmod(arg1, arg2));
9192 #if defined(TARGET_NR_fchmodat)
9193     case TARGET_NR_fchmodat:
9194         if (!(p = lock_user_string(arg2)))
9195             return -TARGET_EFAULT;
9196         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9197         unlock_user(p, arg2, 0);
9198         return ret;
9199 #endif
9200     case TARGET_NR_getpriority:
9201         /* Note that negative values are valid for getpriority, so we must
9202            differentiate based on errno settings.  */
9203         errno = 0;
9204         ret = getpriority(arg1, arg2);
9205         if (ret == -1 && errno != 0) {
9206             return -host_to_target_errno(errno);
9207         }
9208 #ifdef TARGET_ALPHA
9209         /* Return value is the unbiased priority.  Signal no error.  */
9210         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9211 #else
9212         /* Return value is a biased priority to avoid negative numbers.  */
9213         ret = 20 - ret;
9214 #endif
9215         return ret;
9216     case TARGET_NR_setpriority:
9217         return get_errno(setpriority(arg1, arg2, arg3));
9218 #ifdef TARGET_NR_statfs
9219     case TARGET_NR_statfs:
9220         if (!(p = lock_user_string(arg1))) {
9221             return -TARGET_EFAULT;
9222         }
9223         ret = get_errno(statfs(path(p), &stfs));
9224         unlock_user(p, arg1, 0);
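    /* The fstatfs case below jumps here to share the conversion of the
       host statfs result into the target layout.  */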
9225     convert_statfs:
9226         if (!is_error(ret)) {
9227             struct target_statfs *target_stfs;
9228 
9229             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9230                 return -TARGET_EFAULT;
9231             __put_user(stfs.f_type, &target_stfs->f_type);
9232             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9233             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9234             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9235             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9236             __put_user(stfs.f_files, &target_stfs->f_files);
9237             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9238             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9239             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9240             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9241             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9242 #ifdef _STATFS_F_FLAGS
9243             __put_user(stfs.f_flags, &target_stfs->f_flags);
9244 #else
9245             __put_user(0, &target_stfs->f_flags);
9246 #endif
9247             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9248             unlock_user_struct(target_stfs, arg2, 1);
9249         }
9250         return ret;
9251 #endif
9252 #ifdef TARGET_NR_fstatfs
9253     case TARGET_NR_fstatfs:
9254         ret = get_errno(fstatfs(arg1, &stfs));
9255         goto convert_statfs;
9256 #endif
9257 #ifdef TARGET_NR_statfs64
9258     case TARGET_NR_statfs64:
9259         if (!(p = lock_user_string(arg1))) {
9260             return -TARGET_EFAULT;
9261         }
9262         ret = get_errno(statfs(path(p), &stfs));
9263         unlock_user(p, arg1, 0);
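    /* The fstatfs64 case below jumps here to share the conversion of the
       host statfs result into the target_statfs64 layout.  */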
9264     convert_statfs64:
9265         if (!is_error(ret)) {
9266             struct target_statfs64 *target_stfs;
9267 
9268             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9269                 return -TARGET_EFAULT;
9270             __put_user(stfs.f_type, &target_stfs->f_type);
9271             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9272             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9273             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9274             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9275             __put_user(stfs.f_files, &target_stfs->f_files);
9276             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9277             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9278             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9279             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9280             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9281             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9282             unlock_user_struct(target_stfs, arg3, 1);
9283         }
9284         return ret;
9285     case TARGET_NR_fstatfs64:
9286         ret = get_errno(fstatfs(arg1, &stfs));
9287         goto convert_statfs64;
9288 #endif
9289 #ifdef TARGET_NR_socketcall
9290     case TARGET_NR_socketcall:
9291         return do_socketcall(arg1, arg2);
9292 #endif
9293 #ifdef TARGET_NR_accept
9294     case TARGET_NR_accept:
9295         return do_accept4(arg1, arg2, arg3, 0);
9296 #endif
9297 #ifdef TARGET_NR_accept4
9298     case TARGET_NR_accept4:
9299         return do_accept4(arg1, arg2, arg3, arg4);
9300 #endif
9301 #ifdef TARGET_NR_bind
9302     case TARGET_NR_bind:
9303         return do_bind(arg1, arg2, arg3);
9304 #endif
9305 #ifdef TARGET_NR_connect
9306     case TARGET_NR_connect:
9307         return do_connect(arg1, arg2, arg3);
9308 #endif
9309 #ifdef TARGET_NR_getpeername
9310     case TARGET_NR_getpeername:
9311         return do_getpeername(arg1, arg2, arg3);
9312 #endif
9313 #ifdef TARGET_NR_getsockname
9314     case TARGET_NR_getsockname:
9315         return do_getsockname(arg1, arg2, arg3);
9316 #endif
9317 #ifdef TARGET_NR_getsockopt
9318     case TARGET_NR_getsockopt:
9319         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9320 #endif
9321 #ifdef TARGET_NR_listen
9322     case TARGET_NR_listen:
9323         return get_errno(listen(arg1, arg2));
9324 #endif
9325 #ifdef TARGET_NR_recv
9326     case TARGET_NR_recv:
9327         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9328 #endif
9329 #ifdef TARGET_NR_recvfrom
9330     case TARGET_NR_recvfrom:
9331         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9332 #endif
9333 #ifdef TARGET_NR_recvmsg
9334     case TARGET_NR_recvmsg:
9335         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9336 #endif
9337 #ifdef TARGET_NR_send
9338     case TARGET_NR_send:
9339         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9340 #endif
9341 #ifdef TARGET_NR_sendmsg
9342     case TARGET_NR_sendmsg:
9343         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9344 #endif
9345 #ifdef TARGET_NR_sendmmsg
9346     case TARGET_NR_sendmmsg:
9347         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9348 #endif
9349 #ifdef TARGET_NR_recvmmsg
9350     case TARGET_NR_recvmmsg:
9351         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9352 #endif
9353 #ifdef TARGET_NR_sendto
9354     case TARGET_NR_sendto:
9355         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9356 #endif
9357 #ifdef TARGET_NR_shutdown
9358     case TARGET_NR_shutdown:
9359         return get_errno(shutdown(arg1, arg2));
9360 #endif
9361 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9362     case TARGET_NR_getrandom:
9363         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9364         if (!p) {
9365             return -TARGET_EFAULT;
9366         }
9367         ret = get_errno(getrandom(p, arg2, arg3));
9368         unlock_user(p, arg1, ret);
9369         return ret;
9370 #endif
9371 #ifdef TARGET_NR_socket
9372     case TARGET_NR_socket:
9373         return do_socket(arg1, arg2, arg3);
9374 #endif
9375 #ifdef TARGET_NR_socketpair
9376     case TARGET_NR_socketpair:
9377         return do_socketpair(arg1, arg2, arg3, arg4);
9378 #endif
9379 #ifdef TARGET_NR_setsockopt
9380     case TARGET_NR_setsockopt:
9381         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9382 #endif
9383 #if defined(TARGET_NR_syslog)
9384     case TARGET_NR_syslog:
9385         {
9386             int len = arg2;
9387 
9388             switch (arg1) {
9389             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9390             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9391             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9392             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9393             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9394             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9395             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9396             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9397                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9398             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9399             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9400             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9401                 {
9402                     if (len < 0) {
9403                         return -TARGET_EINVAL;
9404                     }
9405                     if (len == 0) {
9406                         return 0;
9407                     }
9408                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9409                     if (!p) {
9410                         return -TARGET_EFAULT;
9411                     }
9412                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9413                     unlock_user(p, arg2, arg3);
9414                 }
9415                 return ret;
9416             default:
9417                 return -TARGET_EINVAL;
9418             }
9419         }
9420         break;
9421 #endif
9422     case TARGET_NR_setitimer:
9423         {
9424             struct itimerval value, ovalue, *pvalue;
9425 
9426             if (arg2) {
9427                 pvalue = &value;
9428                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9429                     || copy_from_user_timeval(&pvalue->it_value,
9430                                               arg2 + sizeof(struct target_timeval)))
9431                     return -TARGET_EFAULT;
9432             } else {
9433                 pvalue = NULL;
9434             }
9435             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9436             if (!is_error(ret) && arg3) {
9437                 if (copy_to_user_timeval(arg3,
9438                                          &ovalue.it_interval)
9439                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9440                                             &ovalue.it_value))
9441                     return -TARGET_EFAULT;
9442             }
9443         }
9444         return ret;
9445     case TARGET_NR_getitimer:
9446         {
9447             struct itimerval value;
9448 
9449             ret = get_errno(getitimer(arg1, &value));
9450             if (!is_error(ret) && arg2) {
9451                 if (copy_to_user_timeval(arg2,
9452                                          &value.it_interval)
9453                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9454                                             &value.it_value))
9455                     return -TARGET_EFAULT;
9456             }
9457         }
9458         return ret;
9459 #ifdef TARGET_NR_stat
9460     case TARGET_NR_stat:
9461         if (!(p = lock_user_string(arg1))) {
9462             return -TARGET_EFAULT;
9463         }
9464         ret = get_errno(stat(path(p), &st));
9465         unlock_user(p, arg1, 0);
9466         goto do_stat;
9467 #endif
9468 #ifdef TARGET_NR_lstat
9469     case TARGET_NR_lstat:
9470         if (!(p = lock_user_string(arg1))) {
9471             return -TARGET_EFAULT;
9472         }
9473         ret = get_errno(lstat(path(p), &st));
9474         unlock_user(p, arg1, 0);
9475         goto do_stat;
9476 #endif
9477 #ifdef TARGET_NR_fstat
9478     case TARGET_NR_fstat:
9479         {
9480             ret = get_errno(fstat(arg1, &st));
9481 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9482         do_stat:
9483 #endif
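            /* stat and lstat jump to do_stat above; from this point the host
               struct stat in 'st' is copied field by field into the guest's
               target_stat layout.  */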
9484             if (!is_error(ret)) {
9485                 struct target_stat *target_st;
9486 
9487                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9488                     return -TARGET_EFAULT;
9489                 memset(target_st, 0, sizeof(*target_st));
9490                 __put_user(st.st_dev, &target_st->st_dev);
9491                 __put_user(st.st_ino, &target_st->st_ino);
9492                 __put_user(st.st_mode, &target_st->st_mode);
9493                 __put_user(st.st_uid, &target_st->st_uid);
9494                 __put_user(st.st_gid, &target_st->st_gid);
9495                 __put_user(st.st_nlink, &target_st->st_nlink);
9496                 __put_user(st.st_rdev, &target_st->st_rdev);
9497                 __put_user(st.st_size, &target_st->st_size);
9498                 __put_user(st.st_blksize, &target_st->st_blksize);
9499                 __put_user(st.st_blocks, &target_st->st_blocks);
9500                 __put_user(st.st_atime, &target_st->target_st_atime);
9501                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9502                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9503 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9504     defined(TARGET_STAT_HAVE_NSEC)
9505                 __put_user(st.st_atim.tv_nsec,
9506                            &target_st->target_st_atime_nsec);
9507                 __put_user(st.st_mtim.tv_nsec,
9508                            &target_st->target_st_mtime_nsec);
9509                 __put_user(st.st_ctim.tv_nsec,
9510                            &target_st->target_st_ctime_nsec);
9511 #endif
9512                 unlock_user_struct(target_st, arg2, 1);
9513             }
9514         }
9515         return ret;
9516 #endif
9517     case TARGET_NR_vhangup:
9518         return get_errno(vhangup());
9519 #ifdef TARGET_NR_syscall
9520     case TARGET_NR_syscall:
9521         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9522                           arg6, arg7, arg8, 0);
9523 #endif
9524 #if defined(TARGET_NR_wait4)
9525     case TARGET_NR_wait4:
9526         {
9527             int status;
9528             abi_long status_ptr = arg2;
9529             struct rusage rusage, *rusage_ptr;
9530             abi_ulong target_rusage = arg4;
9531             abi_long rusage_err;
9532             if (target_rusage)
9533                 rusage_ptr = &rusage;
9534             else
9535                 rusage_ptr = NULL;
9536             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9537             if (!is_error(ret)) {
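                /* When ret is 0 (e.g. WNOHANG with no state change) nothing
                   was reaped and 'status' is not meaningful, so only copy it
                   back when a child was actually reported.  */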
9538                 if (status_ptr && ret) {
9539                     status = host_to_target_waitstatus(status);
9540                     if (put_user_s32(status, status_ptr))
9541                         return -TARGET_EFAULT;
9542                 }
9543                 if (target_rusage) {
9544                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9545                     if (rusage_err) {
9546                         ret = rusage_err;
9547                     }
9548                 }
9549             }
9550         }
9551         return ret;
9552 #endif
9553 #ifdef TARGET_NR_swapoff
9554     case TARGET_NR_swapoff:
9555         if (!(p = lock_user_string(arg1)))
9556             return -TARGET_EFAULT;
9557         ret = get_errno(swapoff(p));
9558         unlock_user(p, arg1, 0);
9559         return ret;
9560 #endif
9561     case TARGET_NR_sysinfo:
9562         {
9563             struct target_sysinfo *target_value;
9564             struct sysinfo value;
9565             ret = get_errno(sysinfo(&value));
9566             if (!is_error(ret) && arg1)
9567             {
9568                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9569                     return -TARGET_EFAULT;
9570                 __put_user(value.uptime, &target_value->uptime);
9571                 __put_user(value.loads[0], &target_value->loads[0]);
9572                 __put_user(value.loads[1], &target_value->loads[1]);
9573                 __put_user(value.loads[2], &target_value->loads[2]);
9574                 __put_user(value.totalram, &target_value->totalram);
9575                 __put_user(value.freeram, &target_value->freeram);
9576                 __put_user(value.sharedram, &target_value->sharedram);
9577                 __put_user(value.bufferram, &target_value->bufferram);
9578                 __put_user(value.totalswap, &target_value->totalswap);
9579                 __put_user(value.freeswap, &target_value->freeswap);
9580                 __put_user(value.procs, &target_value->procs);
9581                 __put_user(value.totalhigh, &target_value->totalhigh);
9582                 __put_user(value.freehigh, &target_value->freehigh);
9583                 __put_user(value.mem_unit, &target_value->mem_unit);
9584                 unlock_user_struct(target_value, arg1, 1);
9585             }
9586         }
9587         return ret;
9588 #ifdef TARGET_NR_ipc
9589     case TARGET_NR_ipc:
9590         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9591 #endif
9592 #ifdef TARGET_NR_semget
9593     case TARGET_NR_semget:
9594         return get_errno(semget(arg1, arg2, arg3));
9595 #endif
9596 #ifdef TARGET_NR_semop
9597     case TARGET_NR_semop:
9598         return do_semop(arg1, arg2, arg3);
9599 #endif
9600 #ifdef TARGET_NR_semctl
9601     case TARGET_NR_semctl:
9602         return do_semctl(arg1, arg2, arg3, arg4);
9603 #endif
9604 #ifdef TARGET_NR_msgctl
9605     case TARGET_NR_msgctl:
9606         return do_msgctl(arg1, arg2, arg3);
9607 #endif
9608 #ifdef TARGET_NR_msgget
9609     case TARGET_NR_msgget:
9610         return get_errno(msgget(arg1, arg2));
9611 #endif
9612 #ifdef TARGET_NR_msgrcv
9613     case TARGET_NR_msgrcv:
9614         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9615 #endif
9616 #ifdef TARGET_NR_msgsnd
9617     case TARGET_NR_msgsnd:
9618         return do_msgsnd(arg1, arg2, arg3, arg4);
9619 #endif
9620 #ifdef TARGET_NR_shmget
9621     case TARGET_NR_shmget:
9622         return get_errno(shmget(arg1, arg2, arg3));
9623 #endif
9624 #ifdef TARGET_NR_shmctl
9625     case TARGET_NR_shmctl:
9626         return do_shmctl(arg1, arg2, arg3);
9627 #endif
9628 #ifdef TARGET_NR_shmat
9629     case TARGET_NR_shmat:
9630         return do_shmat(cpu_env, arg1, arg2, arg3);
9631 #endif
9632 #ifdef TARGET_NR_shmdt
9633     case TARGET_NR_shmdt:
9634         return do_shmdt(arg1);
9635 #endif
9636     case TARGET_NR_fsync:
9637         return get_errno(fsync(arg1));
9638     case TARGET_NR_clone:
9639         /* Linux manages to have three different orderings for its
9640          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9641          * match the kernel's CONFIG_CLONE_* settings.
9642          * Microblaze is further special in that it uses a sixth
9643          * implicit argument to clone for the TLS pointer.
9644          */
9645 #if defined(TARGET_MICROBLAZE)
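        /*
         * Roughly, the guest argument orders are:
         *   default:          flags, newsp, parent_tidptr, child_tidptr, tls
         *   CLONE_BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
         *   CLONE_BACKWARDS2: newsp, flags, parent_tidptr, child_tidptr, tls
         * which is what the per-variant argument shuffles below undo before
         * handing everything to do_fork().
         */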
9646         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9647 #elif defined(TARGET_CLONE_BACKWARDS)
9648         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9649 #elif defined(TARGET_CLONE_BACKWARDS2)
9650         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9651 #else
9652         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9653 #endif
9654         return ret;
9655 #ifdef __NR_exit_group
9656         /* new thread calls */
9657     case TARGET_NR_exit_group:
9658         preexit_cleanup(cpu_env, arg1);
9659         return get_errno(exit_group(arg1));
9660 #endif
9661     case TARGET_NR_setdomainname:
9662         if (!(p = lock_user_string(arg1)))
9663             return -TARGET_EFAULT;
9664         ret = get_errno(setdomainname(p, arg2));
9665         unlock_user(p, arg1, 0);
9666         return ret;
9667     case TARGET_NR_uname:
9668         /* no need to transcode because we use the linux syscall */
9669         {
9670             struct new_utsname * buf;
9671 
9672             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9673                 return -TARGET_EFAULT;
9674             ret = get_errno(sys_uname(buf));
9675             if (!is_error(ret)) {
9676                 /* Overwrite the native machine name with whatever is being
9677                    emulated. */
9678                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9679                           sizeof(buf->machine));
9680                 /* Allow the user to override the reported release.  */
9681                 if (qemu_uname_release && *qemu_uname_release) {
9682                     g_strlcpy(buf->release, qemu_uname_release,
9683                               sizeof(buf->release));
9684                 }
9685             }
9686             unlock_user_struct(buf, arg1, 1);
9687         }
9688         return ret;
9689 #ifdef TARGET_I386
9690     case TARGET_NR_modify_ldt:
9691         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9692 #if !defined(TARGET_X86_64)
9693     case TARGET_NR_vm86:
9694         return do_vm86(cpu_env, arg1, arg2);
9695 #endif
9696 #endif
9697 #if defined(TARGET_NR_adjtimex)
9698     case TARGET_NR_adjtimex:
9699         {
9700             struct timex host_buf;
9701 
9702             if (target_to_host_timex(&host_buf, arg1) != 0) {
9703                 return -TARGET_EFAULT;
9704             }
9705             ret = get_errno(adjtimex(&host_buf));
9706             if (!is_error(ret)) {
9707                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9708                     return -TARGET_EFAULT;
9709                 }
9710             }
9711         }
9712         return ret;
9713 #endif
9714 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9715     case TARGET_NR_clock_adjtime:
9716         {
9717             struct timex htx, *phtx = &htx;
9718 
9719             if (target_to_host_timex(phtx, arg2) != 0) {
9720                 return -TARGET_EFAULT;
9721             }
9722             ret = get_errno(clock_adjtime(arg1, phtx));
9723             if (!is_error(ret) && phtx) {
9724                 if (host_to_target_timex(arg2, phtx) != 0) {
9725                     return -TARGET_EFAULT;
9726                 }
9727             }
9728         }
9729         return ret;
9730 #endif
9731     case TARGET_NR_getpgid:
9732         return get_errno(getpgid(arg1));
9733     case TARGET_NR_fchdir:
9734         return get_errno(fchdir(arg1));
9735     case TARGET_NR_personality:
9736         return get_errno(personality(arg1));
9737 #ifdef TARGET_NR__llseek /* Not on alpha */
9738     case TARGET_NR__llseek:
9739         {
9740             int64_t res;
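            /* Hosts without an _llseek syscall (64-bit hosts) can perform
               the combined 64-bit seek directly with lseek().  */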
9741 #if !defined(__NR_llseek)
9742             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9743             if (res == -1) {
9744                 ret = get_errno(res);
9745             } else {
9746                 ret = 0;
9747             }
9748 #else
9749             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9750 #endif
9751             if ((ret == 0) && put_user_s64(res, arg4)) {
9752                 return -TARGET_EFAULT;
9753             }
9754         }
9755         return ret;
9756 #endif
9757 #ifdef TARGET_NR_getdents
9758     case TARGET_NR_getdents:
9759 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9760 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9761         {
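            /*
             * 32-bit guest on a 64-bit host: the host linux_dirent carries
             * 64-bit d_ino/d_off fields, so read into a bounce buffer and
             * repack each record into the narrower target_dirent layout.
             */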
9762             struct target_dirent *target_dirp;
9763             struct linux_dirent *dirp;
9764             abi_long count = arg3;
9765 
9766             dirp = g_try_malloc(count);
9767             if (!dirp) {
9768                 return -TARGET_ENOMEM;
9769             }
9770 
9771             ret = get_errno(sys_getdents(arg1, dirp, count));
9772             if (!is_error(ret)) {
9773                 struct linux_dirent *de;
9774                 struct target_dirent *tde;
9775                 int len = ret;
9776                 int reclen, treclen;
9777                 int count1, tnamelen;
9778 
9779                 count1 = 0;
9780                 de = dirp;
9781                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9782                     return -TARGET_EFAULT;
9783                 tde = target_dirp;
9784                 while (len > 0) {
9785                     reclen = de->d_reclen;
9786                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9787                     assert(tnamelen >= 0);
9788                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9789                     assert(count1 + treclen <= count);
9790                     tde->d_reclen = tswap16(treclen);
9791                     tde->d_ino = tswapal(de->d_ino);
9792                     tde->d_off = tswapal(de->d_off);
9793                     memcpy(tde->d_name, de->d_name, tnamelen);
9794                     de = (struct linux_dirent *)((char *)de + reclen);
9795                     len -= reclen;
9796                     tde = (struct target_dirent *)((char *)tde + treclen);
9797                     count1 += treclen;
9798                 }
9799                 ret = count1;
9800                 unlock_user(target_dirp, arg2, ret);
9801             }
9802             g_free(dirp);
9803         }
9804 #else
9805         {
9806             struct linux_dirent *dirp;
9807             abi_long count = arg3;
9808 
9809             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9810                 return -TARGET_EFAULT;
9811             ret = get_errno(sys_getdents(arg1, dirp, count));
9812             if (!is_error(ret)) {
9813                 struct linux_dirent *de;
9814                 int len = ret;
9815                 int reclen;
9816                 de = dirp;
9817                 while (len > 0) {
9818                     reclen = de->d_reclen;
9819                     if (reclen > len)
9820                         break;
9821                     de->d_reclen = tswap16(reclen);
9822                     tswapls(&de->d_ino);
9823                     tswapls(&de->d_off);
9824                     de = (struct linux_dirent *)((char *)de + reclen);
9825                     len -= reclen;
9826                 }
9827             }
9828             unlock_user(dirp, arg2, ret);
9829         }
9830 #endif
9831 #else
9832         /* Implement getdents in terms of getdents64 */
9833         {
9834             struct linux_dirent64 *dirp;
9835             abi_long count = arg3;
9836 
9837             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9838             if (!dirp) {
9839                 return -TARGET_EFAULT;
9840             }
9841             ret = get_errno(sys_getdents64(arg1, dirp, count));
9842             if (!is_error(ret)) {
9843                 /* Convert the dirent64 structs to target dirent.  We do this
9844                  * in-place, since we can guarantee that a target_dirent is no
9845                  * larger than a dirent64; however this means we have to be
9846                  * careful to read everything before writing in the new format.
9847                  */
9848                 struct linux_dirent64 *de;
9849                 struct target_dirent *tde;
9850                 int len = ret;
9851                 int tlen = 0;
9852 
9853                 de = dirp;
9854                 tde = (struct target_dirent *)dirp;
9855                 while (len > 0) {
9856                     int namelen, treclen;
9857                     int reclen = de->d_reclen;
9858                     uint64_t ino = de->d_ino;
9859                     int64_t off = de->d_off;
9860                     uint8_t type = de->d_type;
9861 
9862                     namelen = strlen(de->d_name);
9863                     treclen = offsetof(struct target_dirent, d_name)
9864                         + namelen + 2;
9865                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9866 
9867                     memmove(tde->d_name, de->d_name, namelen + 1);
9868                     tde->d_ino = tswapal(ino);
9869                     tde->d_off = tswapal(off);
9870                     tde->d_reclen = tswap16(treclen);
9871                     /* The target_dirent type is in what was formerly a padding
9872                      * byte at the end of the structure:
9873                      */
9874                     *(((char *)tde) + treclen - 1) = type;
9875 
9876                     de = (struct linux_dirent64 *)((char *)de + reclen);
9877                     tde = (struct target_dirent *)((char *)tde + treclen);
9878                     len -= reclen;
9879                     tlen += treclen;
9880                 }
9881                 ret = tlen;
9882             }
9883             unlock_user(dirp, arg2, ret);
9884         }
9885 #endif
9886         return ret;
9887 #endif /* TARGET_NR_getdents */
9888 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9889     case TARGET_NR_getdents64:
9890         {
9891             struct linux_dirent64 *dirp;
9892             abi_long count = arg3;
9893             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9894                 return -TARGET_EFAULT;
9895             ret = get_errno(sys_getdents64(arg1, dirp, count));
9896             if (!is_error(ret)) {
9897                 struct linux_dirent64 *de;
9898                 int len = ret;
9899                 int reclen;
9900                 de = dirp;
9901                 while (len > 0) {
9902                     reclen = de->d_reclen;
9903                     if (reclen > len)
9904                         break;
9905                     de->d_reclen = tswap16(reclen);
9906                     tswap64s((uint64_t *)&de->d_ino);
9907                     tswap64s((uint64_t *)&de->d_off);
9908                     de = (struct linux_dirent64 *)((char *)de + reclen);
9909                     len -= reclen;
9910                 }
9911             }
9912             unlock_user(dirp, arg2, ret);
9913         }
9914         return ret;
9915 #endif /* TARGET_NR_getdents64 */
9916 #if defined(TARGET_NR__newselect)
9917     case TARGET_NR__newselect:
9918         return do_select(arg1, arg2, arg3, arg4, arg5);
9919 #endif
9920 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9921 # ifdef TARGET_NR_poll
9922     case TARGET_NR_poll:
9923 # endif
9924 # ifdef TARGET_NR_ppoll
9925     case TARGET_NR_ppoll:
9926 # endif
9927         {
9928             struct target_pollfd *target_pfd;
9929             unsigned int nfds = arg2;
9930             struct pollfd *pfd;
9931             unsigned int i;
9932 
9933             pfd = NULL;
9934             target_pfd = NULL;
9935             if (nfds) {
9936                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9937                     return -TARGET_EINVAL;
9938                 }
9939 
9940                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9941                                        sizeof(struct target_pollfd) * nfds, 1);
9942                 if (!target_pfd) {
9943                     return -TARGET_EFAULT;
9944                 }
9945 
9946                 pfd = alloca(sizeof(struct pollfd) * nfds);
9947                 for (i = 0; i < nfds; i++) {
9948                     pfd[i].fd = tswap32(target_pfd[i].fd);
9949                     pfd[i].events = tswap16(target_pfd[i].events);
9950                 }
9951             }
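            /* The pollfd conversion above is shared; the timeout and signal
               mask handling differ, so dispatch on the syscall that was
               actually invoked.  */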
9952 
9953             switch (num) {
9954 # ifdef TARGET_NR_ppoll
9955             case TARGET_NR_ppoll:
9956             {
9957                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9958                 target_sigset_t *target_set;
9959                 sigset_t _set, *set = &_set;
9960 
9961                 if (arg3) {
9962                     if (target_to_host_timespec(timeout_ts, arg3)) {
9963                         unlock_user(target_pfd, arg1, 0);
9964                         return -TARGET_EFAULT;
9965                     }
9966                 } else {
9967                     timeout_ts = NULL;
9968                 }
9969 
9970                 if (arg4) {
9971                     if (arg5 != sizeof(target_sigset_t)) {
9972                         unlock_user(target_pfd, arg1, 0);
9973                         return -TARGET_EINVAL;
9974                     }
9975 
9976                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9977                     if (!target_set) {
9978                         unlock_user(target_pfd, arg1, 0);
9979                         return -TARGET_EFAULT;
9980                     }
9981                     target_to_host_sigset(set, target_set);
9982                 } else {
9983                     set = NULL;
9984                 }
9985 
9986                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9987                                            set, SIGSET_T_SIZE));
9988 
9989                 if (!is_error(ret) && arg3) {
9990                     host_to_target_timespec(arg3, timeout_ts);
9991                 }
9992                 if (arg4) {
9993                     unlock_user(target_set, arg4, 0);
9994                 }
9995                 break;
9996             }
9997 # endif
9998 # ifdef TARGET_NR_poll
9999             case TARGET_NR_poll:
10000             {
10001                 struct timespec ts, *pts;
10002 
10003                 if (arg3 >= 0) {
10004                     /* Convert ms to secs, ns */
10005                     ts.tv_sec = arg3 / 1000;
10006                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10007                     pts = &ts;
10008                 } else {
10009                     /* -ve poll() timeout means "infinite" */
10010                     pts = NULL;
10011                 }
10012                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10013                 break;
10014             }
10015 # endif
10016             default:
10017                 g_assert_not_reached();
10018             }
10019 
10020             if (!is_error(ret)) {
10021                 for (i = 0; i < nfds; i++) {
10022                     target_pfd[i].revents = tswap16(pfd[i].revents);
10023                 }
10024             }
10025             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10026         }
10027         return ret;
10028 #endif
10029     case TARGET_NR_flock:
10030         /* NOTE: the flock constant seems to be the same for every
10031            Linux platform */
10032         return get_errno(safe_flock(arg1, arg2));
10033     case TARGET_NR_readv:
10034         {
10035             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10036             if (vec != NULL) {
10037                 ret = get_errno(safe_readv(arg1, vec, arg3));
10038                 unlock_iovec(vec, arg2, arg3, 1);
10039             } else {
10040                 ret = -host_to_target_errno(errno);
10041             }
10042         }
10043         return ret;
10044     case TARGET_NR_writev:
10045         {
10046             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10047             if (vec != NULL) {
10048                 ret = get_errno(safe_writev(arg1, vec, arg3));
10049                 unlock_iovec(vec, arg2, arg3, 0);
10050             } else {
10051                 ret = -host_to_target_errno(errno);
10052             }
10053         }
10054         return ret;
10055 #if defined(TARGET_NR_preadv)
10056     case TARGET_NR_preadv:
10057         {
10058             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10059             if (vec != NULL) {
10060                 unsigned long low, high;
10061 
10062                 target_to_host_low_high(arg4, arg5, &low, &high);
10063                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10064                 unlock_iovec(vec, arg2, arg3, 1);
10065             } else {
10066                 ret = -host_to_target_errno(errno);
10067             }
10068         }
10069         return ret;
10070 #endif
10071 #if defined(TARGET_NR_pwritev)
10072     case TARGET_NR_pwritev:
10073         {
10074             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10075             if (vec != NULL) {
10076                 unsigned long low, high;
10077 
10078                 target_to_host_low_high(arg4, arg5, &low, &high);
10079                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10080                 unlock_iovec(vec, arg2, arg3, 0);
10081             } else {
10082                 ret = -host_to_target_errno(errno);
10083             }
10084         }
10085         return ret;
10086 #endif
10087     case TARGET_NR_getsid:
10088         return get_errno(getsid(arg1));
10089 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10090     case TARGET_NR_fdatasync:
10091         return get_errno(fdatasync(arg1));
10092 #endif
10093 #ifdef TARGET_NR__sysctl
10094     case TARGET_NR__sysctl:
10095         /* We don't implement this, but ENOTDIR is always a safe
10096            return value. */
10097         return -TARGET_ENOTDIR;
10098 #endif
10099     case TARGET_NR_sched_getaffinity:
10100         {
10101             unsigned int mask_size;
10102             unsigned long *mask;
10103 
10104             /*
10105              * sched_getaffinity needs multiples of ulong, so we need to take
10106              * care of mismatches between target ulong and host ulong sizes.
10107              */
10108             if (arg2 & (sizeof(abi_ulong) - 1)) {
10109                 return -TARGET_EINVAL;
10110             }
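            /* Round the guest's byte count up to a whole number of host
               'unsigned long's, the granularity the kernel interface uses. */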
10111             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10112 
10113             mask = alloca(mask_size);
10114             memset(mask, 0, mask_size);
10115             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10116 
10117             if (!is_error(ret)) {
10118                 if (ret > arg2) {
10119                     /* More data was returned than fits in the caller's buffer.
10120                      * This only happens if sizeof(abi_long) < sizeof(long)
10121                      * and the caller passed us a buffer holding an odd number
10122                      * of abi_longs. If the host kernel is actually using the
10123                      * extra 4 bytes then fail EINVAL; otherwise we can just
10124                      * ignore them and only copy the interesting part.
10125                      */
10126                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10127                     if (numcpus > arg2 * 8) {
10128                         return -TARGET_EINVAL;
10129                     }
10130                     ret = arg2;
10131                 }
10132 
10133                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10134                     return -TARGET_EFAULT;
10135                 }
10136             }
10137         }
10138         return ret;
10139     case TARGET_NR_sched_setaffinity:
10140         {
10141             unsigned int mask_size;
10142             unsigned long *mask;
10143 
10144             /*
10145              * sched_setaffinity needs multiples of ulong, so we need to take
10146              * care of mismatches between target ulong and host ulong sizes.
10147              */
10148             if (arg2 & (sizeof(abi_ulong) - 1)) {
10149                 return -TARGET_EINVAL;
10150             }
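            /* As for sched_getaffinity above: round up to whole host longs. */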
10151             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10152             mask = alloca(mask_size);
10153 
10154             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10155             if (ret) {
10156                 return ret;
10157             }
10158 
10159             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10160         }
10161     case TARGET_NR_getcpu:
10162         {
10163             unsigned cpu, node;
10164             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10165                                        arg2 ? &node : NULL,
10166                                        NULL));
10167             if (is_error(ret)) {
10168                 return ret;
10169             }
10170             if (arg1 && put_user_u32(cpu, arg1)) {
10171                 return -TARGET_EFAULT;
10172             }
10173             if (arg2 && put_user_u32(node, arg2)) {
10174                 return -TARGET_EFAULT;
10175             }
10176         }
10177         return ret;
10178     case TARGET_NR_sched_setparam:
10179         {
10180             struct sched_param *target_schp;
10181             struct sched_param schp;
10182 
10183             if (arg2 == 0) {
10184                 return -TARGET_EINVAL;
10185             }
10186             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10187                 return -TARGET_EFAULT;
10188             schp.sched_priority = tswap32(target_schp->sched_priority);
10189             unlock_user_struct(target_schp, arg2, 0);
10190             return get_errno(sched_setparam(arg1, &schp));
10191         }
10192     case TARGET_NR_sched_getparam:
10193         {
10194             struct sched_param *target_schp;
10195             struct sched_param schp;
10196 
10197             if (arg2 == 0) {
10198                 return -TARGET_EINVAL;
10199             }
10200             ret = get_errno(sched_getparam(arg1, &schp));
10201             if (!is_error(ret)) {
10202                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10203                     return -TARGET_EFAULT;
10204                 target_schp->sched_priority = tswap32(schp.sched_priority);
10205                 unlock_user_struct(target_schp, arg2, 1);
10206             }
10207         }
10208         return ret;
10209     case TARGET_NR_sched_setscheduler:
10210         {
10211             struct sched_param *target_schp;
10212             struct sched_param schp;
10213             if (arg3 == 0) {
10214                 return -TARGET_EINVAL;
10215             }
10216             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10217                 return -TARGET_EFAULT;
10218             schp.sched_priority = tswap32(target_schp->sched_priority);
10219             unlock_user_struct(target_schp, arg3, 0);
10220             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10221         }
10222     case TARGET_NR_sched_getscheduler:
10223         return get_errno(sched_getscheduler(arg1));
10224     case TARGET_NR_sched_yield:
10225         return get_errno(sched_yield());
10226     case TARGET_NR_sched_get_priority_max:
10227         return get_errno(sched_get_priority_max(arg1));
10228     case TARGET_NR_sched_get_priority_min:
10229         return get_errno(sched_get_priority_min(arg1));
10230 #ifdef TARGET_NR_sched_rr_get_interval
10231     case TARGET_NR_sched_rr_get_interval:
10232         {
10233             struct timespec ts;
10234             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10235             if (!is_error(ret)) {
10236                 ret = host_to_target_timespec(arg2, &ts);
10237             }
10238         }
10239         return ret;
10240 #endif
10241 #if defined(TARGET_NR_nanosleep)
10242     case TARGET_NR_nanosleep:
10243         {
10244             struct timespec req, rem;
10245             target_to_host_timespec(&req, arg1);
10246             ret = get_errno(safe_nanosleep(&req, &rem));
10247             if (is_error(ret) && arg2) {
10248                 host_to_target_timespec(arg2, &rem);
10249             }
10250         }
10251         return ret;
10252 #endif
10253     case TARGET_NR_prctl:
10254         switch (arg1) {
10255         case PR_GET_PDEATHSIG:
10256         {
10257             int deathsig;
10258             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10259             if (!is_error(ret) && arg2
10260                 && put_user_ual(deathsig, arg2)) {
10261                 return -TARGET_EFAULT;
10262             }
10263             return ret;
10264         }
10265 #ifdef PR_GET_NAME
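        /* The task comm name is at most 16 bytes including the trailing NUL
           (the kernel's TASK_COMM_LEN), hence the fixed-size buffers below. */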
10266         case PR_GET_NAME:
10267         {
10268             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10269             if (!name) {
10270                 return -TARGET_EFAULT;
10271             }
10272             ret = get_errno(prctl(arg1, (unsigned long)name,
10273                                   arg3, arg4, arg5));
10274             unlock_user(name, arg2, 16);
10275             return ret;
10276         }
10277         case PR_SET_NAME:
10278         {
10279             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10280             if (!name) {
10281                 return -TARGET_EFAULT;
10282             }
10283             ret = get_errno(prctl(arg1, (unsigned long)name,
10284                                   arg3, arg4, arg5));
10285             unlock_user(name, arg2, 0);
10286             return ret;
10287         }
10288 #endif
10289 #ifdef TARGET_MIPS
10290         case TARGET_PR_GET_FP_MODE:
10291         {
10292             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10293             ret = 0;
10294             if (env->CP0_Status & (1 << CP0St_FR)) {
10295                 ret |= TARGET_PR_FP_MODE_FR;
10296             }
10297             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10298                 ret |= TARGET_PR_FP_MODE_FRE;
10299             }
10300             return ret;
10301         }
10302         case TARGET_PR_SET_FP_MODE:
10303         {
10304             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10305             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10306             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10307             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10308             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10309 
10310             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10311                                             TARGET_PR_FP_MODE_FRE;
10312 
10313             /* If nothing to change, return right away, successfully.  */
10314             if (old_fr == new_fr && old_fre == new_fre) {
10315                 return 0;
10316             }
10317             /* Check the value is valid */
10318             if (arg2 & ~known_bits) {
10319                 return -TARGET_EOPNOTSUPP;
10320             }
10321             /* Setting FRE without FR is not supported.  */
10322             if (new_fre && !new_fr) {
10323                 return -TARGET_EOPNOTSUPP;
10324             }
10325             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10326                 /* FR1 is not supported */
10327                 return -TARGET_EOPNOTSUPP;
10328             }
10329             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10330                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10331                 /* cannot set FR=0 */
10332                 return -TARGET_EOPNOTSUPP;
10333             }
10334             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10335                 /* Cannot set FRE=1 */
10336                 return -TARGET_EOPNOTSUPP;
10337             }
10338 
10339             int i;
10340             fpr_t *fpr = env->active_fpu.fpr;
10341             for (i = 0; i < 32 ; i += 2) {
10342                 if (!old_fr && new_fr) {
10343                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10344                 } else if (old_fr && !new_fr) {
10345                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10346                 }
10347             }
10348 
10349             if (new_fr) {
10350                 env->CP0_Status |= (1 << CP0St_FR);
10351                 env->hflags |= MIPS_HFLAG_F64;
10352             } else {
10353                 env->CP0_Status &= ~(1 << CP0St_FR);
10354                 env->hflags &= ~MIPS_HFLAG_F64;
10355             }
10356             if (new_fre) {
10357                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10358                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10359                     env->hflags |= MIPS_HFLAG_FRE;
10360                 }
10361             } else {
10362                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10363                 env->hflags &= ~MIPS_HFLAG_FRE;
10364             }
10365 
10366             return 0;
10367         }
10368 #endif /* MIPS */
10369 #ifdef TARGET_AARCH64
10370         case TARGET_PR_SVE_SET_VL:
10371             /*
10372              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10373              * PR_SVE_VL_INHERIT.  Note the kernel definition
10374              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10375              * even though the current architectural maximum is VQ=16.
10376              */
10377             ret = -TARGET_EINVAL;
10378             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10379                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10380                 CPUARMState *env = cpu_env;
10381                 ARMCPU *cpu = env_archcpu(env);
10382                 uint32_t vq, old_vq;
10383 
10384                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10385                 vq = MAX(arg2 / 16, 1);
10386                 vq = MIN(vq, cpu->sve_max_vq);
10387 
10388                 if (vq < old_vq) {
10389                     aarch64_sve_narrow_vq(env, vq);
10390                 }
10391                 env->vfp.zcr_el[1] = vq - 1;
10392                 arm_rebuild_hflags(env);
10393                 ret = vq * 16;
10394             }
10395             return ret;
10396         case TARGET_PR_SVE_GET_VL:
10397             ret = -TARGET_EINVAL;
10398             {
10399                 ARMCPU *cpu = env_archcpu(cpu_env);
10400                 if (cpu_isar_feature(aa64_sve, cpu)) {
10401                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10402                 }
10403             }
10404             return ret;
10405         case TARGET_PR_PAC_RESET_KEYS:
10406             {
10407                 CPUARMState *env = cpu_env;
10408                 ARMCPU *cpu = env_archcpu(env);
10409 
10410                 if (arg3 || arg4 || arg5) {
10411                     return -TARGET_EINVAL;
10412                 }
10413                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10414                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10415                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10416                                TARGET_PR_PAC_APGAKEY);
10417                     int ret = 0;
10418                     Error *err = NULL;
10419 
10420                     if (arg2 == 0) {
10421                         arg2 = all;
10422                     } else if (arg2 & ~all) {
10423                         return -TARGET_EINVAL;
10424                     }
10425                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10426                         ret |= qemu_guest_getrandom(&env->keys.apia,
10427                                                     sizeof(ARMPACKey), &err);
10428                     }
10429                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10430                         ret |= qemu_guest_getrandom(&env->keys.apib,
10431                                                     sizeof(ARMPACKey), &err);
10432                     }
10433                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10434                         ret |= qemu_guest_getrandom(&env->keys.apda,
10435                                                     sizeof(ARMPACKey), &err);
10436                     }
10437                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10438                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10439                                                     sizeof(ARMPACKey), &err);
10440                     }
10441                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10442                         ret |= qemu_guest_getrandom(&env->keys.apga,
10443                                                     sizeof(ARMPACKey), &err);
10444                     }
10445                     if (ret != 0) {
10446                         /*
10447                          * Some unknown failure in the crypto.  The best
10448                          * we can do is log it and fail the syscall.
10449                          * The real syscall cannot fail this way.
10450                          */
10451                         qemu_log_mask(LOG_UNIMP,
10452                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10453                                       error_get_pretty(err));
10454                         error_free(err);
10455                         return -TARGET_EIO;
10456                     }
10457                     return 0;
10458                 }
10459             }
10460             return -TARGET_EINVAL;
10461 #endif /* AARCH64 */
10462         case PR_GET_SECCOMP:
10463         case PR_SET_SECCOMP:
10464             /* Disable seccomp to prevent the target from disabling syscalls
10465              * that we need. */
10466             return -TARGET_EINVAL;
10467         default:
10468             /* Most prctl options have no pointer arguments */
10469             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10470         }
10471         break;
10472 #ifdef TARGET_NR_arch_prctl
10473     case TARGET_NR_arch_prctl:
10474         return do_arch_prctl(cpu_env, arg1, arg2);
10475 #endif
10476 #ifdef TARGET_NR_pread64
10477     case TARGET_NR_pread64:
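        /* ABIs that require 64-bit syscall arguments in aligned register
           pairs insert a padding argument before the offset, so shift the
           remaining arguments down in that case.  */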
10478         if (regpairs_aligned(cpu_env, num)) {
10479             arg4 = arg5;
10480             arg5 = arg6;
10481         }
10482         if (arg2 == 0 && arg3 == 0) {
10483             /* Special-case NULL buffer and zero length, which should succeed */
10484             p = 0;
10485         } else {
10486             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10487             if (!p) {
10488                 return -TARGET_EFAULT;
10489             }
10490         }
10491         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10492         unlock_user(p, arg2, ret);
10493         return ret;
10494     case TARGET_NR_pwrite64:
10495         if (regpairs_aligned(cpu_env, num)) {
10496             arg4 = arg5;
10497             arg5 = arg6;
10498         }
10499         if (arg2 == 0 && arg3 == 0) {
10500             /* Special-case NULL buffer and zero length, which should succeed */
10501             p = 0;
10502         } else {
10503             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10504             if (!p) {
10505                 return -TARGET_EFAULT;
10506             }
10507         }
10508         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10509         unlock_user(p, arg2, 0);
10510         return ret;
10511 #endif
10512     case TARGET_NR_getcwd:
10513         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10514             return -TARGET_EFAULT;
10515         ret = get_errno(sys_getcwd1(p, arg2));
10516         unlock_user(p, arg1, ret);
10517         return ret;
10518     case TARGET_NR_capget:
10519     case TARGET_NR_capset:
10520     {
10521         struct target_user_cap_header *target_header;
10522         struct target_user_cap_data *target_data = NULL;
10523         struct __user_cap_header_struct header;
10524         struct __user_cap_data_struct data[2];
10525         struct __user_cap_data_struct *dataptr = NULL;
10526         int i, target_datalen;
10527         int data_items = 1;
10528 
10529         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10530             return -TARGET_EFAULT;
10531         }
10532         header.version = tswap32(target_header->version);
10533         header.pid = tswap32(target_header->pid);
10534 
10535         if (header.version != _LINUX_CAPABILITY_VERSION) {
10536             /* Version 2 and up takes a pointer to two user_data structs */
10537             data_items = 2;
10538         }
10539 
10540         target_datalen = sizeof(*target_data) * data_items;
10541 
10542         if (arg2) {
10543             if (num == TARGET_NR_capget) {
10544                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10545             } else {
10546                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10547             }
10548             if (!target_data) {
10549                 unlock_user_struct(target_header, arg1, 0);
10550                 return -TARGET_EFAULT;
10551             }
10552 
10553             if (num == TARGET_NR_capset) {
10554                 for (i = 0; i < data_items; i++) {
10555                     data[i].effective = tswap32(target_data[i].effective);
10556                     data[i].permitted = tswap32(target_data[i].permitted);
10557                     data[i].inheritable = tswap32(target_data[i].inheritable);
10558                 }
10559             }
10560 
10561             dataptr = data;
10562         }
10563 
10564         if (num == TARGET_NR_capget) {
10565             ret = get_errno(capget(&header, dataptr));
10566         } else {
10567             ret = get_errno(capset(&header, dataptr));
10568         }
10569 
10570         /* The kernel always updates version for both capget and capset */
10571         target_header->version = tswap32(header.version);
10572         unlock_user_struct(target_header, arg1, 1);
10573 
10574         if (arg2) {
10575             if (num == TARGET_NR_capget) {
10576                 for (i = 0; i < data_items; i++) {
10577                     target_data[i].effective = tswap32(data[i].effective);
10578                     target_data[i].permitted = tswap32(data[i].permitted);
10579                     target_data[i].inheritable = tswap32(data[i].inheritable);
10580                 }
10581                 unlock_user(target_data, arg2, target_datalen);
10582             } else {
10583                 unlock_user(target_data, arg2, 0);
10584             }
10585         }
10586         return ret;
10587     }
10588     case TARGET_NR_sigaltstack:
10589         return do_sigaltstack(arg1, arg2,
10590                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10591 
10592 #ifdef CONFIG_SENDFILE
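          /*
           * sendfile and sendfile64 differ only in the width of the guest
           * offset argument (abi_long vs. explicit 64 bit).  In both cases
           * a zero arg3 is passed through as a NULL offset pointer so the
           * host uses and updates the file offset itself; otherwise the
           * offset is copied in, and copied back after a successful call.
           */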
10593 #ifdef TARGET_NR_sendfile
10594     case TARGET_NR_sendfile:
10595     {
10596         off_t *offp = NULL;
10597         off_t off;
10598         if (arg3) {
10599             ret = get_user_sal(off, arg3);
10600             if (is_error(ret)) {
10601                 return ret;
10602             }
10603             offp = &off;
10604         }
10605         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10606         if (!is_error(ret) && arg3) {
10607             abi_long ret2 = put_user_sal(off, arg3);
10608             if (is_error(ret2)) {
10609                 ret = ret2;
10610             }
10611         }
10612         return ret;
10613     }
10614 #endif
10615 #ifdef TARGET_NR_sendfile64
10616     case TARGET_NR_sendfile64:
10617     {
10618         off_t *offp = NULL;
10619         off_t off;
10620         if (arg3) {
10621             ret = get_user_s64(off, arg3);
10622             if (is_error(ret)) {
10623                 return ret;
10624             }
10625             offp = &off;
10626         }
10627         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10628         if (!is_error(ret) && arg3) {
10629             abi_long ret2 = put_user_s64(off, arg3);
10630             if (is_error(ret2)) {
10631                 ret = ret2;
10632             }
10633         }
10634         return ret;
10635     }
10636 #endif
10637 #endif
10638 #ifdef TARGET_NR_vfork
10639     case TARGET_NR_vfork:
10640         return get_errno(do_fork(cpu_env,
10641                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10642                          0, 0, 0, 0));
10643 #endif
10644 #ifdef TARGET_NR_ugetrlimit
10645     case TARGET_NR_ugetrlimit:
10646     {
10647         struct rlimit rlim;
10648         int resource = target_to_host_resource(arg1);
10649         ret = get_errno(getrlimit(resource, &rlim));
10650         if (!is_error(ret)) {
10651             struct target_rlimit *target_rlim;
10652             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10653                 return -TARGET_EFAULT;
10654             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10655             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10656             unlock_user_struct(target_rlim, arg2, 1);
10657         }
10658         return ret;
10659     }
10660 #endif
10661 #ifdef TARGET_NR_truncate64
10662     case TARGET_NR_truncate64:
10663         if (!(p = lock_user_string(arg1)))
10664             return -TARGET_EFAULT;
10665         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10666         unlock_user(p, arg1, 0);
10667         return ret;
10668 #endif
10669 #ifdef TARGET_NR_ftruncate64
10670     case TARGET_NR_ftruncate64:
10671         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10672 #endif
10673 #ifdef TARGET_NR_stat64
10674     case TARGET_NR_stat64:
10675         if (!(p = lock_user_string(arg1))) {
10676             return -TARGET_EFAULT;
10677         }
10678         ret = get_errno(stat(path(p), &st));
10679         unlock_user(p, arg1, 0);
10680         if (!is_error(ret))
10681             ret = host_to_target_stat64(cpu_env, arg2, &st);
10682         return ret;
10683 #endif
10684 #ifdef TARGET_NR_lstat64
10685     case TARGET_NR_lstat64:
10686         if (!(p = lock_user_string(arg1))) {
10687             return -TARGET_EFAULT;
10688         }
10689         ret = get_errno(lstat(path(p), &st));
10690         unlock_user(p, arg1, 0);
10691         if (!is_error(ret))
10692             ret = host_to_target_stat64(cpu_env, arg2, &st);
10693         return ret;
10694 #endif
10695 #ifdef TARGET_NR_fstat64
10696     case TARGET_NR_fstat64:
10697         ret = get_errno(fstat(arg1, &st));
10698         if (!is_error(ret))
10699             ret = host_to_target_stat64(cpu_env, arg2, &st);
10700         return ret;
10701 #endif
10702 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10703 #ifdef TARGET_NR_fstatat64
10704     case TARGET_NR_fstatat64:
10705 #endif
10706 #ifdef TARGET_NR_newfstatat
10707     case TARGET_NR_newfstatat:
10708 #endif
10709         if (!(p = lock_user_string(arg2))) {
10710             return -TARGET_EFAULT;
10711         }
10712         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10713         unlock_user(p, arg2, 0);
10714         if (!is_error(ret))
10715             ret = host_to_target_stat64(cpu_env, arg3, &st);
10716         return ret;
10717 #endif
10718 #if defined(TARGET_NR_statx)
10719     case TARGET_NR_statx:
10720         {
10721             struct target_statx *target_stx;
10722             int dirfd = arg1;
10723             int flags = arg3;
10724 
10725             p = lock_user_string(arg2);
10726             if (p == NULL) {
10727                 return -TARGET_EFAULT;
10728             }
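                  /*
                   * Prefer the host statx() syscall when it is available;
                   * if the host kernel reports ENOSYS, fall back to
                   * fstatat() and build a target_statx result from the
                   * returned struct stat.
                   */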
10729 #if defined(__NR_statx)
10730             {
10731                 /*
10732                  * It is assumed that struct statx is architecture independent.
10733                  */
10734                 struct target_statx host_stx;
10735                 int mask = arg4;
10736 
10737                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10738                 if (!is_error(ret)) {
10739                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10740                         unlock_user(p, arg2, 0);
10741                         return -TARGET_EFAULT;
10742                     }
10743                 }
10744 
10745                 if (ret != -TARGET_ENOSYS) {
10746                     unlock_user(p, arg2, 0);
10747                     return ret;
10748                 }
10749             }
10750 #endif
10751             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10752             unlock_user(p, arg2, 0);
10753 
10754             if (!is_error(ret)) {
10755                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10756                     return -TARGET_EFAULT;
10757                 }
10758                 memset(target_stx, 0, sizeof(*target_stx));
10759                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10760                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10761                 __put_user(st.st_ino, &target_stx->stx_ino);
10762                 __put_user(st.st_mode, &target_stx->stx_mode);
10763                 __put_user(st.st_uid, &target_stx->stx_uid);
10764                 __put_user(st.st_gid, &target_stx->stx_gid);
10765                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10766                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10767                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10768                 __put_user(st.st_size, &target_stx->stx_size);
10769                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10770                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10771                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10772                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10773                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10774                 unlock_user_struct(target_stx, arg5, 1);
10775             }
10776         }
10777         return ret;
10778 #endif
10779 #ifdef TARGET_NR_lchown
10780     case TARGET_NR_lchown:
10781         if (!(p = lock_user_string(arg1)))
10782             return -TARGET_EFAULT;
10783         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10784         unlock_user(p, arg1, 0);
10785         return ret;
10786 #endif
10787 #ifdef TARGET_NR_getuid
10788     case TARGET_NR_getuid:
10789         return get_errno(high2lowuid(getuid()));
10790 #endif
10791 #ifdef TARGET_NR_getgid
10792     case TARGET_NR_getgid:
10793         return get_errno(high2lowgid(getgid()));
10794 #endif
10795 #ifdef TARGET_NR_geteuid
10796     case TARGET_NR_geteuid:
10797         return get_errno(high2lowuid(geteuid()));
10798 #endif
10799 #ifdef TARGET_NR_getegid
10800     case TARGET_NR_getegid:
10801         return get_errno(high2lowgid(getegid()));
10802 #endif
10803     case TARGET_NR_setreuid:
10804         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10805     case TARGET_NR_setregid:
10806         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10807     case TARGET_NR_getgroups:
10808         {
10809             int gidsetsize = arg1;
10810             target_id *target_grouplist;
10811             gid_t *grouplist;
10812             int i;
10813 
10814             grouplist = alloca(gidsetsize * sizeof(gid_t));
10815             ret = get_errno(getgroups(gidsetsize, grouplist));
10816             if (gidsetsize == 0)
10817                 return ret;
10818             if (!is_error(ret)) {
10819                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10820                 if (!target_grouplist)
10821                     return -TARGET_EFAULT;
10822                 for (i = 0; i < ret; i++)
10823                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10824                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10825             }
10826         }
10827         return ret;
10828     case TARGET_NR_setgroups:
10829         {
10830             int gidsetsize = arg1;
10831             target_id *target_grouplist;
10832             gid_t *grouplist = NULL;
10833             int i;
10834             if (gidsetsize) {
10835                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10836                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10837                 if (!target_grouplist) {
10838                     return -TARGET_EFAULT;
10839                 }
10840                 for (i = 0; i < gidsetsize; i++) {
10841                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10842                 }
10843                 unlock_user(target_grouplist, arg2, 0);
10844             }
10845             return get_errno(setgroups(gidsetsize, grouplist));
10846         }
10847     case TARGET_NR_fchown:
10848         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10849 #if defined(TARGET_NR_fchownat)
10850     case TARGET_NR_fchownat:
10851         if (!(p = lock_user_string(arg2)))
10852             return -TARGET_EFAULT;
10853         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10854                                  low2highgid(arg4), arg5));
10855         unlock_user(p, arg2, 0);
10856         return ret;
10857 #endif
10858 #ifdef TARGET_NR_setresuid
10859     case TARGET_NR_setresuid:
10860         return get_errno(sys_setresuid(low2highuid(arg1),
10861                                        low2highuid(arg2),
10862                                        low2highuid(arg3)));
10863 #endif
10864 #ifdef TARGET_NR_getresuid
10865     case TARGET_NR_getresuid:
10866         {
10867             uid_t ruid, euid, suid;
10868             ret = get_errno(getresuid(&ruid, &euid, &suid));
10869             if (!is_error(ret)) {
10870                 if (put_user_id(high2lowuid(ruid), arg1)
10871                     || put_user_id(high2lowuid(euid), arg2)
10872                     || put_user_id(high2lowuid(suid), arg3))
10873                     return -TARGET_EFAULT;
10874             }
10875         }
10876         return ret;
10877 #endif
10878 #ifdef TARGET_NR_getresgid
10879     case TARGET_NR_setresgid:
10880         return get_errno(sys_setresgid(low2highgid(arg1),
10881                                        low2highgid(arg2),
10882                                        low2highgid(arg3)));
10883 #endif
10884 #ifdef TARGET_NR_getresgid
10885     case TARGET_NR_getresgid:
10886         {
10887             gid_t rgid, egid, sgid;
10888             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10889             if (!is_error(ret)) {
10890                 if (put_user_id(high2lowgid(rgid), arg1)
10891                     || put_user_id(high2lowgid(egid), arg2)
10892                     || put_user_id(high2lowgid(sgid), arg3))
10893                     return -TARGET_EFAULT;
10894             }
10895         }
10896         return ret;
10897 #endif
10898 #ifdef TARGET_NR_chown
10899     case TARGET_NR_chown:
10900         if (!(p = lock_user_string(arg1)))
10901             return -TARGET_EFAULT;
10902         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10903         unlock_user(p, arg1, 0);
10904         return ret;
10905 #endif
10906     case TARGET_NR_setuid:
10907         return get_errno(sys_setuid(low2highuid(arg1)));
10908     case TARGET_NR_setgid:
10909         return get_errno(sys_setgid(low2highgid(arg1)));
10910     case TARGET_NR_setfsuid:
10911         return get_errno(setfsuid(arg1));
10912     case TARGET_NR_setfsgid:
10913         return get_errno(setfsgid(arg1));
10914 
10915 #ifdef TARGET_NR_lchown32
10916     case TARGET_NR_lchown32:
10917         if (!(p = lock_user_string(arg1)))
10918             return -TARGET_EFAULT;
10919         ret = get_errno(lchown(p, arg2, arg3));
10920         unlock_user(p, arg1, 0);
10921         return ret;
10922 #endif
10923 #ifdef TARGET_NR_getuid32
10924     case TARGET_NR_getuid32:
10925         return get_errno(getuid());
10926 #endif
10927 
10928 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10929    /* Alpha specific */
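         /*
          * getxuid/getxgid return the real id as the syscall result and
          * pass the effective id back in register a4 (IR_A4), following
          * the OSF/1 convention of returning two values.
          */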
10930     case TARGET_NR_getxuid:
10931         {
10932             uid_t euid;
10933             euid = geteuid();
10934             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10935         }
10936         return get_errno(getuid());
10937 #endif
10938 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10939    /* Alpha specific */
10940     case TARGET_NR_getxgid:
10941         {
10942             gid_t egid;
10943             egid = getegid();
10944             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10945         }
10946         return get_errno(getgid());
10947 #endif
10948 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10949     /* Alpha specific */
10950     case TARGET_NR_osf_getsysinfo:
10951         ret = -TARGET_EOPNOTSUPP;
10952         switch (arg1) {
10953           case TARGET_GSI_IEEE_FP_CONTROL:
10954             {
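                      /*
                       * Rebuild the software completion control word:
                       * keep the stored trap-enable/map bits and merge in
                       * the live exception status bits from the FPCR.
                       */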
10955                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10956                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10957 
10958                 swcr &= ~SWCR_STATUS_MASK;
10959                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10960 
10961                 if (put_user_u64(swcr, arg2))
10962                     return -TARGET_EFAULT;
10963                 ret = 0;
10964             }
10965             break;
10966 
10967           /* case GSI_IEEE_STATE_AT_SIGNAL:
10968              -- Not implemented in linux kernel.
10969              case GSI_UACPROC:
10970              -- Retrieves current unaligned access state; not much used.
10971              case GSI_PROC_TYPE:
10972              -- Retrieves implver information; surely not used.
10973              case GSI_GET_HWRPB:
10974              -- Grabs a copy of the HWRPB; surely not used.
10975           */
10976         }
10977         return ret;
10978 #endif
10979 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10980     /* Alpha specific */
10981     case TARGET_NR_osf_setsysinfo:
10982         ret = -TARGET_EOPNOTSUPP;
10983         switch (arg1) {
10984           case TARGET_SSI_IEEE_FP_CONTROL:
10985             {
10986                 uint64_t swcr, fpcr;
10987 
10988                 if (get_user_u64 (swcr, arg2)) {
10989                     return -TARGET_EFAULT;
10990                 }
10991 
10992                 /*
10993                  * The kernel calls swcr_update_status to update the
10994                  * status bits from the fpcr at every point that it
10995                  * could be queried.  Therefore, we store the status
10996                  * bits only in FPCR.
10997                  */
10998                 ((CPUAlphaState *)cpu_env)->swcr
10999                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11000 
11001                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11002                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11003                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11004                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11005                 ret = 0;
11006             }
11007             break;
11008 
11009           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11010             {
11011                 uint64_t exc, fpcr, fex;
11012 
11013                 if (get_user_u64(exc, arg2)) {
11014                     return -TARGET_EFAULT;
11015                 }
11016                 exc &= SWCR_STATUS_MASK;
11017                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11018 
11019                 /* Old exceptions are not signaled.  */
11020                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11021                 fex = exc & ~fex;
11022                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11023                 fex &= ((CPUArchState *)cpu_env)->swcr;
11024 
11025                 /* Update the hardware fpcr.  */
11026                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11027                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11028 
11029                 if (fex) {
11030                     int si_code = TARGET_FPE_FLTUNK;
11031                     target_siginfo_t info;
11032 
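                          /*
                           * Pick an si_code for the newly raised
                           * exceptions; when several bits are set the
                           * later tests win, so invalid operation ends up
                           * with the highest priority.
                           */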
11033                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11034                         si_code = TARGET_FPE_FLTUND;
11035                     }
11036                     if (fex & SWCR_TRAP_ENABLE_INE) {
11037                         si_code = TARGET_FPE_FLTRES;
11038                     }
11039                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11040                         si_code = TARGET_FPE_FLTUND;
11041                     }
11042                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11043                         si_code = TARGET_FPE_FLTOVF;
11044                     }
11045                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11046                         si_code = TARGET_FPE_FLTDIV;
11047                     }
11048                     if (fex & SWCR_TRAP_ENABLE_INV) {
11049                         si_code = TARGET_FPE_FLTINV;
11050                     }
11051 
11052                     info.si_signo = SIGFPE;
11053                     info.si_errno = 0;
11054                     info.si_code = si_code;
11055                     info._sifields._sigfault._addr
11056                         = ((CPUArchState *)cpu_env)->pc;
11057                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11058                                  QEMU_SI_FAULT, &info);
11059                 }
11060                 ret = 0;
11061             }
11062             break;
11063 
11064           /* case SSI_NVPAIRS:
11065              -- Used with SSIN_UACPROC to enable unaligned accesses.
11066              case SSI_IEEE_STATE_AT_SIGNAL:
11067              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11068              -- Not implemented in linux kernel
11069           */
11070         }
11071         return ret;
11072 #endif
11073 #ifdef TARGET_NR_osf_sigprocmask
11074     /* Alpha specific.  */
11075     case TARGET_NR_osf_sigprocmask:
11076         {
11077             abi_ulong mask;
11078             int how;
11079             sigset_t set, oldset;
11080 
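                  /*
                   * Unlike rt_sigprocmask, this OSF variant takes the new
                   * mask by value in arg2 and hands the previous mask back
                   * as the syscall return value, so no guest memory is
                   * dereferenced here.
                   */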
11081             switch(arg1) {
11082             case TARGET_SIG_BLOCK:
11083                 how = SIG_BLOCK;
11084                 break;
11085             case TARGET_SIG_UNBLOCK:
11086                 how = SIG_UNBLOCK;
11087                 break;
11088             case TARGET_SIG_SETMASK:
11089                 how = SIG_SETMASK;
11090                 break;
11091             default:
11092                 return -TARGET_EINVAL;
11093             }
11094             mask = arg2;
11095             target_to_host_old_sigset(&set, &mask);
11096             ret = do_sigprocmask(how, &set, &oldset);
11097             if (!ret) {
11098                 host_to_target_old_sigset(&mask, &oldset);
11099                 ret = mask;
11100             }
11101         }
11102         return ret;
11103 #endif
11104 
11105 #ifdef TARGET_NR_getgid32
11106     case TARGET_NR_getgid32:
11107         return get_errno(getgid());
11108 #endif
11109 #ifdef TARGET_NR_geteuid32
11110     case TARGET_NR_geteuid32:
11111         return get_errno(geteuid());
11112 #endif
11113 #ifdef TARGET_NR_getegid32
11114     case TARGET_NR_getegid32:
11115         return get_errno(getegid());
11116 #endif
11117 #ifdef TARGET_NR_setreuid32
11118     case TARGET_NR_setreuid32:
11119         return get_errno(setreuid(arg1, arg2));
11120 #endif
11121 #ifdef TARGET_NR_setregid32
11122     case TARGET_NR_setregid32:
11123         return get_errno(setregid(arg1, arg2));
11124 #endif
11125 #ifdef TARGET_NR_getgroups32
11126     case TARGET_NR_getgroups32:
11127         {
11128             int gidsetsize = arg1;
11129             uint32_t *target_grouplist;
11130             gid_t *grouplist;
11131             int i;
11132 
11133             grouplist = alloca(gidsetsize * sizeof(gid_t));
11134             ret = get_errno(getgroups(gidsetsize, grouplist));
11135             if (gidsetsize == 0)
11136                 return ret;
11137             if (!is_error(ret)) {
11138                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11139                 if (!target_grouplist) {
11140                     return -TARGET_EFAULT;
11141                 }
11142                 for (i = 0; i < ret; i++)
11143                     target_grouplist[i] = tswap32(grouplist[i]);
11144                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11145             }
11146         }
11147         return ret;
11148 #endif
11149 #ifdef TARGET_NR_setgroups32
11150     case TARGET_NR_setgroups32:
11151         {
11152             int gidsetsize = arg1;
11153             uint32_t *target_grouplist;
11154             gid_t *grouplist;
11155             int i;
11156 
11157             grouplist = alloca(gidsetsize * sizeof(gid_t));
11158             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11159             if (!target_grouplist) {
11160                 return -TARGET_EFAULT;
11161             }
11162             for (i = 0; i < gidsetsize; i++)
11163                 grouplist[i] = tswap32(target_grouplist[i]);
11164             unlock_user(target_grouplist, arg2, 0);
11165             return get_errno(setgroups(gidsetsize, grouplist));
11166         }
11167 #endif
11168 #ifdef TARGET_NR_fchown32
11169     case TARGET_NR_fchown32:
11170         return get_errno(fchown(arg1, arg2, arg3));
11171 #endif
11172 #ifdef TARGET_NR_setresuid32
11173     case TARGET_NR_setresuid32:
11174         return get_errno(sys_setresuid(arg1, arg2, arg3));
11175 #endif
11176 #ifdef TARGET_NR_getresuid32
11177     case TARGET_NR_getresuid32:
11178         {
11179             uid_t ruid, euid, suid;
11180             ret = get_errno(getresuid(&ruid, &euid, &suid));
11181             if (!is_error(ret)) {
11182                 if (put_user_u32(ruid, arg1)
11183                     || put_user_u32(euid, arg2)
11184                     || put_user_u32(suid, arg3))
11185                     return -TARGET_EFAULT;
11186             }
11187         }
11188         return ret;
11189 #endif
11190 #ifdef TARGET_NR_setresgid32
11191     case TARGET_NR_setresgid32:
11192         return get_errno(sys_setresgid(arg1, arg2, arg3));
11193 #endif
11194 #ifdef TARGET_NR_getresgid32
11195     case TARGET_NR_getresgid32:
11196         {
11197             gid_t rgid, egid, sgid;
11198             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11199             if (!is_error(ret)) {
11200                 if (put_user_u32(rgid, arg1)
11201                     || put_user_u32(egid, arg2)
11202                     || put_user_u32(sgid, arg3))
11203                     return -TARGET_EFAULT;
11204             }
11205         }
11206         return ret;
11207 #endif
11208 #ifdef TARGET_NR_chown32
11209     case TARGET_NR_chown32:
11210         if (!(p = lock_user_string(arg1)))
11211             return -TARGET_EFAULT;
11212         ret = get_errno(chown(p, arg2, arg3));
11213         unlock_user(p, arg1, 0);
11214         return ret;
11215 #endif
11216 #ifdef TARGET_NR_setuid32
11217     case TARGET_NR_setuid32:
11218         return get_errno(sys_setuid(arg1));
11219 #endif
11220 #ifdef TARGET_NR_setgid32
11221     case TARGET_NR_setgid32:
11222         return get_errno(sys_setgid(arg1));
11223 #endif
11224 #ifdef TARGET_NR_setfsuid32
11225     case TARGET_NR_setfsuid32:
11226         return get_errno(setfsuid(arg1));
11227 #endif
11228 #ifdef TARGET_NR_setfsgid32
11229     case TARGET_NR_setfsgid32:
11230         return get_errno(setfsgid(arg1));
11231 #endif
11232 #ifdef TARGET_NR_mincore
11233     case TARGET_NR_mincore:
11234         {
11235             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11236             if (!a) {
11237                 return -TARGET_ENOMEM;
11238             }
11239             p = lock_user_string(arg3);
11240             if (!p) {
11241                 ret = -TARGET_EFAULT;
11242             } else {
11243                 ret = get_errno(mincore(a, arg2, p));
11244                 unlock_user(p, arg3, ret);
11245             }
11246             unlock_user(a, arg1, 0);
11247         }
11248         return ret;
11249 #endif
11250 #ifdef TARGET_NR_arm_fadvise64_64
11251     case TARGET_NR_arm_fadvise64_64:
11252         /* arm_fadvise64_64 looks like fadvise64_64 but
11253          * with different argument order: fd, advice, offset, len
11254          * rather than the usual fd, offset, len, advice.
11255          * Note that offset and len are both 64-bit so appear as
11256          * pairs of 32-bit registers.
11257          */
11258         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11259                             target_offset64(arg5, arg6), arg2);
11260         return -host_to_target_errno(ret);
11261 #endif
11262 
11263 #if TARGET_ABI_BITS == 32
11264 
11265 #ifdef TARGET_NR_fadvise64_64
11266     case TARGET_NR_fadvise64_64:
11267 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11268         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11269         ret = arg2;
11270         arg2 = arg3;
11271         arg3 = arg4;
11272         arg4 = arg5;
11273         arg5 = arg6;
11274         arg6 = ret;
11275 #else
11276         /* 6 args: fd, offset (high, low), len (high, low), advice */
11277         if (regpairs_aligned(cpu_env, num)) {
11278             /* offset is in (3,4), len in (5,6) and advice in 7 */
11279             arg2 = arg3;
11280             arg3 = arg4;
11281             arg4 = arg5;
11282             arg5 = arg6;
11283             arg6 = arg7;
11284         }
11285 #endif
11286         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11287                             target_offset64(arg4, arg5), arg6);
11288         return -host_to_target_errno(ret);
11289 #endif
11290 
11291 #ifdef TARGET_NR_fadvise64
11292     case TARGET_NR_fadvise64:
11293         /* 5 args: fd, offset (high, low), len, advice */
11294         if (regpairs_aligned(cpu_env, num)) {
11295             /* offset is in (3,4), len in 5 and advice in 6 */
11296             arg2 = arg3;
11297             arg3 = arg4;
11298             arg4 = arg5;
11299             arg5 = arg6;
11300         }
11301         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11302         return -host_to_target_errno(ret);
11303 #endif
11304 
11305 #else /* not a 32-bit ABI */
11306 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11307 #ifdef TARGET_NR_fadvise64_64
11308     case TARGET_NR_fadvise64_64:
11309 #endif
11310 #ifdef TARGET_NR_fadvise64
11311     case TARGET_NR_fadvise64:
11312 #endif
11313 #ifdef TARGET_S390X
11314         switch (arg4) {
11315         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11316         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11317         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11318         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11319         default: break;
11320         }
11321 #endif
11322         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11323 #endif
11324 #endif /* end of 64-bit ABI fadvise handling */
11325 
11326 #ifdef TARGET_NR_madvise
11327     case TARGET_NR_madvise:
11328         /* A straight passthrough may not be safe because qemu sometimes
11329            turns private file-backed mappings into anonymous mappings.
11330            This will break MADV_DONTNEED.
11331            This is a hint, so ignoring and returning success is ok.  */
11332         return 0;
11333 #endif
11334 #if TARGET_ABI_BITS == 32
11335     case TARGET_NR_fcntl64:
11336     {
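              /*
               * On 32-bit ABIs the F_*LK64 commands operate on struct
               * flock64, which must be converted between target and host
               * layout.  Old-ABI ARM lays out flock64 differently, hence
               * the alternative copy helpers selected below.
               */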
11337         int cmd;
11338         struct flock64 fl;
11339         from_flock64_fn *copyfrom = copy_from_user_flock64;
11340         to_flock64_fn *copyto = copy_to_user_flock64;
11341 
11342 #ifdef TARGET_ARM
11343         if (!((CPUARMState *)cpu_env)->eabi) {
11344             copyfrom = copy_from_user_oabi_flock64;
11345             copyto = copy_to_user_oabi_flock64;
11346         }
11347 #endif
11348 
11349         cmd = target_to_host_fcntl_cmd(arg2);
11350         if (cmd == -TARGET_EINVAL) {
11351             return cmd;
11352         }
11353 
11354         switch(arg2) {
11355         case TARGET_F_GETLK64:
11356             ret = copyfrom(&fl, arg3);
11357             if (ret) {
11358                 break;
11359             }
11360             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11361             if (ret == 0) {
11362                 ret = copyto(arg3, &fl);
11363             }
11364             break;
11365 
11366         case TARGET_F_SETLK64:
11367         case TARGET_F_SETLKW64:
11368             ret = copyfrom(&fl, arg3);
11369             if (ret) {
11370                 break;
11371             }
11372             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11373             break;
11374         default:
11375             ret = do_fcntl(arg1, arg2, arg3);
11376             break;
11377         }
11378         return ret;
11379     }
11380 #endif
11381 #ifdef TARGET_NR_cacheflush
11382     case TARGET_NR_cacheflush:
11383         /* self-modifying code is handled automatically, so nothing needed */
11384         return 0;
11385 #endif
11386 #ifdef TARGET_NR_getpagesize
11387     case TARGET_NR_getpagesize:
11388         return TARGET_PAGE_SIZE;
11389 #endif
11390     case TARGET_NR_gettid:
11391         return get_errno(sys_gettid());
11392 #ifdef TARGET_NR_readahead
11393     case TARGET_NR_readahead:
11394 #if TARGET_ABI_BITS == 32
11395         if (regpairs_aligned(cpu_env, num)) {
11396             arg2 = arg3;
11397             arg3 = arg4;
11398             arg4 = arg5;
11399         }
11400         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11401 #else
11402         ret = get_errno(readahead(arg1, arg2, arg3));
11403 #endif
11404         return ret;
11405 #endif
11406 #ifdef CONFIG_ATTR
11407 #ifdef TARGET_NR_setxattr
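          /*
           * The extended attribute cases below share one pattern: path and
           * attribute names are locked as guest strings, while the
           * value/list buffer is optional -- a zero guest address is
           * passed through as NULL (e.g. so the guest can probe the
           * required buffer size).
           */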
11408     case TARGET_NR_listxattr:
11409     case TARGET_NR_llistxattr:
11410     {
11411         void *p, *b = 0;
11412         if (arg2) {
11413             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11414             if (!b) {
11415                 return -TARGET_EFAULT;
11416             }
11417         }
11418         p = lock_user_string(arg1);
11419         if (p) {
11420             if (num == TARGET_NR_listxattr) {
11421                 ret = get_errno(listxattr(p, b, arg3));
11422             } else {
11423                 ret = get_errno(llistxattr(p, b, arg3));
11424             }
11425         } else {
11426             ret = -TARGET_EFAULT;
11427         }
11428         unlock_user(p, arg1, 0);
11429         unlock_user(b, arg2, arg3);
11430         return ret;
11431     }
11432     case TARGET_NR_flistxattr:
11433     {
11434         void *b = 0;
11435         if (arg2) {
11436             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11437             if (!b) {
11438                 return -TARGET_EFAULT;
11439             }
11440         }
11441         ret = get_errno(flistxattr(arg1, b, arg3));
11442         unlock_user(b, arg2, arg3);
11443         return ret;
11444     }
11445     case TARGET_NR_setxattr:
11446     case TARGET_NR_lsetxattr:
11447         {
11448             void *p, *n, *v = 0;
11449             if (arg3) {
11450                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11451                 if (!v) {
11452                     return -TARGET_EFAULT;
11453                 }
11454             }
11455             p = lock_user_string(arg1);
11456             n = lock_user_string(arg2);
11457             if (p && n) {
11458                 if (num == TARGET_NR_setxattr) {
11459                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11460                 } else {
11461                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11462                 }
11463             } else {
11464                 ret = -TARGET_EFAULT;
11465             }
11466             unlock_user(p, arg1, 0);
11467             unlock_user(n, arg2, 0);
11468             unlock_user(v, arg3, 0);
11469         }
11470         return ret;
11471     case TARGET_NR_fsetxattr:
11472         {
11473             void *n, *v = 0;
11474             if (arg3) {
11475                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11476                 if (!v) {
11477                     return -TARGET_EFAULT;
11478                 }
11479             }
11480             n = lock_user_string(arg2);
11481             if (n) {
11482                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11483             } else {
11484                 ret = -TARGET_EFAULT;
11485             }
11486             unlock_user(n, arg2, 0);
11487             unlock_user(v, arg3, 0);
11488         }
11489         return ret;
11490     case TARGET_NR_getxattr:
11491     case TARGET_NR_lgetxattr:
11492         {
11493             void *p, *n, *v = 0;
11494             if (arg3) {
11495                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11496                 if (!v) {
11497                     return -TARGET_EFAULT;
11498                 }
11499             }
11500             p = lock_user_string(arg1);
11501             n = lock_user_string(arg2);
11502             if (p && n) {
11503                 if (num == TARGET_NR_getxattr) {
11504                     ret = get_errno(getxattr(p, n, v, arg4));
11505                 } else {
11506                     ret = get_errno(lgetxattr(p, n, v, arg4));
11507                 }
11508             } else {
11509                 ret = -TARGET_EFAULT;
11510             }
11511             unlock_user(p, arg1, 0);
11512             unlock_user(n, arg2, 0);
11513             unlock_user(v, arg3, arg4);
11514         }
11515         return ret;
11516     case TARGET_NR_fgetxattr:
11517         {
11518             void *n, *v = 0;
11519             if (arg3) {
11520                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11521                 if (!v) {
11522                     return -TARGET_EFAULT;
11523                 }
11524             }
11525             n = lock_user_string(arg2);
11526             if (n) {
11527                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11528             } else {
11529                 ret = -TARGET_EFAULT;
11530             }
11531             unlock_user(n, arg2, 0);
11532             unlock_user(v, arg3, arg4);
11533         }
11534         return ret;
11535     case TARGET_NR_removexattr:
11536     case TARGET_NR_lremovexattr:
11537         {
11538             void *p, *n;
11539             p = lock_user_string(arg1);
11540             n = lock_user_string(arg2);
11541             if (p && n) {
11542                 if (num == TARGET_NR_removexattr) {
11543                     ret = get_errno(removexattr(p, n));
11544                 } else {
11545                     ret = get_errno(lremovexattr(p, n));
11546                 }
11547             } else {
11548                 ret = -TARGET_EFAULT;
11549             }
11550             unlock_user(p, arg1, 0);
11551             unlock_user(n, arg2, 0);
11552         }
11553         return ret;
11554     case TARGET_NR_fremovexattr:
11555         {
11556             void *n;
11557             n = lock_user_string(arg2);
11558             if (n) {
11559                 ret = get_errno(fremovexattr(arg1, n));
11560             } else {
11561                 ret = -TARGET_EFAULT;
11562             }
11563             unlock_user(n, arg2, 0);
11564         }
11565         return ret;
11566 #endif
11567 #endif /* CONFIG_ATTR */
11568 #ifdef TARGET_NR_set_thread_area
11569     case TARGET_NR_set_thread_area:
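              /*
               * There is no generic implementation: each target stores the
               * thread pointer in its own per-CPU (or task) state, and any
               * other target gets ENOSYS.
               */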
11570 #if defined(TARGET_MIPS)
11571       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11572       return 0;
11573 #elif defined(TARGET_CRIS)
11574       if (arg1 & 0xff)
11575           ret = -TARGET_EINVAL;
11576       else {
11577           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11578           ret = 0;
11579       }
11580       return ret;
11581 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11582       return do_set_thread_area(cpu_env, arg1);
11583 #elif defined(TARGET_M68K)
11584       {
11585           TaskState *ts = cpu->opaque;
11586           ts->tp_value = arg1;
11587           return 0;
11588       }
11589 #else
11590       return -TARGET_ENOSYS;
11591 #endif
11592 #endif
11593 #ifdef TARGET_NR_get_thread_area
11594     case TARGET_NR_get_thread_area:
11595 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11596         return do_get_thread_area(cpu_env, arg1);
11597 #elif defined(TARGET_M68K)
11598         {
11599             TaskState *ts = cpu->opaque;
11600             return ts->tp_value;
11601         }
11602 #else
11603         return -TARGET_ENOSYS;
11604 #endif
11605 #endif
11606 #ifdef TARGET_NR_getdomainname
11607     case TARGET_NR_getdomainname:
11608         return -TARGET_ENOSYS;
11609 #endif
11610 
11611 #ifdef TARGET_NR_clock_settime
11612     case TARGET_NR_clock_settime:
11613     {
11614         struct timespec ts;
11615 
11616         ret = target_to_host_timespec(&ts, arg2);
11617         if (!is_error(ret)) {
11618             ret = get_errno(clock_settime(arg1, &ts));
11619         }
11620         return ret;
11621     }
11622 #endif
11623 #ifdef TARGET_NR_clock_settime64
11624     case TARGET_NR_clock_settime64:
11625     {
11626         struct timespec ts;
11627 
11628         ret = target_to_host_timespec64(&ts, arg2);
11629         if (!is_error(ret)) {
11630             ret = get_errno(clock_settime(arg1, &ts));
11631         }
11632         return ret;
11633     }
11634 #endif
11635 #ifdef TARGET_NR_clock_gettime
11636     case TARGET_NR_clock_gettime:
11637     {
11638         struct timespec ts;
11639         ret = get_errno(clock_gettime(arg1, &ts));
11640         if (!is_error(ret)) {
11641             ret = host_to_target_timespec(arg2, &ts);
11642         }
11643         return ret;
11644     }
11645 #endif
11646 #ifdef TARGET_NR_clock_gettime64
11647     case TARGET_NR_clock_gettime64:
11648     {
11649         struct timespec ts;
11650         ret = get_errno(clock_gettime(arg1, &ts));
11651         if (!is_error(ret)) {
11652             ret = host_to_target_timespec64(arg2, &ts);
11653         }
11654         return ret;
11655     }
11656 #endif
11657 #ifdef TARGET_NR_clock_getres
11658     case TARGET_NR_clock_getres:
11659     {
11660         struct timespec ts;
11661         ret = get_errno(clock_getres(arg1, &ts));
11662         if (!is_error(ret)) {
11663             host_to_target_timespec(arg2, &ts);
11664         }
11665         return ret;
11666     }
11667 #endif
11668 #ifdef TARGET_NR_clock_nanosleep
11669     case TARGET_NR_clock_nanosleep:
11670     {
11671         struct timespec ts;
11672         target_to_host_timespec(&ts, arg3);
11673         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11674                                              &ts, arg4 ? &ts : NULL));
11675         if (arg4)
11676             host_to_target_timespec(arg4, &ts);
11677 
11678 #if defined(TARGET_PPC)
11679         /* clock_nanosleep is odd in that it returns positive errno values.
11680          * On PPC, CR0 bit 3 should be set in such a situation. */
11681         if (ret && ret != -TARGET_ERESTARTSYS) {
11682             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11683         }
11684 #endif
11685         return ret;
11686     }
11687 #endif
11688 
11689 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11690     case TARGET_NR_set_tid_address:
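              /*
               * The kernel only records this address and writes to it when
               * the thread exits, so handing it the host view of the guest
               * address via g2h() is sufficient.
               */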
11691         return get_errno(set_tid_address((int *)g2h(arg1)));
11692 #endif
11693 
11694     case TARGET_NR_tkill:
11695         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11696 
11697     case TARGET_NR_tgkill:
11698         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11699                          target_to_host_signal(arg3)));
11700 
11701 #ifdef TARGET_NR_set_robust_list
11702     case TARGET_NR_set_robust_list:
11703     case TARGET_NR_get_robust_list:
11704         /* The ABI for supporting robust futexes has userspace pass
11705          * the kernel a pointer to a linked list which is updated by
11706          * userspace after the syscall; the list is walked by the kernel
11707          * when the thread exits. Since the linked list in QEMU guest
11708          * memory isn't a valid linked list for the host and we have
11709          * no way to reliably intercept the thread-death event, we can't
11710          * support these. Silently return ENOSYS so that guest userspace
11711          * falls back to a non-robust futex implementation (which should
11712          * be OK except in the corner case of the guest crashing while
11713          * holding a mutex that is shared with another process via
11714          * shared memory).
11715          */
11716         return -TARGET_ENOSYS;
11717 #endif
11718 
11719 #if defined(TARGET_NR_utimensat)
11720     case TARGET_NR_utimensat:
11721         {
11722             struct timespec *tsp, ts[2];
11723             if (!arg3) {
11724                 tsp = NULL;
11725             } else {
11726                 target_to_host_timespec(ts, arg3);
11727                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11728                 tsp = ts;
11729             }
11730             if (!arg2)
11731                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11732             else {
11733                 if (!(p = lock_user_string(arg2))) {
11734                     return -TARGET_EFAULT;
11735                 }
11736                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11737                 unlock_user(p, arg2, 0);
11738             }
11739         }
11740         return ret;
11741 #endif
11742 #ifdef TARGET_NR_futex
11743     case TARGET_NR_futex:
11744         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11745 #endif
11746 #ifdef TARGET_NR_futex_time64
11747     case TARGET_NR_futex_time64:
11748         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11749 #endif
11750 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11751     case TARGET_NR_inotify_init:
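              /*
               * Register an fd translator so that inotify events read back
               * by the guest are converted to the target's layout.
               */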
11752         ret = get_errno(sys_inotify_init());
11753         if (ret >= 0) {
11754             fd_trans_register(ret, &target_inotify_trans);
11755         }
11756         return ret;
11757 #endif
11758 #ifdef CONFIG_INOTIFY1
11759 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11760     case TARGET_NR_inotify_init1:
11761         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11762                                           fcntl_flags_tbl)));
11763         if (ret >= 0) {
11764             fd_trans_register(ret, &target_inotify_trans);
11765         }
11766         return ret;
11767 #endif
11768 #endif
11769 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11770     case TARGET_NR_inotify_add_watch:
11771         p = lock_user_string(arg2);
11772         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11773         unlock_user(p, arg2, 0);
11774         return ret;
11775 #endif
11776 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11777     case TARGET_NR_inotify_rm_watch:
11778         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11779 #endif
11780 
11781 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11782     case TARGET_NR_mq_open:
11783         {
11784             struct mq_attr posix_mq_attr;
11785             struct mq_attr *pposix_mq_attr;
11786             int host_flags;
11787 
11788             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11789             pposix_mq_attr = NULL;
11790             if (arg4) {
11791                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11792                     return -TARGET_EFAULT;
11793                 }
11794                 pposix_mq_attr = &posix_mq_attr;
11795             }
11796             p = lock_user_string(arg1 - 1);
11797             if (!p) {
11798                 return -TARGET_EFAULT;
11799             }
11800             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11801             unlock_user (p, arg1, 0);
11802         }
11803         return ret;
11804 
11805     case TARGET_NR_mq_unlink:
11806         p = lock_user_string(arg1 - 1);
11807         if (!p) {
11808             return -TARGET_EFAULT;
11809         }
11810         ret = get_errno(mq_unlink(p));
11811         unlock_user (p, arg1, 0);
11812         return ret;
11813 
11814 #ifdef TARGET_NR_mq_timedsend
11815     case TARGET_NR_mq_timedsend:
11816         {
11817             struct timespec ts;
11818 
11819             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11820             if (arg5 != 0) {
11821                 target_to_host_timespec(&ts, arg5);
11822                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11823                 host_to_target_timespec(arg5, &ts);
11824             } else {
11825                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11826             }
11827             unlock_user (p, arg2, arg3);
11828         }
11829         return ret;
11830 #endif
11831 
11832 #ifdef TARGET_NR_mq_timedreceive
11833     case TARGET_NR_mq_timedreceive:
11834         {
11835             struct timespec ts;
11836             unsigned int prio;
11837 
11838             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
11839             if (arg5 != 0) {
11840                 target_to_host_timespec(&ts, arg5);
11841                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11842                                                      &prio, &ts));
11843                 host_to_target_timespec(arg5, &ts);
11844             } else {
11845                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11846                                                      &prio, NULL));
11847             }
11848             unlock_user (p, arg2, arg3);
11849             if (arg4 != 0)
11850                 put_user_u32(prio, arg4);
11851         }
11852         return ret;
11853 #endif
11854 
11855     /* Not implemented for now... */
11856 /*     case TARGET_NR_mq_notify: */
11857 /*         break; */
11858 
11859     case TARGET_NR_mq_getsetattr:
11860         {
11861             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11862             ret = 0;
11863             if (arg2 != 0) {
11864                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11865                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11866                                            &posix_mq_attr_out));
11867             } else if (arg3 != 0) {
11868                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11869             }
11870             if (ret == 0 && arg3 != 0) {
11871                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11872             }
11873         }
11874         return ret;
11875 #endif
11876 
11877 #ifdef CONFIG_SPLICE
11878 #ifdef TARGET_NR_tee
11879     case TARGET_NR_tee:
11880         {
11881             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11882         }
11883         return ret;
11884 #endif
11885 #ifdef TARGET_NR_splice
11886     case TARGET_NR_splice:
11887         {
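                  /*
                   * splice() may update both offsets, so copy them in from
                   * guest memory before the call and write the possibly
                   * advanced values back afterwards.
                   */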
11888             loff_t loff_in, loff_out;
11889             loff_t *ploff_in = NULL, *ploff_out = NULL;
11890             if (arg2) {
11891                 if (get_user_u64(loff_in, arg2)) {
11892                     return -TARGET_EFAULT;
11893                 }
11894                 ploff_in = &loff_in;
11895             }
11896             if (arg4) {
11897                 if (get_user_u64(loff_out, arg4)) {
11898                     return -TARGET_EFAULT;
11899                 }
11900                 ploff_out = &loff_out;
11901             }
11902             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11903             if (arg2) {
11904                 if (put_user_u64(loff_in, arg2)) {
11905                     return -TARGET_EFAULT;
11906                 }
11907             }
11908             if (arg4) {
11909                 if (put_user_u64(loff_out, arg4)) {
11910                     return -TARGET_EFAULT;
11911                 }
11912             }
11913         }
11914         return ret;
11915 #endif
11916 #ifdef TARGET_NR_vmsplice
11917     case TARGET_NR_vmsplice:
11918         {
11919             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11920             if (vec != NULL) {
11921                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11922                 unlock_iovec(vec, arg2, arg3, 0);
11923             } else {
11924                 ret = -host_to_target_errno(errno);
11925             }
11926         }
11927         return ret;
11928 #endif
11929 #endif /* CONFIG_SPLICE */
11930 #ifdef CONFIG_EVENTFD
11931 #if defined(TARGET_NR_eventfd)
11932     case TARGET_NR_eventfd:
11933         ret = get_errno(eventfd(arg1, 0));
11934         if (ret >= 0) {
11935             fd_trans_register(ret, &target_eventfd_trans);
11936         }
11937         return ret;
11938 #endif
11939 #if defined(TARGET_NR_eventfd2)
11940     case TARGET_NR_eventfd2:
11941     {
11942         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11943         if (arg2 & TARGET_O_NONBLOCK) {
11944             host_flags |= O_NONBLOCK;
11945         }
11946         if (arg2 & TARGET_O_CLOEXEC) {
11947             host_flags |= O_CLOEXEC;
11948         }
11949         ret = get_errno(eventfd(arg1, host_flags));
11950         if (ret >= 0) {
11951             fd_trans_register(ret, &target_eventfd_trans);
11952         }
11953         return ret;
11954     }
11955 #endif
11956 #endif /* CONFIG_EVENTFD  */
11957 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11958     case TARGET_NR_fallocate:
11959 #if TARGET_ABI_BITS == 32
11960         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11961                                   target_offset64(arg5, arg6)));
11962 #else
11963         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11964 #endif
11965         return ret;
11966 #endif
11967 #if defined(CONFIG_SYNC_FILE_RANGE)
11968 #if defined(TARGET_NR_sync_file_range)
11969     case TARGET_NR_sync_file_range:
11970 #if TARGET_ABI_BITS == 32
11971 #if defined(TARGET_MIPS)
11972         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11973                                         target_offset64(arg5, arg6), arg7));
11974 #else
11975         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11976                                         target_offset64(arg4, arg5), arg6));
11977 #endif /* !TARGET_MIPS */
11978 #else
11979         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11980 #endif
11981         return ret;
11982 #endif
11983 #if defined(TARGET_NR_sync_file_range2) || \
11984     defined(TARGET_NR_arm_sync_file_range)
11985 #if defined(TARGET_NR_sync_file_range2)
11986     case TARGET_NR_sync_file_range2:
11987 #endif
11988 #if defined(TARGET_NR_arm_sync_file_range)
11989     case TARGET_NR_arm_sync_file_range:
11990 #endif
11991         /* This is like sync_file_range but the arguments are reordered */
11992 #if TARGET_ABI_BITS == 32
11993         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11994                                         target_offset64(arg5, arg6), arg2));
11995 #else
11996         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11997 #endif
11998         return ret;
11999 #endif
12000 #endif
12001 #if defined(TARGET_NR_signalfd4)
12002     case TARGET_NR_signalfd4:
12003         return do_signalfd4(arg1, arg2, arg4);
12004 #endif
12005 #if defined(TARGET_NR_signalfd)
12006     case TARGET_NR_signalfd:
12007         return do_signalfd4(arg1, arg2, 0);
12008 #endif
12009 #if defined(CONFIG_EPOLL)
12010 #if defined(TARGET_NR_epoll_create)
12011     case TARGET_NR_epoll_create:
12012         return get_errno(epoll_create(arg1));
12013 #endif
12014 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12015     case TARGET_NR_epoll_create1:
12016         return get_errno(epoll_create1(arg1));
12017 #endif
12018 #if defined(TARGET_NR_epoll_ctl)
12019     case TARGET_NR_epoll_ctl:
12020     {
12021         struct epoll_event ep;
12022         struct epoll_event *epp = 0;
12023         if (arg4) {
12024             struct target_epoll_event *target_ep;
12025             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12026                 return -TARGET_EFAULT;
12027             }
12028             ep.events = tswap32(target_ep->events);
12029             /* The epoll_data_t union is just opaque data to the kernel,
12030              * so we transfer all 64 bits across and need not worry what
12031              * actual data type it is.
12032              */
12033             ep.data.u64 = tswap64(target_ep->data.u64);
12034             unlock_user_struct(target_ep, arg4, 0);
12035             epp = &ep;
12036         }
12037         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12038     }
12039 #endif
12040 
12041 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12042 #if defined(TARGET_NR_epoll_wait)
12043     case TARGET_NR_epoll_wait:
12044 #endif
12045 #if defined(TARGET_NR_epoll_pwait)
12046     case TARGET_NR_epoll_pwait:
12047 #endif
12048     {
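              /*
               * epoll_wait and epoll_pwait share this block: the event
               * array is allocated host side and converted back into the
               * guest buffer on success; epoll_pwait additionally converts
               * the optional guest signal mask.
               */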
12049         struct target_epoll_event *target_ep;
12050         struct epoll_event *ep;
12051         int epfd = arg1;
12052         int maxevents = arg3;
12053         int timeout = arg4;
12054 
12055         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12056             return -TARGET_EINVAL;
12057         }
12058 
12059         target_ep = lock_user(VERIFY_WRITE, arg2,
12060                               maxevents * sizeof(struct target_epoll_event), 1);
12061         if (!target_ep) {
12062             return -TARGET_EFAULT;
12063         }
12064 
12065         ep = g_try_new(struct epoll_event, maxevents);
12066         if (!ep) {
12067             unlock_user(target_ep, arg2, 0);
12068             return -TARGET_ENOMEM;
12069         }
12070 
12071         switch (num) {
12072 #if defined(TARGET_NR_epoll_pwait)
12073         case TARGET_NR_epoll_pwait:
12074         {
12075             target_sigset_t *target_set;
12076             sigset_t _set, *set = &_set;
12077 
12078             if (arg5) {
12079                 if (arg6 != sizeof(target_sigset_t)) {
12080                     ret = -TARGET_EINVAL;
12081                     break;
12082                 }
12083 
12084                 target_set = lock_user(VERIFY_READ, arg5,
12085                                        sizeof(target_sigset_t), 1);
12086                 if (!target_set) {
12087                     ret = -TARGET_EFAULT;
12088                     break;
12089                 }
12090                 target_to_host_sigset(set, target_set);
12091                 unlock_user(target_set, arg5, 0);
12092             } else {
12093                 set = NULL;
12094             }
12095 
12096             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12097                                              set, SIGSET_T_SIZE));
12098             break;
12099         }
12100 #endif
12101 #if defined(TARGET_NR_epoll_wait)
12102         case TARGET_NR_epoll_wait:
12103             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12104                                              NULL, 0));
12105             break;
12106 #endif
12107         default:
12108             ret = -TARGET_ENOSYS;
12109         }
12110         if (!is_error(ret)) {
12111             int i;
12112             for (i = 0; i < ret; i++) {
12113                 target_ep[i].events = tswap32(ep[i].events);
12114                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12115             }
12116             unlock_user(target_ep, arg2,
12117                         ret * sizeof(struct target_epoll_event));
12118         } else {
12119             unlock_user(target_ep, arg2, 0);
12120         }
12121         g_free(ep);
12122         return ret;
12123     }
12124 #endif
12125 #endif
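    /*
     * The epoll_wait/epoll_pwait case above follows the usual pattern for
     * syscalls that fill a guest-supplied array: lock the guest buffer,
     * run the host syscall into a host-side bounce buffer, byte-swap the
     * results into the guest buffer, then unlock only the bytes actually
     * produced.  A minimal sketch of that shape (identifiers are
     * illustrative only):
     *
     *     void *gbuf = lock_user(VERIFY_WRITE, guest_addr, len, 0);
     *     if (!gbuf) {
     *         return -TARGET_EFAULT;
     *     }
     *     ret = get_errno(host_syscall(..., host_buf, ...));
     *     if (!is_error(ret)) {
     *         ... tswap host_buf entries into gbuf ...
     *     }
     *     unlock_user(gbuf, guest_addr, is_error(ret) ? 0 : produced_len);
     */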
12126 #ifdef TARGET_NR_prlimit64
12127     case TARGET_NR_prlimit64:
12128     {
12129         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12130         struct target_rlimit64 *target_rnew, *target_rold;
12131         struct host_rlimit64 rnew, rold, *rnewp = 0;
12132         int resource = target_to_host_resource(arg2);
12133 
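        /*
         * A new limit for RLIMIT_AS, RLIMIT_DATA or RLIMIT_STACK is
         * deliberately not applied: those limits would constrain the QEMU
         * host process itself, which needs more address space and stack
         * than the guest binary alone.
         */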
12134         if (arg3 && (resource != RLIMIT_AS &&
12135                      resource != RLIMIT_DATA &&
12136                      resource != RLIMIT_STACK)) {
12137             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12138                 return -TARGET_EFAULT;
12139             }
12140             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12141             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12142             unlock_user_struct(target_rnew, arg3, 0);
12143             rnewp = &rnew;
12144         }
12145 
12146         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12147         if (!is_error(ret) && arg4) {
12148             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12149                 return -TARGET_EFAULT;
12150             }
12151             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12152             target_rold->rlim_max = tswap64(rold.rlim_max);
12153             unlock_user_struct(target_rold, arg4, 1);
12154         }
12155         return ret;
12156     }
12157 #endif
12158 #ifdef TARGET_NR_gethostname
12159     case TARGET_NR_gethostname:
12160     {
12161         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12162         if (name) {
12163             ret = get_errno(gethostname(name, arg2));
12164             unlock_user(name, arg1, arg2);
12165         } else {
12166             ret = -TARGET_EFAULT;
12167         }
12168         return ret;
12169     }
12170 #endif
12171 #ifdef TARGET_NR_atomic_cmpxchg_32
12172     case TARGET_NR_atomic_cmpxchg_32:
12173     {
12174         /* should use start_exclusive from main.c */
12175         abi_ulong mem_value;
12176         if (get_user_u32(mem_value, arg6)) {
12177             target_siginfo_t info;
12178             info.si_signo = SIGSEGV;
12179             info.si_errno = 0;
12180             info.si_code = TARGET_SEGV_MAPERR;
12181             info._sifields._sigfault._addr = arg6;
12182             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12183                          QEMU_SI_FAULT, &info);
12184             return 0xdeadbeef; /* don't read the unset mem_value */
12185         }
12186         if (mem_value == arg2) {
12187             put_user_u32(arg1, arg6);
12188         }
12189         return mem_value;
12190     }
12191 #endif
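    /*
     * A sketch of what the "should use start_exclusive" note above is
     * asking for (illustrative only, not how this case is implemented):
     * stopping the other vCPUs around the read-compare-write would make
     * the sequence genuinely atomic, e.g.
     *
     *     start_exclusive();
     *     if (get_user_u32(mem_value, arg6) == 0 && mem_value == arg2) {
     *         put_user_u32(arg1, arg6);
     *     }
     *     end_exclusive();
     *
     * at the cost of serialising every guest cmpxchg against all CPUs.
     */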
12192 #ifdef TARGET_NR_atomic_barrier
12193     case TARGET_NR_atomic_barrier:
12194         /* Like the kernel implementation and the
12195            qemu arm barrier, this is a no-op. */
12196         return 0;
12197 #endif
12198 
12199 #ifdef TARGET_NR_timer_create
12200     case TARGET_NR_timer_create:
12201     {
12202         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12203 
12204         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12205 
12206         int clkid = arg1;
12207         int timer_index = next_free_host_timer();
12208 
12209         if (timer_index < 0) {
12210             ret = -TARGET_EAGAIN;
12211         } else {
12212             timer_t *phtimer = g_posix_timers + timer_index;
12213 
12214             if (arg2) {
12215                 phost_sevp = &host_sevp;
12216                 ret = target_to_host_sigevent(phost_sevp, arg2);
12217                 if (ret != 0) {
12218                     return ret;
12219                 }
12220             }
12221 
12222             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12223             if (ret) {
12224                 phtimer = NULL;
12225             } else {
12226                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12227                     return -TARGET_EFAULT;
12228                 }
12229             }
12230         }
12231         return ret;
12232     }
12233 #endif
12234 
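    /*
     * The value handed back to the guest by timer_create above is
     * TIMER_MAGIC | timer_index.  The timer_settime/gettime/getoverrun/
     * delete cases below recover the index via get_timer_id(), defined
     * earlier in this file; in rough outline (a sketch, not the exact
     * code) it validates the magic bits and bounds before indexing
     * g_posix_timers[]:
     *
     *     timerid = arg & 0xffff;
     *     if ((arg & TIMER_MAGIC_MASK) != TIMER_MAGIC ||
     *         timerid >= ARRAY_SIZE(g_posix_timers)) {
     *         return -TARGET_EINVAL;
     *     }
     *     return timerid;
     */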
12235 #ifdef TARGET_NR_timer_settime
12236     case TARGET_NR_timer_settime:
12237     {
12238         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12239          * struct itimerspec * old_value */
12240         target_timer_t timerid = get_timer_id(arg1);
12241 
12242         if (timerid < 0) {
12243             ret = timerid;
12244         } else if (arg3 == 0) {
12245             ret = -TARGET_EINVAL;
12246         } else {
12247             timer_t htimer = g_posix_timers[timerid];
12248             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12249 
12250             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12251                 return -TARGET_EFAULT;
12252             }
12253             ret = get_errno(
12254                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12255             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12256                 return -TARGET_EFAULT;
12257             }
12258         }
12259         return ret;
12260     }
12261 #endif
12262 
12263 #ifdef TARGET_NR_timer_gettime
12264     case TARGET_NR_timer_gettime:
12265     {
12266         /* args: timer_t timerid, struct itimerspec *curr_value */
12267         target_timer_t timerid = get_timer_id(arg1);
12268 
12269         if (timerid < 0) {
12270             ret = timerid;
12271         } else if (!arg2) {
12272             ret = -TARGET_EFAULT;
12273         } else {
12274             timer_t htimer = g_posix_timers[timerid];
12275             struct itimerspec hspec;
12276             ret = get_errno(timer_gettime(htimer, &hspec));
12277 
12278             if (host_to_target_itimerspec(arg2, &hspec)) {
12279                 ret = -TARGET_EFAULT;
12280             }
12281         }
12282         return ret;
12283     }
12284 #endif
12285 
12286 #ifdef TARGET_NR_timer_getoverrun
12287     case TARGET_NR_timer_getoverrun:
12288     {
12289         /* args: timer_t timerid */
12290         target_timer_t timerid = get_timer_id(arg1);
12291 
12292         if (timerid < 0) {
12293             ret = timerid;
12294         } else {
12295             timer_t htimer = g_posix_timers[timerid];
12296             ret = get_errno(timer_getoverrun(htimer));
12297         }
12298         return ret;
12299     }
12300 #endif
12301 
12302 #ifdef TARGET_NR_timer_delete
12303     case TARGET_NR_timer_delete:
12304     {
12305         /* args: timer_t timerid */
12306         target_timer_t timerid = get_timer_id(arg1);
12307 
12308         if (timerid < 0) {
12309             ret = timerid;
12310         } else {
12311             timer_t htimer = g_posix_timers[timerid];
12312             ret = get_errno(timer_delete(htimer));
12313             g_posix_timers[timerid] = 0;
12314         }
12315         return ret;
12316     }
12317 #endif
12318 
12319 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12320     case TARGET_NR_timerfd_create:
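        /*
         * TFD_CLOEXEC and TFD_NONBLOCK share their values with O_CLOEXEC
         * and O_NONBLOCK, so the fcntl flag table is sufficient to
         * translate the guest flags here.
         */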
12321         return get_errno(timerfd_create(arg1,
12322                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12323 #endif
12324 
12325 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12326     case TARGET_NR_timerfd_gettime:
12327         {
12328             struct itimerspec its_curr;
12329 
12330             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12331 
12332             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12333                 return -TARGET_EFAULT;
12334             }
12335         }
12336         return ret;
12337 #endif
12338 
12339 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12340     case TARGET_NR_timerfd_settime:
12341         {
12342             struct itimerspec its_new, its_old, *p_new;
12343 
12344             if (arg3) {
12345                 if (target_to_host_itimerspec(&its_new, arg3)) {
12346                     return -TARGET_EFAULT;
12347                 }
12348                 p_new = &its_new;
12349             } else {
12350                 p_new = NULL;
12351             }
12352 
12353             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12354 
12355             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12356                 return -TARGET_EFAULT;
12357             }
12358         }
12359         return ret;
12360 #endif
12361 
12362 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12363     case TARGET_NR_ioprio_get:
12364         return get_errno(ioprio_get(arg1, arg2));
12365 #endif
12366 
12367 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12368     case TARGET_NR_ioprio_set:
12369         return get_errno(ioprio_set(arg1, arg2, arg3));
12370 #endif
12371 
12372 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12373     case TARGET_NR_setns:
12374         return get_errno(setns(arg1, arg2));
12375 #endif
12376 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12377     case TARGET_NR_unshare:
12378         return get_errno(unshare(arg1));
12379 #endif
12380 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12381     case TARGET_NR_kcmp:
12382         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12383 #endif
12384 #ifdef TARGET_NR_swapcontext
12385     case TARGET_NR_swapcontext:
12386         /* PowerPC specific.  */
12387         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12388 #endif
12389 #ifdef TARGET_NR_memfd_create
12390     case TARGET_NR_memfd_create:
12391         p = lock_user_string(arg1);
12392         if (!p) {
12393             return -TARGET_EFAULT;
12394         }
12395         ret = get_errno(memfd_create(p, arg2));
12396         fd_trans_unregister(ret);
12397         unlock_user(p, arg1, 0);
12398         return ret;
12399 #endif
12400 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12401     case TARGET_NR_membarrier:
12402         return get_errno(membarrier(arg1, arg2));
12403 #endif
12404 
12405     default:
12406         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12407         return -TARGET_ENOSYS;
12408     }
12409     return ret;
12410 }
12411 
12412 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12413                     abi_long arg2, abi_long arg3, abi_long arg4,
12414                     abi_long arg5, abi_long arg6, abi_long arg7,
12415                     abi_long arg8)
12416 {
12417     CPUState *cpu = env_cpu(cpu_env);
12418     abi_long ret;
12419 
12420 #ifdef DEBUG_ERESTARTSYS
12421     /* Debug-only code for exercising the syscall-restart code paths
12422      * in the per-architecture cpu main loops: restart every syscall
12423      * the guest makes once before letting it through.
12424      */
12425     {
12426         static bool flag;
12427         flag = !flag;
12428         if (flag) {
12429             return -TARGET_ERESTARTSYS;
12430         }
12431     }
12432 #endif
12433 
12434     record_syscall_start(cpu, num, arg1,
12435                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12436 
12437     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12438         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12439     }
12440 
12441     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12442                       arg5, arg6, arg7, arg8);
12443 
12444     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12445         print_syscall_ret(num, ret);
12446     }
12447 
12448     record_syscall_return(cpu, num, ret);
12449     return ret;
12450 }
12451