xref: /qemu/linux-user/syscall.c (revision b3c818a4)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/if_tun.h>
60 #include <linux/errqueue.h>
61 #include <linux/random.h>
62 #ifdef CONFIG_TIMERFD
63 #include <sys/timerfd.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 #ifdef HAVE_SYS_KCOV_H
78 #include <sys/kcov.h>
79 #endif
80 
81 #define termios host_termios
82 #define winsize host_winsize
83 #define termio host_termio
84 #define sgttyb host_sgttyb /* same as target */
85 #define tchars host_tchars /* same as target */
86 #define ltchars host_ltchars /* same as target */
87 
88 #include <linux/termios.h>
89 #include <linux/unistd.h>
90 #include <linux/cdrom.h>
91 #include <linux/hdreg.h>
92 #include <linux/soundcard.h>
93 #include <linux/kd.h>
94 #include <linux/mtio.h>
95 #include <linux/fs.h>
96 #include <linux/fd.h>
97 #if defined(CONFIG_FIEMAP)
98 #include <linux/fiemap.h>
99 #endif
100 #include <linux/fb.h>
101 #if defined(CONFIG_USBFS)
102 #include <linux/usbdevice_fs.h>
103 #include <linux/usb/ch9.h>
104 #endif
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include <linux/filter.h>
110 #include <linux/blkpg.h>
111 #include <netpacket/packet.h>
112 #include <linux/netlink.h>
113 #include <linux/if_alg.h>
114 #include <linux/rtc.h>
115 #include <sound/asound.h>
116 #ifdef HAVE_BTRFS_H
117 #include <linux/btrfs.h>
118 #endif
119 #ifdef HAVE_DRM_H
120 #include <libdrm/drm.h>
121 #include <libdrm/i915_drm.h>
122 #endif
123 #include "linux_loop.h"
124 #include "uname.h"
125 
126 #include "qemu.h"
127 #include "qemu/guest-random.h"
128 #include "qemu/selfmap.h"
129 #include "user/syscall-trace.h"
130 #include "qapi/error.h"
131 #include "fd-trans.h"
132 #include "tcg/tcg.h"
133 
134 #ifndef CLONE_IO
135 #define CLONE_IO                0x80000000      /* Clone io context */
136 #endif
137 
138 /* We can't directly call the host clone syscall, because this will
139  * badly confuse libc (breaking mutexes, for example). So we must
140  * divide clone flags into:
141  *  * flag combinations that look like pthread_create()
142  *  * flag combinations that look like fork()
143  *  * flags we can implement within QEMU itself
144  *  * flags we can't support and will return an error for
145  */
146 /* For thread creation, all these flags must be present; for
147  * fork, none must be present.
148  */
149 #define CLONE_THREAD_FLAGS                              \
150     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
151      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
152 
153 /* These flags are ignored:
154  * CLONE_DETACHED is now ignored by the kernel;
155  * CLONE_IO is just an optimisation hint to the I/O scheduler
156  */
157 #define CLONE_IGNORED_FLAGS                     \
158     (CLONE_DETACHED | CLONE_IO)
159 
160 /* Flags for fork which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_FORK_FLAGS               \
162     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
163      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
164 
165 /* Flags for thread creation which we can implement within QEMU itself */
166 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
167     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
168      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
169 
170 #define CLONE_INVALID_FORK_FLAGS                                        \
171     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
172 
173 #define CLONE_INVALID_THREAD_FLAGS                                      \
174     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
175        CLONE_IGNORED_FLAGS))
176 
177 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
178  * have almost all been allocated. We cannot support any of
179  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
180  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
181  * The checks against the invalid thread masks above will catch these.
182  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
183  */
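/*
 * Illustrative sketch (not the exact code) of how do_fork() further down
 * this file is expected to apply these masks: a CLONE_VM (thread-style)
 * request must carry all of CLONE_THREAD_FLAGS and nothing from the
 * invalid-thread mask, while a fork-style request must carry nothing
 * from the invalid-fork mask.
 *
 *     if (flags & CLONE_VM) {
 *         if ((flags & CLONE_INVALID_THREAD_FLAGS) ||
 *             (flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *     } else if (flags & CLONE_INVALID_FORK_FLAGS) {
 *         return -TARGET_EINVAL;
 *     }
 */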
184 
185 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
186  * once. This exercises the codepaths for restart.
187  */
188 //#define DEBUG_ERESTARTSYS
189 
190 //#include <linux/msdos_fs.h>
191 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
192 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
193 
194 #undef _syscall0
195 #undef _syscall1
196 #undef _syscall2
197 #undef _syscall3
198 #undef _syscall4
199 #undef _syscall5
200 #undef _syscall6
201 
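/*
 * The _syscallN() macros below generate thin static wrappers that invoke
 * the raw host syscall via syscall(2), bypassing any libc wrapper.  They
 * are instantiated further down for host syscalls that either have no
 * libc wrapper or whose libc wrapper behaves differently from the raw
 * call (sys_getdents, sys_rt_sigqueueinfo, and so on).
 */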
202 #define _syscall0(type,name)		\
203 static type name (void)			\
204 {					\
205 	return syscall(__NR_##name);	\
206 }
207 
208 #define _syscall1(type,name,type1,arg1)		\
209 static type name (type1 arg1)			\
210 {						\
211 	return syscall(__NR_##name, arg1);	\
212 }
213 
214 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
215 static type name (type1 arg1,type2 arg2)		\
216 {							\
217 	return syscall(__NR_##name, arg1, arg2);	\
218 }
219 
220 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
221 static type name (type1 arg1,type2 arg2,type3 arg3)		\
222 {								\
223 	return syscall(__NR_##name, arg1, arg2, arg3);		\
224 }
225 
226 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
227 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
228 {										\
229 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
230 }
231 
232 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
233 		  type5,arg5)							\
234 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
235 {										\
236 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
237 }
238 
239 
240 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
241 		  type5,arg5,type6,arg6)					\
242 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
243                   type6 arg6)							\
244 {										\
245 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
246 }
247 
248 
249 #define __NR_sys_uname __NR_uname
250 #define __NR_sys_getcwd1 __NR_getcwd
251 #define __NR_sys_getdents __NR_getdents
252 #define __NR_sys_getdents64 __NR_getdents64
253 #define __NR_sys_getpriority __NR_getpriority
254 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
255 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
256 #define __NR_sys_syslog __NR_syslog
257 #if defined(__NR_futex)
258 # define __NR_sys_futex __NR_futex
259 #endif
260 #if defined(__NR_futex_time64)
261 # define __NR_sys_futex_time64 __NR_futex_time64
262 #endif
263 #define __NR_sys_inotify_init __NR_inotify_init
264 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
265 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
266 #define __NR_sys_statx __NR_statx
267 
268 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
269 #define __NR__llseek __NR_lseek
270 #endif
271 
272 /* Newer kernel ports have llseek() instead of _llseek() */
273 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
274 #define TARGET_NR__llseek TARGET_NR_llseek
275 #endif
276 
277 #define __NR_sys_gettid __NR_gettid
278 _syscall0(int, sys_gettid)
279 
280 /* For the 64-bit guest on 32-bit host case we must emulate
281  * getdents using getdents64, because otherwise the host
282  * might hand us back more dirent records than we can fit
283  * into the guest buffer after structure format conversion.
284  * Otherwise we emulate getdents with getdents if the host has it.
285  */
286 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
287 #define EMULATE_GETDENTS_WITH_GETDENTS
288 #endif
289 
290 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
291 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
292 #endif
293 #if (defined(TARGET_NR_getdents) && \
294       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
295     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
296 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
297 #endif
298 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
299 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
300           loff_t *, res, uint, wh);
301 #endif
302 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
303 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
304           siginfo_t *, uinfo)
305 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
306 #ifdef __NR_exit_group
307 _syscall1(int,exit_group,int,error_code)
308 #endif
309 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
310 _syscall1(int,set_tid_address,int *,tidptr)
311 #endif
312 #if defined(__NR_futex)
313 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
314           const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #endif
316 #if defined(__NR_futex_time64)
317 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
318           const struct timespec *,timeout,int *,uaddr2,int,val3)
319 #endif
320 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
321 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
322           unsigned long *, user_mask_ptr);
323 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
324 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
325           unsigned long *, user_mask_ptr);
326 #define __NR_sys_getcpu __NR_getcpu
327 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
328 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
329           void *, arg);
330 _syscall2(int, capget, struct __user_cap_header_struct *, header,
331           struct __user_cap_data_struct *, data);
332 _syscall2(int, capset, struct __user_cap_header_struct *, header,
333           struct __user_cap_data_struct *, data);
334 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
335 _syscall2(int, ioprio_get, int, which, int, who)
336 #endif
337 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
338 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
339 #endif
340 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
341 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
342 #endif
343 
344 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
345 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
346           unsigned long, idx1, unsigned long, idx2)
347 #endif
348 
349 /*
350  * It is assumed that struct statx is architecture independent.
351  */
352 #if defined(TARGET_NR_statx) && defined(__NR_statx)
353 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
354           unsigned int, mask, struct target_statx *, statxbuf)
355 #endif
356 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
357 _syscall2(int, membarrier, int, cmd, int, flags)
358 #endif
359 
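/*
 * Translation table for open(2)/fcntl(2) flag bits.  Each entry is a
 * (target mask, target bits, host mask, host bits) tuple; the table is
 * walked by helpers such as target_to_host_bitmask() and is terminated
 * by the all-zero entry.
 */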
360 static bitmask_transtbl fcntl_flags_tbl[] = {
361   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
362   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
363   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
364   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
365   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
366   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
367   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
368   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
369   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
370   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
371   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
372   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
373   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
374 #if defined(O_DIRECT)
375   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
376 #endif
377 #if defined(O_NOATIME)
378   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
379 #endif
380 #if defined(O_CLOEXEC)
381   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
382 #endif
383 #if defined(O_PATH)
384   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
385 #endif
386 #if defined(O_TMPFILE)
387   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
388 #endif
389   /* Don't terminate the list prematurely on 64-bit host+guest.  */
390 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
391   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
392 #endif
393   { 0, 0, 0, 0 }
394 };
395 
396 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
397 
398 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
399 #if defined(__NR_utimensat)
400 #define __NR_sys_utimensat __NR_utimensat
401 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
402           const struct timespec *,tsp,int,flags)
403 #else
404 static int sys_utimensat(int dirfd, const char *pathname,
405                          const struct timespec times[2], int flags)
406 {
407     errno = ENOSYS;
408     return -1;
409 }
410 #endif
411 #endif /* TARGET_NR_utimensat */
412 
413 #ifdef TARGET_NR_renameat2
414 #if defined(__NR_renameat2)
415 #define __NR_sys_renameat2 __NR_renameat2
416 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
417           const char *, new, unsigned int, flags)
418 #else
419 static int sys_renameat2(int oldfd, const char *old,
420                          int newfd, const char *new, int flags)
421 {
422     if (flags == 0) {
423         return renameat(oldfd, old, newfd, new);
424     }
425     errno = ENOSYS;
426     return -1;
427 }
428 #endif
429 #endif /* TARGET_NR_renameat2 */
430 
431 #ifdef CONFIG_INOTIFY
432 #include <sys/inotify.h>
433 
434 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
435 static int sys_inotify_init(void)
436 {
437   return (inotify_init());
438 }
439 #endif
440 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
441 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
442 {
443   return (inotify_add_watch(fd, pathname, mask));
444 }
445 #endif
446 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
447 static int sys_inotify_rm_watch(int fd, int32_t wd)
448 {
449   return (inotify_rm_watch(fd, wd));
450 }
451 #endif
452 #ifdef CONFIG_INOTIFY1
453 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
454 static int sys_inotify_init1(int flags)
455 {
456   return (inotify_init1(flags));
457 }
458 #endif
459 #endif
460 #else
461 /* Userspace can usually survive at runtime without inotify support */
462 #undef TARGET_NR_inotify_init
463 #undef TARGET_NR_inotify_init1
464 #undef TARGET_NR_inotify_add_watch
465 #undef TARGET_NR_inotify_rm_watch
466 #endif /* CONFIG_INOTIFY  */
467 
468 #if defined(TARGET_NR_prlimit64)
469 #ifndef __NR_prlimit64
470 # define __NR_prlimit64 -1
471 #endif
472 #define __NR_sys_prlimit64 __NR_prlimit64
473 /* The glibc rlimit structure may not match the one used by the underlying syscall */
474 struct host_rlimit64 {
475     uint64_t rlim_cur;
476     uint64_t rlim_max;
477 };
478 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
479           const struct host_rlimit64 *, new_limit,
480           struct host_rlimit64 *, old_limit)
481 #endif
482 
483 
484 #if defined(TARGET_NR_timer_create)
485 /* Maximum of 32 active POSIX timers allowed at any one time. */
486 static timer_t g_posix_timers[32] = { 0, } ;
487 
488 static inline int next_free_host_timer(void)
489 {
490     int k ;
491     /* FIXME: Does finding the next free slot require a lock? */
492     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
493         if (g_posix_timers[k] == 0) {
494             g_posix_timers[k] = (timer_t) 1;
495             return k;
496         }
497     }
498     return -1;
499 }
500 #endif
501 
502 #define ERRNO_TABLE_SIZE 1200
503 
504 /* target_to_host_errno_table[] is initialized from
505  * host_to_target_errno_table[] in syscall_init(). */
506 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
507 };
508 
509 /*
510  * This list is the union of errno values overridden in asm-<arch>/errno.h
511  * minus the errnos that are not actually generic to all archs.
512  */
513 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
514     [EAGAIN]		= TARGET_EAGAIN,
515     [EIDRM]		= TARGET_EIDRM,
516     [ECHRNG]		= TARGET_ECHRNG,
517     [EL2NSYNC]		= TARGET_EL2NSYNC,
518     [EL3HLT]		= TARGET_EL3HLT,
519     [EL3RST]		= TARGET_EL3RST,
520     [ELNRNG]		= TARGET_ELNRNG,
521     [EUNATCH]		= TARGET_EUNATCH,
522     [ENOCSI]		= TARGET_ENOCSI,
523     [EL2HLT]		= TARGET_EL2HLT,
524     [EDEADLK]		= TARGET_EDEADLK,
525     [ENOLCK]		= TARGET_ENOLCK,
526     [EBADE]		= TARGET_EBADE,
527     [EBADR]		= TARGET_EBADR,
528     [EXFULL]		= TARGET_EXFULL,
529     [ENOANO]		= TARGET_ENOANO,
530     [EBADRQC]		= TARGET_EBADRQC,
531     [EBADSLT]		= TARGET_EBADSLT,
532     [EBFONT]		= TARGET_EBFONT,
533     [ENOSTR]		= TARGET_ENOSTR,
534     [ENODATA]		= TARGET_ENODATA,
535     [ETIME]		= TARGET_ETIME,
536     [ENOSR]		= TARGET_ENOSR,
537     [ENONET]		= TARGET_ENONET,
538     [ENOPKG]		= TARGET_ENOPKG,
539     [EREMOTE]		= TARGET_EREMOTE,
540     [ENOLINK]		= TARGET_ENOLINK,
541     [EADV]		= TARGET_EADV,
542     [ESRMNT]		= TARGET_ESRMNT,
543     [ECOMM]		= TARGET_ECOMM,
544     [EPROTO]		= TARGET_EPROTO,
545     [EDOTDOT]		= TARGET_EDOTDOT,
546     [EMULTIHOP]		= TARGET_EMULTIHOP,
547     [EBADMSG]		= TARGET_EBADMSG,
548     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
549     [EOVERFLOW]		= TARGET_EOVERFLOW,
550     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
551     [EBADFD]		= TARGET_EBADFD,
552     [EREMCHG]		= TARGET_EREMCHG,
553     [ELIBACC]		= TARGET_ELIBACC,
554     [ELIBBAD]		= TARGET_ELIBBAD,
555     [ELIBSCN]		= TARGET_ELIBSCN,
556     [ELIBMAX]		= TARGET_ELIBMAX,
557     [ELIBEXEC]		= TARGET_ELIBEXEC,
558     [EILSEQ]		= TARGET_EILSEQ,
559     [ENOSYS]		= TARGET_ENOSYS,
560     [ELOOP]		= TARGET_ELOOP,
561     [ERESTART]		= TARGET_ERESTART,
562     [ESTRPIPE]		= TARGET_ESTRPIPE,
563     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
564     [EUSERS]		= TARGET_EUSERS,
565     [ENOTSOCK]		= TARGET_ENOTSOCK,
566     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
567     [EMSGSIZE]		= TARGET_EMSGSIZE,
568     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
569     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
570     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
571     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
572     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
573     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
574     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
575     [EADDRINUSE]	= TARGET_EADDRINUSE,
576     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
577     [ENETDOWN]		= TARGET_ENETDOWN,
578     [ENETUNREACH]	= TARGET_ENETUNREACH,
579     [ENETRESET]		= TARGET_ENETRESET,
580     [ECONNABORTED]	= TARGET_ECONNABORTED,
581     [ECONNRESET]	= TARGET_ECONNRESET,
582     [ENOBUFS]		= TARGET_ENOBUFS,
583     [EISCONN]		= TARGET_EISCONN,
584     [ENOTCONN]		= TARGET_ENOTCONN,
585     [EUCLEAN]		= TARGET_EUCLEAN,
586     [ENOTNAM]		= TARGET_ENOTNAM,
587     [ENAVAIL]		= TARGET_ENAVAIL,
588     [EISNAM]		= TARGET_EISNAM,
589     [EREMOTEIO]		= TARGET_EREMOTEIO,
590     [EDQUOT]            = TARGET_EDQUOT,
591     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
592     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
593     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
594     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
595     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
596     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
597     [EALREADY]		= TARGET_EALREADY,
598     [EINPROGRESS]	= TARGET_EINPROGRESS,
599     [ESTALE]		= TARGET_ESTALE,
600     [ECANCELED]		= TARGET_ECANCELED,
601     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
602     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
603 #ifdef ENOKEY
604     [ENOKEY]		= TARGET_ENOKEY,
605 #endif
606 #ifdef EKEYEXPIRED
607     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
608 #endif
609 #ifdef EKEYREVOKED
610     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
611 #endif
612 #ifdef EKEYREJECTED
613     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
614 #endif
615 #ifdef EOWNERDEAD
616     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
617 #endif
618 #ifdef ENOTRECOVERABLE
619     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
620 #endif
621 #ifdef ENOMSG
622     [ENOMSG]            = TARGET_ENOMSG,
623 #endif
624 #ifdef ERFKILL
625     [ERFKILL]           = TARGET_ERFKILL,
626 #endif
627 #ifdef EHWPOISON
628     [EHWPOISON]         = TARGET_EHWPOISON,
629 #endif
630 };
631 
632 static inline int host_to_target_errno(int err)
633 {
634     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
635         host_to_target_errno_table[err]) {
636         return host_to_target_errno_table[err];
637     }
638     return err;
639 }
640 
641 static inline int target_to_host_errno(int err)
642 {
643     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
644         target_to_host_errno_table[err]) {
645         return target_to_host_errno_table[err];
646     }
647     return err;
648 }
649 
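/*
 * Fold a host syscall result into the convention used throughout this
 * file: on failure (-1) return the negated, target-translated errno,
 * otherwise return the value unchanged.  Syscall handlers typically
 * write something like: return get_errno(safe_read(fd, p, count));
 */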
650 static inline abi_long get_errno(abi_long ret)
651 {
652     if (ret == -1)
653         return -host_to_target_errno(errno);
654     else
655         return ret;
656 }
657 
658 const char *target_strerror(int err)
659 {
660     if (err == TARGET_ERESTARTSYS) {
661         return "To be restarted";
662     }
663     if (err == TARGET_QEMU_ESIGRETURN) {
664         return "Successful exit from sigreturn";
665     }
666 
667     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
668         return NULL;
669     }
670     return strerror(target_to_host_errno(err));
671 }
672 
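/*
 * The safe_syscallN() macros generate wrappers around safe_syscall(),
 * which (roughly speaking) enters the host syscall in a way that
 * cooperates with guest signal delivery: if a guest signal becomes
 * pending before the syscall actually starts, the wrapper fails with
 * errno TARGET_ERESTARTSYS so the caller can unwind, let the signal be
 * delivered, and restart the syscall.  Blocking syscalls issued on
 * behalf of the guest should therefore use these wrappers rather than
 * the plain _syscallN() ones above.
 */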
673 #define safe_syscall0(type, name) \
674 static type safe_##name(void) \
675 { \
676     return safe_syscall(__NR_##name); \
677 }
678 
679 #define safe_syscall1(type, name, type1, arg1) \
680 static type safe_##name(type1 arg1) \
681 { \
682     return safe_syscall(__NR_##name, arg1); \
683 }
684 
685 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
686 static type safe_##name(type1 arg1, type2 arg2) \
687 { \
688     return safe_syscall(__NR_##name, arg1, arg2); \
689 }
690 
691 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
692 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
693 { \
694     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
695 }
696 
697 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
698     type4, arg4) \
699 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
700 { \
701     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
702 }
703 
704 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
705     type4, arg4, type5, arg5) \
706 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
707     type5 arg5) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
710 }
711 
712 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5, type6, arg6) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5, type6 arg6) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
718 }
719 
720 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
721 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
722 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
723               int, flags, mode_t, mode)
724 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
725 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
726               struct rusage *, rusage)
727 #endif
728 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
729               int, options, struct rusage *, rusage)
730 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
731 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
732     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
733 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
734               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
735 #endif
736 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
737 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
738               struct timespec *, tsp, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 #endif
741 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
742               int, maxevents, int, timeout, const sigset_t *, sigmask,
743               size_t, sigsetsize)
744 #if defined(__NR_futex)
745 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
746               const struct timespec *,timeout,int *,uaddr2,int,val3)
747 #endif
748 #if defined(__NR_futex_time64)
749 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
750               const struct timespec *,timeout,int *,uaddr2,int,val3)
751 #endif
752 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
753 safe_syscall2(int, kill, pid_t, pid, int, sig)
754 safe_syscall2(int, tkill, int, tid, int, sig)
755 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
756 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
758 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
759               unsigned long, pos_l, unsigned long, pos_h)
760 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
761               unsigned long, pos_l, unsigned long, pos_h)
762 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
763               socklen_t, addrlen)
764 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
765               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
766 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
767               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
768 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
769 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
770 safe_syscall2(int, flock, int, fd, int, operation)
771 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
772 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
773               const struct timespec *, uts, size_t, sigsetsize)
774 #endif
775 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
776               int, flags)
777 #if defined(TARGET_NR_nanosleep)
778 safe_syscall2(int, nanosleep, const struct timespec *, req,
779               struct timespec *, rem)
780 #endif
781 #if defined(TARGET_NR_clock_nanosleep) || \
782     defined(TARGET_NR_clock_nanosleep_time64)
783 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
784               const struct timespec *, req, struct timespec *, rem)
785 #endif
786 #ifdef __NR_ipc
787 #ifdef __s390x__
788 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
789               void *, ptr)
790 #else
791 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
792               void *, ptr, long, fifth)
793 #endif
794 #endif
795 #ifdef __NR_msgsnd
796 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
797               int, flags)
798 #endif
799 #ifdef __NR_msgrcv
800 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
801               long, msgtype, int, flags)
802 #endif
803 #ifdef __NR_semtimedop
804 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
805               unsigned, nsops, const struct timespec *, timeout)
806 #endif
807 #if defined(TARGET_NR_mq_timedsend) || \
808     defined(TARGET_NR_mq_timedsend_time64)
809 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
810               size_t, len, unsigned, prio, const struct timespec *, timeout)
811 #endif
812 #if defined(TARGET_NR_mq_timedreceive) || \
813     defined(TARGET_NR_mq_timedreceive_time64)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815               size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
818 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
819               int, outfd, loff_t *, poutoff, size_t, length,
820               unsigned int, flags)
821 #endif
822 
823 /* We do ioctl like this rather than via safe_syscall3 to preserve the
824  * "third argument might be integer or pointer or not present" behaviour of
825  * the libc function.
826  */
827 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
828 /* Similarly for fcntl. Note that callers must always:
829  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
830  *  use the flock64 struct rather than unsuffixed flock
831  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
832  */
833 #ifdef __NR_fcntl64
834 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
835 #else
836 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
837 #endif
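/*
 * A minimal usage sketch for the convention described above (illustrative
 * only; the command and struct names are the host definitions):
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * Passing F_GETLK64/F_SETLK64 with struct flock64 keeps the offsets
 * 64-bit whether __NR_fcntl64 or __NR_fcntl is the syscall in use.
 */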
838 
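/* Convert a host socket type value (the base SOCK_DGRAM/SOCK_STREAM/...
 * type plus the SOCK_CLOEXEC/SOCK_NONBLOCK modifier bits) into the
 * target's encoding of the same information. */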
839 static inline int host_to_target_sock_type(int host_type)
840 {
841     int target_type;
842 
843     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
844     case SOCK_DGRAM:
845         target_type = TARGET_SOCK_DGRAM;
846         break;
847     case SOCK_STREAM:
848         target_type = TARGET_SOCK_STREAM;
849         break;
850     default:
851         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
852         break;
853     }
854 
855 #if defined(SOCK_CLOEXEC)
856     if (host_type & SOCK_CLOEXEC) {
857         target_type |= TARGET_SOCK_CLOEXEC;
858     }
859 #endif
860 
861 #if defined(SOCK_NONBLOCK)
862     if (host_type & SOCK_NONBLOCK) {
863         target_type |= TARGET_SOCK_NONBLOCK;
864     }
865 #endif
866 
867     return target_type;
868 }
869 
870 static abi_ulong target_brk;
871 static abi_ulong target_original_brk;
872 static abi_ulong brk_page;
873 
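/* Record the initial program break (as established by the binary loader);
 * do_brk() below grows the guest heap from this point. */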
874 void target_set_brk(abi_ulong new_brk)
875 {
876     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
877     brk_page = HOST_PAGE_ALIGN(target_brk);
878 }
879 
880 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
881 #define DEBUGF_BRK(message, args...)
882 
883 /* do_brk() must return target values and target errnos. */
884 abi_long do_brk(abi_ulong new_brk)
885 {
886     abi_long mapped_addr;
887     abi_ulong new_alloc_size;
888 
889     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
890 
891     if (!new_brk) {
892         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
893         return target_brk;
894     }
895     if (new_brk < target_original_brk) {
896         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
897                    target_brk);
898         return target_brk;
899     }
900 
901     /* If the new brk is less than the highest page reserved to the
902      * target heap allocation, set it and we're almost done...  */
903     if (new_brk <= brk_page) {
904         /* Heap contents are initialized to zero, as for anonymous
905          * mapped pages.  */
906         if (new_brk > target_brk) {
907             memset(g2h(target_brk), 0, new_brk - target_brk);
908         }
909         target_brk = new_brk;
910         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
911         return target_brk;
912     }
913 
914     /* We need to allocate more memory after the brk... Note that
915      * we don't use MAP_FIXED because that will map over the top of
916      * any existing mapping (like the one with the host libc or qemu
917      * itself); instead we treat "mapped but at wrong address" as
918      * a failure and unmap again.
919      */
920     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
921     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
922                                         PROT_READ|PROT_WRITE,
923                                         MAP_ANON|MAP_PRIVATE, 0, 0));
924 
925     if (mapped_addr == brk_page) {
926         /* Heap contents are initialized to zero, as for anonymous
927          * mapped pages.  Technically the new pages are already
928          * initialized to zero since they *are* anonymous mapped
929  * pages; however, we have to take care with the contents that
930  * come from the remaining part of the previous page: they may
931  * contain garbage data left over from earlier heap usage (the
932  * heap having grown and then shrunk).  */
933         memset(g2h(target_brk), 0, brk_page - target_brk);
934 
935         target_brk = new_brk;
936         brk_page = HOST_PAGE_ALIGN(target_brk);
937         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
938             target_brk);
939         return target_brk;
940     } else if (mapped_addr != -1) {
941         /* Mapped but at wrong address, meaning there wasn't actually
942          * enough space for this brk.
943          */
944         target_munmap(mapped_addr, new_alloc_size);
945         mapped_addr = -1;
946         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
947     }
948     else {
949         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
950     }
951 
952 #if defined(TARGET_ALPHA)
953     /* We (partially) emulate OSF/1 on Alpha, which requires we
954        return a proper errno, not an unchanged brk value.  */
955     return -TARGET_ENOMEM;
956 #endif
957     /* For everything else, return the previous break. */
958     return target_brk;
959 }
960 
961 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
962     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
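/*
 * The guest fd_set is an array of abi_ulong bit words whose word size and
 * byte order follow the target ABI, so it cannot simply be memcpy'd into
 * a host fd_set; these helpers convert bit by bit in each direction.
 */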
963 static inline abi_long copy_from_user_fdset(fd_set *fds,
964                                             abi_ulong target_fds_addr,
965                                             int n)
966 {
967     int i, nw, j, k;
968     abi_ulong b, *target_fds;
969 
970     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
971     if (!(target_fds = lock_user(VERIFY_READ,
972                                  target_fds_addr,
973                                  sizeof(abi_ulong) * nw,
974                                  1)))
975         return -TARGET_EFAULT;
976 
977     FD_ZERO(fds);
978     k = 0;
979     for (i = 0; i < nw; i++) {
980         /* grab the abi_ulong */
981         __get_user(b, &target_fds[i]);
982         for (j = 0; j < TARGET_ABI_BITS; j++) {
983             /* check the bit inside the abi_ulong */
984             if ((b >> j) & 1)
985                 FD_SET(k, fds);
986             k++;
987         }
988     }
989 
990     unlock_user(target_fds, target_fds_addr, 0);
991 
992     return 0;
993 }
994 
995 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
996                                                  abi_ulong target_fds_addr,
997                                                  int n)
998 {
999     if (target_fds_addr) {
1000         if (copy_from_user_fdset(fds, target_fds_addr, n))
1001             return -TARGET_EFAULT;
1002         *fds_ptr = fds;
1003     } else {
1004         *fds_ptr = NULL;
1005     }
1006     return 0;
1007 }
1008 
1009 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1010                                           const fd_set *fds,
1011                                           int n)
1012 {
1013     int i, nw, j, k;
1014     abi_long v;
1015     abi_ulong *target_fds;
1016 
1017     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1018     if (!(target_fds = lock_user(VERIFY_WRITE,
1019                                  target_fds_addr,
1020                                  sizeof(abi_ulong) * nw,
1021                                  0)))
1022         return -TARGET_EFAULT;
1023 
1024     k = 0;
1025     for (i = 0; i < nw; i++) {
1026         v = 0;
1027         for (j = 0; j < TARGET_ABI_BITS; j++) {
1028             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1029             k++;
1030         }
1031         __put_user(v, &target_fds[i]);
1032     }
1033 
1034     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1035 
1036     return 0;
1037 }
1038 #endif
1039 
1040 #if defined(__alpha__)
1041 #define HOST_HZ 1024
1042 #else
1043 #define HOST_HZ 100
1044 #endif
1045 
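/* Rescale a clock_t tick count from the host's HZ to the target's HZ, so
 * that tick values reported by syscalls such as times() represent the
 * same amount of time on both sides. */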
1046 static inline abi_long host_to_target_clock_t(long ticks)
1047 {
1048 #if HOST_HZ == TARGET_HZ
1049     return ticks;
1050 #else
1051     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1052 #endif
1053 }
1054 
1055 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1056                                              const struct rusage *rusage)
1057 {
1058     struct target_rusage *target_rusage;
1059 
1060     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1061         return -TARGET_EFAULT;
1062     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1063     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1064     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1065     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1066     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1067     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1068     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1069     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1070     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1071     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1072     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1073     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1074     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1075     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1076     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1077     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1078     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1079     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1080     unlock_user_struct(target_rusage, target_addr, 1);
1081 
1082     return 0;
1083 }
1084 
1085 #ifdef TARGET_NR_setrlimit
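/* Convert a target rlim value to the host rlim_t, mapping
 * TARGET_RLIM_INFINITY (and any value that does not fit in rlim_t)
 * to RLIM_INFINITY. */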
1086 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1087 {
1088     abi_ulong target_rlim_swap;
1089     rlim_t result;
1090 
1091     target_rlim_swap = tswapal(target_rlim);
1092     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1093         return RLIM_INFINITY;
1094 
1095     result = target_rlim_swap;
1096     if (target_rlim_swap != (rlim_t)result)
1097         return RLIM_INFINITY;
1098 
1099     return result;
1100 }
1101 #endif
1102 
1103 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1104 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1105 {
1106     abi_ulong target_rlim_swap;
1107     abi_ulong result;
1108 
1109     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1110         target_rlim_swap = TARGET_RLIM_INFINITY;
1111     else
1112         target_rlim_swap = rlim;
1113     result = tswapal(target_rlim_swap);
1114 
1115     return result;
1116 }
1117 #endif
1118 
1119 static inline int target_to_host_resource(int code)
1120 {
1121     switch (code) {
1122     case TARGET_RLIMIT_AS:
1123         return RLIMIT_AS;
1124     case TARGET_RLIMIT_CORE:
1125         return RLIMIT_CORE;
1126     case TARGET_RLIMIT_CPU:
1127         return RLIMIT_CPU;
1128     case TARGET_RLIMIT_DATA:
1129         return RLIMIT_DATA;
1130     case TARGET_RLIMIT_FSIZE:
1131         return RLIMIT_FSIZE;
1132     case TARGET_RLIMIT_LOCKS:
1133         return RLIMIT_LOCKS;
1134     case TARGET_RLIMIT_MEMLOCK:
1135         return RLIMIT_MEMLOCK;
1136     case TARGET_RLIMIT_MSGQUEUE:
1137         return RLIMIT_MSGQUEUE;
1138     case TARGET_RLIMIT_NICE:
1139         return RLIMIT_NICE;
1140     case TARGET_RLIMIT_NOFILE:
1141         return RLIMIT_NOFILE;
1142     case TARGET_RLIMIT_NPROC:
1143         return RLIMIT_NPROC;
1144     case TARGET_RLIMIT_RSS:
1145         return RLIMIT_RSS;
1146     case TARGET_RLIMIT_RTPRIO:
1147         return RLIMIT_RTPRIO;
1148     case TARGET_RLIMIT_SIGPENDING:
1149         return RLIMIT_SIGPENDING;
1150     case TARGET_RLIMIT_STACK:
1151         return RLIMIT_STACK;
1152     default:
1153         return code;
1154     }
1155 }
1156 
1157 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1158                                               abi_ulong target_tv_addr)
1159 {
1160     struct target_timeval *target_tv;
1161 
1162     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1163         return -TARGET_EFAULT;
1164     }
1165 
1166     __get_user(tv->tv_sec, &target_tv->tv_sec);
1167     __get_user(tv->tv_usec, &target_tv->tv_usec);
1168 
1169     unlock_user_struct(target_tv, target_tv_addr, 0);
1170 
1171     return 0;
1172 }
1173 
1174 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1175                                             const struct timeval *tv)
1176 {
1177     struct target_timeval *target_tv;
1178 
1179     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1180         return -TARGET_EFAULT;
1181     }
1182 
1183     __put_user(tv->tv_sec, &target_tv->tv_sec);
1184     __put_user(tv->tv_usec, &target_tv->tv_usec);
1185 
1186     unlock_user_struct(target_tv, target_tv_addr, 1);
1187 
1188     return 0;
1189 }
1190 
1191 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1192 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1193                                                 abi_ulong target_tv_addr)
1194 {
1195     struct target__kernel_sock_timeval *target_tv;
1196 
1197     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1198         return -TARGET_EFAULT;
1199     }
1200 
1201     __get_user(tv->tv_sec, &target_tv->tv_sec);
1202     __get_user(tv->tv_usec, &target_tv->tv_usec);
1203 
1204     unlock_user_struct(target_tv, target_tv_addr, 0);
1205 
1206     return 0;
1207 }
1208 #endif
1209 
1210 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1211                                               const struct timeval *tv)
1212 {
1213     struct target__kernel_sock_timeval *target_tv;
1214 
1215     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1216         return -TARGET_EFAULT;
1217     }
1218 
1219     __put_user(tv->tv_sec, &target_tv->tv_sec);
1220     __put_user(tv->tv_usec, &target_tv->tv_usec);
1221 
1222     unlock_user_struct(target_tv, target_tv_addr, 1);
1223 
1224     return 0;
1225 }
1226 
1227 #if defined(TARGET_NR_futex) || \
1228     defined(TARGET_NR_rt_sigtimedwait) || \
1229     defined(TARGET_NR_pselect6) || \
1230     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1231     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1232     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1233     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1234     defined(TARGET_NR_timer_settime) || \
1235     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1236 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1237                                                abi_ulong target_addr)
1238 {
1239     struct target_timespec *target_ts;
1240 
1241     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1242         return -TARGET_EFAULT;
1243     }
1244     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1245     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1246     unlock_user_struct(target_ts, target_addr, 0);
1247     return 0;
1248 }
1249 #endif
1250 
1251 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1252     defined(TARGET_NR_timer_settime64) || \
1253     defined(TARGET_NR_mq_timedsend_time64) || \
1254     defined(TARGET_NR_mq_timedreceive_time64) || \
1255     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1256     defined(TARGET_NR_clock_nanosleep_time64) || \
1257     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1258     defined(TARGET_NR_utimensat) || \
1259     defined(TARGET_NR_utimensat_time64) || \
1260     defined(TARGET_NR_semtimedop_time64) || \
1261     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1262 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1263                                                  abi_ulong target_addr)
1264 {
1265     struct target__kernel_timespec *target_ts;
1266 
1267     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1268         return -TARGET_EFAULT;
1269     }
1270     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1271     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1272     /* in 32-bit mode, this drops the padding */
1273     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1274     unlock_user_struct(target_ts, target_addr, 0);
1275     return 0;
1276 }
1277 #endif
1278 
1279 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1280                                                struct timespec *host_ts)
1281 {
1282     struct target_timespec *target_ts;
1283 
1284     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1285         return -TARGET_EFAULT;
1286     }
1287     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1288     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1289     unlock_user_struct(target_ts, target_addr, 1);
1290     return 0;
1291 }
1292 
1293 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1294                                                  struct timespec *host_ts)
1295 {
1296     struct target__kernel_timespec *target_ts;
1297 
1298     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1299         return -TARGET_EFAULT;
1300     }
1301     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1302     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1303     unlock_user_struct(target_ts, target_addr, 1);
1304     return 0;
1305 }
1306 
1307 #if defined(TARGET_NR_gettimeofday)
1308 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1309                                              struct timezone *tz)
1310 {
1311     struct target_timezone *target_tz;
1312 
1313     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1314         return -TARGET_EFAULT;
1315     }
1316 
1317     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1318     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1319 
1320     unlock_user_struct(target_tz, target_tz_addr, 1);
1321 
1322     return 0;
1323 }
1324 #endif
1325 
1326 #if defined(TARGET_NR_settimeofday)
1327 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1328                                                abi_ulong target_tz_addr)
1329 {
1330     struct target_timezone *target_tz;
1331 
1332     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1333         return -TARGET_EFAULT;
1334     }
1335 
1336     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1337     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1338 
1339     unlock_user_struct(target_tz, target_tz_addr, 0);
1340 
1341     return 0;
1342 }
1343 #endif
1344 
1345 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1346 #include <mqueue.h>
1347 
1348 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1349                                               abi_ulong target_mq_attr_addr)
1350 {
1351     struct target_mq_attr *target_mq_attr;
1352 
1353     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1354                           target_mq_attr_addr, 1))
1355         return -TARGET_EFAULT;
1356 
1357     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1358     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1359     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1360     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1361 
1362     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1363 
1364     return 0;
1365 }
1366 
1367 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1368                                             const struct mq_attr *attr)
1369 {
1370     struct target_mq_attr *target_mq_attr;
1371 
1372     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1373                           target_mq_attr_addr, 0))
1374         return -TARGET_EFAULT;
1375 
1376     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1377     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1378     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1379     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1380 
1381     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1382 
1383     return 0;
1384 }
1385 #endif
1386 
1387 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1388 /* do_select() must return target values and target errnos. */
1389 static abi_long do_select(int n,
1390                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1391                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1392 {
1393     fd_set rfds, wfds, efds;
1394     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1395     struct timeval tv;
1396     struct timespec ts, *ts_ptr;
1397     abi_long ret;
1398 
1399     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1400     if (ret) {
1401         return ret;
1402     }
1403     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1404     if (ret) {
1405         return ret;
1406     }
1407     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1408     if (ret) {
1409         return ret;
1410     }
1411 
1412     if (target_tv_addr) {
1413         if (copy_from_user_timeval(&tv, target_tv_addr))
1414             return -TARGET_EFAULT;
1415         ts.tv_sec = tv.tv_sec;
1416         ts.tv_nsec = tv.tv_usec * 1000;
1417         ts_ptr = &ts;
1418     } else {
1419         ts_ptr = NULL;
1420     }
1421 
1422     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1423                                   ts_ptr, NULL));
1424 
1425     if (!is_error(ret)) {
1426         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1427             return -TARGET_EFAULT;
1428         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1429             return -TARGET_EFAULT;
1430         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1431             return -TARGET_EFAULT;
1432 
1433         if (target_tv_addr) {
1434             tv.tv_sec = ts.tv_sec;
1435             tv.tv_usec = ts.tv_nsec / 1000;
1436             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1437                 return -TARGET_EFAULT;
1438             }
1439         }
1440     }
1441 
1442     return ret;
1443 }
1444 
1445 #if defined(TARGET_WANT_OLD_SYS_SELECT)
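/* The old-style select ABI passes a single pointer to a block containing
 * all five arguments (n, inp, outp, exp, tvp); unpack it and forward to
 * do_select(). */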
1446 static abi_long do_old_select(abi_ulong arg1)
1447 {
1448     struct target_sel_arg_struct *sel;
1449     abi_ulong inp, outp, exp, tvp;
1450     long nsel;
1451 
1452     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1453         return -TARGET_EFAULT;
1454     }
1455 
1456     nsel = tswapal(sel->n);
1457     inp = tswapal(sel->inp);
1458     outp = tswapal(sel->outp);
1459     exp = tswapal(sel->exp);
1460     tvp = tswapal(sel->tvp);
1461 
1462     unlock_user_struct(sel, arg1, 0);
1463 
1464     return do_select(nsel, inp, outp, exp, tvp);
1465 }
1466 #endif
1467 #endif
1468 
1469 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
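/* Common implementation for pselect6 and pselect6_time64; 'time64'
 * selects whether the timeout uses the 64-bit target__kernel_timespec
 * layout or the traditional target timespec. */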
1470 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1471                             abi_long arg4, abi_long arg5, abi_long arg6,
1472                             bool time64)
1473 {
1474     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1475     fd_set rfds, wfds, efds;
1476     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1477     struct timespec ts, *ts_ptr;
1478     abi_long ret;
1479 
1480     /*
1481      * The 6th arg is actually two args smashed together,
1482      * so we cannot use the C library.
1483      */
1484     sigset_t set;
1485     struct {
1486         sigset_t *set;
1487         size_t size;
1488     } sig, *sig_ptr;
1489 
1490     abi_ulong arg_sigset, arg_sigsize, *arg7;
1491     target_sigset_t *target_sigset;
1492 
1493     n = arg1;
1494     rfd_addr = arg2;
1495     wfd_addr = arg3;
1496     efd_addr = arg4;
1497     ts_addr = arg5;
1498 
1499     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1500     if (ret) {
1501         return ret;
1502     }
1503     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1504     if (ret) {
1505         return ret;
1506     }
1507     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1508     if (ret) {
1509         return ret;
1510     }
1511 
1512     /*
1513      * This takes a timespec, and not a timeval, so we cannot
1514      * use the do_select() helper ...
1515      */
1516     if (ts_addr) {
1517         if (time64) {
1518             if (target_to_host_timespec64(&ts, ts_addr)) {
1519                 return -TARGET_EFAULT;
1520             }
1521         } else {
1522             if (target_to_host_timespec(&ts, ts_addr)) {
1523                 return -TARGET_EFAULT;
1524             }
1525         }
1526         ts_ptr = &ts;
1527     } else {
1528         ts_ptr = NULL;
1529     }
1530 
1531     /* Extract the two packed args for the sigset */
1532     if (arg6) {
1533         sig_ptr = &sig;
1534         sig.size = SIGSET_T_SIZE;
1535 
1536         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1537         if (!arg7) {
1538             return -TARGET_EFAULT;
1539         }
1540         arg_sigset = tswapal(arg7[0]);
1541         arg_sigsize = tswapal(arg7[1]);
1542         unlock_user(arg7, arg6, 0);
1543 
1544         if (arg_sigset) {
1545             sig.set = &set;
1546             if (arg_sigsize != sizeof(*target_sigset)) {
1547                 /* Like the kernel, we enforce correct size sigsets */
1548                 return -TARGET_EINVAL;
1549             }
1550             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1551                                       sizeof(*target_sigset), 1);
1552             if (!target_sigset) {
1553                 return -TARGET_EFAULT;
1554             }
1555             target_to_host_sigset(&set, target_sigset);
1556             unlock_user(target_sigset, arg_sigset, 0);
1557         } else {
1558             sig.set = NULL;
1559         }
1560     } else {
1561         sig_ptr = NULL;
1562     }
1563 
1564     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1565                                   ts_ptr, sig_ptr));
1566 
1567     if (!is_error(ret)) {
1568         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1569             return -TARGET_EFAULT;
1570         }
1571         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1572             return -TARGET_EFAULT;
1573         }
1574         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1575             return -TARGET_EFAULT;
1576         }
1577         if (time64) {
1578             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1579                 return -TARGET_EFAULT;
1580             }
1581         } else {
1582             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1583                 return -TARGET_EFAULT;
1584             }
1585         }
1586     }
1587     return ret;
1588 }
1589 #endif
1590 
1591 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1592     defined(TARGET_NR_ppoll_time64)
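/*
 * do_ppoll() backs poll(), ppoll() and ppoll_time64.  For plain poll()
 * (ppoll == false) the millisecond timeout in arg3 is converted into a
 * struct timespec and the request is issued through safe_ppoll() with no
 * signal mask.
 */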
1593 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1594                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1595 {
1596     struct target_pollfd *target_pfd;
1597     unsigned int nfds = arg2;
1598     struct pollfd *pfd;
1599     unsigned int i;
1600     abi_long ret;
1601 
1602     pfd = NULL;
1603     target_pfd = NULL;
1604     if (nfds) {
1605         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1606             return -TARGET_EINVAL;
1607         }
1608         target_pfd = lock_user(VERIFY_WRITE, arg1,
1609                                sizeof(struct target_pollfd) * nfds, 1);
1610         if (!target_pfd) {
1611             return -TARGET_EFAULT;
1612         }
1613 
1614         pfd = alloca(sizeof(struct pollfd) * nfds);
1615         for (i = 0; i < nfds; i++) {
1616             pfd[i].fd = tswap32(target_pfd[i].fd);
1617             pfd[i].events = tswap16(target_pfd[i].events);
1618         }
1619     }
1620     if (ppoll) {
1621         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1622         target_sigset_t *target_set;
1623         sigset_t _set, *set = &_set;
1624 
1625         if (arg3) {
1626             if (time64) {
1627                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1628                     unlock_user(target_pfd, arg1, 0);
1629                     return -TARGET_EFAULT;
1630                 }
1631             } else {
1632                 if (target_to_host_timespec(timeout_ts, arg3)) {
1633                     unlock_user(target_pfd, arg1, 0);
1634                     return -TARGET_EFAULT;
1635                 }
1636             }
1637         } else {
1638             timeout_ts = NULL;
1639         }
1640 
1641         if (arg4) {
1642             if (arg5 != sizeof(target_sigset_t)) {
1643                 unlock_user(target_pfd, arg1, 0);
1644                 return -TARGET_EINVAL;
1645             }
1646 
1647             target_set = lock_user(VERIFY_READ, arg4,
1648                                    sizeof(target_sigset_t), 1);
1649             if (!target_set) {
1650                 unlock_user(target_pfd, arg1, 0);
1651                 return -TARGET_EFAULT;
1652             }
1653             target_to_host_sigset(set, target_set);
1654         } else {
1655             set = NULL;
1656         }
1657 
1658         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1659                                    set, SIGSET_T_SIZE));
1660 
1661         if (!is_error(ret) && arg3) {
1662             if (time64) {
1663                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1664                     return -TARGET_EFAULT;
1665                 }
1666             } else {
1667                 if (host_to_target_timespec(arg3, timeout_ts)) {
1668                     return -TARGET_EFAULT;
1669                 }
1670             }
1671         }
1672         if (arg4) {
1673             unlock_user(target_set, arg4, 0);
1674         }
1675     } else {
1676         struct timespec ts, *pts;
1677
1678         if (arg3 >= 0) {
1679             /* Convert ms to secs, ns */
1680             ts.tv_sec = arg3 / 1000;
1681             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1682             pts = &ts;
1683         } else {
1684             /* A negative poll() timeout means "infinite" */
1685             pts = NULL;
1686         }
1687         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1688     }
1689 
1690     if (!is_error(ret)) {
1691         for (i = 0; i < nfds; i++) {
1692             target_pfd[i].revents = tswap16(pfd[i].revents);
1693         }
1694     }
1695     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1696     return ret;
1697 }
1698 #endif
1699 
1700 static abi_long do_pipe2(int host_pipe[], int flags)
1701 {
1702 #ifdef CONFIG_PIPE2
1703     return pipe2(host_pipe, flags);
1704 #else
1705     return -ENOSYS;
1706 #endif
1707 }
1708 
1709 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1710                         int flags, int is_pipe2)
1711 {
1712     int host_pipe[2];
1713     abi_long ret;
1714     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1715 
1716     if (is_error(ret))
1717         return get_errno(ret);
1718 
1719     /* Several targets have special calling conventions for the original
1720        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
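    /* On those targets the kernel hands both descriptors back in
     * registers: fd[0] as the syscall result and fd[1] in a second
     * register, which the per-target assignments below reproduce.
     */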
1721     if (!is_pipe2) {
1722 #if defined(TARGET_ALPHA)
1723         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1724         return host_pipe[0];
1725 #elif defined(TARGET_MIPS)
1726         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1727         return host_pipe[0];
1728 #elif defined(TARGET_SH4)
1729         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1730         return host_pipe[0];
1731 #elif defined(TARGET_SPARC)
1732         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1733         return host_pipe[0];
1734 #endif
1735     }
1736 
1737     if (put_user_s32(host_pipe[0], pipedes)
1738         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1739         return -TARGET_EFAULT;
1740     return get_errno(ret);
1741 }
1742 
1743 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1744                                               abi_ulong target_addr,
1745                                               socklen_t len)
1746 {
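    /* The guest may pass either the short struct ip_mreq (no ifindex
     * field) or the longer struct ip_mreqn; imr_ifindex is converted
     * only when the longer form was supplied (see the length check
     * below).
     */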
1747     struct target_ip_mreqn *target_smreqn;
1748 
1749     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1750     if (!target_smreqn)
1751         return -TARGET_EFAULT;
1752     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1753     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1754     if (len == sizeof(struct target_ip_mreqn))
1755         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1756     unlock_user(target_smreqn, target_addr, 0);
1757 
1758     return 0;
1759 }
1760 
1761 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1762                                                abi_ulong target_addr,
1763                                                socklen_t len)
1764 {
1765     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1766     sa_family_t sa_family;
1767     struct target_sockaddr *target_saddr;
1768 
1769     if (fd_trans_target_to_host_addr(fd)) {
1770         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1771     }
1772 
1773     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1774     if (!target_saddr)
1775         return -TARGET_EFAULT;
1776 
1777     sa_family = tswap16(target_saddr->sa_family);
1778 
1779     /* The caller might send an incomplete sun_path; sun_path
1780      * must be terminated by \0 (see the manual page), but
1781      * unfortunately it is quite common to specify the sockaddr_un
1782      * length as "strlen(x->sun_path)" when it should be
1783      * "strlen(...) + 1". We fix that up here if needed;
1784      * the Linux kernel has a similar fixup.
1785      */
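    /* Example: a guest that passes
     * len = offsetof(struct sockaddr_un, sun_path) + strlen(path)
     * leaves the trailing '\0' outside the reported length; the check
     * below extends len by one byte so the host sees a terminated path.
     */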
1786 
1787     if (sa_family == AF_UNIX) {
1788         if (len < unix_maxlen && len > 0) {
1789             char *cp = (char *)target_saddr;
1790
1791             if (cp[len - 1] && !cp[len])
1792                 len++;
1793         }
1794         if (len > unix_maxlen)
1795             len = unix_maxlen;
1796     }
1797 
1798     memcpy(addr, target_saddr, len);
1799     addr->sa_family = sa_family;
1800     if (sa_family == AF_NETLINK) {
1801         struct sockaddr_nl *nladdr;
1802 
1803         nladdr = (struct sockaddr_nl *)addr;
1804         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1805         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1806     } else if (sa_family == AF_PACKET) {
1807         struct target_sockaddr_ll *lladdr;
1808
1809         lladdr = (struct target_sockaddr_ll *)addr;
1810         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1811         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1812     }
1813     unlock_user(target_saddr, target_addr, 0);
1814 
1815     return 0;
1816 }
1817 
1818 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1819                                                struct sockaddr *addr,
1820                                                socklen_t len)
1821 {
1822     struct target_sockaddr *target_saddr;
1823 
1824     if (len == 0) {
1825         return 0;
1826     }
1827     assert(addr);
1828 
1829     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1830     if (!target_saddr)
1831         return -TARGET_EFAULT;
1832     memcpy(target_saddr, addr, len);
1833     if (len >= offsetof(struct target_sockaddr, sa_family) +
1834         sizeof(target_saddr->sa_family)) {
1835         target_saddr->sa_family = tswap16(addr->sa_family);
1836     }
1837     if (addr->sa_family == AF_NETLINK &&
1838         len >= sizeof(struct target_sockaddr_nl)) {
1839         struct target_sockaddr_nl *target_nl =
1840                (struct target_sockaddr_nl *)target_saddr;
1841         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1842         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1843     } else if (addr->sa_family == AF_PACKET) {
1844         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1845         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1846         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1847     } else if (addr->sa_family == AF_INET6 &&
1848                len >= sizeof(struct target_sockaddr_in6)) {
1849         struct target_sockaddr_in6 *target_in6 =
1850                (struct target_sockaddr_in6 *)target_saddr;
1851         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1852     }
1853     unlock_user(target_saddr, target_addr, len);
1854 
1855     return 0;
1856 }
1857 
1858 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1859                                            struct target_msghdr *target_msgh)
1860 {
1861     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1862     abi_long msg_controllen;
1863     abi_ulong target_cmsg_addr;
1864     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1865     socklen_t space = 0;
1866 
1867     msg_controllen = tswapal(target_msgh->msg_controllen);
1868     if (msg_controllen < sizeof (struct target_cmsghdr))
1869         goto the_end;
1870     target_cmsg_addr = tswapal(target_msgh->msg_control);
1871     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1872     target_cmsg_start = target_cmsg;
1873     if (!target_cmsg)
1874         return -TARGET_EFAULT;
1875 
1876     while (cmsg && target_cmsg) {
1877         void *data = CMSG_DATA(cmsg);
1878         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1879 
1880         int len = tswapal(target_cmsg->cmsg_len)
1881             - sizeof(struct target_cmsghdr);
1882 
1883         space += CMSG_SPACE(len);
1884         if (space > msgh->msg_controllen) {
1885             space -= CMSG_SPACE(len);
1886             /* This is a QEMU bug, since we allocated the payload
1887              * area ourselves (unlike overflow in host-to-target
1888              * conversion, which is just the guest giving us a buffer
1889              * that's too small). It can't happen for the payload types
1890              * we currently support; if it becomes an issue in future
1891              * we would need to improve our allocation strategy to
1892              * something more intelligent than "twice the size of the
1893              * target buffer we're reading from".
1894              */
1895             qemu_log_mask(LOG_UNIMP,
1896                           ("Unsupported ancillary data %d/%d: "
1897                            "unhandled msg size\n"),
1898                           tswap32(target_cmsg->cmsg_level),
1899                           tswap32(target_cmsg->cmsg_type));
1900             break;
1901         }
1902 
1903         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1904             cmsg->cmsg_level = SOL_SOCKET;
1905         } else {
1906             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1907         }
1908         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1909         cmsg->cmsg_len = CMSG_LEN(len);
1910 
1911         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1912             int *fd = (int *)data;
1913             int *target_fd = (int *)target_data;
1914             int i, numfds = len / sizeof(int);
1915 
1916             for (i = 0; i < numfds; i++) {
1917                 __get_user(fd[i], target_fd + i);
1918             }
1919         } else if (cmsg->cmsg_level == SOL_SOCKET
1920                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1921             struct ucred *cred = (struct ucred *)data;
1922             struct target_ucred *target_cred =
1923                 (struct target_ucred *)target_data;
1924 
1925             __get_user(cred->pid, &target_cred->pid);
1926             __get_user(cred->uid, &target_cred->uid);
1927             __get_user(cred->gid, &target_cred->gid);
1928         } else {
1929             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1930                           cmsg->cmsg_level, cmsg->cmsg_type);
1931             memcpy(data, target_data, len);
1932         }
1933 
1934         cmsg = CMSG_NXTHDR(msgh, cmsg);
1935         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1936                                          target_cmsg_start);
1937     }
1938     unlock_user(target_cmsg, target_cmsg_addr, 0);
1939  the_end:
1940     msgh->msg_controllen = space;
1941     return 0;
1942 }
1943 
1944 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1945                                            struct msghdr *msgh)
1946 {
1947     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1948     abi_long msg_controllen;
1949     abi_ulong target_cmsg_addr;
1950     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1951     socklen_t space = 0;
1952 
1953     msg_controllen = tswapal(target_msgh->msg_controllen);
1954     if (msg_controllen < sizeof (struct target_cmsghdr))
1955         goto the_end;
1956     target_cmsg_addr = tswapal(target_msgh->msg_control);
1957     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1958     target_cmsg_start = target_cmsg;
1959     if (!target_cmsg)
1960         return -TARGET_EFAULT;
1961 
1962     while (cmsg && target_cmsg) {
1963         void *data = CMSG_DATA(cmsg);
1964         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1965 
1966         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1967         int tgt_len, tgt_space;
1968 
1969         /* We never copy a half-header but may copy half-data;
1970          * this is Linux's behaviour in put_cmsg(). Note that
1971          * truncation here is a guest problem (which we report
1972          * to the guest via the CTRUNC bit), unlike truncation
1973          * in target_to_host_cmsg, which is a QEMU bug.
1974          */
1975         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1976             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1977             break;
1978         }
1979 
1980         if (cmsg->cmsg_level == SOL_SOCKET) {
1981             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1982         } else {
1983             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1984         }
1985         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1986 
1987         /* Payload types which need a different size of payload on
1988          * the target must adjust tgt_len here.
1989          */
1990         tgt_len = len;
1991         switch (cmsg->cmsg_level) {
1992         case SOL_SOCKET:
1993             switch (cmsg->cmsg_type) {
1994             case SO_TIMESTAMP:
1995                 tgt_len = sizeof(struct target_timeval);
1996                 break;
1997             default:
1998                 break;
1999             }
2000             break;
2001         default:
2002             break;
2003         }
2004 
2005         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2006             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2007             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2008         }
2009 
2010         /* We must now copy-and-convert len bytes of payload
2011          * into tgt_len bytes of destination space. Bear in mind
2012          * that in both source and destination we may be dealing
2013          * with a truncated value!
2014          */
2015         switch (cmsg->cmsg_level) {
2016         case SOL_SOCKET:
2017             switch (cmsg->cmsg_type) {
2018             case SCM_RIGHTS:
2019             {
2020                 int *fd = (int *)data;
2021                 int *target_fd = (int *)target_data;
2022                 int i, numfds = tgt_len / sizeof(int);
2023 
2024                 for (i = 0; i < numfds; i++) {
2025                     __put_user(fd[i], target_fd + i);
2026                 }
2027                 break;
2028             }
2029             case SO_TIMESTAMP:
2030             {
2031                 struct timeval *tv = (struct timeval *)data;
2032                 struct target_timeval *target_tv =
2033                     (struct target_timeval *)target_data;
2034 
2035                 if (len != sizeof(struct timeval) ||
2036                     tgt_len != sizeof(struct target_timeval)) {
2037                     goto unimplemented;
2038                 }
2039 
2040                 /* copy struct timeval to target */
2041                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2042                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2043                 break;
2044             }
2045             case SCM_CREDENTIALS:
2046             {
2047                 struct ucred *cred = (struct ucred *)data;
2048                 struct target_ucred *target_cred =
2049                     (struct target_ucred *)target_data;
2050 
2051                 __put_user(cred->pid, &target_cred->pid);
2052                 __put_user(cred->uid, &target_cred->uid);
2053                 __put_user(cred->gid, &target_cred->gid);
2054                 break;
2055             }
2056             default:
2057                 goto unimplemented;
2058             }
2059             break;
2060 
2061         case SOL_IP:
2062             switch (cmsg->cmsg_type) {
2063             case IP_TTL:
2064             {
2065                 uint32_t *v = (uint32_t *)data;
2066                 uint32_t *t_int = (uint32_t *)target_data;
2067 
2068                 if (len != sizeof(uint32_t) ||
2069                     tgt_len != sizeof(uint32_t)) {
2070                     goto unimplemented;
2071                 }
2072                 __put_user(*v, t_int);
2073                 break;
2074             }
2075             case IP_RECVERR:
2076             {
2077                 struct errhdr_t {
2078                    struct sock_extended_err ee;
2079                    struct sockaddr_in offender;
2080                 };
2081                 struct errhdr_t *errh = (struct errhdr_t *)data;
2082                 struct errhdr_t *target_errh =
2083                     (struct errhdr_t *)target_data;
2084 
2085                 if (len != sizeof(struct errhdr_t) ||
2086                     tgt_len != sizeof(struct errhdr_t)) {
2087                     goto unimplemented;
2088                 }
2089                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2090                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2091                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2092                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2093                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2094                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2095                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2096                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2097                     (void *) &errh->offender, sizeof(errh->offender));
2098                 break;
2099             }
2100             default:
2101                 goto unimplemented;
2102             }
2103             break;
2104 
2105         case SOL_IPV6:
2106             switch (cmsg->cmsg_type) {
2107             case IPV6_HOPLIMIT:
2108             {
2109                 uint32_t *v = (uint32_t *)data;
2110                 uint32_t *t_int = (uint32_t *)target_data;
2111 
2112                 if (len != sizeof(uint32_t) ||
2113                     tgt_len != sizeof(uint32_t)) {
2114                     goto unimplemented;
2115                 }
2116                 __put_user(*v, t_int);
2117                 break;
2118             }
2119             case IPV6_RECVERR:
2120             {
2121                 struct errhdr6_t {
2122                    struct sock_extended_err ee;
2123                    struct sockaddr_in6 offender;
2124                 };
2125                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2126                 struct errhdr6_t *target_errh =
2127                     (struct errhdr6_t *)target_data;
2128 
2129                 if (len != sizeof(struct errhdr6_t) ||
2130                     tgt_len != sizeof(struct errhdr6_t)) {
2131                     goto unimplemented;
2132                 }
2133                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2134                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2135                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2136                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2137                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2138                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2139                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2140                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2141                     (void *) &errh->offender, sizeof(errh->offender));
2142                 break;
2143             }
2144             default:
2145                 goto unimplemented;
2146             }
2147             break;
2148 
2149         default:
2150         unimplemented:
2151             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2152                           cmsg->cmsg_level, cmsg->cmsg_type);
2153             memcpy(target_data, data, MIN(len, tgt_len));
2154             if (tgt_len > len) {
2155                 memset(target_data + len, 0, tgt_len - len);
2156             }
2157         }
2158 
2159         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2160         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2161         if (msg_controllen < tgt_space) {
2162             tgt_space = msg_controllen;
2163         }
2164         msg_controllen -= tgt_space;
2165         space += tgt_space;
2166         cmsg = CMSG_NXTHDR(msgh, cmsg);
2167         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2168                                          target_cmsg_start);
2169     }
2170     unlock_user(target_cmsg, target_cmsg_addr, space);
2171  the_end:
2172     target_msgh->msg_controllen = tswapal(space);
2173     return 0;
2174 }
2175 
2176 /* do_setsockopt() must return target values and target errnos. */
2177 static abi_long do_setsockopt(int sockfd, int level, int optname,
2178                               abi_ulong optval_addr, socklen_t optlen)
2179 {
2180     abi_long ret;
2181     int val;
2182     struct ip_mreqn *ip_mreq;
2183     struct ip_mreq_source *ip_mreq_source;
2184 
2185     switch(level) {
2186     case SOL_TCP:
2187         /* TCP options all take an 'int' value.  */
2188         if (optlen < sizeof(uint32_t))
2189             return -TARGET_EINVAL;
2190 
2191         if (get_user_u32(val, optval_addr))
2192             return -TARGET_EFAULT;
2193         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2194         break;
2195     case SOL_IP:
2196         switch(optname) {
2197         case IP_TOS:
2198         case IP_TTL:
2199         case IP_HDRINCL:
2200         case IP_ROUTER_ALERT:
2201         case IP_RECVOPTS:
2202         case IP_RETOPTS:
2203         case IP_PKTINFO:
2204         case IP_MTU_DISCOVER:
2205         case IP_RECVERR:
2206         case IP_RECVTTL:
2207         case IP_RECVTOS:
2208 #ifdef IP_FREEBIND
2209         case IP_FREEBIND:
2210 #endif
2211         case IP_MULTICAST_TTL:
2212         case IP_MULTICAST_LOOP:
2213             val = 0;
2214             if (optlen >= sizeof(uint32_t)) {
2215                 if (get_user_u32(val, optval_addr))
2216                     return -TARGET_EFAULT;
2217             } else if (optlen >= 1) {
2218                 if (get_user_u8(val, optval_addr))
2219                     return -TARGET_EFAULT;
2220             }
2221             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2222             break;
2223         case IP_ADD_MEMBERSHIP:
2224         case IP_DROP_MEMBERSHIP:
2225             if (optlen < sizeof (struct target_ip_mreq) ||
2226                 optlen > sizeof (struct target_ip_mreqn))
2227                 return -TARGET_EINVAL;
2228 
2229             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2230             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2231             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2232             break;
2233 
2234         case IP_BLOCK_SOURCE:
2235         case IP_UNBLOCK_SOURCE:
2236         case IP_ADD_SOURCE_MEMBERSHIP:
2237         case IP_DROP_SOURCE_MEMBERSHIP:
2238             if (optlen != sizeof (struct target_ip_mreq_source))
2239                 return -TARGET_EINVAL;
2240 
2241             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2242             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2243             unlock_user(ip_mreq_source, optval_addr, 0);
2244             break;
2245 
2246         default:
2247             goto unimplemented;
2248         }
2249         break;
2250     case SOL_IPV6:
2251         switch (optname) {
2252         case IPV6_MTU_DISCOVER:
2253         case IPV6_MTU:
2254         case IPV6_V6ONLY:
2255         case IPV6_RECVPKTINFO:
2256         case IPV6_UNICAST_HOPS:
2257         case IPV6_MULTICAST_HOPS:
2258         case IPV6_MULTICAST_LOOP:
2259         case IPV6_RECVERR:
2260         case IPV6_RECVHOPLIMIT:
2261         case IPV6_2292HOPLIMIT:
2262         case IPV6_CHECKSUM:
2263         case IPV6_ADDRFORM:
2264         case IPV6_2292PKTINFO:
2265         case IPV6_RECVTCLASS:
2266         case IPV6_RECVRTHDR:
2267         case IPV6_2292RTHDR:
2268         case IPV6_RECVHOPOPTS:
2269         case IPV6_2292HOPOPTS:
2270         case IPV6_RECVDSTOPTS:
2271         case IPV6_2292DSTOPTS:
2272         case IPV6_TCLASS:
2273 #ifdef IPV6_RECVPATHMTU
2274         case IPV6_RECVPATHMTU:
2275 #endif
2276 #ifdef IPV6_TRANSPARENT
2277         case IPV6_TRANSPARENT:
2278 #endif
2279 #ifdef IPV6_FREEBIND
2280         case IPV6_FREEBIND:
2281 #endif
2282 #ifdef IPV6_RECVORIGDSTADDR
2283         case IPV6_RECVORIGDSTADDR:
2284 #endif
2285             val = 0;
2286             if (optlen < sizeof(uint32_t)) {
2287                 return -TARGET_EINVAL;
2288             }
2289             if (get_user_u32(val, optval_addr)) {
2290                 return -TARGET_EFAULT;
2291             }
2292             ret = get_errno(setsockopt(sockfd, level, optname,
2293                                        &val, sizeof(val)));
2294             break;
2295         case IPV6_PKTINFO:
2296         {
2297             struct in6_pktinfo pki;
2298 
2299             if (optlen < sizeof(pki)) {
2300                 return -TARGET_EINVAL;
2301             }
2302 
2303             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2304                 return -TARGET_EFAULT;
2305             }
2306 
2307             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2308 
2309             ret = get_errno(setsockopt(sockfd, level, optname,
2310                                        &pki, sizeof(pki)));
2311             break;
2312         }
2313         case IPV6_ADD_MEMBERSHIP:
2314         case IPV6_DROP_MEMBERSHIP:
2315         {
2316             struct ipv6_mreq ipv6mreq;
2317 
2318             if (optlen < sizeof(ipv6mreq)) {
2319                 return -TARGET_EINVAL;
2320             }
2321 
2322             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2323                 return -TARGET_EFAULT;
2324             }
2325 
2326             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2327 
2328             ret = get_errno(setsockopt(sockfd, level, optname,
2329                                        &ipv6mreq, sizeof(ipv6mreq)));
2330             break;
2331         }
2332         default:
2333             goto unimplemented;
2334         }
2335         break;
2336     case SOL_ICMPV6:
2337         switch (optname) {
2338         case ICMPV6_FILTER:
2339         {
2340             struct icmp6_filter icmp6f;
2341 
2342             if (optlen > sizeof(icmp6f)) {
2343                 optlen = sizeof(icmp6f);
2344             }
2345 
2346             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2347                 return -TARGET_EFAULT;
2348             }
2349 
2350             for (val = 0; val < 8; val++) {
2351                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2352             }
2353 
2354             ret = get_errno(setsockopt(sockfd, level, optname,
2355                                        &icmp6f, optlen));
2356             break;
2357         }
2358         default:
2359             goto unimplemented;
2360         }
2361         break;
2362     case SOL_RAW:
2363         switch (optname) {
2364         case ICMP_FILTER:
2365         case IPV6_CHECKSUM:
2366             /* These take a u32 value. */
2367             if (optlen < sizeof(uint32_t)) {
2368                 return -TARGET_EINVAL;
2369             }
2370 
2371             if (get_user_u32(val, optval_addr)) {
2372                 return -TARGET_EFAULT;
2373             }
2374             ret = get_errno(setsockopt(sockfd, level, optname,
2375                                        &val, sizeof(val)));
2376             break;
2377 
2378         default:
2379             goto unimplemented;
2380         }
2381         break;
2382 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2383     case SOL_ALG:
2384         switch (optname) {
2385         case ALG_SET_KEY:
2386         {
2387             char *alg_key = g_malloc(optlen);
2388 
2389             if (!alg_key) {
2390                 return -TARGET_ENOMEM;
2391             }
2392             if (copy_from_user(alg_key, optval_addr, optlen)) {
2393                 g_free(alg_key);
2394                 return -TARGET_EFAULT;
2395             }
2396             ret = get_errno(setsockopt(sockfd, level, optname,
2397                                        alg_key, optlen));
2398             g_free(alg_key);
2399             break;
2400         }
2401         case ALG_SET_AEAD_AUTHSIZE:
2402         {
2403             ret = get_errno(setsockopt(sockfd, level, optname,
2404                                        NULL, optlen));
2405             break;
2406         }
2407         default:
2408             goto unimplemented;
2409         }
2410         break;
2411 #endif
2412     case TARGET_SOL_SOCKET:
2413         switch (optname) {
2414         case TARGET_SO_RCVTIMEO:
2415         {
2416                 struct timeval tv;
2417 
2418                 optname = SO_RCVTIMEO;
2419 
2420 set_timeout:
2421                 if (optlen != sizeof(struct target_timeval)) {
2422                     return -TARGET_EINVAL;
2423                 }
2424 
2425                 if (copy_from_user_timeval(&tv, optval_addr)) {
2426                     return -TARGET_EFAULT;
2427                 }
2428 
2429                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2430                                 &tv, sizeof(tv)));
2431                 return ret;
2432         }
2433         case TARGET_SO_SNDTIMEO:
2434                 optname = SO_SNDTIMEO;
2435                 goto set_timeout;
2436         case TARGET_SO_ATTACH_FILTER:
2437         {
2438                 struct target_sock_fprog *tfprog;
2439                 struct target_sock_filter *tfilter;
2440                 struct sock_fprog fprog;
2441                 struct sock_filter *filter;
2442                 int i;
2443 
2444                 if (optlen != sizeof(*tfprog)) {
2445                     return -TARGET_EINVAL;
2446                 }
2447                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2448                     return -TARGET_EFAULT;
2449                 }
2450                 if (!lock_user_struct(VERIFY_READ, tfilter,
2451                                       tswapal(tfprog->filter), 0)) {
2452                     unlock_user_struct(tfprog, optval_addr, 1);
2453                     return -TARGET_EFAULT;
2454                 }
2455 
2456                 fprog.len = tswap16(tfprog->len);
2457                 filter = g_try_new(struct sock_filter, fprog.len);
2458                 if (filter == NULL) {
2459                     unlock_user_struct(tfilter, tfprog->filter, 1);
2460                     unlock_user_struct(tfprog, optval_addr, 1);
2461                     return -TARGET_ENOMEM;
2462                 }
2463                 for (i = 0; i < fprog.len; i++) {
2464                     filter[i].code = tswap16(tfilter[i].code);
2465                     filter[i].jt = tfilter[i].jt;
2466                     filter[i].jf = tfilter[i].jf;
2467                     filter[i].k = tswap32(tfilter[i].k);
2468                 }
2469                 fprog.filter = filter;
2470 
2471                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2472                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2473                 g_free(filter);
2474 
2475                 unlock_user_struct(tfilter, tfprog->filter, 1);
2476                 unlock_user_struct(tfprog, optval_addr, 1);
2477                 return ret;
2478         }
2479         case TARGET_SO_BINDTODEVICE:
2480         {
2481                 char *dev_ifname, *addr_ifname;
2482
2483                 if (optlen > IFNAMSIZ - 1) {
2484                     optlen = IFNAMSIZ - 1;
2485                 }
2486                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2487                 if (!dev_ifname) {
2488                     return -TARGET_EFAULT;
2489                 }
2490                 optname = SO_BINDTODEVICE;
2491                 addr_ifname = alloca(IFNAMSIZ);
2492                 memcpy(addr_ifname, dev_ifname, optlen);
2493                 addr_ifname[optlen] = 0;
2494                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2495                                            addr_ifname, optlen));
2496                 unlock_user(dev_ifname, optval_addr, 0);
2497                 return ret;
2498         }
2499         case TARGET_SO_LINGER:
2500         {
2501                 struct linger lg;
2502                 struct target_linger *tlg;
2503 
2504                 if (optlen != sizeof(struct target_linger)) {
2505                     return -TARGET_EINVAL;
2506                 }
2507                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2508                     return -TARGET_EFAULT;
2509                 }
2510                 __get_user(lg.l_onoff, &tlg->l_onoff);
2511                 __get_user(lg.l_linger, &tlg->l_linger);
2512                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2513                                 &lg, sizeof(lg)));
2514                 unlock_user_struct(tlg, optval_addr, 0);
2515                 return ret;
2516         }
2517             /* Options with 'int' argument.  */
2518         case TARGET_SO_DEBUG:
2519                 optname = SO_DEBUG;
2520                 break;
2521         case TARGET_SO_REUSEADDR:
2522                 optname = SO_REUSEADDR;
2523                 break;
2524 #ifdef SO_REUSEPORT
2525         case TARGET_SO_REUSEPORT:
2526                 optname = SO_REUSEPORT;
2527                 break;
2528 #endif
2529         case TARGET_SO_TYPE:
2530                 optname = SO_TYPE;
2531                 break;
2532         case TARGET_SO_ERROR:
2533                 optname = SO_ERROR;
2534                 break;
2535         case TARGET_SO_DONTROUTE:
2536                 optname = SO_DONTROUTE;
2537                 break;
2538         case TARGET_SO_BROADCAST:
2539                 optname = SO_BROADCAST;
2540                 break;
2541         case TARGET_SO_SNDBUF:
2542                 optname = SO_SNDBUF;
2543                 break;
2544         case TARGET_SO_SNDBUFFORCE:
2545                 optname = SO_SNDBUFFORCE;
2546                 break;
2547         case TARGET_SO_RCVBUF:
2548                 optname = SO_RCVBUF;
2549                 break;
2550         case TARGET_SO_RCVBUFFORCE:
2551                 optname = SO_RCVBUFFORCE;
2552                 break;
2553         case TARGET_SO_KEEPALIVE:
2554                 optname = SO_KEEPALIVE;
2555                 break;
2556         case TARGET_SO_OOBINLINE:
2557                 optname = SO_OOBINLINE;
2558                 break;
2559         case TARGET_SO_NO_CHECK:
2560                 optname = SO_NO_CHECK;
2561                 break;
2562         case TARGET_SO_PRIORITY:
2563                 optname = SO_PRIORITY;
2564                 break;
2565 #ifdef SO_BSDCOMPAT
2566         case TARGET_SO_BSDCOMPAT:
2567                 optname = SO_BSDCOMPAT;
2568                 break;
2569 #endif
2570         case TARGET_SO_PASSCRED:
2571                 optname = SO_PASSCRED;
2572                 break;
2573         case TARGET_SO_PASSSEC:
2574                 optname = SO_PASSSEC;
2575                 break;
2576         case TARGET_SO_TIMESTAMP:
2577                 optname = SO_TIMESTAMP;
2578                 break;
2579         case TARGET_SO_RCVLOWAT:
2580                 optname = SO_RCVLOWAT;
2581                 break;
2582         default:
2583             goto unimplemented;
2584         }
2585         if (optlen < sizeof(uint32_t))
2586             return -TARGET_EINVAL;
2587
2588         if (get_user_u32(val, optval_addr))
2589             return -TARGET_EFAULT;
2590         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2591         break;
2592 #ifdef SOL_NETLINK
2593     case SOL_NETLINK:
2594         switch (optname) {
2595         case NETLINK_PKTINFO:
2596         case NETLINK_ADD_MEMBERSHIP:
2597         case NETLINK_DROP_MEMBERSHIP:
2598         case NETLINK_BROADCAST_ERROR:
2599         case NETLINK_NO_ENOBUFS:
2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2601         case NETLINK_LISTEN_ALL_NSID:
2602         case NETLINK_CAP_ACK:
2603 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2604 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2605         case NETLINK_EXT_ACK:
2606 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2607 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2608         case NETLINK_GET_STRICT_CHK:
2609 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2610             break;
2611         default:
2612             goto unimplemented;
2613         }
2614         val = 0;
2615         if (optlen < sizeof(uint32_t)) {
2616             return -TARGET_EINVAL;
2617         }
2618         if (get_user_u32(val, optval_addr)) {
2619             return -TARGET_EFAULT;
2620         }
2621         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2622                                    sizeof(val)));
2623         break;
2624 #endif /* SOL_NETLINK */
2625     default:
2626     unimplemented:
2627         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2628                       level, optname);
2629         ret = -TARGET_ENOPROTOOPT;
2630     }
2631     return ret;
2632 }
2633 
2634 /* do_getsockopt() must return target values and target errnos. */
2635 static abi_long do_getsockopt(int sockfd, int level, int optname,
2636                               abi_ulong optval_addr, abi_ulong optlen)
2637 {
2638     abi_long ret;
2639     int len, val;
2640     socklen_t lv;
2641 
2642     switch(level) {
2643     case TARGET_SOL_SOCKET:
2644         level = SOL_SOCKET;
2645         switch (optname) {
2646         /* These don't just return a single integer */
2647         case TARGET_SO_PEERNAME:
2648             goto unimplemented;
2649         case TARGET_SO_RCVTIMEO: {
2650             struct timeval tv;
2651             socklen_t tvlen;
2652 
2653             optname = SO_RCVTIMEO;
2654 
2655 get_timeout:
2656             if (get_user_u32(len, optlen)) {
2657                 return -TARGET_EFAULT;
2658             }
2659             if (len < 0) {
2660                 return -TARGET_EINVAL;
2661             }
2662 
2663             tvlen = sizeof(tv);
2664             ret = get_errno(getsockopt(sockfd, level, optname,
2665                                        &tv, &tvlen));
2666             if (ret < 0) {
2667                 return ret;
2668             }
2669             if (len > sizeof(struct target_timeval)) {
2670                 len = sizeof(struct target_timeval);
2671             }
2672             if (copy_to_user_timeval(optval_addr, &tv)) {
2673                 return -TARGET_EFAULT;
2674             }
2675             if (put_user_u32(len, optlen)) {
2676                 return -TARGET_EFAULT;
2677             }
2678             break;
2679         }
2680         case TARGET_SO_SNDTIMEO:
2681             optname = SO_SNDTIMEO;
2682             goto get_timeout;
2683         case TARGET_SO_PEERCRED: {
2684             struct ucred cr;
2685             socklen_t crlen;
2686             struct target_ucred *tcr;
2687 
2688             if (get_user_u32(len, optlen)) {
2689                 return -TARGET_EFAULT;
2690             }
2691             if (len < 0) {
2692                 return -TARGET_EINVAL;
2693             }
2694 
2695             crlen = sizeof(cr);
2696             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2697                                        &cr, &crlen));
2698             if (ret < 0) {
2699                 return ret;
2700             }
2701             if (len > crlen) {
2702                 len = crlen;
2703             }
2704             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2705                 return -TARGET_EFAULT;
2706             }
2707             __put_user(cr.pid, &tcr->pid);
2708             __put_user(cr.uid, &tcr->uid);
2709             __put_user(cr.gid, &tcr->gid);
2710             unlock_user_struct(tcr, optval_addr, 1);
2711             if (put_user_u32(len, optlen)) {
2712                 return -TARGET_EFAULT;
2713             }
2714             break;
2715         }
2716         case TARGET_SO_PEERSEC: {
2717             char *name;
2718 
2719             if (get_user_u32(len, optlen)) {
2720                 return -TARGET_EFAULT;
2721             }
2722             if (len < 0) {
2723                 return -TARGET_EINVAL;
2724             }
2725             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2726             if (!name) {
2727                 return -TARGET_EFAULT;
2728             }
2729             lv = len;
2730             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2731                                        name, &lv));
2732             if (put_user_u32(lv, optlen)) {
2733                 ret = -TARGET_EFAULT;
2734             }
2735             unlock_user(name, optval_addr, lv);
2736             break;
2737         }
2738         case TARGET_SO_LINGER:
2739         {
2740             struct linger lg;
2741             socklen_t lglen;
2742             struct target_linger *tlg;
2743 
2744             if (get_user_u32(len, optlen)) {
2745                 return -TARGET_EFAULT;
2746             }
2747             if (len < 0) {
2748                 return -TARGET_EINVAL;
2749             }
2750 
2751             lglen = sizeof(lg);
2752             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2753                                        &lg, &lglen));
2754             if (ret < 0) {
2755                 return ret;
2756             }
2757             if (len > lglen) {
2758                 len = lglen;
2759             }
2760             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2761                 return -TARGET_EFAULT;
2762             }
2763             __put_user(lg.l_onoff, &tlg->l_onoff);
2764             __put_user(lg.l_linger, &tlg->l_linger);
2765             unlock_user_struct(tlg, optval_addr, 1);
2766             if (put_user_u32(len, optlen)) {
2767                 return -TARGET_EFAULT;
2768             }
2769             break;
2770         }
2771         /* Options with 'int' argument.  */
2772         case TARGET_SO_DEBUG:
2773             optname = SO_DEBUG;
2774             goto int_case;
2775         case TARGET_SO_REUSEADDR:
2776             optname = SO_REUSEADDR;
2777             goto int_case;
2778 #ifdef SO_REUSEPORT
2779         case TARGET_SO_REUSEPORT:
2780             optname = SO_REUSEPORT;
2781             goto int_case;
2782 #endif
2783         case TARGET_SO_TYPE:
2784             optname = SO_TYPE;
2785             goto int_case;
2786         case TARGET_SO_ERROR:
2787             optname = SO_ERROR;
2788             goto int_case;
2789         case TARGET_SO_DONTROUTE:
2790             optname = SO_DONTROUTE;
2791             goto int_case;
2792         case TARGET_SO_BROADCAST:
2793             optname = SO_BROADCAST;
2794             goto int_case;
2795         case TARGET_SO_SNDBUF:
2796             optname = SO_SNDBUF;
2797             goto int_case;
2798         case TARGET_SO_RCVBUF:
2799             optname = SO_RCVBUF;
2800             goto int_case;
2801         case TARGET_SO_KEEPALIVE:
2802             optname = SO_KEEPALIVE;
2803             goto int_case;
2804         case TARGET_SO_OOBINLINE:
2805             optname = SO_OOBINLINE;
2806             goto int_case;
2807         case TARGET_SO_NO_CHECK:
2808             optname = SO_NO_CHECK;
2809             goto int_case;
2810         case TARGET_SO_PRIORITY:
2811             optname = SO_PRIORITY;
2812             goto int_case;
2813 #ifdef SO_BSDCOMPAT
2814         case TARGET_SO_BSDCOMPAT:
2815             optname = SO_BSDCOMPAT;
2816             goto int_case;
2817 #endif
2818         case TARGET_SO_PASSCRED:
2819             optname = SO_PASSCRED;
2820             goto int_case;
2821         case TARGET_SO_TIMESTAMP:
2822             optname = SO_TIMESTAMP;
2823             goto int_case;
2824         case TARGET_SO_RCVLOWAT:
2825             optname = SO_RCVLOWAT;
2826             goto int_case;
2827         case TARGET_SO_ACCEPTCONN:
2828             optname = SO_ACCEPTCONN;
2829             goto int_case;
2830         default:
2831             goto int_case;
2832         }
2833         break;
2834     case SOL_TCP:
2835         /* TCP options all take an 'int' value.  */
2836     int_case:
2837         if (get_user_u32(len, optlen))
2838             return -TARGET_EFAULT;
2839         if (len < 0)
2840             return -TARGET_EINVAL;
2841         lv = sizeof(lv);
2842         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2843         if (ret < 0)
2844             return ret;
2845         if (optname == SO_TYPE) {
2846             val = host_to_target_sock_type(val);
2847         }
2848         if (len > lv)
2849             len = lv;
2850         if (len == 4) {
2851             if (put_user_u32(val, optval_addr))
2852                 return -TARGET_EFAULT;
2853         } else {
2854             if (put_user_u8(val, optval_addr))
2855                 return -TARGET_EFAULT;
2856         }
2857         if (put_user_u32(len, optlen))
2858             return -TARGET_EFAULT;
2859         break;
2860     case SOL_IP:
2861         switch(optname) {
2862         case IP_TOS:
2863         case IP_TTL:
2864         case IP_HDRINCL:
2865         case IP_ROUTER_ALERT:
2866         case IP_RECVOPTS:
2867         case IP_RETOPTS:
2868         case IP_PKTINFO:
2869         case IP_MTU_DISCOVER:
2870         case IP_RECVERR:
2871         case IP_RECVTOS:
2872 #ifdef IP_FREEBIND
2873         case IP_FREEBIND:
2874 #endif
2875         case IP_MULTICAST_TTL:
2876         case IP_MULTICAST_LOOP:
2877             if (get_user_u32(len, optlen))
2878                 return -TARGET_EFAULT;
2879             if (len < 0)
2880                 return -TARGET_EINVAL;
2881             lv = sizeof(lv);
2882             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2883             if (ret < 0)
2884                 return ret;
2885             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2886                 len = 1;
2887                 if (put_user_u32(len, optlen)
2888                     || put_user_u8(val, optval_addr))
2889                     return -TARGET_EFAULT;
2890             } else {
2891                 if (len > sizeof(int))
2892                     len = sizeof(int);
2893                 if (put_user_u32(len, optlen)
2894                     || put_user_u32(val, optval_addr))
2895                     return -TARGET_EFAULT;
2896             }
2897             break;
2898         default:
2899             ret = -TARGET_ENOPROTOOPT;
2900             break;
2901         }
2902         break;
2903     case SOL_IPV6:
2904         switch (optname) {
2905         case IPV6_MTU_DISCOVER:
2906         case IPV6_MTU:
2907         case IPV6_V6ONLY:
2908         case IPV6_RECVPKTINFO:
2909         case IPV6_UNICAST_HOPS:
2910         case IPV6_MULTICAST_HOPS:
2911         case IPV6_MULTICAST_LOOP:
2912         case IPV6_RECVERR:
2913         case IPV6_RECVHOPLIMIT:
2914         case IPV6_2292HOPLIMIT:
2915         case IPV6_CHECKSUM:
2916         case IPV6_ADDRFORM:
2917         case IPV6_2292PKTINFO:
2918         case IPV6_RECVTCLASS:
2919         case IPV6_RECVRTHDR:
2920         case IPV6_2292RTHDR:
2921         case IPV6_RECVHOPOPTS:
2922         case IPV6_2292HOPOPTS:
2923         case IPV6_RECVDSTOPTS:
2924         case IPV6_2292DSTOPTS:
2925         case IPV6_TCLASS:
2926 #ifdef IPV6_RECVPATHMTU
2927         case IPV6_RECVPATHMTU:
2928 #endif
2929 #ifdef IPV6_TRANSPARENT
2930         case IPV6_TRANSPARENT:
2931 #endif
2932 #ifdef IPV6_FREEBIND
2933         case IPV6_FREEBIND:
2934 #endif
2935 #ifdef IPV6_RECVORIGDSTADDR
2936         case IPV6_RECVORIGDSTADDR:
2937 #endif
2938             if (get_user_u32(len, optlen))
2939                 return -TARGET_EFAULT;
2940             if (len < 0)
2941                 return -TARGET_EINVAL;
2942             lv = sizeof(lv);
2943             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2944             if (ret < 0)
2945                 return ret;
2946             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2947                 len = 1;
2948                 if (put_user_u32(len, optlen)
2949                     || put_user_u8(val, optval_addr))
2950                     return -TARGET_EFAULT;
2951             } else {
2952                 if (len > sizeof(int))
2953                     len = sizeof(int);
2954                 if (put_user_u32(len, optlen)
2955                     || put_user_u32(val, optval_addr))
2956                     return -TARGET_EFAULT;
2957             }
2958             break;
2959         default:
2960             ret = -TARGET_ENOPROTOOPT;
2961             break;
2962         }
2963         break;
2964 #ifdef SOL_NETLINK
2965     case SOL_NETLINK:
2966         switch (optname) {
2967         case NETLINK_PKTINFO:
2968         case NETLINK_BROADCAST_ERROR:
2969         case NETLINK_NO_ENOBUFS:
2970 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2971         case NETLINK_LISTEN_ALL_NSID:
2972         case NETLINK_CAP_ACK:
2973 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2974 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2975         case NETLINK_EXT_ACK:
2976 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2977 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2978         case NETLINK_GET_STRICT_CHK:
2979 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2980             if (get_user_u32(len, optlen)) {
2981                 return -TARGET_EFAULT;
2982             }
2983             if (len != sizeof(val)) {
2984                 return -TARGET_EINVAL;
2985             }
2986             lv = len;
2987             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2988             if (ret < 0) {
2989                 return ret;
2990             }
2991             if (put_user_u32(lv, optlen)
2992                 || put_user_u32(val, optval_addr)) {
2993                 return -TARGET_EFAULT;
2994             }
2995             break;
2996 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2997         case NETLINK_LIST_MEMBERSHIPS:
2998         {
2999             uint32_t *results;
3000             int i;
3001             if (get_user_u32(len, optlen)) {
3002                 return -TARGET_EFAULT;
3003             }
3004             if (len < 0) {
3005                 return -TARGET_EINVAL;
3006             }
3007             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3008             if (!results) {
3009                 return -TARGET_EFAULT;
3010             }
3011             lv = len;
3012             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3013             if (ret < 0) {
3014                 unlock_user(results, optval_addr, 0);
3015                 return ret;
3016             }
3017             /* Swap host endianness to target endianness. */
3018             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3019                 results[i] = tswap32(results[i]);
3020             }
3021             if (put_user_u32(lv, optlen)) {
3022                 return -TARGET_EFAULT;
3023             }
3024             unlock_user(results, optval_addr, 0);
3025             break;
3026         }
3027 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3028         default:
3029             goto unimplemented;
3030         }
3031         break;
3032 #endif /* SOL_NETLINK */
3033     default:
3034     unimplemented:
3035         qemu_log_mask(LOG_UNIMP,
3036                       "getsockopt level=%d optname=%d not yet supported\n",
3037                       level, optname);
3038         ret = -TARGET_EOPNOTSUPP;
3039         break;
3040     }
3041     return ret;
3042 }
3043 
3044 /* Convert target low/high pair representing file offset into the host
3045  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3046  * as the kernel doesn't handle them either.
3047  */
3048 static void target_to_host_low_high(abi_ulong tlow,
3049                                     abi_ulong thigh,
3050                                     unsigned long *hlow,
3051                                     unsigned long *hhigh)
3052 {
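    /*
     * The shift by TARGET_LONG_BITS / 2 is split in two so we never shift by
     * the full width of the 64-bit type (which would be undefined behaviour);
     * when the target long is 64 bits wide this simply discards thigh,
     * matching the comment above.
     */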
3053     uint64_t off = tlow |
3054         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3055         TARGET_LONG_BITS / 2;
3056 
3057     *hlow = off;
3058     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3059 }
3060 
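/* Lock the guest iovec array at target_addr and build a matching host
 * struct iovec array. On error, NULL is returned with errno set (a zero
 * count also yields NULL, but with errno cleared); on success the caller
 * must release the buffers with unlock_iovec().
 */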
3061 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3062                                 abi_ulong count, int copy)
3063 {
3064     struct target_iovec *target_vec;
3065     struct iovec *vec;
3066     abi_ulong total_len, max_len;
3067     int i;
3068     int err = 0;
3069     bool bad_address = false;
3070 
3071     if (count == 0) {
3072         errno = 0;
3073         return NULL;
3074     }
3075     if (count > IOV_MAX) {
3076         errno = EINVAL;
3077         return NULL;
3078     }
3079 
3080     vec = g_try_new0(struct iovec, count);
3081     if (vec == NULL) {
3082         errno = ENOMEM;
3083         return NULL;
3084     }
3085 
3086     target_vec = lock_user(VERIFY_READ, target_addr,
3087                            count * sizeof(struct target_iovec), 1);
3088     if (target_vec == NULL) {
3089         err = EFAULT;
3090         goto fail2;
3091     }
3092 
3093     /* ??? If host page size > target page size, this will result in a
3094        value larger than what we can actually support.  */
3095     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3096     total_len = 0;
3097 
3098     for (i = 0; i < count; i++) {
3099         abi_ulong base = tswapal(target_vec[i].iov_base);
3100         abi_long len = tswapal(target_vec[i].iov_len);
3101 
3102         if (len < 0) {
3103             err = EINVAL;
3104             goto fail;
3105         } else if (len == 0) {
3106             /* Zero length pointer is ignored.  */
3107             vec[i].iov_base = 0;
3108         } else {
3109             vec[i].iov_base = lock_user(type, base, len, copy);
3110             /* If the first buffer pointer is bad, this is a fault.  But
3111              * subsequent bad buffers will result in a partial write; this
3112              * is realized by filling the vector with null pointers and
3113              * zero lengths. */
3114             if (!vec[i].iov_base) {
3115                 if (i == 0) {
3116                     err = EFAULT;
3117                     goto fail;
3118                 } else {
3119                     bad_address = true;
3120                 }
3121             }
3122             if (bad_address) {
3123                 len = 0;
3124             }
3125             if (len > max_len - total_len) {
3126                 len = max_len - total_len;
3127             }
3128         }
3129         vec[i].iov_len = len;
3130         total_len += len;
3131     }
3132 
3133     unlock_user(target_vec, target_addr, 0);
3134     return vec;
3135 
3136  fail:
3137     while (--i >= 0) {
3138         if (tswapal(target_vec[i].iov_len) > 0) {
3139             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3140         }
3141     }
3142     unlock_user(target_vec, target_addr, 0);
3143  fail2:
3144     g_free(vec);
3145     errno = err;
3146     return NULL;
3147 }
3148 
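/* Undo lock_iovec(): unlock every guest buffer, copying data back to the
 * guest when 'copy' is set, and free the host iovec array.
 */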
3149 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3150                          abi_ulong count, int copy)
3151 {
3152     struct target_iovec *target_vec;
3153     int i;
3154 
3155     target_vec = lock_user(VERIFY_READ, target_addr,
3156                            count * sizeof(struct target_iovec), 1);
3157     if (target_vec) {
3158         for (i = 0; i < count; i++) {
3159             abi_ulong base = tswapal(target_vec[i].iov_base);
3160             abi_long len = tswapal(target_vec[i].iov_len);
3161             if (len < 0) {
3162                 break;
3163             }
3164             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3165         }
3166         unlock_user(target_vec, target_addr, 0);
3167     }
3168 
3169     g_free(vec);
3170 }
3171 
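/* Translate the target SOCK_* type and its CLOEXEC/NONBLOCK flag bits into
 * host values; returns -TARGET_EINVAL for flags the host cannot represent.
 */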
3172 static inline int target_to_host_sock_type(int *type)
3173 {
3174     int host_type = 0;
3175     int target_type = *type;
3176 
3177     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3178     case TARGET_SOCK_DGRAM:
3179         host_type = SOCK_DGRAM;
3180         break;
3181     case TARGET_SOCK_STREAM:
3182         host_type = SOCK_STREAM;
3183         break;
3184     default:
3185         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3186         break;
3187     }
3188     if (target_type & TARGET_SOCK_CLOEXEC) {
3189 #if defined(SOCK_CLOEXEC)
3190         host_type |= SOCK_CLOEXEC;
3191 #else
3192         return -TARGET_EINVAL;
3193 #endif
3194     }
3195     if (target_type & TARGET_SOCK_NONBLOCK) {
3196 #if defined(SOCK_NONBLOCK)
3197         host_type |= SOCK_NONBLOCK;
3198 #elif !defined(O_NONBLOCK)
3199         return -TARGET_EINVAL;
3200 #endif
3201     }
3202     *type = host_type;
3203     return 0;
3204 }
3205 
3206 /* Try to emulate socket type flags after socket creation.  */
3207 static int sock_flags_fixup(int fd, int target_type)
3208 {
3209 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3210     if (target_type & TARGET_SOCK_NONBLOCK) {
3211         int flags = fcntl(fd, F_GETFL);
3212         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3213             close(fd);
3214             return -TARGET_EINVAL;
3215         }
3216     }
3217 #endif
3218     return fd;
3219 }
3220 
3221 /* do_socket() Must return target values and target errnos. */
3222 static abi_long do_socket(int domain, int type, int protocol)
3223 {
3224     int target_type = type;
3225     int ret;
3226 
3227     ret = target_to_host_sock_type(&type);
3228     if (ret) {
3229         return ret;
3230     }
3231 
3232     if (domain == PF_NETLINK && !(
3233 #ifdef CONFIG_RTNETLINK
3234          protocol == NETLINK_ROUTE ||
3235 #endif
3236          protocol == NETLINK_KOBJECT_UEVENT ||
3237          protocol == NETLINK_AUDIT)) {
3238         return -TARGET_EPROTONOSUPPORT;
3239     }
3240 
3241     if (domain == AF_PACKET ||
3242         (domain == AF_INET && type == SOCK_PACKET)) {
3243         protocol = tswap16(protocol);
3244     }
3245 
3246     ret = get_errno(socket(domain, type, protocol));
3247     if (ret >= 0) {
3248         ret = sock_flags_fixup(ret, target_type);
3249         if (type == SOCK_PACKET) {
3250             /* Handle an obsolete case:
3251              * if the socket type is SOCK_PACKET, bind by name.
3252              */
3253             fd_trans_register(ret, &target_packet_trans);
3254         } else if (domain == PF_NETLINK) {
3255             switch (protocol) {
3256 #ifdef CONFIG_RTNETLINK
3257             case NETLINK_ROUTE:
3258                 fd_trans_register(ret, &target_netlink_route_trans);
3259                 break;
3260 #endif
3261             case NETLINK_KOBJECT_UEVENT:
3262                 /* nothing to do: messages are strings */
3263                 break;
3264             case NETLINK_AUDIT:
3265                 fd_trans_register(ret, &target_netlink_audit_trans);
3266                 break;
3267             default:
3268                 g_assert_not_reached();
3269             }
3270         }
3271     }
3272     return ret;
3273 }
3274 
3275 /* do_bind() Must return target values and target errnos. */
3276 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3277                         socklen_t addrlen)
3278 {
3279     void *addr;
3280     abi_long ret;
3281 
3282     if ((int)addrlen < 0) {
3283         return -TARGET_EINVAL;
3284     }
3285 
3286     addr = alloca(addrlen+1);
3287 
3288     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3289     if (ret)
3290         return ret;
3291 
3292     return get_errno(bind(sockfd, addr, addrlen));
3293 }
3294 
3295 /* do_connect() Must return target values and target errnos. */
3296 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3297                            socklen_t addrlen)
3298 {
3299     void *addr;
3300     abi_long ret;
3301 
3302     if ((int)addrlen < 0) {
3303         return -TARGET_EINVAL;
3304     }
3305 
3306     addr = alloca(addrlen+1);
3307 
3308     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3309     if (ret)
3310         return ret;
3311 
3312     return get_errno(safe_connect(sockfd, addr, addrlen));
3313 }
3314 
3315 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3316 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3317                                       int flags, int send)
3318 {
3319     abi_long ret, len;
3320     struct msghdr msg;
3321     abi_ulong count;
3322     struct iovec *vec;
3323     abi_ulong target_vec;
3324 
3325     if (msgp->msg_name) {
3326         msg.msg_namelen = tswap32(msgp->msg_namelen);
3327         msg.msg_name = alloca(msg.msg_namelen+1);
3328         ret = target_to_host_sockaddr(fd, msg.msg_name,
3329                                       tswapal(msgp->msg_name),
3330                                       msg.msg_namelen);
3331         if (ret == -TARGET_EFAULT) {
3332             /* For connected sockets msg_name and msg_namelen must
3333              * be ignored, so returning EFAULT immediately is wrong.
3334              * Instead, pass a bad msg_name to the host kernel, and
3335              * let it decide whether to return EFAULT or not.
3336              */
3337             msg.msg_name = (void *)-1;
3338         } else if (ret) {
3339             goto out2;
3340         }
3341     } else {
3342         msg.msg_name = NULL;
3343         msg.msg_namelen = 0;
3344     }
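    /*
     * Over-allocate the host control buffer (twice the target size), since
     * converted host cmsg headers and their alignment can need more room
     * than the target layout.
     */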
3345     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3346     msg.msg_control = alloca(msg.msg_controllen);
3347     memset(msg.msg_control, 0, msg.msg_controllen);
3348 
3349     msg.msg_flags = tswap32(msgp->msg_flags);
3350 
3351     count = tswapal(msgp->msg_iovlen);
3352     target_vec = tswapal(msgp->msg_iov);
3353 
3354     if (count > IOV_MAX) {
3355         /* sendmsg/recvmsg return a different errno for this condition than
3356          * readv/writev, so we must catch it here before lock_iovec() does.
3357          */
3358         ret = -TARGET_EMSGSIZE;
3359         goto out2;
3360     }
3361 
3362     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3363                      target_vec, count, send);
3364     if (vec == NULL) {
3365         ret = -host_to_target_errno(errno);
3366         goto out2;
3367     }
3368     msg.msg_iovlen = count;
3369     msg.msg_iov = vec;
3370 
3371     if (send) {
3372         if (fd_trans_target_to_host_data(fd)) {
3373             void *host_msg;
3374 
3375             host_msg = g_malloc(msg.msg_iov->iov_len);
3376             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3377             ret = fd_trans_target_to_host_data(fd)(host_msg,
3378                                                    msg.msg_iov->iov_len);
3379             if (ret >= 0) {
3380                 msg.msg_iov->iov_base = host_msg;
3381                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3382             }
3383             g_free(host_msg);
3384         } else {
3385             ret = target_to_host_cmsg(&msg, msgp);
3386             if (ret == 0) {
3387                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3388             }
3389         }
3390     } else {
3391         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3392         if (!is_error(ret)) {
3393             len = ret;
3394             if (fd_trans_host_to_target_data(fd)) {
3395                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3396                                                MIN(msg.msg_iov->iov_len, len));
3397             } else {
3398                 ret = host_to_target_cmsg(msgp, &msg);
3399             }
3400             if (!is_error(ret)) {
3401                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3402                 msgp->msg_flags = tswap32(msg.msg_flags);
3403                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3404                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3405                                     msg.msg_name, msg.msg_namelen);
3406                     if (ret) {
3407                         goto out;
3408                     }
3409                 }
3410 
3411                 ret = len;
3412             }
3413         }
3414     }
3415 
3416 out:
3417     unlock_iovec(vec, target_vec, count, !send);
3418 out2:
3419     return ret;
3420 }
3421 
3422 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3423                                int flags, int send)
3424 {
3425     abi_long ret;
3426     struct target_msghdr *msgp;
3427 
3428     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3429                           msgp,
3430                           target_msg,
3431                           send ? 1 : 0)) {
3432         return -TARGET_EFAULT;
3433     }
3434     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3435     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3436     return ret;
3437 }
3438 
3439 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3440  * so it might not have this *mmsg-specific flag either.
3441  */
3442 #ifndef MSG_WAITFORONE
3443 #define MSG_WAITFORONE 0x10000
3444 #endif
3445 
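/* Emulate sendmmsg()/recvmmsg() by looping over do_sendrecvmsg_locked(),
 * updating msg_len for each datagram processed.
 */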
3446 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3447                                 unsigned int vlen, unsigned int flags,
3448                                 int send)
3449 {
3450     struct target_mmsghdr *mmsgp;
3451     abi_long ret = 0;
3452     int i;
3453 
3454     if (vlen > UIO_MAXIOV) {
3455         vlen = UIO_MAXIOV;
3456     }
3457 
3458     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3459     if (!mmsgp) {
3460         return -TARGET_EFAULT;
3461     }
3462 
3463     for (i = 0; i < vlen; i++) {
3464         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3465         if (is_error(ret)) {
3466             break;
3467         }
3468         mmsgp[i].msg_len = tswap32(ret);
3469         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3470         if (flags & MSG_WAITFORONE) {
3471             flags |= MSG_DONTWAIT;
3472         }
3473     }
3474 
3475     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3476 
3477     /* Return the number of datagrams sent or received if we transferred
3478      * any at all; otherwise return the error.
3479      */
3480     if (i) {
3481         return i;
3482     }
3483     return ret;
3484 }
3485 
3486 /* do_accept4() Must return target values and target errnos. */
3487 static abi_long do_accept4(int fd, abi_ulong target_addr,
3488                            abi_ulong target_addrlen_addr, int flags)
3489 {
3490     socklen_t addrlen, ret_addrlen;
3491     void *addr;
3492     abi_long ret;
3493     int host_flags;
3494 
3495     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3496 
3497     if (target_addr == 0) {
3498         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3499     }
3500 
3501     /* Linux returns EFAULT if the addrlen pointer is invalid */
3502     if (get_user_u32(addrlen, target_addrlen_addr))
3503         return -TARGET_EFAULT;
3504 
3505     if ((int)addrlen < 0) {
3506         return -TARGET_EINVAL;
3507     }
3508 
3509     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3510         return -TARGET_EFAULT;
3511 
3512     addr = alloca(addrlen);
3513 
3514     ret_addrlen = addrlen;
3515     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3516     if (!is_error(ret)) {
3517         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3518         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3519             ret = -TARGET_EFAULT;
3520         }
3521     }
3522     return ret;
3523 }
3524 
3525 /* do_getpeername() Must return target values and target errnos. */
3526 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3527                                abi_ulong target_addrlen_addr)
3528 {
3529     socklen_t addrlen, ret_addrlen;
3530     void *addr;
3531     abi_long ret;
3532 
3533     if (get_user_u32(addrlen, target_addrlen_addr))
3534         return -TARGET_EFAULT;
3535 
3536     if ((int)addrlen < 0) {
3537         return -TARGET_EINVAL;
3538     }
3539 
3540     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3541         return -TARGET_EFAULT;
3542 
3543     addr = alloca(addrlen);
3544 
3545     ret_addrlen = addrlen;
3546     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3547     if (!is_error(ret)) {
3548         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3549         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3550             ret = -TARGET_EFAULT;
3551         }
3552     }
3553     return ret;
3554 }
3555 
3556 /* do_getsockname() Must return target values and target errnos. */
3557 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3558                                abi_ulong target_addrlen_addr)
3559 {
3560     socklen_t addrlen, ret_addrlen;
3561     void *addr;
3562     abi_long ret;
3563 
3564     if (get_user_u32(addrlen, target_addrlen_addr))
3565         return -TARGET_EFAULT;
3566 
3567     if ((int)addrlen < 0) {
3568         return -TARGET_EINVAL;
3569     }
3570 
3571     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3572         return -TARGET_EFAULT;
3573 
3574     addr = alloca(addrlen);
3575 
3576     ret_addrlen = addrlen;
3577     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3578     if (!is_error(ret)) {
3579         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3580         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3581             ret = -TARGET_EFAULT;
3582         }
3583     }
3584     return ret;
3585 }
3586 
3587 /* do_socketpair() Must return target values and target errnos. */
3588 static abi_long do_socketpair(int domain, int type, int protocol,
3589                               abi_ulong target_tab_addr)
3590 {
3591     int tab[2];
3592     abi_long ret;
3593 
3594     target_to_host_sock_type(&type);
3595 
3596     ret = get_errno(socketpair(domain, type, protocol, tab));
3597     if (!is_error(ret)) {
3598         if (put_user_s32(tab[0], target_tab_addr)
3599             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3600             ret = -TARGET_EFAULT;
3601     }
3602     return ret;
3603 }
3604 
3605 /* do_sendto() Must return target values and target errnos. */
3606 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3607                           abi_ulong target_addr, socklen_t addrlen)
3608 {
3609     void *addr;
3610     void *host_msg;
3611     void *copy_msg = NULL;
3612     abi_long ret;
3613 
3614     if ((int)addrlen < 0) {
3615         return -TARGET_EINVAL;
3616     }
3617 
3618     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3619     if (!host_msg)
3620         return -TARGET_EFAULT;
3621     if (fd_trans_target_to_host_data(fd)) {
3622         copy_msg = host_msg;
3623         host_msg = g_malloc(len);
3624         memcpy(host_msg, copy_msg, len);
3625         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3626         if (ret < 0) {
3627             goto fail;
3628         }
3629     }
3630     if (target_addr) {
3631         addr = alloca(addrlen+1);
3632         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3633         if (ret) {
3634             goto fail;
3635         }
3636         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3637     } else {
3638         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3639     }
3640 fail:
3641     if (copy_msg) {
3642         g_free(host_msg);
3643         host_msg = copy_msg;
3644     }
3645     unlock_user(host_msg, msg, 0);
3646     return ret;
3647 }
3648 
3649 /* do_recvfrom() Must return target values and target errnos. */
3650 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3651                             abi_ulong target_addr,
3652                             abi_ulong target_addrlen)
3653 {
3654     socklen_t addrlen, ret_addrlen;
3655     void *addr;
3656     void *host_msg;
3657     abi_long ret;
3658 
3659     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3660     if (!host_msg)
3661         return -TARGET_EFAULT;
3662     if (target_addr) {
3663         if (get_user_u32(addrlen, target_addrlen)) {
3664             ret = -TARGET_EFAULT;
3665             goto fail;
3666         }
3667         if ((int)addrlen < 0) {
3668             ret = -TARGET_EINVAL;
3669             goto fail;
3670         }
3671         addr = alloca(addrlen);
3672         ret_addrlen = addrlen;
3673         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3674                                       addr, &ret_addrlen));
3675     } else {
3676         addr = NULL; /* To keep compiler quiet.  */
3677         addrlen = 0; /* To keep compiler quiet.  */
3678         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3679     }
3680     if (!is_error(ret)) {
3681         if (fd_trans_host_to_target_data(fd)) {
3682             abi_long trans;
3683             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3684             if (is_error(trans)) {
3685                 ret = trans;
3686                 goto fail;
3687             }
3688         }
3689         if (target_addr) {
3690             host_to_target_sockaddr(target_addr, addr,
3691                                     MIN(addrlen, ret_addrlen));
3692             if (put_user_u32(ret_addrlen, target_addrlen)) {
3693                 ret = -TARGET_EFAULT;
3694                 goto fail;
3695             }
3696         }
3697         unlock_user(host_msg, msg, len);
3698     } else {
3699 fail:
3700         unlock_user(host_msg, msg, 0);
3701     }
3702     return ret;
3703 }
3704 
3705 #ifdef TARGET_NR_socketcall
3706 /* do_socketcall() must return target values and target errnos. */
3707 static abi_long do_socketcall(int num, abi_ulong vptr)
3708 {
3709     static const unsigned nargs[] = { /* number of arguments per operation */
3710         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3711         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3712         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3713         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3714         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3715         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3716         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3717         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3718         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3719         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3720         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3721         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3722         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3723         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3724         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3725         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3726         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3727         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3728         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3729         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3730     };
3731     abi_long a[6]; /* max 6 args */
3732     unsigned i;
3733 
3734     /* check the range of the first argument num */
3735     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3736     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3737         return -TARGET_EINVAL;
3738     }
3739     /* ensure we have space for args */
3740     if (nargs[num] > ARRAY_SIZE(a)) {
3741         return -TARGET_EINVAL;
3742     }
3743     /* collect the arguments in a[] according to nargs[] */
3744     for (i = 0; i < nargs[num]; ++i) {
3745         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3746             return -TARGET_EFAULT;
3747         }
3748     }
3749     /* now that we have the args, invoke the appropriate underlying function */
3750     switch (num) {
3751     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3752         return do_socket(a[0], a[1], a[2]);
3753     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3754         return do_bind(a[0], a[1], a[2]);
3755     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3756         return do_connect(a[0], a[1], a[2]);
3757     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3758         return get_errno(listen(a[0], a[1]));
3759     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3760         return do_accept4(a[0], a[1], a[2], 0);
3761     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3762         return do_getsockname(a[0], a[1], a[2]);
3763     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3764         return do_getpeername(a[0], a[1], a[2]);
3765     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3766         return do_socketpair(a[0], a[1], a[2], a[3]);
3767     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3768         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3769     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3770         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3771     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3772         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3773     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3774         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3775     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3776         return get_errno(shutdown(a[0], a[1]));
3777     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3778         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3779     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3780         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3781     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3782         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3783     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3784         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3785     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3786         return do_accept4(a[0], a[1], a[2], a[3]);
3787     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3788         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3789     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3790         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3791     default:
3792         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3793         return -TARGET_EINVAL;
3794     }
3795 }
3796 #endif
3797 
3798 #define N_SHM_REGIONS	32
3799 
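/* Track guest shmat() attachments so do_shmdt() can find the segment size
 * and clear the page flags for the detached range.
 */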
3800 static struct shm_region {
3801     abi_ulong start;
3802     abi_ulong size;
3803     bool in_use;
3804 } shm_regions[N_SHM_REGIONS];
3805 
3806 #ifndef TARGET_SEMID64_DS
3807 /* asm-generic version of this struct */
3808 struct target_semid64_ds
3809 {
3810   struct target_ipc_perm sem_perm;
3811   abi_ulong sem_otime;
3812 #if TARGET_ABI_BITS == 32
3813   abi_ulong __unused1;
3814 #endif
3815   abi_ulong sem_ctime;
3816 #if TARGET_ABI_BITS == 32
3817   abi_ulong __unused2;
3818 #endif
3819   abi_ulong sem_nsems;
3820   abi_ulong __unused3;
3821   abi_ulong __unused4;
3822 };
3823 #endif
3824 
3825 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3826                                                abi_ulong target_addr)
3827 {
3828     struct target_ipc_perm *target_ip;
3829     struct target_semid64_ds *target_sd;
3830 
3831     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3832         return -TARGET_EFAULT;
3833     target_ip = &(target_sd->sem_perm);
3834     host_ip->__key = tswap32(target_ip->__key);
3835     host_ip->uid = tswap32(target_ip->uid);
3836     host_ip->gid = tswap32(target_ip->gid);
3837     host_ip->cuid = tswap32(target_ip->cuid);
3838     host_ip->cgid = tswap32(target_ip->cgid);
3839 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3840     host_ip->mode = tswap32(target_ip->mode);
3841 #else
3842     host_ip->mode = tswap16(target_ip->mode);
3843 #endif
3844 #if defined(TARGET_PPC)
3845     host_ip->__seq = tswap32(target_ip->__seq);
3846 #else
3847     host_ip->__seq = tswap16(target_ip->__seq);
3848 #endif
3849     unlock_user_struct(target_sd, target_addr, 0);
3850     return 0;
3851 }
3852 
3853 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3854                                                struct ipc_perm *host_ip)
3855 {
3856     struct target_ipc_perm *target_ip;
3857     struct target_semid64_ds *target_sd;
3858 
3859     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3860         return -TARGET_EFAULT;
3861     target_ip = &(target_sd->sem_perm);
3862     target_ip->__key = tswap32(host_ip->__key);
3863     target_ip->uid = tswap32(host_ip->uid);
3864     target_ip->gid = tswap32(host_ip->gid);
3865     target_ip->cuid = tswap32(host_ip->cuid);
3866     target_ip->cgid = tswap32(host_ip->cgid);
3867 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3868     target_ip->mode = tswap32(host_ip->mode);
3869 #else
3870     target_ip->mode = tswap16(host_ip->mode);
3871 #endif
3872 #if defined(TARGET_PPC)
3873     target_ip->__seq = tswap32(host_ip->__seq);
3874 #else
3875     target_ip->__seq = tswap16(host_ip->__seq);
3876 #endif
3877     unlock_user_struct(target_sd, target_addr, 1);
3878     return 0;
3879 }
3880 
3881 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3882                                                abi_ulong target_addr)
3883 {
3884     struct target_semid64_ds *target_sd;
3885 
3886     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3887         return -TARGET_EFAULT;
3888     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3889         return -TARGET_EFAULT;
3890     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3891     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3892     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3893     unlock_user_struct(target_sd, target_addr, 0);
3894     return 0;
3895 }
3896 
3897 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3898                                                struct semid_ds *host_sd)
3899 {
3900     struct target_semid64_ds *target_sd;
3901 
3902     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3903         return -TARGET_EFAULT;
3904     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3905         return -TARGET_EFAULT;
3906     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3907     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3908     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3909     unlock_user_struct(target_sd, target_addr, 1);
3910     return 0;
3911 }
3912 
3913 struct target_seminfo {
3914     int semmap;
3915     int semmni;
3916     int semmns;
3917     int semmnu;
3918     int semmsl;
3919     int semopm;
3920     int semume;
3921     int semusz;
3922     int semvmx;
3923     int semaem;
3924 };
3925 
3926 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3927                                               struct seminfo *host_seminfo)
3928 {
3929     struct target_seminfo *target_seminfo;
3930     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3931         return -TARGET_EFAULT;
3932     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3933     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3934     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3935     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3936     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3937     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3938     __put_user(host_seminfo->semume, &target_seminfo->semume);
3939     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3940     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3941     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3942     unlock_user_struct(target_seminfo, target_addr, 1);
3943     return 0;
3944 }
3945 
3946 union semun {
3947 	int val;
3948 	struct semid_ds *buf;
3949 	unsigned short *array;
3950 	struct seminfo *__buf;
3951 };
3952 
3953 union target_semun {
3954 	int val;
3955 	abi_ulong buf;
3956 	abi_ulong array;
3957 	abi_ulong __buf;
3958 };
3959 
3960 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3961                                                abi_ulong target_addr)
3962 {
3963     int nsems;
3964     unsigned short *array;
3965     union semun semun;
3966     struct semid_ds semid_ds;
3967     int i, ret;
3968 
3969     semun.buf = &semid_ds;
3970 
3971     ret = semctl(semid, 0, IPC_STAT, semun);
3972     if (ret == -1)
3973         return get_errno(ret);
3974 
3975     nsems = semid_ds.sem_nsems;
3976 
3977     *host_array = g_try_new(unsigned short, nsems);
3978     if (!*host_array) {
3979         return -TARGET_ENOMEM;
3980     }
3981     array = lock_user(VERIFY_READ, target_addr,
3982                       nsems*sizeof(unsigned short), 1);
3983     if (!array) {
3984         g_free(*host_array);
3985         return -TARGET_EFAULT;
3986     }
3987 
3988     for(i=0; i<nsems; i++) {
3989         __get_user((*host_array)[i], &array[i]);
3990     }
3991     unlock_user(array, target_addr, 0);
3992 
3993     return 0;
3994 }
3995 
3996 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3997                                                unsigned short **host_array)
3998 {
3999     int nsems;
4000     unsigned short *array;
4001     union semun semun;
4002     struct semid_ds semid_ds;
4003     int i, ret;
4004 
4005     semun.buf = &semid_ds;
4006 
4007     ret = semctl(semid, 0, IPC_STAT, semun);
4008     if (ret == -1)
4009         return get_errno(ret);
4010 
4011     nsems = semid_ds.sem_nsems;
4012 
4013     array = lock_user(VERIFY_WRITE, target_addr,
4014                       nsems*sizeof(unsigned short), 0);
4015     if (!array)
4016         return -TARGET_EFAULT;
4017 
4018     for(i=0; i<nsems; i++) {
4019         __put_user((*host_array)[i], &array[i]);
4020     }
4021     g_free(*host_array);
4022     unlock_user(array, target_addr, 1);
4023 
4024     return 0;
4025 }
4026 
4027 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4028                                  abi_ulong target_arg)
4029 {
4030     union target_semun target_su = { .buf = target_arg };
4031     union semun arg;
4032     struct semid_ds dsarg;
4033     unsigned short *array = NULL;
4034     struct seminfo seminfo;
4035     abi_long ret = -TARGET_EINVAL;
4036     abi_long err;
4037     cmd &= 0xff;
4038 
4039     switch( cmd ) {
4040 	case GETVAL:
4041 	case SETVAL:
4042             /* In 64-bit cross-endian situations, we will erroneously pick up
4043              * the wrong half of the union for the "val" element.  To rectify
4044              * this, the entire 8-byte structure is byteswapped, followed by
4045              * a swap of the 4-byte val field. In other cases, the data is
4046              * already in proper host byte order. */
4047 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4048 		target_su.buf = tswapal(target_su.buf);
4049 		arg.val = tswap32(target_su.val);
4050 	    } else {
4051 		arg.val = target_su.val;
4052 	    }
4053             ret = get_errno(semctl(semid, semnum, cmd, arg));
4054             break;
4055 	case GETALL:
4056 	case SETALL:
4057             err = target_to_host_semarray(semid, &array, target_su.array);
4058             if (err)
4059                 return err;
4060             arg.array = array;
4061             ret = get_errno(semctl(semid, semnum, cmd, arg));
4062             err = host_to_target_semarray(semid, target_su.array, &array);
4063             if (err)
4064                 return err;
4065             break;
4066 	case IPC_STAT:
4067 	case IPC_SET:
4068 	case SEM_STAT:
4069             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4070             if (err)
4071                 return err;
4072             arg.buf = &dsarg;
4073             ret = get_errno(semctl(semid, semnum, cmd, arg));
4074             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4075             if (err)
4076                 return err;
4077             break;
4078 	case IPC_INFO:
4079 	case SEM_INFO:
4080             arg.__buf = &seminfo;
4081             ret = get_errno(semctl(semid, semnum, cmd, arg));
4082             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4083             if (err)
4084                 return err;
4085             break;
4086 	case IPC_RMID:
4087 	case GETPID:
4088 	case GETNCNT:
4089 	case GETZCNT:
4090             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4091             break;
4092     }
4093 
4094     return ret;
4095 }
4096 
4097 struct target_sembuf {
4098     unsigned short sem_num;
4099     short sem_op;
4100     short sem_flg;
4101 };
4102 
4103 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4104                                              abi_ulong target_addr,
4105                                              unsigned nsops)
4106 {
4107     struct target_sembuf *target_sembuf;
4108     int i;
4109 
4110     target_sembuf = lock_user(VERIFY_READ, target_addr,
4111                               nsops*sizeof(struct target_sembuf), 1);
4112     if (!target_sembuf)
4113         return -TARGET_EFAULT;
4114 
4115     for(i=0; i<nsops; i++) {
4116         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4117         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4118         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4119     }
4120 
4121     unlock_user(target_sembuf, target_addr, 0);
4122 
4123     return 0;
4124 }
4125 
4126 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4127     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4128 
4129 /*
4130  * This macro is required to handle the s390 variant, which passes the
4131  * arguments in a different order than the default.
4132  */
4133 #ifdef __s390x__
4134 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4135   (__nsops), (__timeout), (__sops)
4136 #else
4137 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4138   (__nsops), 0, (__sops), (__timeout)
4139 #endif
4140 
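/* Perform semop()/semtimedop() for the guest, preferring the native
 * semtimedop syscall and falling back to the multiplexed ipc syscall.
 */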
4141 static inline abi_long do_semtimedop(int semid,
4142                                      abi_long ptr,
4143                                      unsigned nsops,
4144                                      abi_long timeout, bool time64)
4145 {
4146     struct sembuf *sops;
4147     struct timespec ts, *pts = NULL;
4148     abi_long ret;
4149 
4150     if (timeout) {
4151         pts = &ts;
4152         if (time64) {
4153             if (target_to_host_timespec64(pts, timeout)) {
4154                 return -TARGET_EFAULT;
4155             }
4156         } else {
4157             if (target_to_host_timespec(pts, timeout)) {
4158                 return -TARGET_EFAULT;
4159             }
4160         }
4161     }
4162 
4163     if (nsops > TARGET_SEMOPM) {
4164         return -TARGET_E2BIG;
4165     }
4166 
4167     sops = g_new(struct sembuf, nsops);
4168 
4169     if (target_to_host_sembuf(sops, ptr, nsops)) {
4170         g_free(sops);
4171         return -TARGET_EFAULT;
4172     }
4173 
4174     ret = -TARGET_ENOSYS;
4175 #ifdef __NR_semtimedop
4176     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4177 #endif
4178 #ifdef __NR_ipc
4179     if (ret == -TARGET_ENOSYS) {
4180         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4181                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4182     }
4183 #endif
4184     g_free(sops);
4185     return ret;
4186 }
4187 #endif
4188 
4189 struct target_msqid_ds
4190 {
4191     struct target_ipc_perm msg_perm;
4192     abi_ulong msg_stime;
4193 #if TARGET_ABI_BITS == 32
4194     abi_ulong __unused1;
4195 #endif
4196     abi_ulong msg_rtime;
4197 #if TARGET_ABI_BITS == 32
4198     abi_ulong __unused2;
4199 #endif
4200     abi_ulong msg_ctime;
4201 #if TARGET_ABI_BITS == 32
4202     abi_ulong __unused3;
4203 #endif
4204     abi_ulong __msg_cbytes;
4205     abi_ulong msg_qnum;
4206     abi_ulong msg_qbytes;
4207     abi_ulong msg_lspid;
4208     abi_ulong msg_lrpid;
4209     abi_ulong __unused4;
4210     abi_ulong __unused5;
4211 };
4212 
4213 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4214                                                abi_ulong target_addr)
4215 {
4216     struct target_msqid_ds *target_md;
4217 
4218     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4219         return -TARGET_EFAULT;
4220     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4221         return -TARGET_EFAULT;
4222     host_md->msg_stime = tswapal(target_md->msg_stime);
4223     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4224     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4225     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4226     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4227     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4228     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4229     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4230     unlock_user_struct(target_md, target_addr, 0);
4231     return 0;
4232 }
4233 
4234 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4235                                                struct msqid_ds *host_md)
4236 {
4237     struct target_msqid_ds *target_md;
4238 
4239     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4240         return -TARGET_EFAULT;
4241     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4242         return -TARGET_EFAULT;
4243     target_md->msg_stime = tswapal(host_md->msg_stime);
4244     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4245     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4246     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4247     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4248     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4249     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4250     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4251     unlock_user_struct(target_md, target_addr, 1);
4252     return 0;
4253 }
4254 
4255 struct target_msginfo {
4256     int msgpool;
4257     int msgmap;
4258     int msgmax;
4259     int msgmnb;
4260     int msgmni;
4261     int msgssz;
4262     int msgtql;
4263     unsigned short int msgseg;
4264 };
4265 
4266 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4267                                               struct msginfo *host_msginfo)
4268 {
4269     struct target_msginfo *target_msginfo;
4270     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4271         return -TARGET_EFAULT;
4272     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4273     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4274     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4275     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4276     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4277     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4278     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4279     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4280     unlock_user_struct(target_msginfo, target_addr, 1);
4281     return 0;
4282 }
4283 
4284 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4285 {
4286     struct msqid_ds dsarg;
4287     struct msginfo msginfo;
4288     abi_long ret = -TARGET_EINVAL;
4289 
4290     cmd &= 0xff;
4291 
4292     switch (cmd) {
4293     case IPC_STAT:
4294     case IPC_SET:
4295     case MSG_STAT:
4296         if (target_to_host_msqid_ds(&dsarg,ptr))
4297             return -TARGET_EFAULT;
4298         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4299         if (host_to_target_msqid_ds(ptr,&dsarg))
4300             return -TARGET_EFAULT;
4301         break;
4302     case IPC_RMID:
4303         ret = get_errno(msgctl(msgid, cmd, NULL));
4304         break;
4305     case IPC_INFO:
4306     case MSG_INFO:
4307         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4308         if (host_to_target_msginfo(ptr, &msginfo))
4309             return -TARGET_EFAULT;
4310         break;
4311     }
4312 
4313     return ret;
4314 }
4315 
4316 struct target_msgbuf {
4317     abi_long mtype;
4318     char	mtext[1];
4319 };
4320 
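/* Copy a target msgbuf into a host buffer and send it, preferring the native
 * msgsnd syscall with a fallback to the multiplexed ipc syscall.
 */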
4321 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4322                                  ssize_t msgsz, int msgflg)
4323 {
4324     struct target_msgbuf *target_mb;
4325     struct msgbuf *host_mb;
4326     abi_long ret = 0;
4327 
4328     if (msgsz < 0) {
4329         return -TARGET_EINVAL;
4330     }
4331 
4332     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4333         return -TARGET_EFAULT;
4334     host_mb = g_try_malloc(msgsz + sizeof(long));
4335     if (!host_mb) {
4336         unlock_user_struct(target_mb, msgp, 0);
4337         return -TARGET_ENOMEM;
4338     }
4339     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4340     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4341     ret = -TARGET_ENOSYS;
4342 #ifdef __NR_msgsnd
4343     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4344 #endif
4345 #ifdef __NR_ipc
4346     if (ret == -TARGET_ENOSYS) {
4347 #ifdef __s390x__
4348         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4349                                  host_mb));
4350 #else
4351         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4352                                  host_mb, 0));
4353 #endif
4354     }
4355 #endif
4356     g_free(host_mb);
4357     unlock_user_struct(target_mb, msgp, 0);
4358 
4359     return ret;
4360 }
4361 
4362 #ifdef __NR_ipc
4363 #if defined(__sparc__)
4364 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4365 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4366 #elif defined(__s390x__)
4367 /* The s390 sys_ipc variant has only five parameters.  */
4368 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4369     ((long int[]){(long int)__msgp, __msgtyp})
4370 #else
4371 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4372     ((long int[]){(long int)__msgp, __msgtyp}), 0
4373 #endif
4374 #endif
4375 
4376 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4377                                  ssize_t msgsz, abi_long msgtyp,
4378                                  int msgflg)
4379 {
4380     struct target_msgbuf *target_mb;
4381     char *target_mtext;
4382     struct msgbuf *host_mb;
4383     abi_long ret = 0;
4384 
4385     if (msgsz < 0) {
4386         return -TARGET_EINVAL;
4387     }
4388 
4389     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4390         return -TARGET_EFAULT;
4391 
4392     host_mb = g_try_malloc(msgsz + sizeof(long));
4393     if (!host_mb) {
4394         ret = -TARGET_ENOMEM;
4395         goto end;
4396     }
4397     ret = -TARGET_ENOSYS;
4398 #ifdef __NR_msgrcv
4399     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4400 #endif
4401 #ifdef __NR_ipc
4402     if (ret == -TARGET_ENOSYS) {
4403         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4404                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4405     }
4406 #endif
4407 
4408     if (ret > 0) {
4409         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4410         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4411         if (!target_mtext) {
4412             ret = -TARGET_EFAULT;
4413             goto end;
4414         }
4415         memcpy(target_mb->mtext, host_mb->mtext, ret);
4416         unlock_user(target_mtext, target_mtext_addr, ret);
4417     }
4418 
4419     target_mb->mtype = tswapal(host_mb->mtype);
4420 
4421 end:
4422     if (target_mb)
4423         unlock_user_struct(target_mb, msgp, 1);
4424     g_free(host_mb);
4425     return ret;
4426 }
4427 
4428 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4429                                                abi_ulong target_addr)
4430 {
4431     struct target_shmid_ds *target_sd;
4432 
4433     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4434         return -TARGET_EFAULT;
4435     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4436         return -TARGET_EFAULT;
4437     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4438     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4439     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4440     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4441     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4442     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4443     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4444     unlock_user_struct(target_sd, target_addr, 0);
4445     return 0;
4446 }
4447 
4448 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4449                                                struct shmid_ds *host_sd)
4450 {
4451     struct target_shmid_ds *target_sd;
4452 
4453     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4454         return -TARGET_EFAULT;
4455     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4456         return -TARGET_EFAULT;
4457     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4458     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4459     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4460     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4461     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4462     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4463     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4464     unlock_user_struct(target_sd, target_addr, 1);
4465     return 0;
4466 }
4467 
4468 struct  target_shminfo {
4469     abi_ulong shmmax;
4470     abi_ulong shmmin;
4471     abi_ulong shmmni;
4472     abi_ulong shmseg;
4473     abi_ulong shmall;
4474 };
4475 
4476 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4477                                               struct shminfo *host_shminfo)
4478 {
4479     struct target_shminfo *target_shminfo;
4480     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4481         return -TARGET_EFAULT;
4482     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4483     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4484     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4485     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4486     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4487     unlock_user_struct(target_shminfo, target_addr, 1);
4488     return 0;
4489 }
4490 
4491 struct target_shm_info {
4492     int used_ids;
4493     abi_ulong shm_tot;
4494     abi_ulong shm_rss;
4495     abi_ulong shm_swp;
4496     abi_ulong swap_attempts;
4497     abi_ulong swap_successes;
4498 };
4499 
4500 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4501                                                struct shm_info *host_shm_info)
4502 {
4503     struct target_shm_info *target_shm_info;
4504     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4505         return -TARGET_EFAULT;
4506     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4507     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4508     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4509     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4510     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4511     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4512     unlock_user_struct(target_shm_info, target_addr, 1);
4513     return 0;
4514 }
4515 
4516 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4517 {
4518     struct shmid_ds dsarg;
4519     struct shminfo shminfo;
4520     struct shm_info shm_info;
4521     abi_long ret = -TARGET_EINVAL;
4522 
4523     cmd &= 0xff;
4524 
4525     switch(cmd) {
4526     case IPC_STAT:
4527     case IPC_SET:
4528     case SHM_STAT:
4529         if (target_to_host_shmid_ds(&dsarg, buf))
4530             return -TARGET_EFAULT;
4531         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4532         if (host_to_target_shmid_ds(buf, &dsarg))
4533             return -TARGET_EFAULT;
4534         break;
4535     case IPC_INFO:
4536         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4537         if (host_to_target_shminfo(buf, &shminfo))
4538             return -TARGET_EFAULT;
4539         break;
4540     case SHM_INFO:
4541         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4542         if (host_to_target_shm_info(buf, &shm_info))
4543             return -TARGET_EFAULT;
4544         break;
4545     case IPC_RMID:
4546     case SHM_LOCK:
4547     case SHM_UNLOCK:
4548         ret = get_errno(shmctl(shmid, cmd, NULL));
4549         break;
4550     }
4551 
4552     return ret;
4553 }
4554 
4555 #ifndef TARGET_FORCE_SHMLBA
4556 /* For most architectures, SHMLBA is the same as the page size;
4557  * some architectures have larger values, in which case they should
4558  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4559  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4560  * and defining its own value for SHMLBA.
4561  *
4562  * The kernel also permits SHMLBA to be set by the architecture to a
4563  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4564  * this means that addresses are rounded to the large size if
4565  * SHM_RND is set but addresses not aligned to that size are not rejected
4566  * as long as they are at least page-aligned. Since the only architecture
4567  * which uses this is ia64, this code doesn't provide for that oddity.
4568  */
4569 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4570 {
4571     return TARGET_PAGE_SIZE;
4572 }
4573 #endif
4574 
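/* Attach a SysV shared memory segment into the guest address space and
 * record it in shm_regions[]; returns the guest address of the mapping or
 * a negative errno on failure.
 */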
4575 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4576                                  int shmid, abi_ulong shmaddr, int shmflg)
4577 {
4578     abi_long raddr;
4579     void *host_raddr;
4580     struct shmid_ds shm_info;
4581     int i,ret;
4582     abi_ulong shmlba;
4583 
4584     /* find out the length of the shared memory segment */
4585     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4586     if (is_error(ret)) {
4587         /* can't get length, bail out */
4588         return ret;
4589     }
4590 
4591     shmlba = target_shmlba(cpu_env);
4592 
4593     if (shmaddr & (shmlba - 1)) {
4594         if (shmflg & SHM_RND) {
4595             shmaddr &= ~(shmlba - 1);
4596         } else {
4597             return -TARGET_EINVAL;
4598         }
4599     }
4600     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4601         return -TARGET_EINVAL;
4602     }
4603 
4604     mmap_lock();
4605 
4606     if (shmaddr)
4607         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4608     else {
4609         abi_ulong mmap_start;
4610 
4611         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4612         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4613 
4614         if (mmap_start == -1) {
4615             errno = ENOMEM;
4616             host_raddr = (void *)-1;
4617         } else
4618             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4619     }
4620 
4621     if (host_raddr == (void *)-1) {
4622         mmap_unlock();
4623         return get_errno((long)host_raddr);
4624     }
4625     raddr=h2g((unsigned long)host_raddr);
4626 
4627     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4628                    PAGE_VALID | PAGE_READ |
4629                    ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
4630 
4631     for (i = 0; i < N_SHM_REGIONS; i++) {
4632         if (!shm_regions[i].in_use) {
4633             shm_regions[i].in_use = true;
4634             shm_regions[i].start = raddr;
4635             shm_regions[i].size = shm_info.shm_segsz;
4636             break;
4637         }
4638     }
4639 
4640     mmap_unlock();
4641     return raddr;
4643 }
4644 
4645 static inline abi_long do_shmdt(abi_ulong shmaddr)
4646 {
4647     int i;
4648     abi_long rv;
4649 
4650     mmap_lock();
4651 
4652     for (i = 0; i < N_SHM_REGIONS; ++i) {
4653         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4654             shm_regions[i].in_use = false;
4655             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4656             break;
4657         }
4658     }
4659     rv = get_errno(shmdt(g2h(shmaddr)));
4660 
4661     mmap_unlock();
4662 
4663     return rv;
4664 }
4665 
4666 #ifdef TARGET_NR_ipc
4667 /* ??? This only works with linear mappings.  */
4668 /* do_ipc() must return target values and target errnos. */
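/*
 * The single ipc(2) entry point multiplexes all SysV IPC operations: the
 * low 16 bits of 'call' select the operation (IPCOP_*) and the high 16
 * bits carry the interface version.  For IPCOP_msgrcv, version 0 means
 * 'ptr' points at an old-style { msgp, msgtyp } block, while any other
 * version passes the message pointer and type directly.
 */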
4669 static abi_long do_ipc(CPUArchState *cpu_env,
4670                        unsigned int call, abi_long first,
4671                        abi_long second, abi_long third,
4672                        abi_long ptr, abi_long fifth)
4673 {
4674     int version;
4675     abi_long ret = 0;
4676 
4677     version = call >> 16;
4678     call &= 0xffff;
4679 
4680     switch (call) {
4681     case IPCOP_semop:
4682         ret = do_semtimedop(first, ptr, second, 0, false);
4683         break;
4684     case IPCOP_semtimedop:
4685     /*
4686      * The s390 sys_ipc variant has only five parameters instead of six
4687      * (as for default variant) and the only difference is the handling of
4688      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4689      * to a struct timespec whereas the generic variant uses the fifth parameter.
4690      */
4691 #if defined(TARGET_S390X)
4692         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4693 #else
4694         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4695 #endif
4696         break;
4697 
4698     case IPCOP_semget:
4699         ret = get_errno(semget(first, second, third));
4700         break;
4701 
4702     case IPCOP_semctl: {
4703         /* The semun argument to semctl is passed by value, so dereference the
4704          * ptr argument. */
4705         abi_ulong atptr;
4706         get_user_ual(atptr, ptr);
4707         ret = do_semctl(first, second, third, atptr);
4708         break;
4709     }
4710 
4711     case IPCOP_msgget:
4712         ret = get_errno(msgget(first, second));
4713         break;
4714 
4715     case IPCOP_msgsnd:
4716         ret = do_msgsnd(first, ptr, second, third);
4717         break;
4718 
4719     case IPCOP_msgctl:
4720         ret = do_msgctl(first, second, ptr);
4721         break;
4722 
4723     case IPCOP_msgrcv:
4724         switch (version) {
4725         case 0:
4726             {
4727                 struct target_ipc_kludge {
4728                     abi_long msgp;
4729                     abi_long msgtyp;
4730                 } *tmp;
4731 
4732                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4733                     ret = -TARGET_EFAULT;
4734                     break;
4735                 }
4736 
4737                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4738 
4739                 unlock_user_struct(tmp, ptr, 0);
4740                 break;
4741             }
4742         default:
4743             ret = do_msgrcv(first, ptr, second, fifth, third);
4744         }
4745         break;
4746 
4747     case IPCOP_shmat:
4748         switch (version) {
4749         default:
4750         {
4751             abi_ulong raddr;
4752             raddr = do_shmat(cpu_env, first, ptr, second);
4753             if (is_error(raddr))
4754                 return get_errno(raddr);
4755             if (put_user_ual(raddr, third))
4756                 return -TARGET_EFAULT;
4757             break;
4758         }
4759         case 1:
4760             ret = -TARGET_EINVAL;
4761             break;
4762         }
4763         break;
4764     case IPCOP_shmdt:
4765         ret = do_shmdt(ptr);
4766         break;
4767 
4768     case IPCOP_shmget:
4769         /* IPC_* flag values are the same on all linux platforms */
4770         ret = get_errno(shmget(first, second, third));
4771         break;
4772 
4773         /* IPC_* and SHM_* command values are the same on all linux platforms */
4774     case IPCOP_shmctl:
4775         ret = do_shmctl(first, second, ptr);
4776         break;
4777     default:
4778         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4779                       call, version);
4780         ret = -TARGET_ENOSYS;
4781         break;
4782     }
4783     return ret;
4784 }
4785 #endif
4786 
4787 /* kernel structure types definitions */
4788 
4789 #define STRUCT(name, ...) STRUCT_ ## name,
4790 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4791 enum {
4792 #include "syscall_types.h"
4793 STRUCT_MAX
4794 };
4795 #undef STRUCT
4796 #undef STRUCT_SPECIAL
4797 
4798 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4799 #define STRUCT_SPECIAL(name)
4800 #include "syscall_types.h"
4801 #undef STRUCT
4802 #undef STRUCT_SPECIAL
4803 
4804 #define MAX_STRUCT_SIZE 4096
4805 
4806 #ifdef CONFIG_FIEMAP
4807 /* So fiemap access checks don't overflow on 32 bit systems.
4808  * This is very slightly smaller than the limit imposed by
4809  * the underlying kernel.
4810  */
4811 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4812                             / sizeof(struct fiemap_extent))
4813 
4814 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4815                                        int fd, int cmd, abi_long arg)
4816 {
4817     /* The parameter for this ioctl is a struct fiemap followed
4818      * by an array of struct fiemap_extent whose size is set
4819      * in fiemap->fm_extent_count. The array is filled in by the
4820      * ioctl.
4821      */
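    /*
     * Worked example (illustrative): with fm_extent_count == 2 the guest
     * buffer is sizeof(struct fiemap) + 2 * sizeof(struct fiemap_extent)
     * bytes; on success the kernel sets fm_mapped_extents and fills in
     * that many extent records, which are converted back one at a time
     * below.
     */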
4822     int target_size_in, target_size_out;
4823     struct fiemap *fm;
4824     const argtype *arg_type = ie->arg_type;
4825     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4826     void *argptr, *p;
4827     abi_long ret;
4828     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4829     uint32_t outbufsz;
4830     int free_fm = 0;
4831 
4832     assert(arg_type[0] == TYPE_PTR);
4833     assert(ie->access == IOC_RW);
4834     arg_type++;
4835     target_size_in = thunk_type_size(arg_type, 0);
4836     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4837     if (!argptr) {
4838         return -TARGET_EFAULT;
4839     }
4840     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4841     unlock_user(argptr, arg, 0);
4842     fm = (struct fiemap *)buf_temp;
4843     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4844         return -TARGET_EINVAL;
4845     }
4846 
4847     outbufsz = sizeof (*fm) +
4848         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4849 
4850     if (outbufsz > MAX_STRUCT_SIZE) {
4851         /* We can't fit all the extents into the fixed size buffer.
4852          * Allocate one that is large enough and use it instead.
4853          */
4854         fm = g_try_malloc(outbufsz);
4855         if (!fm) {
4856             return -TARGET_ENOMEM;
4857         }
4858         memcpy(fm, buf_temp, sizeof(struct fiemap));
4859         free_fm = 1;
4860     }
4861     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4862     if (!is_error(ret)) {
4863         target_size_out = target_size_in;
4864         /* An extent_count of 0 means we were only counting the extents
4865          * so there are no structs to copy
4866          */
4867         if (fm->fm_extent_count != 0) {
4868             target_size_out += fm->fm_mapped_extents * extent_size;
4869         }
4870         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4871         if (!argptr) {
4872             ret = -TARGET_EFAULT;
4873         } else {
4874             /* Convert the struct fiemap */
4875             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4876             if (fm->fm_extent_count != 0) {
4877                 p = argptr + target_size_in;
4878                 /* ...and then all the struct fiemap_extents */
4879                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4880                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4881                                   THUNK_TARGET);
4882                     p += extent_size;
4883                 }
4884             }
4885             unlock_user(argptr, arg, target_size_out);
4886         }
4887     }
4888     if (free_fm) {
4889         g_free(fm);
4890     }
4891     return ret;
4892 }
4893 #endif
4894 
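/*
 * SIOCGIFCONF: ifc_len is expressed in units of the target-sized
 * struct ifreq on the guest side and of the host-sized struct ifreq on
 * the host side, so the length is rescaled in both directions and the
 * embedded ifc_buf pointer is swapped for a host buffer around the ioctl
 * and restored afterwards.
 */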
4895 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4896                                 int fd, int cmd, abi_long arg)
4897 {
4898     const argtype *arg_type = ie->arg_type;
4899     int target_size;
4900     void *argptr;
4901     int ret;
4902     struct ifconf *host_ifconf;
4903     uint32_t outbufsz;
4904     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4905     int target_ifreq_size;
4906     int nb_ifreq;
4907     int free_buf = 0;
4908     int i;
4909     int target_ifc_len;
4910     abi_long target_ifc_buf;
4911     int host_ifc_len;
4912     char *host_ifc_buf;
4913 
4914     assert(arg_type[0] == TYPE_PTR);
4915     assert(ie->access == IOC_RW);
4916 
4917     arg_type++;
4918     target_size = thunk_type_size(arg_type, 0);
4919 
4920     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4921     if (!argptr)
4922         return -TARGET_EFAULT;
4923     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4924     unlock_user(argptr, arg, 0);
4925 
4926     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4927     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4928     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4929 
4930     if (target_ifc_buf != 0) {
4931         target_ifc_len = host_ifconf->ifc_len;
4932         nb_ifreq = target_ifc_len / target_ifreq_size;
4933         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4934 
4935         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4936         if (outbufsz > MAX_STRUCT_SIZE) {
4937             /*
4938              * We can't fit all the ifreq entries into the fixed size buffer.
4939              * Allocate one that is large enough and use it instead.
4940              */
4941             host_ifconf = malloc(outbufsz);
4942             if (!host_ifconf) {
4943                 return -TARGET_ENOMEM;
4944             }
4945             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4946             free_buf = 1;
4947         }
4948         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4949 
4950         host_ifconf->ifc_len = host_ifc_len;
4951     } else {
4952         host_ifc_buf = NULL;
4953     }
4954     host_ifconf->ifc_buf = host_ifc_buf;
4955 
4956     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4957     if (!is_error(ret)) {
4958         /* convert host ifc_len to target ifc_len */
4959 
4960         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4961         target_ifc_len = nb_ifreq * target_ifreq_size;
4962         host_ifconf->ifc_len = target_ifc_len;
4963 
4964         /* restore target ifc_buf */
4965 
4966         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4967 
4968         /* copy struct ifconf to target user */
4969 
4970         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4971         if (!argptr)
4972             return -TARGET_EFAULT;
4973         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4974         unlock_user(argptr, arg, target_size);
4975 
4976         if (target_ifc_buf != 0) {
4977             /* copy ifreq[] to target user */
4978             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4979             for (i = 0; i < nb_ifreq ; i++) {
4980                 thunk_convert(argptr + i * target_ifreq_size,
4981                               host_ifc_buf + i * sizeof(struct ifreq),
4982                               ifreq_arg_type, THUNK_TARGET);
4983             }
4984             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4985         }
4986     }
4987 
4988     if (free_buf) {
4989         free(host_ifconf);
4990     }
4991 
4992     return ret;
4993 }
4994 
4995 #if defined(CONFIG_USBFS)
4996 #if HOST_LONG_BITS > 64
4997 #error USBDEVFS thunks do not support >64 bit hosts yet.
4998 #endif
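/*
 * USBDEVFS URB handling: every submitted URB gets a host-side live_urb
 * that pairs the host usbdevfs_urb with the guest URB and buffer
 * addresses.  REAPURB returns a pointer to the embedded host_urb, so
 * container_of-style arithmetic recovers the live_urb, and a hash table
 * keyed on the guest URB address serves the DISCARDURB lookups.
 */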
4999 struct live_urb {
5000     uint64_t target_urb_adr;
5001     uint64_t target_buf_adr;
5002     char *target_buf_ptr;
5003     struct usbdevfs_urb host_urb;
5004 };
5005 
5006 static GHashTable *usbdevfs_urb_hashtable(void)
5007 {
5008     static GHashTable *urb_hashtable;
5009 
5010     if (!urb_hashtable) {
5011         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5012     }
5013     return urb_hashtable;
5014 }
5015 
5016 static void urb_hashtable_insert(struct live_urb *urb)
5017 {
5018     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5019     g_hash_table_insert(urb_hashtable, urb, urb);
5020 }
5021 
5022 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5023 {
5024     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5025     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5026 }
5027 
5028 static void urb_hashtable_remove(struct live_urb *urb)
5029 {
5030     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5031     g_hash_table_remove(urb_hashtable, urb);
5032 }
5033 
5034 static abi_long
5035 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5036                           int fd, int cmd, abi_long arg)
5037 {
5038     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5039     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5040     struct live_urb *lurb;
5041     void *argptr;
5042     uint64_t hurb;
5043     int target_size;
5044     uintptr_t target_urb_adr;
5045     abi_long ret;
5046 
5047     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5048 
5049     memset(buf_temp, 0, sizeof(uint64_t));
5050     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5051     if (is_error(ret)) {
5052         return ret;
5053     }
5054 
5055     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5056     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5057     if (!lurb->target_urb_adr) {
5058         return -TARGET_EFAULT;
5059     }
5060     urb_hashtable_remove(lurb);
5061     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5062         lurb->host_urb.buffer_length);
5063     lurb->target_buf_ptr = NULL;
5064 
5065     /* restore the guest buffer pointer */
5066     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5067 
5068     /* update the guest urb struct */
5069     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5070     if (!argptr) {
5071         g_free(lurb);
5072         return -TARGET_EFAULT;
5073     }
5074     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5075     unlock_user(argptr, lurb->target_urb_adr, target_size);
5076 
5077     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5078     /* write back the urb handle */
5079     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5080     if (!argptr) {
5081         g_free(lurb);
5082         return -TARGET_EFAULT;
5083     }
5084 
5085     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5086     target_urb_adr = lurb->target_urb_adr;
5087     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5088     unlock_user(argptr, arg, target_size);
5089 
5090     g_free(lurb);
5091     return ret;
5092 }
5093 
5094 static abi_long
5095 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5096                              uint8_t *buf_temp __attribute__((unused)),
5097                              int fd, int cmd, abi_long arg)
5098 {
5099     struct live_urb *lurb;
5100 
5101     /* map target address back to host URB with metadata. */
5102     lurb = urb_hashtable_lookup(arg);
5103     if (!lurb) {
5104         return -TARGET_EFAULT;
5105     }
5106     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5107 }
5108 
5109 static abi_long
5110 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5111                             int fd, int cmd, abi_long arg)
5112 {
5113     const argtype *arg_type = ie->arg_type;
5114     int target_size;
5115     abi_long ret;
5116     void *argptr;
5117     int rw_dir;
5118     struct live_urb *lurb;
5119 
5120     /*
5121      * Each submitted URB needs to map to a unique ID for the
5122      * kernel, and that unique ID needs to be a pointer to
5123      * host memory.  Hence, we need to malloc for each URB.
5124      * Isochronous transfers have a variable-length struct.
5125      */
5126     arg_type++;
5127     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5128 
5129     /* construct host copy of urb and metadata */
5130     lurb = g_try_malloc0(sizeof(struct live_urb));
5131     if (!lurb) {
5132         return -TARGET_ENOMEM;
5133     }
5134 
5135     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5136     if (!argptr) {
5137         g_free(lurb);
5138         return -TARGET_EFAULT;
5139     }
5140     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5141     unlock_user(argptr, arg, 0);
5142 
5143     lurb->target_urb_adr = arg;
5144     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5145 
5146     /* buffer space used depends on endpoint type so lock the entire buffer */
5147     /* control type urbs should check the buffer contents for true direction */
5148     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5149     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5150         lurb->host_urb.buffer_length, 1);
5151     if (lurb->target_buf_ptr == NULL) {
5152         g_free(lurb);
5153         return -TARGET_EFAULT;
5154     }
5155 
5156     /* update buffer pointer in host copy */
5157     lurb->host_urb.buffer = lurb->target_buf_ptr;
5158 
5159     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5160     if (is_error(ret)) {
5161         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5162         g_free(lurb);
5163     } else {
5164         urb_hashtable_insert(lurb);
5165     }
5166 
5167     return ret;
5168 }
5169 #endif /* CONFIG_USBFS */
5170 
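/*
 * Device-mapper ioctls carry a struct dm_ioctl header followed by a
 * variable-sized payload described by data_start/data_size.  The fixed
 * buf_temp is therefore swapped for a scratch buffer big enough for both
 * the converted input payload and the kernel's reply, and the
 * per-command data is thunked in each direction around the host ioctl.
 */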
5171 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5172                             int cmd, abi_long arg)
5173 {
5174     void *argptr;
5175     struct dm_ioctl *host_dm;
5176     abi_long guest_data;
5177     uint32_t guest_data_size;
5178     int target_size;
5179     const argtype *arg_type = ie->arg_type;
5180     abi_long ret;
5181     void *big_buf = NULL;
5182     char *host_data;
5183 
5184     arg_type++;
5185     target_size = thunk_type_size(arg_type, 0);
5186     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5187     if (!argptr) {
5188         ret = -TARGET_EFAULT;
5189         goto out;
5190     }
5191     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5192     unlock_user(argptr, arg, 0);
5193 
5194     /* buf_temp is too small, so fetch things into a bigger buffer */
5195     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5196     memcpy(big_buf, buf_temp, target_size);
5197     buf_temp = big_buf;
5198     host_dm = big_buf;
5199 
5200     guest_data = arg + host_dm->data_start;
5201     if ((guest_data - arg) < 0) {
5202         ret = -TARGET_EINVAL;
5203         goto out;
5204     }
5205     guest_data_size = host_dm->data_size - host_dm->data_start;
5206     host_data = (char*)host_dm + host_dm->data_start;
5207 
5208     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5209     if (!argptr) {
5210         ret = -TARGET_EFAULT;
5211         goto out;
5212     }
5213 
5214     switch (ie->host_cmd) {
5215     case DM_REMOVE_ALL:
5216     case DM_LIST_DEVICES:
5217     case DM_DEV_CREATE:
5218     case DM_DEV_REMOVE:
5219     case DM_DEV_SUSPEND:
5220     case DM_DEV_STATUS:
5221     case DM_DEV_WAIT:
5222     case DM_TABLE_STATUS:
5223     case DM_TABLE_CLEAR:
5224     case DM_TABLE_DEPS:
5225     case DM_LIST_VERSIONS:
5226         /* no input data */
5227         break;
5228     case DM_DEV_RENAME:
5229     case DM_DEV_SET_GEOMETRY:
5230         /* data contains only strings */
5231         memcpy(host_data, argptr, guest_data_size);
5232         break;
5233     case DM_TARGET_MSG:
5234         memcpy(host_data, argptr, guest_data_size);
5235         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5236         break;
5237     case DM_TABLE_LOAD:
5238     {
5239         void *gspec = argptr;
5240         void *cur_data = host_data;
5241         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5242         int spec_size = thunk_type_size(arg_type, 0);
5243         int i;
5244 
5245         for (i = 0; i < host_dm->target_count; i++) {
5246             struct dm_target_spec *spec = cur_data;
5247             uint32_t next;
5248             int slen;
5249 
5250             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5251             slen = strlen((char*)gspec + spec_size) + 1;
5252             next = spec->next;
5253             spec->next = sizeof(*spec) + slen;
5254             strcpy((char*)&spec[1], gspec + spec_size);
5255             gspec += next;
5256             cur_data += spec->next;
5257         }
5258         break;
5259     }
5260     default:
5261         ret = -TARGET_EINVAL;
5262         unlock_user(argptr, guest_data, 0);
5263         goto out;
5264     }
5265     unlock_user(argptr, guest_data, 0);
5266 
5267     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5268     if (!is_error(ret)) {
5269         guest_data = arg + host_dm->data_start;
5270         guest_data_size = host_dm->data_size - host_dm->data_start;
5271         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5272         switch (ie->host_cmd) {
5273         case DM_REMOVE_ALL:
5274         case DM_DEV_CREATE:
5275         case DM_DEV_REMOVE:
5276         case DM_DEV_RENAME:
5277         case DM_DEV_SUSPEND:
5278         case DM_DEV_STATUS:
5279         case DM_TABLE_LOAD:
5280         case DM_TABLE_CLEAR:
5281         case DM_TARGET_MSG:
5282         case DM_DEV_SET_GEOMETRY:
5283             /* no return data */
5284             break;
5285         case DM_LIST_DEVICES:
5286         {
5287             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5288             uint32_t remaining_data = guest_data_size;
5289             void *cur_data = argptr;
5290             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5291             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5292 
5293             while (1) {
5294                 uint32_t next = nl->next;
5295                 if (next) {
5296                     nl->next = nl_size + (strlen(nl->name) + 1);
5297                 }
5298                 if (remaining_data < nl->next) {
5299                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5300                     break;
5301                 }
5302                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5303                 strcpy(cur_data + nl_size, nl->name);
5304                 cur_data += nl->next;
5305                 remaining_data -= nl->next;
5306                 if (!next) {
5307                     break;
5308                 }
5309                 nl = (void*)nl + next;
5310             }
5311             break;
5312         }
5313         case DM_DEV_WAIT:
5314         case DM_TABLE_STATUS:
5315         {
5316             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5317             void *cur_data = argptr;
5318             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5319             int spec_size = thunk_type_size(arg_type, 0);
5320             int i;
5321 
5322             for (i = 0; i < host_dm->target_count; i++) {
5323                 uint32_t next = spec->next;
5324                 int slen = strlen((char*)&spec[1]) + 1;
5325                 spec->next = (cur_data - argptr) + spec_size + slen;
5326                 if (guest_data_size < spec->next) {
5327                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5328                     break;
5329                 }
5330                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5331                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5332                 cur_data = argptr + spec->next;
5333                 spec = (void*)host_dm + host_dm->data_start + next;
5334             }
5335             break;
5336         }
5337         case DM_TABLE_DEPS:
5338         {
5339             void *hdata = (void*)host_dm + host_dm->data_start;
5340             int count = *(uint32_t*)hdata;
5341             uint64_t *hdev = hdata + 8;
5342             uint64_t *gdev = argptr + 8;
5343             int i;
5344 
5345             *(uint32_t*)argptr = tswap32(count);
5346             for (i = 0; i < count; i++) {
5347                 *gdev = tswap64(*hdev);
5348                 gdev++;
5349                 hdev++;
5350             }
5351             break;
5352         }
5353         case DM_LIST_VERSIONS:
5354         {
5355             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5356             uint32_t remaining_data = guest_data_size;
5357             void *cur_data = argptr;
5358             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5359             int vers_size = thunk_type_size(arg_type, 0);
5360 
5361             while (1) {
5362                 uint32_t next = vers->next;
5363                 if (next) {
5364                     vers->next = vers_size + (strlen(vers->name) + 1);
5365                 }
5366                 if (remaining_data < vers->next) {
5367                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5368                     break;
5369                 }
5370                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5371                 strcpy(cur_data + vers_size, vers->name);
5372                 cur_data += vers->next;
5373                 remaining_data -= vers->next;
5374                 if (!next) {
5375                     break;
5376                 }
5377                 vers = (void*)vers + next;
5378             }
5379             break;
5380         }
5381         default:
5382             unlock_user(argptr, guest_data, 0);
5383             ret = -TARGET_EINVAL;
5384             goto out;
5385         }
5386         unlock_user(argptr, guest_data, guest_data_size);
5387 
5388         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5389         if (!argptr) {
5390             ret = -TARGET_EFAULT;
5391             goto out;
5392         }
5393         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5394         unlock_user(argptr, arg, target_size);
5395     }
5396 out:
5397     g_free(big_buf);
5398     return ret;
5399 }
5400 
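/*
 * BLKPG: struct blkpg_ioctl_arg embeds a guest pointer to a
 * struct blkpg_partition payload, so the outer struct and the payload
 * are converted separately and the data pointer is redirected to a
 * host-side copy before the ioctl is issued.
 */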
5401 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5402                                int cmd, abi_long arg)
5403 {
5404     void *argptr;
5405     int target_size;
5406     const argtype *arg_type = ie->arg_type;
5407     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5408     abi_long ret;
5409 
5410     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5411     struct blkpg_partition host_part;
5412 
5413     /* Read and convert blkpg */
5414     arg_type++;
5415     target_size = thunk_type_size(arg_type, 0);
5416     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5417     if (!argptr) {
5418         ret = -TARGET_EFAULT;
5419         goto out;
5420     }
5421     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5422     unlock_user(argptr, arg, 0);
5423 
5424     switch (host_blkpg->op) {
5425     case BLKPG_ADD_PARTITION:
5426     case BLKPG_DEL_PARTITION:
5427         /* payload is struct blkpg_partition */
5428         break;
5429     default:
5430         /* Unknown opcode */
5431         ret = -TARGET_EINVAL;
5432         goto out;
5433     }
5434 
5435     /* Read and convert blkpg->data */
5436     arg = (abi_long)(uintptr_t)host_blkpg->data;
5437     target_size = thunk_type_size(part_arg_type, 0);
5438     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5439     if (!argptr) {
5440         ret = -TARGET_EFAULT;
5441         goto out;
5442     }
5443     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5444     unlock_user(argptr, arg, 0);
5445 
5446     /* Swizzle the data pointer to our local copy and call! */
5447     host_blkpg->data = &host_part;
5448     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5449 
5450 out:
5451     return ret;
5452 }
5453 
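/*
 * The rtentry-based routing ioctls (SIOCADDRT/SIOCDELRT, as wired up in
 * ioctls.h) are converted field by field rather than with a single
 * thunk_convert() so that rt_dev, a pointer to a device-name string in
 * guest memory, can be replaced with a locked host string for the
 * duration of the ioctl.
 */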
5454 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5455                                 int fd, int cmd, abi_long arg)
5456 {
5457     const argtype *arg_type = ie->arg_type;
5458     const StructEntry *se;
5459     const argtype *field_types;
5460     const int *dst_offsets, *src_offsets;
5461     int target_size;
5462     void *argptr;
5463     abi_ulong *target_rt_dev_ptr = NULL;
5464     unsigned long *host_rt_dev_ptr = NULL;
5465     abi_long ret;
5466     int i;
5467 
5468     assert(ie->access == IOC_W);
5469     assert(*arg_type == TYPE_PTR);
5470     arg_type++;
5471     assert(*arg_type == TYPE_STRUCT);
5472     target_size = thunk_type_size(arg_type, 0);
5473     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5474     if (!argptr) {
5475         return -TARGET_EFAULT;
5476     }
5477     arg_type++;
5478     assert(*arg_type == (int)STRUCT_rtentry);
5479     se = struct_entries + *arg_type++;
5480     assert(se->convert[0] == NULL);
5481     /* convert struct here to be able to catch rt_dev string */
5482     field_types = se->field_types;
5483     dst_offsets = se->field_offsets[THUNK_HOST];
5484     src_offsets = se->field_offsets[THUNK_TARGET];
5485     for (i = 0; i < se->nb_fields; i++) {
5486         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5487             assert(*field_types == TYPE_PTRVOID);
5488             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5489             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5490             if (*target_rt_dev_ptr != 0) {
5491                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5492                                                   tswapal(*target_rt_dev_ptr));
5493                 if (!*host_rt_dev_ptr) {
5494                     unlock_user(argptr, arg, 0);
5495                     return -TARGET_EFAULT;
5496                 }
5497             } else {
5498                 *host_rt_dev_ptr = 0;
5499             }
5500             field_types++;
5501             continue;
5502         }
5503         field_types = thunk_convert(buf_temp + dst_offsets[i],
5504                                     argptr + src_offsets[i],
5505                                     field_types, THUNK_HOST);
5506     }
5507     unlock_user(argptr, arg, 0);
5508 
5509     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5510 
5511     assert(host_rt_dev_ptr != NULL);
5512     assert(target_rt_dev_ptr != NULL);
5513     if (*host_rt_dev_ptr != 0) {
5514         unlock_user((void *)*host_rt_dev_ptr,
5515                     *target_rt_dev_ptr, 0);
5516     }
5517     return ret;
5518 }
5519 
5520 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5521                                      int fd, int cmd, abi_long arg)
5522 {
5523     int sig = target_to_host_signal(arg);
5524     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5525 }
5526 
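/*
 * SIOCGSTAMP/SIOCGSTAMPNS: the _OLD variants copy the timestamp out in
 * the target's legacy timeval/timespec layout, while the newer commands
 * use the 64-bit time layouts, so the host result is written back with
 * the matching helper.
 */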
5527 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5528                                     int fd, int cmd, abi_long arg)
5529 {
5530     struct timeval tv;
5531     abi_long ret;
5532 
5533     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5534     if (is_error(ret)) {
5535         return ret;
5536     }
5537 
5538     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5539         if (copy_to_user_timeval(arg, &tv)) {
5540             return -TARGET_EFAULT;
5541         }
5542     } else {
5543         if (copy_to_user_timeval64(arg, &tv)) {
5544             return -TARGET_EFAULT;
5545         }
5546     }
5547 
5548     return ret;
5549 }
5550 
5551 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5552                                       int fd, int cmd, abi_long arg)
5553 {
5554     struct timespec ts;
5555     abi_long ret;
5556 
5557     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5558     if (is_error(ret)) {
5559         return ret;
5560     }
5561 
5562     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5563         if (host_to_target_timespec(arg, &ts)) {
5564             return -TARGET_EFAULT;
5565         }
5566     } else {
5567         if (host_to_target_timespec64(arg, &ts)) {
5568             return -TARGET_EFAULT;
5569         }
5570     }
5571 
5572     return ret;
5573 }
5574 
5575 #ifdef TIOCGPTPEER
5576 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5577                                      int fd, int cmd, abi_long arg)
5578 {
5579     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5580     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5581 }
5582 #endif
5583 
5584 #ifdef HAVE_DRM_H
5585 
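/*
 * DRM_IOCTL_VERSION: struct drm_version carries three guest pointers
 * (name, date, desc) with caller-supplied lengths.  They are locked as
 * writable host buffers before the ioctl and unlocked afterwards,
 * copying the data back only on success.
 */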
5586 static void unlock_drm_version(struct drm_version *host_ver,
5587                                struct target_drm_version *target_ver,
5588                                bool copy)
5589 {
5590     unlock_user(host_ver->name, target_ver->name,
5591                                 copy ? host_ver->name_len : 0);
5592     unlock_user(host_ver->date, target_ver->date,
5593                                 copy ? host_ver->date_len : 0);
5594     unlock_user(host_ver->desc, target_ver->desc,
5595                                 copy ? host_ver->desc_len : 0);
5596 }
5597 
5598 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5599                                           struct target_drm_version *target_ver)
5600 {
5601     memset(host_ver, 0, sizeof(*host_ver));
5602 
5603     __get_user(host_ver->name_len, &target_ver->name_len);
5604     if (host_ver->name_len) {
5605         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5606                                    target_ver->name_len, 0);
5607         if (!host_ver->name) {
5608             return -EFAULT;
5609         }
5610     }
5611 
5612     __get_user(host_ver->date_len, &target_ver->date_len);
5613     if (host_ver->date_len) {
5614         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5615                                    target_ver->date_len, 0);
5616         if (!host_ver->date) {
5617             goto err;
5618         }
5619     }
5620 
5621     __get_user(host_ver->desc_len, &target_ver->desc_len);
5622     if (host_ver->desc_len) {
5623         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5624                                    target_ver->desc_len, 0);
5625         if (!host_ver->desc) {
5626             goto err;
5627         }
5628     }
5629 
5630     return 0;
5631 err:
5632     unlock_drm_version(host_ver, target_ver, false);
5633     return -EFAULT;
5634 }
5635 
5636 static inline void host_to_target_drmversion(
5637                                           struct target_drm_version *target_ver,
5638                                           struct drm_version *host_ver)
5639 {
5640     __put_user(host_ver->version_major, &target_ver->version_major);
5641     __put_user(host_ver->version_minor, &target_ver->version_minor);
5642     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5643     __put_user(host_ver->name_len, &target_ver->name_len);
5644     __put_user(host_ver->date_len, &target_ver->date_len);
5645     __put_user(host_ver->desc_len, &target_ver->desc_len);
5646     unlock_drm_version(host_ver, target_ver, true);
5647 }
5648 
5649 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5650                              int fd, int cmd, abi_long arg)
5651 {
5652     struct drm_version *ver;
5653     struct target_drm_version *target_ver;
5654     abi_long ret;
5655 
5656     switch (ie->host_cmd) {
5657     case DRM_IOCTL_VERSION:
5658         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5659             return -TARGET_EFAULT;
5660         }
5661         ver = (struct drm_version *)buf_temp;
5662         ret = target_to_host_drmversion(ver, target_ver);
5663         if (!is_error(ret)) {
5664             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5665             if (is_error(ret)) {
5666                 unlock_drm_version(ver, target_ver, false);
5667             } else {
5668                 host_to_target_drmversion(target_ver, ver);
5669             }
5670         }
5671         unlock_user_struct(target_ver, arg, 0);
5672         return ret;
5673     }
5674     return -TARGET_ENOSYS;
5675 }
5676 
5677 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5678                                            struct drm_i915_getparam *gparam,
5679                                            int fd, abi_long arg)
5680 {
5681     abi_long ret;
5682     int value;
5683     struct target_drm_i915_getparam *target_gparam;
5684 
5685     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5686         return -TARGET_EFAULT;
5687     }
5688 
5689     __get_user(gparam->param, &target_gparam->param);
5690     gparam->value = &value;
5691     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5692     put_user_s32(value, target_gparam->value);
5693 
5694     unlock_user_struct(target_gparam, arg, 0);
5695     return ret;
5696 }
5697 
5698 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5699                                   int fd, int cmd, abi_long arg)
5700 {
5701     switch (ie->host_cmd) {
5702     case DRM_IOCTL_I915_GETPARAM:
5703         return do_ioctl_drm_i915_getparam(ie,
5704                                           (struct drm_i915_getparam *)buf_temp,
5705                                           fd, arg);
5706     default:
5707         return -TARGET_ENOSYS;
5708     }
5709 }
5710 
5711 #endif
5712 
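/*
 * TUNSETTXFILTER: struct tun_filter ends in a flexible array of 'count'
 * ETH_ALEN-byte MAC addresses, so the header and the address array are
 * copied in separately and the total size is checked against
 * MAX_STRUCT_SIZE before the host ioctl is issued.
 */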
5713 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5714                                         int fd, int cmd, abi_long arg)
5715 {
5716     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5717     struct tun_filter *target_filter;
5718     char *target_addr;
5719 
5720     assert(ie->access == IOC_W);
5721 
5722     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5723     if (!target_filter) {
5724         return -TARGET_EFAULT;
5725     }
5726     filter->flags = tswap16(target_filter->flags);
5727     filter->count = tswap16(target_filter->count);
5728     unlock_user(target_filter, arg, 0);
5729 
5730     if (filter->count) {
5731         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5732             MAX_STRUCT_SIZE) {
5733             return -TARGET_EFAULT;
5734         }
5735 
5736         target_addr = lock_user(VERIFY_READ,
5737                                 arg + offsetof(struct tun_filter, addr),
5738                                 filter->count * ETH_ALEN, 1);
5739         if (!target_addr) {
5740             return -TARGET_EFAULT;
5741         }
5742         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5743         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5744     }
5745 
5746     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5747 }
5748 
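/*
 * The ioctl dispatch table: each entry maps a TARGET_* request number to
 * the host number, an access mode (IOC_R/IOC_W/IOC_RW), an optional
 * special-case handler and an argument-type description used by the
 * generic thunking code in do_ioctl().  The table is searched linearly
 * and terminated by an entry with target_cmd == 0.
 */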
5749 IOCTLEntry ioctl_entries[] = {
5750 #define IOCTL(cmd, access, ...) \
5751     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5752 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5753     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5754 #define IOCTL_IGNORE(cmd) \
5755     { TARGET_ ## cmd, 0, #cmd },
5756 #include "ioctls.h"
5757     { 0, 0, },
5758 };
5759 
5760 /* ??? Implement proper locking for ioctls.  */
5761 /* do_ioctl() must return target values and target errnos. */
5762 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5763 {
5764     const IOCTLEntry *ie;
5765     const argtype *arg_type;
5766     abi_long ret;
5767     uint8_t buf_temp[MAX_STRUCT_SIZE];
5768     int target_size;
5769     void *argptr;
5770 
5771     ie = ioctl_entries;
5772     for(;;) {
5773         if (ie->target_cmd == 0) {
5774             qemu_log_mask(
5775                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5776             return -TARGET_ENOSYS;
5777         }
5778         if (ie->target_cmd == cmd)
5779             break;
5780         ie++;
5781     }
5782     arg_type = ie->arg_type;
5783     if (ie->do_ioctl) {
5784         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5785     } else if (!ie->host_cmd) {
5786         /* Some architectures define BSD ioctls in their headers
5787            that are not implemented in Linux.  */
5788         return -TARGET_ENOSYS;
5789     }
5790 
5791     switch(arg_type[0]) {
5792     case TYPE_NULL:
5793         /* no argument */
5794         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5795         break;
5796     case TYPE_PTRVOID:
5797     case TYPE_INT:
5798     case TYPE_LONG:
5799     case TYPE_ULONG:
5800         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5801         break;
5802     case TYPE_PTR:
5803         arg_type++;
5804         target_size = thunk_type_size(arg_type, 0);
5805         switch(ie->access) {
5806         case IOC_R:
5807             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5808             if (!is_error(ret)) {
5809                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5810                 if (!argptr)
5811                     return -TARGET_EFAULT;
5812                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5813                 unlock_user(argptr, arg, target_size);
5814             }
5815             break;
5816         case IOC_W:
5817             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5818             if (!argptr)
5819                 return -TARGET_EFAULT;
5820             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5821             unlock_user(argptr, arg, 0);
5822             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5823             break;
5824         default:
5825         case IOC_RW:
5826             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5827             if (!argptr)
5828                 return -TARGET_EFAULT;
5829             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5830             unlock_user(argptr, arg, 0);
5831             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5832             if (!is_error(ret)) {
5833                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5834                 if (!argptr)
5835                     return -TARGET_EFAULT;
5836                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5837                 unlock_user(argptr, arg, target_size);
5838             }
5839             break;
5840         }
5841         break;
5842     default:
5843         qemu_log_mask(LOG_UNIMP,
5844                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5845                       (long)cmd, arg_type[0]);
5846         ret = -TARGET_ENOSYS;
5847         break;
5848     }
5849     return ret;
5850 }
5851 
5852 static const bitmask_transtbl iflag_tbl[] = {
5853         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5854         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5855         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5856         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5857         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5858         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5859         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5860         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5861         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5862         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5863         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5864         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5865         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5866         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5867         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
5868         { 0, 0, 0, 0 }
5869 };
5870 
5871 static const bitmask_transtbl oflag_tbl[] = {
5872 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5873 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5874 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5875 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5876 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5877 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5878 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5879 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5880 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5881 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5882 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5883 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5884 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5885 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5886 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5887 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5888 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5889 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5890 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5891 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5892 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5893 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5894 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5895 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5896 	{ 0, 0, 0, 0 }
5897 };
5898 
5899 static const bitmask_transtbl cflag_tbl[] = {
5900 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5901 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5902 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5903 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5904 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5905 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5906 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5907 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5908 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5909 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5910 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5911 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5912 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5913 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5914 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5915 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5916 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5917 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5918 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5919 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5920 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5921 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5922 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5923 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5924 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5925 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5926 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5927 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5928 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5929 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5930 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5931 	{ 0, 0, 0, 0 }
5932 };
5933 
5934 static const bitmask_transtbl lflag_tbl[] = {
5935   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5936   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5937   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5938   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5939   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5940   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5941   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5942   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5943   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5944   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5945   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5946   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5947   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5948   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5949   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5950   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5951   { 0, 0, 0, 0 }
5952 };
5953 
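/*
 * termios conversion: each bitmask_transtbl row is
 * { target_mask, target_bits, host_mask, host_bits }, which lets
 * target_to_host_bitmask() and host_to_target_bitmask() translate the
 * c_iflag/c_oflag/c_cflag/c_lflag fields in either direction; the c_cc
 * control characters are remapped index by index.
 */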
5954 static void target_to_host_termios (void *dst, const void *src)
5955 {
5956     struct host_termios *host = dst;
5957     const struct target_termios *target = src;
5958 
5959     host->c_iflag =
5960         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5961     host->c_oflag =
5962         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5963     host->c_cflag =
5964         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5965     host->c_lflag =
5966         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5967     host->c_line = target->c_line;
5968 
5969     memset(host->c_cc, 0, sizeof(host->c_cc));
5970     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5971     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5972     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5973     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5974     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5975     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5976     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5977     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5978     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5979     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5980     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5981     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5982     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5983     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5984     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5985     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5986     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5987 }
5988 
5989 static void host_to_target_termios (void *dst, const void *src)
5990 {
5991     struct target_termios *target = dst;
5992     const struct host_termios *host = src;
5993 
5994     target->c_iflag =
5995         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5996     target->c_oflag =
5997         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5998     target->c_cflag =
5999         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6000     target->c_lflag =
6001         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6002     target->c_line = host->c_line;
6003 
6004     memset(target->c_cc, 0, sizeof(target->c_cc));
6005     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6006     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6007     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6008     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6009     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6010     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6011     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6012     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6013     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6014     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6015     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6016     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6017     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6018     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6019     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6020     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6021     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6022 }
6023 
6024 static const StructEntry struct_termios_def = {
6025     .convert = { host_to_target_termios, target_to_host_termios },
6026     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6027     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6028     .print = print_termios,
6029 };
6030 
6031 static bitmask_transtbl mmap_flags_tbl[] = {
6032     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6033     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6034     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6035     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6036       MAP_ANONYMOUS, MAP_ANONYMOUS },
6037     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6038       MAP_GROWSDOWN, MAP_GROWSDOWN },
6039     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6040       MAP_DENYWRITE, MAP_DENYWRITE },
6041     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6042       MAP_EXECUTABLE, MAP_EXECUTABLE },
6043     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6044     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6045       MAP_NORESERVE, MAP_NORESERVE },
6046     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6047     /* MAP_STACK had been ignored by the kernel for quite some time.
6048        Recognize it for the target insofar as we do not want to pass
6049        it through to the host.  */
6050     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6051     { 0, 0, 0, 0 }
6052 };
6053 
6054 /*
6055  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6056  *       TARGET_I386 is also defined when TARGET_X86_64 is defined.
6057  */
6058 #if defined(TARGET_I386)
6059 
6060 /* NOTE: there is really one LDT for all the threads */
6061 static uint8_t *ldt_table;
6062 
6063 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6064 {
6065     int size;
6066     void *p;
6067 
6068     if (!ldt_table)
6069         return 0;
6070     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6071     if (size > bytecount)
6072         size = bytecount;
6073     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6074     if (!p)
6075         return -TARGET_EFAULT;
6076     /* ??? Should this be byteswapped?  */
6077     memcpy(p, ldt_table, size);
6078     unlock_user(p, ptr, size);
6079     return size;
6080 }
6081 
6082 /* XXX: add locking support */
6083 static abi_long write_ldt(CPUX86State *env,
6084                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6085 {
6086     struct target_modify_ldt_ldt_s ldt_info;
6087     struct target_modify_ldt_ldt_s *target_ldt_info;
6088     int seg_32bit, contents, read_exec_only, limit_in_pages;
6089     int seg_not_present, useable, lm;
6090     uint32_t *lp, entry_1, entry_2;
6091 
6092     if (bytecount != sizeof(ldt_info))
6093         return -TARGET_EINVAL;
6094     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6095         return -TARGET_EFAULT;
6096     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6097     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6098     ldt_info.limit = tswap32(target_ldt_info->limit);
6099     ldt_info.flags = tswap32(target_ldt_info->flags);
6100     unlock_user_struct(target_ldt_info, ptr, 0);
6101 
6102     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6103         return -TARGET_EINVAL;
6104     seg_32bit = ldt_info.flags & 1;
6105     contents = (ldt_info.flags >> 1) & 3;
6106     read_exec_only = (ldt_info.flags >> 3) & 1;
6107     limit_in_pages = (ldt_info.flags >> 4) & 1;
6108     seg_not_present = (ldt_info.flags >> 5) & 1;
6109     useable = (ldt_info.flags >> 6) & 1;
6110 #ifdef TARGET_ABI32
6111     lm = 0;
6112 #else
6113     lm = (ldt_info.flags >> 7) & 1;
6114 #endif
6115     if (contents == 3) {
6116         if (oldmode)
6117             return -TARGET_EINVAL;
6118         if (seg_not_present == 0)
6119             return -TARGET_EINVAL;
6120     }
6121     /* allocate the LDT */
6122     if (!ldt_table) {
6123         env->ldt.base = target_mmap(0,
6124                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6125                                     PROT_READ|PROT_WRITE,
6126                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6127         if (env->ldt.base == -1)
6128             return -TARGET_ENOMEM;
6129         memset(g2h(env->ldt.base), 0,
6130                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6131         env->ldt.limit = 0xffff;
6132         ldt_table = g2h(env->ldt.base);
6133     }
6134 
6135     /* NOTE: same code as Linux kernel */
6136     /* Allow LDTs to be cleared by the user. */
6137     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6138         if (oldmode ||
6139             (contents == 0             &&
6140              read_exec_only == 1       &&
6141              seg_32bit == 0            &&
6142              limit_in_pages == 0       &&
6143              seg_not_present == 1      &&
6144              useable == 0 )) {
6145             entry_1 = 0;
6146             entry_2 = 0;
6147             goto install;
6148         }
6149     }
6150 
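    /*
     * Added explanatory comment: entry_1/entry_2 are the two 32-bit words of
     * an x86 segment descriptor.  entry_1 packs base[15:0] into its top half
     * and limit[15:0] into its bottom half; entry_2 holds base[31:24] and
     * base[23:16], limit[19:16], the type bits ("contents", writable =
     * !read_exec_only), present = !seg_not_present, S = 1 and DPL = 3 (the
     * 0x7000 constant), plus the D/B, granularity, AVL and L bits.
     */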
6151     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6152         (ldt_info.limit & 0x0ffff);
6153     entry_2 = (ldt_info.base_addr & 0xff000000) |
6154         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6155         (ldt_info.limit & 0xf0000) |
6156         ((read_exec_only ^ 1) << 9) |
6157         (contents << 10) |
6158         ((seg_not_present ^ 1) << 15) |
6159         (seg_32bit << 22) |
6160         (limit_in_pages << 23) |
6161         (lm << 21) |
6162         0x7000;
6163     if (!oldmode)
6164         entry_2 |= (useable << 20);
6165 
6166     /* Install the new entry ...  */
6167 install:
6168     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6169     lp[0] = tswap32(entry_1);
6170     lp[1] = tswap32(entry_2);
6171     return 0;
6172 }
6173 
6174 /* specific and weird i386 syscalls */
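/*
 * Added note: the func values handled below mirror the Linux modify_ldt(2)
 * interface: 0 reads the LDT, 1 writes an entry in the legacy format, and
 * 0x11 writes an entry in the current format (write_ldt's oldmode flag).
 */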
6175 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6176                               unsigned long bytecount)
6177 {
6178     abi_long ret;
6179 
6180     switch (func) {
6181     case 0:
6182         ret = read_ldt(ptr, bytecount);
6183         break;
6184     case 1:
6185         ret = write_ldt(env, ptr, bytecount, 1);
6186         break;
6187     case 0x11:
6188         ret = write_ldt(env, ptr, bytecount, 0);
6189         break;
6190     default:
6191         ret = -TARGET_ENOSYS;
6192         break;
6193     }
6194     return ret;
6195 }
6196 
6197 #if defined(TARGET_ABI32)
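/*
 * Added note: this implements set_thread_area(2) for 32-bit x86 guests.  The
 * descriptor is installed in the TLS slots of the guest GDT; an entry_number
 * of -1 asks us to pick a free slot in [TARGET_GDT_ENTRY_TLS_MIN,
 * TARGET_GDT_ENTRY_TLS_MAX] and report it back through the user struct,
 * matching the kernel's behaviour.
 */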
6198 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6199 {
6200     uint64_t *gdt_table = g2h(env->gdt.base);
6201     struct target_modify_ldt_ldt_s ldt_info;
6202     struct target_modify_ldt_ldt_s *target_ldt_info;
6203     int seg_32bit, contents, read_exec_only, limit_in_pages;
6204     int seg_not_present, useable, lm;
6205     uint32_t *lp, entry_1, entry_2;
6206     int i;
6207 
6208     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6209     if (!target_ldt_info)
6210         return -TARGET_EFAULT;
6211     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6212     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6213     ldt_info.limit = tswap32(target_ldt_info->limit);
6214     ldt_info.flags = tswap32(target_ldt_info->flags);
6215     if (ldt_info.entry_number == -1) {
6216         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6217             if (gdt_table[i] == 0) {
6218                 ldt_info.entry_number = i;
6219                 target_ldt_info->entry_number = tswap32(i);
6220                 break;
6221             }
6222         }
6223     }
6224     unlock_user_struct(target_ldt_info, ptr, 1);
6225 
6226     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6227         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6228            return -TARGET_EINVAL;
6229     seg_32bit = ldt_info.flags & 1;
6230     contents = (ldt_info.flags >> 1) & 3;
6231     read_exec_only = (ldt_info.flags >> 3) & 1;
6232     limit_in_pages = (ldt_info.flags >> 4) & 1;
6233     seg_not_present = (ldt_info.flags >> 5) & 1;
6234     useable = (ldt_info.flags >> 6) & 1;
6235 #ifdef TARGET_ABI32
6236     lm = 0;
6237 #else
6238     lm = (ldt_info.flags >> 7) & 1;
6239 #endif
6240 
6241     if (contents == 3) {
6242         if (seg_not_present == 0)
6243             return -TARGET_EINVAL;
6244     }
6245 
6246     /* NOTE: same code as Linux kernel */
6247     /* Allow LDTs to be cleared by the user. */
6248     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6249         if ((contents == 0             &&
6250              read_exec_only == 1       &&
6251              seg_32bit == 0            &&
6252              limit_in_pages == 0       &&
6253              seg_not_present == 1      &&
6254              useable == 0 )) {
6255             entry_1 = 0;
6256             entry_2 = 0;
6257             goto install;
6258         }
6259     }
6260 
6261     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6262         (ldt_info.limit & 0x0ffff);
6263     entry_2 = (ldt_info.base_addr & 0xff000000) |
6264         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6265         (ldt_info.limit & 0xf0000) |
6266         ((read_exec_only ^ 1) << 9) |
6267         (contents << 10) |
6268         ((seg_not_present ^ 1) << 15) |
6269         (seg_32bit << 22) |
6270         (limit_in_pages << 23) |
6271         (useable << 20) |
6272         (lm << 21) |
6273         0x7000;
6274 
6275     /* Install the new entry ...  */
6276 install:
6277     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6278     lp[0] = tswap32(entry_1);
6279     lp[1] = tswap32(entry_2);
6280     return 0;
6281 }
6282 
6283 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6284 {
6285     struct target_modify_ldt_ldt_s *target_ldt_info;
6286     uint64_t *gdt_table = g2h(env->gdt.base);
6287     uint32_t base_addr, limit, flags;
6288     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6289     int seg_not_present, useable, lm;
6290     uint32_t *lp, entry_1, entry_2;
6291 
6292     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6293     if (!target_ldt_info)
6294         return -TARGET_EFAULT;
6295     idx = tswap32(target_ldt_info->entry_number);
6296     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6297         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6298         unlock_user_struct(target_ldt_info, ptr, 1);
6299         return -TARGET_EINVAL;
6300     }
6301     lp = (uint32_t *)(gdt_table + idx);
6302     entry_1 = tswap32(lp[0]);
6303     entry_2 = tswap32(lp[1]);
6304 
6305     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6306     contents = (entry_2 >> 10) & 3;
6307     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6308     seg_32bit = (entry_2 >> 22) & 1;
6309     limit_in_pages = (entry_2 >> 23) & 1;
6310     useable = (entry_2 >> 20) & 1;
6311 #ifdef TARGET_ABI32
6312     lm = 0;
6313 #else
6314     lm = (entry_2 >> 21) & 1;
6315 #endif
6316     flags = (seg_32bit << 0) | (contents << 1) |
6317         (read_exec_only << 3) | (limit_in_pages << 4) |
6318         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6319     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6320     base_addr = (entry_1 >> 16) |
6321         (entry_2 & 0xff000000) |
6322         ((entry_2 & 0xff) << 16);
6323     target_ldt_info->base_addr = tswapal(base_addr);
6324     target_ldt_info->limit = tswap32(limit);
6325     target_ldt_info->flags = tswap32(flags);
6326     unlock_user_struct(target_ldt_info, ptr, 1);
6327     return 0;
6328 }
6329 
6330 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6331 {
6332     return -TARGET_ENOSYS;
6333 }
6334 #else
6335 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6336 {
6337     abi_long ret = 0;
6338     abi_ulong val;
6339     int idx;
6340 
6341     switch(code) {
6342     case TARGET_ARCH_SET_GS:
6343     case TARGET_ARCH_SET_FS:
6344         if (code == TARGET_ARCH_SET_GS)
6345             idx = R_GS;
6346         else
6347             idx = R_FS;
6348         cpu_x86_load_seg(env, idx, 0);
6349         env->segs[idx].base = addr;
6350         break;
6351     case TARGET_ARCH_GET_GS:
6352     case TARGET_ARCH_GET_FS:
6353         if (code == TARGET_ARCH_GET_GS)
6354             idx = R_GS;
6355         else
6356             idx = R_FS;
6357         val = env->segs[idx].base;
6358         if (put_user(val, addr, abi_ulong))
6359             ret = -TARGET_EFAULT;
6360         break;
6361     default:
6362         ret = -TARGET_EINVAL;
6363         break;
6364     }
6365     return ret;
6366 }
6367 #endif /* defined(TARGET_ABI32) */
6368 
6369 #endif /* defined(TARGET_I386) */
6370 
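/* Added note: host stack size (0x40000 = 256 KiB) given to each pthread
   created for a guest CLONE_VM thread below. */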
6371 #define NEW_STACK_SIZE 0x40000
6372 
6373 
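/*
 * Added overview comment: thread creation is a two-way handshake.  The parent
 * holds clone_lock for the whole of do_fork()'s CLONE_VM path, fills in a
 * new_thread_info and starts clone_func() in a fresh pthread.  The child
 * records its tid, signals info->cond to tell the parent it is running, then
 * blocks on clone_lock until the parent has finished initialising, and
 * finally enters cpu_loop().
 */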
6374 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6375 typedef struct {
6376     CPUArchState *env;
6377     pthread_mutex_t mutex;
6378     pthread_cond_t cond;
6379     pthread_t thread;
6380     uint32_t tid;
6381     abi_ulong child_tidptr;
6382     abi_ulong parent_tidptr;
6383     sigset_t sigmask;
6384 } new_thread_info;
6385 
6386 static void *clone_func(void *arg)
6387 {
6388     new_thread_info *info = arg;
6389     CPUArchState *env;
6390     CPUState *cpu;
6391     TaskState *ts;
6392 
6393     rcu_register_thread();
6394     tcg_register_thread();
6395     env = info->env;
6396     cpu = env_cpu(env);
6397     thread_cpu = cpu;
6398     ts = (TaskState *)cpu->opaque;
6399     info->tid = sys_gettid();
6400     task_settid(ts);
6401     if (info->child_tidptr)
6402         put_user_u32(info->tid, info->child_tidptr);
6403     if (info->parent_tidptr)
6404         put_user_u32(info->tid, info->parent_tidptr);
6405     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6406     /* Enable signals.  */
6407     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6408     /* Signal to the parent that we're ready.  */
6409     pthread_mutex_lock(&info->mutex);
6410     pthread_cond_broadcast(&info->cond);
6411     pthread_mutex_unlock(&info->mutex);
6412     /* Wait until the parent has finished initializing the tls state.  */
6413     pthread_mutex_lock(&clone_lock);
6414     pthread_mutex_unlock(&clone_lock);
6415     cpu_loop(env);
6416     /* never exits */
6417     return NULL;
6418 }
6419 
6420 /* do_fork() must return host values and target errnos (unlike most
6421    do_*() functions). */
6422 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6423                    abi_ulong parent_tidptr, target_ulong newtls,
6424                    abi_ulong child_tidptr)
6425 {
6426     CPUState *cpu = env_cpu(env);
6427     int ret;
6428     TaskState *ts;
6429     CPUState *new_cpu;
6430     CPUArchState *new_env;
6431     sigset_t sigmask;
6432 
6433     flags &= ~CLONE_IGNORED_FLAGS;
6434 
6435     /* Emulate vfork() with fork() */
6436     if (flags & CLONE_VFORK)
6437         flags &= ~(CLONE_VFORK | CLONE_VM);
6438 
6439     if (flags & CLONE_VM) {
6440         TaskState *parent_ts = (TaskState *)cpu->opaque;
6441         new_thread_info info;
6442         pthread_attr_t attr;
6443 
6444         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6445             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6446             return -TARGET_EINVAL;
6447         }
6448 
6449         ts = g_new0(TaskState, 1);
6450         init_task_state(ts);
6451 
6452         /* Grab a mutex so that thread setup appears atomic.  */
6453         pthread_mutex_lock(&clone_lock);
6454 
6455         /* we create a new CPU instance. */
6456         new_env = cpu_copy(env);
6457         /* Init regs that differ from the parent.  */
6458         cpu_clone_regs_child(new_env, newsp, flags);
6459         cpu_clone_regs_parent(env, flags);
6460         new_cpu = env_cpu(new_env);
6461         new_cpu->opaque = ts;
6462         ts->bprm = parent_ts->bprm;
6463         ts->info = parent_ts->info;
6464         ts->signal_mask = parent_ts->signal_mask;
6465 
6466         if (flags & CLONE_CHILD_CLEARTID) {
6467             ts->child_tidptr = child_tidptr;
6468         }
6469 
6470         if (flags & CLONE_SETTLS) {
6471             cpu_set_tls (new_env, newtls);
6472         }
6473 
6474         memset(&info, 0, sizeof(info));
6475         pthread_mutex_init(&info.mutex, NULL);
6476         pthread_mutex_lock(&info.mutex);
6477         pthread_cond_init(&info.cond, NULL);
6478         info.env = new_env;
6479         if (flags & CLONE_CHILD_SETTID) {
6480             info.child_tidptr = child_tidptr;
6481         }
6482         if (flags & CLONE_PARENT_SETTID) {
6483             info.parent_tidptr = parent_tidptr;
6484         }
6485 
6486         ret = pthread_attr_init(&attr);
6487         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6488         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6489         /* It is not safe to deliver signals until the child has finished
6490            initializing, so temporarily block all signals.  */
6491         sigfillset(&sigmask);
6492         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6493         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6494 
6495         /* If this is our first additional thread, we need to ensure we
6496          * generate code for parallel execution and flush old translations.
6497          */
6498         if (!parallel_cpus) {
6499             parallel_cpus = true;
6500             tb_flush(cpu);
6501         }
6502 
6503         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6504         /* TODO: Free new CPU state if thread creation failed.  */
6505 
6506         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6507         pthread_attr_destroy(&attr);
6508         if (ret == 0) {
6509             /* Wait for the child to initialize.  */
6510             pthread_cond_wait(&info.cond, &info.mutex);
6511             ret = info.tid;
6512         } else {
6513             ret = -1;
6514         }
6515         pthread_mutex_unlock(&info.mutex);
6516         pthread_cond_destroy(&info.cond);
6517         pthread_mutex_destroy(&info.mutex);
6518         pthread_mutex_unlock(&clone_lock);
6519     } else {
6520         /* if there is no CLONE_VM, we consider it a fork */
6521         if (flags & CLONE_INVALID_FORK_FLAGS) {
6522             return -TARGET_EINVAL;
6523         }
6524 
6525         /* We can't support custom termination signals */
6526         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6527             return -TARGET_EINVAL;
6528         }
6529 
6530         if (block_signals()) {
6531             return -TARGET_ERESTARTSYS;
6532         }
6533 
6534         fork_start();
6535         ret = fork();
6536         if (ret == 0) {
6537             /* Child Process.  */
6538             cpu_clone_regs_child(env, newsp, flags);
6539             fork_end(1);
6540             /* There is a race condition here.  The parent process could
6541                theoretically read the TID in the child process before the child
6542                tid is set.  This would require using either ptrace
6543                (not implemented) or having *_tidptr point at a shared memory
6544                mapping.  We can't repeat the spinlock hack used above because
6545                the child process gets its own copy of the lock.  */
6546             if (flags & CLONE_CHILD_SETTID)
6547                 put_user_u32(sys_gettid(), child_tidptr);
6548             if (flags & CLONE_PARENT_SETTID)
6549                 put_user_u32(sys_gettid(), parent_tidptr);
6550             ts = (TaskState *)cpu->opaque;
6551             if (flags & CLONE_SETTLS)
6552                 cpu_set_tls (env, newtls);
6553             if (flags & CLONE_CHILD_CLEARTID)
6554                 ts->child_tidptr = child_tidptr;
6555         } else {
6556             cpu_clone_regs_parent(env, flags);
6557             fork_end(0);
6558         }
6559     }
6560     return ret;
6561 }
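
/*
 * Illustrative note (assumption, based on typical glibc behaviour rather than
 * anything in this file): a guest pthread_create() normally issues clone()
 * with flags along the lines of
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * and therefore takes the CLONE_VM path above, while a plain fork()/vfork()
 * takes the else branch and really forks the emulator process.
 */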
6562 
6563 /* warning: doesn't handle Linux-specific flags... */
6564 static int target_to_host_fcntl_cmd(int cmd)
6565 {
6566     int ret;
6567 
6568     switch(cmd) {
6569     case TARGET_F_DUPFD:
6570     case TARGET_F_GETFD:
6571     case TARGET_F_SETFD:
6572     case TARGET_F_GETFL:
6573     case TARGET_F_SETFL:
6574     case TARGET_F_OFD_GETLK:
6575     case TARGET_F_OFD_SETLK:
6576     case TARGET_F_OFD_SETLKW:
6577         ret = cmd;
6578         break;
6579     case TARGET_F_GETLK:
6580         ret = F_GETLK64;
6581         break;
6582     case TARGET_F_SETLK:
6583         ret = F_SETLK64;
6584         break;
6585     case TARGET_F_SETLKW:
6586         ret = F_SETLKW64;
6587         break;
6588     case TARGET_F_GETOWN:
6589         ret = F_GETOWN;
6590         break;
6591     case TARGET_F_SETOWN:
6592         ret = F_SETOWN;
6593         break;
6594     case TARGET_F_GETSIG:
6595         ret = F_GETSIG;
6596         break;
6597     case TARGET_F_SETSIG:
6598         ret = F_SETSIG;
6599         break;
6600 #if TARGET_ABI_BITS == 32
6601     case TARGET_F_GETLK64:
6602         ret = F_GETLK64;
6603         break;
6604     case TARGET_F_SETLK64:
6605         ret = F_SETLK64;
6606         break;
6607     case TARGET_F_SETLKW64:
6608         ret = F_SETLKW64;
6609         break;
6610 #endif
6611     case TARGET_F_SETLEASE:
6612         ret = F_SETLEASE;
6613         break;
6614     case TARGET_F_GETLEASE:
6615         ret = F_GETLEASE;
6616         break;
6617 #ifdef F_DUPFD_CLOEXEC
6618     case TARGET_F_DUPFD_CLOEXEC:
6619         ret = F_DUPFD_CLOEXEC;
6620         break;
6621 #endif
6622     case TARGET_F_NOTIFY:
6623         ret = F_NOTIFY;
6624         break;
6625 #ifdef F_GETOWN_EX
6626     case TARGET_F_GETOWN_EX:
6627         ret = F_GETOWN_EX;
6628         break;
6629 #endif
6630 #ifdef F_SETOWN_EX
6631     case TARGET_F_SETOWN_EX:
6632         ret = F_SETOWN_EX;
6633         break;
6634 #endif
6635 #ifdef F_SETPIPE_SZ
6636     case TARGET_F_SETPIPE_SZ:
6637         ret = F_SETPIPE_SZ;
6638         break;
6639     case TARGET_F_GETPIPE_SZ:
6640         ret = F_GETPIPE_SZ;
6641         break;
6642 #endif
6643     default:
6644         ret = -TARGET_EINVAL;
6645         break;
6646     }
6647 
6648 #if defined(__powerpc64__)
6649     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6650      * 14, which are not supported by the kernel. The glibc fcntl wrapper
6651      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6652      * the syscall directly, adjust to what the kernel supports.
6653      */
6654     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6655         ret -= F_GETLK64 - 5;
6656     }
6657 #endif
6658 
6659     return ret;
6660 }
6661 
6662 #define FLOCK_TRANSTBL \
6663     switch (type) { \
6664     TRANSTBL_CONVERT(F_RDLCK); \
6665     TRANSTBL_CONVERT(F_WRLCK); \
6666     TRANSTBL_CONVERT(F_UNLCK); \
6667     TRANSTBL_CONVERT(F_EXLCK); \
6668     TRANSTBL_CONVERT(F_SHLCK); \
6669     }
6670 
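/*
 * Added note: FLOCK_TRANSTBL is a small X-macro.  It is expanded twice below
 * with different definitions of TRANSTBL_CONVERT, once mapping target lock
 * types to host values and once for the reverse direction, so the two
 * converters cannot get out of sync.
 */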
6671 static int target_to_host_flock(int type)
6672 {
6673 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6674     FLOCK_TRANSTBL
6675 #undef  TRANSTBL_CONVERT
6676     return -TARGET_EINVAL;
6677 }
6678 
6679 static int host_to_target_flock(int type)
6680 {
6681 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6682     FLOCK_TRANSTBL
6683 #undef  TRANSTBL_CONVERT
6684     /* if we don't know how to convert the value coming
6685      * from the host, copy it to the target field as-is
6686      */
6687     return type;
6688 }
6689 
6690 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6691                                             abi_ulong target_flock_addr)
6692 {
6693     struct target_flock *target_fl;
6694     int l_type;
6695 
6696     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6697         return -TARGET_EFAULT;
6698     }
6699 
6700     __get_user(l_type, &target_fl->l_type);
6701     l_type = target_to_host_flock(l_type);
6702     if (l_type < 0) {
6703         return l_type;
6704     }
6705     fl->l_type = l_type;
6706     __get_user(fl->l_whence, &target_fl->l_whence);
6707     __get_user(fl->l_start, &target_fl->l_start);
6708     __get_user(fl->l_len, &target_fl->l_len);
6709     __get_user(fl->l_pid, &target_fl->l_pid);
6710     unlock_user_struct(target_fl, target_flock_addr, 0);
6711     return 0;
6712 }
6713 
6714 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6715                                           const struct flock64 *fl)
6716 {
6717     struct target_flock *target_fl;
6718     short l_type;
6719 
6720     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6721         return -TARGET_EFAULT;
6722     }
6723 
6724     l_type = host_to_target_flock(fl->l_type);
6725     __put_user(l_type, &target_fl->l_type);
6726     __put_user(fl->l_whence, &target_fl->l_whence);
6727     __put_user(fl->l_start, &target_fl->l_start);
6728     __put_user(fl->l_len, &target_fl->l_len);
6729     __put_user(fl->l_pid, &target_fl->l_pid);
6730     unlock_user_struct(target_fl, target_flock_addr, 1);
6731     return 0;
6732 }
6733 
6734 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6735 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6736 
6737 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6738 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6739                                                    abi_ulong target_flock_addr)
6740 {
6741     struct target_oabi_flock64 *target_fl;
6742     int l_type;
6743 
6744     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6745         return -TARGET_EFAULT;
6746     }
6747 
6748     __get_user(l_type, &target_fl->l_type);
6749     l_type = target_to_host_flock(l_type);
6750     if (l_type < 0) {
6751         return l_type;
6752     }
6753     fl->l_type = l_type;
6754     __get_user(fl->l_whence, &target_fl->l_whence);
6755     __get_user(fl->l_start, &target_fl->l_start);
6756     __get_user(fl->l_len, &target_fl->l_len);
6757     __get_user(fl->l_pid, &target_fl->l_pid);
6758     unlock_user_struct(target_fl, target_flock_addr, 0);
6759     return 0;
6760 }
6761 
6762 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6763                                                  const struct flock64 *fl)
6764 {
6765     struct target_oabi_flock64 *target_fl;
6766     short l_type;
6767 
6768     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6769         return -TARGET_EFAULT;
6770     }
6771 
6772     l_type = host_to_target_flock(fl->l_type);
6773     __put_user(l_type, &target_fl->l_type);
6774     __put_user(fl->l_whence, &target_fl->l_whence);
6775     __put_user(fl->l_start, &target_fl->l_start);
6776     __put_user(fl->l_len, &target_fl->l_len);
6777     __put_user(fl->l_pid, &target_fl->l_pid);
6778     unlock_user_struct(target_fl, target_flock_addr, 1);
6779     return 0;
6780 }
6781 #endif
6782 
6783 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6784                                               abi_ulong target_flock_addr)
6785 {
6786     struct target_flock64 *target_fl;
6787     int l_type;
6788 
6789     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6790         return -TARGET_EFAULT;
6791     }
6792 
6793     __get_user(l_type, &target_fl->l_type);
6794     l_type = target_to_host_flock(l_type);
6795     if (l_type < 0) {
6796         return l_type;
6797     }
6798     fl->l_type = l_type;
6799     __get_user(fl->l_whence, &target_fl->l_whence);
6800     __get_user(fl->l_start, &target_fl->l_start);
6801     __get_user(fl->l_len, &target_fl->l_len);
6802     __get_user(fl->l_pid, &target_fl->l_pid);
6803     unlock_user_struct(target_fl, target_flock_addr, 0);
6804     return 0;
6805 }
6806 
6807 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6808                                             const struct flock64 *fl)
6809 {
6810     struct target_flock64 *target_fl;
6811     short l_type;
6812 
6813     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6814         return -TARGET_EFAULT;
6815     }
6816 
6817     l_type = host_to_target_flock(fl->l_type);
6818     __put_user(l_type, &target_fl->l_type);
6819     __put_user(fl->l_whence, &target_fl->l_whence);
6820     __put_user(fl->l_start, &target_fl->l_start);
6821     __put_user(fl->l_len, &target_fl->l_len);
6822     __put_user(fl->l_pid, &target_fl->l_pid);
6823     unlock_user_struct(target_fl, target_flock_addr, 1);
6824     return 0;
6825 }
6826 
6827 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6828 {
6829     struct flock64 fl64;
6830 #ifdef F_GETOWN_EX
6831     struct f_owner_ex fox;
6832     struct target_f_owner_ex *target_fox;
6833 #endif
6834     abi_long ret;
6835     int host_cmd = target_to_host_fcntl_cmd(cmd);
6836 
6837     if (host_cmd == -TARGET_EINVAL)
6838         return host_cmd;
6839 
6840     switch(cmd) {
6841     case TARGET_F_GETLK:
6842         ret = copy_from_user_flock(&fl64, arg);
6843         if (ret) {
6844             return ret;
6845         }
6846         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6847         if (ret == 0) {
6848             ret = copy_to_user_flock(arg, &fl64);
6849         }
6850         break;
6851 
6852     case TARGET_F_SETLK:
6853     case TARGET_F_SETLKW:
6854         ret = copy_from_user_flock(&fl64, arg);
6855         if (ret) {
6856             return ret;
6857         }
6858         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6859         break;
6860 
6861     case TARGET_F_GETLK64:
6862     case TARGET_F_OFD_GETLK:
6863         ret = copy_from_user_flock64(&fl64, arg);
6864         if (ret) {
6865             return ret;
6866         }
6867         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6868         if (ret == 0) {
6869             ret = copy_to_user_flock64(arg, &fl64);
6870         }
6871         break;
6872     case TARGET_F_SETLK64:
6873     case TARGET_F_SETLKW64:
6874     case TARGET_F_OFD_SETLK:
6875     case TARGET_F_OFD_SETLKW:
6876         ret = copy_from_user_flock64(&fl64, arg);
6877         if (ret) {
6878             return ret;
6879         }
6880         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6881         break;
6882 
6883     case TARGET_F_GETFL:
6884         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6885         if (ret >= 0) {
6886             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6887         }
6888         break;
6889 
6890     case TARGET_F_SETFL:
6891         ret = get_errno(safe_fcntl(fd, host_cmd,
6892                                    target_to_host_bitmask(arg,
6893                                                           fcntl_flags_tbl)));
6894         break;
6895 
6896 #ifdef F_GETOWN_EX
6897     case TARGET_F_GETOWN_EX:
6898         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6899         if (ret >= 0) {
6900             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6901                 return -TARGET_EFAULT;
6902             target_fox->type = tswap32(fox.type);
6903             target_fox->pid = tswap32(fox.pid);
6904             unlock_user_struct(target_fox, arg, 1);
6905         }
6906         break;
6907 #endif
6908 
6909 #ifdef F_SETOWN_EX
6910     case TARGET_F_SETOWN_EX:
6911         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6912             return -TARGET_EFAULT;
6913         fox.type = tswap32(target_fox->type);
6914         fox.pid = tswap32(target_fox->pid);
6915         unlock_user_struct(target_fox, arg, 0);
6916         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6917         break;
6918 #endif
6919 
6920     case TARGET_F_SETSIG:
6921         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6922         break;
6923 
6924     case TARGET_F_GETSIG:
6925         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6926         break;
6927 
6928     case TARGET_F_SETOWN:
6929     case TARGET_F_GETOWN:
6930     case TARGET_F_SETLEASE:
6931     case TARGET_F_GETLEASE:
6932     case TARGET_F_SETPIPE_SZ:
6933     case TARGET_F_GETPIPE_SZ:
6934         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6935         break;
6936 
6937     default:
6938         ret = get_errno(safe_fcntl(fd, cmd, arg));
6939         break;
6940     }
6941     return ret;
6942 }
6943 
6944 #ifdef USE_UID16
6945 
6946 static inline int high2lowuid(int uid)
6947 {
6948     if (uid > 65535)
6949         return 65534;
6950     else
6951         return uid;
6952 }
6953 
6954 static inline int high2lowgid(int gid)
6955 {
6956     if (gid > 65535)
6957         return 65534;
6958     else
6959         return gid;
6960 }
6961 
6962 static inline int low2highuid(int uid)
6963 {
6964     if ((int16_t)uid == -1)
6965         return -1;
6966     else
6967         return uid;
6968 }
6969 
6970 static inline int low2highgid(int gid)
6971 {
6972     if ((int16_t)gid == -1)
6973         return -1;
6974     else
6975         return gid;
6976 }
6977 static inline int tswapid(int id)
6978 {
6979     return tswap16(id);
6980 }
6981 
6982 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6983 
6984 #else /* !USE_UID16 */
6985 static inline int high2lowuid(int uid)
6986 {
6987     return uid;
6988 }
6989 static inline int high2lowgid(int gid)
6990 {
6991     return gid;
6992 }
6993 static inline int low2highuid(int uid)
6994 {
6995     return uid;
6996 }
6997 static inline int low2highgid(int gid)
6998 {
6999     return gid;
7000 }
7001 static inline int tswapid(int id)
7002 {
7003     return tswap32(id);
7004 }
7005 
7006 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7007 
7008 #endif /* USE_UID16 */
7009 
7010 /* We must do direct syscalls for setting UID/GID, because we want to
7011  * implement the Linux system call semantics of "change only for this thread",
7012  * not the libc/POSIX semantics of "change for all threads in process".
7013  * (See http://ewontfix.com/17/ for more details.)
7014  * We use the 32-bit version of the syscalls if present; if it is not
7015  * then either the host architecture supports 32-bit UIDs natively with
7016  * the standard syscall, or the 16-bit UID is the best we can do.
7017  */
7018 #ifdef __NR_setuid32
7019 #define __NR_sys_setuid __NR_setuid32
7020 #else
7021 #define __NR_sys_setuid __NR_setuid
7022 #endif
7023 #ifdef __NR_setgid32
7024 #define __NR_sys_setgid __NR_setgid32
7025 #else
7026 #define __NR_sys_setgid __NR_setgid
7027 #endif
7028 #ifdef __NR_setresuid32
7029 #define __NR_sys_setresuid __NR_setresuid32
7030 #else
7031 #define __NR_sys_setresuid __NR_setresuid
7032 #endif
7033 #ifdef __NR_setresgid32
7034 #define __NR_sys_setresgid __NR_setresgid32
7035 #else
7036 #define __NR_sys_setresgid __NR_setresgid
7037 #endif
7038 
7039 _syscall1(int, sys_setuid, uid_t, uid)
7040 _syscall1(int, sys_setgid, gid_t, gid)
7041 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7042 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
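
/*
 * Illustrative note: a syscall handler further down can then use these
 * wrappers as, for example (sketch, not a quote of the original code):
 *
 *     case TARGET_NR_setuid:
 *         return get_errno(sys_setuid(low2highuid(arg1)));
 *
 * so that only the calling thread's credentials change, per the kernel
 * semantics described above.
 */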
7043 
7044 void syscall_init(void)
7045 {
7046     IOCTLEntry *ie;
7047     const argtype *arg_type;
7048     int size;
7049     int i;
7050 
7051     thunk_init(STRUCT_MAX);
7052 
7053 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7054 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7055 #include "syscall_types.h"
7056 #undef STRUCT
7057 #undef STRUCT_SPECIAL
7058 
7059     /* Build the target_to_host_errno_table[] from
7060      * host_to_target_errno_table[]. */
7061     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7062         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7063     }
7064 
7065     /* We patch the ioctl size if necessary. We rely on the fact that
7066        no ioctl has all the bits set to '1' in the size field. */
7067     ie = ioctl_entries;
7068     while (ie->target_cmd != 0) {
7069         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7070             TARGET_IOC_SIZEMASK) {
7071             arg_type = ie->arg_type;
7072             if (arg_type[0] != TYPE_PTR) {
7073                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7074                         ie->target_cmd);
7075                 exit(1);
7076             }
7077             arg_type++;
7078             size = thunk_type_size(arg_type, 0);
7079             ie->target_cmd = (ie->target_cmd &
7080                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7081                 (size << TARGET_IOC_SIZESHIFT);
7082         }
7083 
7084         /* automatic consistency check if same arch */
7085 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7086     (defined(__x86_64__) && defined(TARGET_X86_64))
7087         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7088             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7089                     ie->name, ie->target_cmd, ie->host_cmd);
7090         }
7091 #endif
7092         ie++;
7093     }
7094 }
7095 
7096 #ifdef TARGET_NR_truncate64
7097 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7098                                          abi_long arg2,
7099                                          abi_long arg3,
7100                                          abi_long arg4)
7101 {
7102     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7103         arg2 = arg3;
7104         arg3 = arg4;
7105     }
7106     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7107 }
7108 #endif
7109 
7110 #ifdef TARGET_NR_ftruncate64
7111 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7112                                           abi_long arg2,
7113                                           abi_long arg3,
7114                                           abi_long arg4)
7115 {
7116     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7117         arg2 = arg3;
7118         arg3 = arg4;
7119     }
7120     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7121 }
7122 #endif
7123 
7124 #if defined(TARGET_NR_timer_settime) || \
7125     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7126 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7127                                                  abi_ulong target_addr)
7128 {
7129     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7130                                 offsetof(struct target_itimerspec,
7131                                          it_interval)) ||
7132         target_to_host_timespec(&host_its->it_value, target_addr +
7133                                 offsetof(struct target_itimerspec,
7134                                          it_value))) {
7135         return -TARGET_EFAULT;
7136     }
7137 
7138     return 0;
7139 }
7140 #endif
7141 
7142 #if defined(TARGET_NR_timer_settime64) || \
7143     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7144 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7145                                                    abi_ulong target_addr)
7146 {
7147     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7148                                   offsetof(struct target__kernel_itimerspec,
7149                                            it_interval)) ||
7150         target_to_host_timespec64(&host_its->it_value, target_addr +
7151                                   offsetof(struct target__kernel_itimerspec,
7152                                            it_value))) {
7153         return -TARGET_EFAULT;
7154     }
7155 
7156     return 0;
7157 }
7158 #endif
7159 
7160 #if ((defined(TARGET_NR_timerfd_gettime) || \
7161       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7162       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7163 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7164                                                  struct itimerspec *host_its)
7165 {
7166     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7167                                                        it_interval),
7168                                 &host_its->it_interval) ||
7169         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7170                                                        it_value),
7171                                 &host_its->it_value)) {
7172         return -TARGET_EFAULT;
7173     }
7174     return 0;
7175 }
7176 #endif
7177 
7178 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7179       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7180       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7181 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7182                                                    struct itimerspec *host_its)
7183 {
7184     if (host_to_target_timespec64(target_addr +
7185                                   offsetof(struct target__kernel_itimerspec,
7186                                            it_interval),
7187                                   &host_its->it_interval) ||
7188         host_to_target_timespec64(target_addr +
7189                                   offsetof(struct target__kernel_itimerspec,
7190                                            it_value),
7191                                   &host_its->it_value)) {
7192         return -TARGET_EFAULT;
7193     }
7194     return 0;
7195 }
7196 #endif
7197 
7198 #if defined(TARGET_NR_adjtimex) || \
7199     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7200 static inline abi_long target_to_host_timex(struct timex *host_tx,
7201                                             abi_long target_addr)
7202 {
7203     struct target_timex *target_tx;
7204 
7205     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7206         return -TARGET_EFAULT;
7207     }
7208 
7209     __get_user(host_tx->modes, &target_tx->modes);
7210     __get_user(host_tx->offset, &target_tx->offset);
7211     __get_user(host_tx->freq, &target_tx->freq);
7212     __get_user(host_tx->maxerror, &target_tx->maxerror);
7213     __get_user(host_tx->esterror, &target_tx->esterror);
7214     __get_user(host_tx->status, &target_tx->status);
7215     __get_user(host_tx->constant, &target_tx->constant);
7216     __get_user(host_tx->precision, &target_tx->precision);
7217     __get_user(host_tx->tolerance, &target_tx->tolerance);
7218     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7219     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7220     __get_user(host_tx->tick, &target_tx->tick);
7221     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7222     __get_user(host_tx->jitter, &target_tx->jitter);
7223     __get_user(host_tx->shift, &target_tx->shift);
7224     __get_user(host_tx->stabil, &target_tx->stabil);
7225     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7226     __get_user(host_tx->calcnt, &target_tx->calcnt);
7227     __get_user(host_tx->errcnt, &target_tx->errcnt);
7228     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7229     __get_user(host_tx->tai, &target_tx->tai);
7230 
7231     unlock_user_struct(target_tx, target_addr, 0);
7232     return 0;
7233 }
7234 
7235 static inline abi_long host_to_target_timex(abi_long target_addr,
7236                                             struct timex *host_tx)
7237 {
7238     struct target_timex *target_tx;
7239 
7240     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7241         return -TARGET_EFAULT;
7242     }
7243 
7244     __put_user(host_tx->modes, &target_tx->modes);
7245     __put_user(host_tx->offset, &target_tx->offset);
7246     __put_user(host_tx->freq, &target_tx->freq);
7247     __put_user(host_tx->maxerror, &target_tx->maxerror);
7248     __put_user(host_tx->esterror, &target_tx->esterror);
7249     __put_user(host_tx->status, &target_tx->status);
7250     __put_user(host_tx->constant, &target_tx->constant);
7251     __put_user(host_tx->precision, &target_tx->precision);
7252     __put_user(host_tx->tolerance, &target_tx->tolerance);
7253     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7254     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7255     __put_user(host_tx->tick, &target_tx->tick);
7256     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7257     __put_user(host_tx->jitter, &target_tx->jitter);
7258     __put_user(host_tx->shift, &target_tx->shift);
7259     __put_user(host_tx->stabil, &target_tx->stabil);
7260     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7261     __put_user(host_tx->calcnt, &target_tx->calcnt);
7262     __put_user(host_tx->errcnt, &target_tx->errcnt);
7263     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7264     __put_user(host_tx->tai, &target_tx->tai);
7265 
7266     unlock_user_struct(target_tx, target_addr, 1);
7267     return 0;
7268 }
7269 #endif
7270 
7271 
7272 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7273 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7274                                               abi_long target_addr)
7275 {
7276     struct target__kernel_timex *target_tx;
7277 
7278     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7279                                  offsetof(struct target__kernel_timex,
7280                                           time))) {
7281         return -TARGET_EFAULT;
7282     }
7283 
7284     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7285         return -TARGET_EFAULT;
7286     }
7287 
7288     __get_user(host_tx->modes, &target_tx->modes);
7289     __get_user(host_tx->offset, &target_tx->offset);
7290     __get_user(host_tx->freq, &target_tx->freq);
7291     __get_user(host_tx->maxerror, &target_tx->maxerror);
7292     __get_user(host_tx->esterror, &target_tx->esterror);
7293     __get_user(host_tx->status, &target_tx->status);
7294     __get_user(host_tx->constant, &target_tx->constant);
7295     __get_user(host_tx->precision, &target_tx->precision);
7296     __get_user(host_tx->tolerance, &target_tx->tolerance);
7297     __get_user(host_tx->tick, &target_tx->tick);
7298     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7299     __get_user(host_tx->jitter, &target_tx->jitter);
7300     __get_user(host_tx->shift, &target_tx->shift);
7301     __get_user(host_tx->stabil, &target_tx->stabil);
7302     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7303     __get_user(host_tx->calcnt, &target_tx->calcnt);
7304     __get_user(host_tx->errcnt, &target_tx->errcnt);
7305     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7306     __get_user(host_tx->tai, &target_tx->tai);
7307 
7308     unlock_user_struct(target_tx, target_addr, 0);
7309     return 0;
7310 }
7311 
7312 static inline abi_long host_to_target_timex64(abi_long target_addr,
7313                                               struct timex *host_tx)
7314 {
7315     struct target__kernel_timex *target_tx;
7316 
7317     if (copy_to_user_timeval64(target_addr +
7318                               offsetof(struct target__kernel_timex, time),
7319                               &host_tx->time)) {
7320         return -TARGET_EFAULT;
7321     }
7322 
7323     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7324         return -TARGET_EFAULT;
7325     }
7326 
7327     __put_user(host_tx->modes, &target_tx->modes);
7328     __put_user(host_tx->offset, &target_tx->offset);
7329     __put_user(host_tx->freq, &target_tx->freq);
7330     __put_user(host_tx->maxerror, &target_tx->maxerror);
7331     __put_user(host_tx->esterror, &target_tx->esterror);
7332     __put_user(host_tx->status, &target_tx->status);
7333     __put_user(host_tx->constant, &target_tx->constant);
7334     __put_user(host_tx->precision, &target_tx->precision);
7335     __put_user(host_tx->tolerance, &target_tx->tolerance);
7336     __put_user(host_tx->tick, &target_tx->tick);
7337     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7338     __put_user(host_tx->jitter, &target_tx->jitter);
7339     __put_user(host_tx->shift, &target_tx->shift);
7340     __put_user(host_tx->stabil, &target_tx->stabil);
7341     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7342     __put_user(host_tx->calcnt, &target_tx->calcnt);
7343     __put_user(host_tx->errcnt, &target_tx->errcnt);
7344     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7345     __put_user(host_tx->tai, &target_tx->tai);
7346 
7347     unlock_user_struct(target_tx, target_addr, 1);
7348     return 0;
7349 }
7350 #endif
7351 
7352 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7353                                                abi_ulong target_addr)
7354 {
7355     struct target_sigevent *target_sevp;
7356 
7357     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7358         return -TARGET_EFAULT;
7359     }
7360 
7361     /* This union is awkward on 64 bit systems because it has a 32 bit
7362      * integer and a pointer in it; we follow the conversion approach
7363      * used for handling sigval types in signal.c so the guest should get
7364      * the correct value back even if we did a 64 bit byteswap and it's
7365      * using the 32 bit integer.
7366      */
7367     host_sevp->sigev_value.sival_ptr =
7368         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7369     host_sevp->sigev_signo =
7370         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7371     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7372     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7373 
7374     unlock_user_struct(target_sevp, target_addr, 1);
7375     return 0;
7376 }
7377 
7378 #if defined(TARGET_NR_mlockall)
7379 static inline int target_to_host_mlockall_arg(int arg)
7380 {
7381     int result = 0;
7382 
7383     if (arg & TARGET_MCL_CURRENT) {
7384         result |= MCL_CURRENT;
7385     }
7386     if (arg & TARGET_MCL_FUTURE) {
7387         result |= MCL_FUTURE;
7388     }
7389 #ifdef MCL_ONFAULT
7390     if (arg & TARGET_MCL_ONFAULT) {
7391         result |= MCL_ONFAULT;
7392     }
7393 #endif
7394 
7395     return result;
7396 }
7397 #endif
7398 
7399 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7400      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7401      defined(TARGET_NR_newfstatat))
7402 static inline abi_long host_to_target_stat64(void *cpu_env,
7403                                              abi_ulong target_addr,
7404                                              struct stat *host_st)
7405 {
7406 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7407     if (((CPUARMState *)cpu_env)->eabi) {
7408         struct target_eabi_stat64 *target_st;
7409 
7410         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7411             return -TARGET_EFAULT;
7412         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7413         __put_user(host_st->st_dev, &target_st->st_dev);
7414         __put_user(host_st->st_ino, &target_st->st_ino);
7415 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7416         __put_user(host_st->st_ino, &target_st->__st_ino);
7417 #endif
7418         __put_user(host_st->st_mode, &target_st->st_mode);
7419         __put_user(host_st->st_nlink, &target_st->st_nlink);
7420         __put_user(host_st->st_uid, &target_st->st_uid);
7421         __put_user(host_st->st_gid, &target_st->st_gid);
7422         __put_user(host_st->st_rdev, &target_st->st_rdev);
7423         __put_user(host_st->st_size, &target_st->st_size);
7424         __put_user(host_st->st_blksize, &target_st->st_blksize);
7425         __put_user(host_st->st_blocks, &target_st->st_blocks);
7426         __put_user(host_st->st_atime, &target_st->target_st_atime);
7427         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7428         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7429 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7430         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7431         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7432         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7433 #endif
7434         unlock_user_struct(target_st, target_addr, 1);
7435     } else
7436 #endif
7437     {
7438 #if defined(TARGET_HAS_STRUCT_STAT64)
7439         struct target_stat64 *target_st;
7440 #else
7441         struct target_stat *target_st;
7442 #endif
7443 
7444         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7445             return -TARGET_EFAULT;
7446         memset(target_st, 0, sizeof(*target_st));
7447         __put_user(host_st->st_dev, &target_st->st_dev);
7448         __put_user(host_st->st_ino, &target_st->st_ino);
7449 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7450         __put_user(host_st->st_ino, &target_st->__st_ino);
7451 #endif
7452         __put_user(host_st->st_mode, &target_st->st_mode);
7453         __put_user(host_st->st_nlink, &target_st->st_nlink);
7454         __put_user(host_st->st_uid, &target_st->st_uid);
7455         __put_user(host_st->st_gid, &target_st->st_gid);
7456         __put_user(host_st->st_rdev, &target_st->st_rdev);
7457         /* XXX: better use of kernel struct */
7458         __put_user(host_st->st_size, &target_st->st_size);
7459         __put_user(host_st->st_blksize, &target_st->st_blksize);
7460         __put_user(host_st->st_blocks, &target_st->st_blocks);
7461         __put_user(host_st->st_atime, &target_st->target_st_atime);
7462         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7463         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7464 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7465         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7466         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7467         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7468 #endif
7469         unlock_user_struct(target_st, target_addr, 1);
7470     }
7471 
7472     return 0;
7473 }
7474 #endif
7475 
7476 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7477 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7478                                             abi_ulong target_addr)
7479 {
7480     struct target_statx *target_stx;
7481 
7482     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7483         return -TARGET_EFAULT;
7484     }
7485     memset(target_stx, 0, sizeof(*target_stx));
7486 
7487     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7488     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7489     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7490     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7491     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7492     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7493     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7494     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7495     __put_user(host_stx->stx_size, &target_stx->stx_size);
7496     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7497     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7498     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7499     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7500     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7501     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7502     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7503     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7504     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7505     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7506     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7507     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7508     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7509     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7510 
7511     unlock_user_struct(target_stx, target_addr, 1);
7512 
7513     return 0;
7514 }
7515 #endif
7516 
7517 static int do_sys_futex(int *uaddr, int op, int val,
7518                          const struct timespec *timeout, int *uaddr2,
7519                          int val3)
7520 {
7521 #if HOST_LONG_BITS == 64
7522 #if defined(__NR_futex)
7523     /* time_t is always 64-bit here, so no _time64 variant is defined */
7524     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7525 
7526 #endif
7527 #else /* HOST_LONG_BITS == 64 */
7528 #if defined(__NR_futex_time64)
7529     if (sizeof(timeout->tv_sec) == 8) {
7530         /* _time64 function on 32bit arch */
7531         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7532     }
7533 #endif
7534 #if defined(__NR_futex)
7535     /* old function on 32bit arch */
7536     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7537 #endif
7538 #endif /* HOST_LONG_BITS == 64 */
7539     g_assert_not_reached();
7540 }
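
/*
 * Added note: do_sys_futex() above issues the raw syscall and is intended for
 * QEMU-internal wakeups (e.g. the CLONE_CHILD_CLEARTID handling), whereas
 * do_safe_futex() below goes through the safe_syscall wrappers so that a
 * guest signal arriving during a blocking FUTEX_WAIT is handled correctly and
 * the call can be restarted.
 */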
7541 
7542 static int do_safe_futex(int *uaddr, int op, int val,
7543                          const struct timespec *timeout, int *uaddr2,
7544                          int val3)
7545 {
7546 #if HOST_LONG_BITS == 64
7547 #if defined(__NR_futex)
7548     /* time_t is always 64-bit here, so no _time64 variant is defined */
7549     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7550 #endif
7551 #else /* HOST_LONG_BITS == 64 */
7552 #if defined(__NR_futex_time64)
7553     if (sizeof(timeout->tv_sec) == 8) {
7554         /* _time64 function on 32bit arch */
7555         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7556                                            val3));
7557     }
7558 #endif
7559 #if defined(__NR_futex)
7560     /* old function on 32bit arch */
7561     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7562 #endif
7563 #endif /* HOST_LONG_BITS == 64 */
7564     return -TARGET_ENOSYS;
7565 }
7566 
7567 /* ??? Using host futex calls even when target atomic operations
7568    are not really atomic probably breaks things.  However, implementing
7569    futexes locally would make futexes shared between multiple processes
7570    tricky.  On the other hand, they're probably useless in that case
7571    because guest atomic operations won't work either.  */
7572 #if defined(TARGET_NR_futex)
7573 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7574                     target_ulong uaddr2, int val3)
7575 {
7576     struct timespec ts, *pts;
7577     int base_op;
7578 
7579     /* ??? We assume FUTEX_* constants are the same on both host
7580        and target.  */
7581 #ifdef FUTEX_CMD_MASK
7582     base_op = op & FUTEX_CMD_MASK;
7583 #else
7584     base_op = op;
7585 #endif
7586     switch (base_op) {
7587     case FUTEX_WAIT:
7588     case FUTEX_WAIT_BITSET:
7589         if (timeout) {
7590             pts = &ts;
7591             target_to_host_timespec(pts, timeout);
7592         } else {
7593             pts = NULL;
7594         }
7595         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7596     case FUTEX_WAKE:
7597         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7598     case FUTEX_FD:
7599         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7600     case FUTEX_REQUEUE:
7601     case FUTEX_CMP_REQUEUE:
7602     case FUTEX_WAKE_OP:
7603         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7604            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7605            But the prototype takes a `struct timespec *'; insert casts
7606            to satisfy the compiler.  We do not need to tswap TIMEOUT
7607            since it's not compared to guest memory.  */
7608         pts = (struct timespec *)(uintptr_t) timeout;
7609         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7610                              (base_op == FUTEX_CMP_REQUEUE
7611                                       ? tswap32(val3)
7612                                       : val3));
7613     default:
7614         return -TARGET_ENOSYS;
7615     }
7616 }
7617 #endif
7618 
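/*
 * Editor's illustrative sketch (not part of the original source): a guest
 * futex(FUTEX_WAIT) request flows through do_futex() above roughly as
 * follows -- the guest timespec is converted with target_to_host_timespec(),
 * the guest address is translated with g2h(), the expected value is
 * byte-swapped with tswap32(), and do_safe_futex() then picks __NR_futex or
 * __NR_futex_time64 depending on the host's time_t width.  The names
 * guest_uaddr/guest_timeout are placeholders for guest addresses.
 *
 *     struct timespec ts;
 *     target_to_host_timespec(&ts, guest_timeout);
 *     do_safe_futex(g2h(guest_uaddr), FUTEX_WAIT, tswap32(val), &ts,
 *                   NULL, 0);
 */
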
7619 #if defined(TARGET_NR_futex_time64)
7620 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7621                            target_ulong uaddr2, int val3)
7622 {
7623     struct timespec ts, *pts;
7624     int base_op;
7625 
7626     /* ??? We assume FUTEX_* constants are the same on both host
7627        and target.  */
7628 #ifdef FUTEX_CMD_MASK
7629     base_op = op & FUTEX_CMD_MASK;
7630 #else
7631     base_op = op;
7632 #endif
7633     switch (base_op) {
7634     case FUTEX_WAIT:
7635     case FUTEX_WAIT_BITSET:
7636         if (timeout) {
7637             pts = &ts;
7638             if (target_to_host_timespec64(pts, timeout)) {
7639                 return -TARGET_EFAULT;
7640             }
7641         } else {
7642             pts = NULL;
7643         }
7644         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7645     case FUTEX_WAKE:
7646         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7647     case FUTEX_FD:
7648         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7649     case FUTEX_REQUEUE:
7650     case FUTEX_CMP_REQUEUE:
7651     case FUTEX_WAKE_OP:
7652         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7653            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7654            But the prototype takes a `struct timespec *'; insert casts
7655            to satisfy the compiler.  We do not need to tswap TIMEOUT
7656            since it's not compared to guest memory.  */
7657         pts = (struct timespec *)(uintptr_t) timeout;
7658         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7659                              (base_op == FUTEX_CMP_REQUEUE
7660                                       ? tswap32(val3)
7661                                       : val3));
7662     default:
7663         return -TARGET_ENOSYS;
7664     }
7665 }
7666 #endif
7667 
7668 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7669 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7670                                      abi_long handle, abi_long mount_id,
7671                                      abi_long flags)
7672 {
7673     struct file_handle *target_fh;
7674     struct file_handle *fh;
7675     int mid = 0;
7676     abi_long ret;
7677     char *name;
7678     unsigned int size, total_size;
7679 
7680     if (get_user_s32(size, handle)) {
7681         return -TARGET_EFAULT;
7682     }
7683 
7684     name = lock_user_string(pathname);
7685     if (!name) {
7686         return -TARGET_EFAULT;
7687     }
7688 
7689     total_size = sizeof(struct file_handle) + size;
7690     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7691     if (!target_fh) {
7692         unlock_user(name, pathname, 0);
7693         return -TARGET_EFAULT;
7694     }
7695 
7696     fh = g_malloc0(total_size);
7697     fh->handle_bytes = size;
7698 
7699     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7700     unlock_user(name, pathname, 0);
7701 
7702     /* man name_to_handle_at(2):
7703      * Other than the use of the handle_bytes field, the caller should treat
7704      * the file_handle structure as an opaque data type
7705      */
7706 
7707     memcpy(target_fh, fh, total_size);
7708     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7709     target_fh->handle_type = tswap32(fh->handle_type);
7710     g_free(fh);
7711     unlock_user(target_fh, handle, total_size);
7712 
7713     if (put_user_s32(mid, mount_id)) {
7714         return -TARGET_EFAULT;
7715     }
7716 
7717     return ret;
7718 
7719 }
7720 #endif
7721 
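/*
 * Editor's note (not part of the original source): the guest supplies a
 * struct file_handle whose handle_bytes field is read first with
 * get_user_s32() to size the lock_user() window; after the host
 * name_to_handle_at() call, the handle_bytes and handle_type fields are
 * stored back in target byte order via tswap32(), while f_handle itself is
 * copied verbatim, since the man page treats it as opaque data.
 */
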
7722 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7723 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7724                                      abi_long flags)
7725 {
7726     struct file_handle *target_fh;
7727     struct file_handle *fh;
7728     unsigned int size, total_size;
7729     abi_long ret;
7730 
7731     if (get_user_s32(size, handle)) {
7732         return -TARGET_EFAULT;
7733     }
7734 
7735     total_size = sizeof(struct file_handle) + size;
7736     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7737     if (!target_fh) {
7738         return -TARGET_EFAULT;
7739     }
7740 
7741     fh = g_memdup(target_fh, total_size);
7742     fh->handle_bytes = size;
7743     fh->handle_type = tswap32(target_fh->handle_type);
7744 
7745     ret = get_errno(open_by_handle_at(mount_fd, fh,
7746                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7747 
7748     g_free(fh);
7749 
7750     unlock_user(target_fh, handle, total_size);
7751 
7752     return ret;
7753 }
7754 #endif
7755 
7756 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7757 
7758 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7759 {
7760     int host_flags;
7761     target_sigset_t *target_mask;
7762     sigset_t host_mask;
7763     abi_long ret;
7764 
7765     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7766         return -TARGET_EINVAL;
7767     }
7768     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7769         return -TARGET_EFAULT;
7770     }
7771 
7772     target_to_host_sigset(&host_mask, target_mask);
7773 
7774     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7775 
7776     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7777     if (ret >= 0) {
7778         fd_trans_register(ret, &target_signalfd_trans);
7779     }
7780 
7781     unlock_user_struct(target_mask, mask, 0);
7782 
7783     return ret;
7784 }
7785 #endif
7786 
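/*
 * Editor's illustrative sketch (not part of the original source): only
 * TARGET_O_NONBLOCK | TARGET_O_CLOEXEC are accepted by do_signalfd4(); the
 * flags are translated through fcntl_flags_tbl and the returned descriptor
 * is registered with target_signalfd_trans so that data later read from it
 * can be converted for the target (see the fd_trans hook in TARGET_NR_read).
 *
 *     ret = do_signalfd4(fd, guest_mask_addr, TARGET_O_NONBLOCK);
 *     // on success, reads from ret are post-processed by the translator
 *
 * (guest_mask_addr is a placeholder for a guest sigset address.)
 */
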
7787 /* Map host to target signal numbers for the wait family of syscalls.
7788    Assume all other status bits are the same.  */
7789 int host_to_target_waitstatus(int status)
7790 {
7791     if (WIFSIGNALED(status)) {
7792         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7793     }
7794     if (WIFSTOPPED(status)) {
7795         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7796                | (status & 0xff);
7797     }
7798     return status;
7799 }
7800 
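/*
 * Editor's illustrative example (not part of the original source): for a
 * child terminated by a signal the low 7 bits of the wait status hold the
 * signal number, so only those bits are remapped; for a stopped child the
 * signal sits in bits 8-15 instead:
 *
 *     killed  : target = host_to_target_signal(WTERMSIG(s)) | (s & ~0x7f)
 *     stopped : target = (host_to_target_signal(WSTOPSIG(s)) << 8) | (s & 0xff)
 */
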
7801 static int open_self_cmdline(void *cpu_env, int fd)
7802 {
7803     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7804     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7805     int i;
7806 
7807     for (i = 0; i < bprm->argc; i++) {
7808         size_t len = strlen(bprm->argv[i]) + 1;
7809 
7810         if (write(fd, bprm->argv[i], len) != len) {
7811             return -1;
7812         }
7813     }
7814 
7815     return 0;
7816 }
7817 
7818 static int open_self_maps(void *cpu_env, int fd)
7819 {
7820     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7821     TaskState *ts = cpu->opaque;
7822     GSList *map_info = read_self_maps();
7823     GSList *s;
7824     int count;
7825 
7826     for (s = map_info; s; s = g_slist_next(s)) {
7827         MapInfo *e = (MapInfo *) s->data;
7828 
7829         if (h2g_valid(e->start)) {
7830             unsigned long min = e->start;
7831             unsigned long max = e->end;
7832             int flags = page_get_flags(h2g(min));
7833             const char *path;
7834 
7835             max = h2g_valid(max - 1) ?
7836                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7837 
7838             if (page_check_range(h2g(min), max - min, flags) == -1) {
7839                 continue;
7840             }
7841 
7842             if (h2g(min) == ts->info->stack_limit) {
7843                 path = "[stack]";
7844             } else {
7845                 path = e->path;
7846             }
7847 
7848             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7849                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7850                             h2g(min), h2g(max - 1) + 1,
7851                             e->is_read ? 'r' : '-',
7852                             e->is_write ? 'w' : '-',
7853                             e->is_exec ? 'x' : '-',
7854                             e->is_priv ? 'p' : '-',
7855                             (uint64_t) e->offset, e->dev, e->inode);
7856             if (path) {
7857                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7858             } else {
7859                 dprintf(fd, "\n");
7860             }
7861         }
7862     }
7863 
7864     free_self_maps(map_info);
7865 
7866 #ifdef TARGET_VSYSCALL_PAGE
7867     /*
7868      * We only support execution from the vsyscall page.
7869      * This matches CONFIG_LEGACY_VSYSCALL_XONLY=y behaviour from kernel v5.3.
7870      */
7871     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7872                     " --xp 00000000 00:00 0",
7873                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7874     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7875 #endif
7876 
7877     return 0;
7878 }
7879 
7880 static int open_self_stat(void *cpu_env, int fd)
7881 {
7882     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7883     TaskState *ts = cpu->opaque;
7884     g_autoptr(GString) buf = g_string_new(NULL);
7885     int i;
7886 
7887     for (i = 0; i < 44; i++) {
7888         if (i == 0) {
7889             /* pid */
7890             g_string_printf(buf, FMT_pid " ", getpid());
7891         } else if (i == 1) {
7892             /* app name */
7893             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7894             bin = bin ? bin + 1 : ts->bprm->argv[0];
7895             g_string_printf(buf, "(%.15s) ", bin);
7896         } else if (i == 27) {
7897             /* stack bottom */
7898             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7899         } else {
7900             /* for the rest, there is MasterCard: report 0 */
7901             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7902         }
7903 
7904         if (write(fd, buf->str, buf->len) != buf->len) {
7905             return -1;
7906         }
7907     }
7908 
7909     return 0;
7910 }
7911 
7912 static int open_self_auxv(void *cpu_env, int fd)
7913 {
7914     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7915     TaskState *ts = cpu->opaque;
7916     abi_ulong auxv = ts->info->saved_auxv;
7917     abi_ulong len = ts->info->auxv_len;
7918     char *ptr;
7919 
7920     /*
7921      * The auxiliary vector is stored on the target process stack.
7922      * Read the whole auxv vector and copy it to the file.
7923      */
7924     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7925     if (ptr != NULL) {
7926         while (len > 0) {
7927             ssize_t r;
7928             r = write(fd, ptr, len);
7929             if (r <= 0) {
7930                 break;
7931             }
7932             len -= r;
7933             ptr += r;
7934         }
7935         lseek(fd, 0, SEEK_SET);
7936         unlock_user(ptr, auxv, len);
7937     }
7938 
7939     return 0;
7940 }
7941 
7942 static int is_proc_myself(const char *filename, const char *entry)
7943 {
7944     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7945         filename += strlen("/proc/");
7946         if (!strncmp(filename, "self/", strlen("self/"))) {
7947             filename += strlen("self/");
7948         } else if (*filename >= '1' && *filename <= '9') {
7949             char myself[80];
7950             snprintf(myself, sizeof(myself), "%d/", getpid());
7951             if (!strncmp(filename, myself, strlen(myself))) {
7952                 filename += strlen(myself);
7953             } else {
7954                 return 0;
7955             }
7956         } else {
7957             return 0;
7958         }
7959         if (!strcmp(filename, entry)) {
7960             return 1;
7961         }
7962     }
7963     return 0;
7964 }
7965 
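/*
 * Editor's illustrative example (not part of the original source), assuming
 * the emulated process has PID 1234:
 *
 *     is_proc_myself("/proc/self/maps", "maps")   -> 1
 *     is_proc_myself("/proc/1234/maps", "maps")   -> 1
 *     is_proc_myself("/proc/4321/maps", "maps")   -> 0   (different PID)
 *     is_proc_myself("/etc/passwd", "maps")       -> 0   (not under /proc/)
 */
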
7966 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7967     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7968 static int is_proc(const char *filename, const char *entry)
7969 {
7970     return strcmp(filename, entry) == 0;
7971 }
7972 #endif
7973 
7974 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7975 static int open_net_route(void *cpu_env, int fd)
7976 {
7977     FILE *fp;
7978     char *line = NULL;
7979     size_t len = 0;
7980     ssize_t read;
7981 
7982     fp = fopen("/proc/net/route", "r");
7983     if (fp == NULL) {
7984         return -1;
7985     }
7986 
7987     /* read header */
7988 
7989     read = getline(&line, &len, fp);
7990     dprintf(fd, "%s", line);
7991 
7992     /* read routes */
7993 
7994     while ((read = getline(&line, &len, fp)) != -1) {
7995         char iface[16];
7996         uint32_t dest, gw, mask;
7997         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7998         int fields;
7999 
8000         fields = sscanf(line,
8001                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8002                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8003                         &mask, &mtu, &window, &irtt);
8004         if (fields != 11) {
8005             continue;
8006         }
8007         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8008                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8009                 metric, tswap32(mask), mtu, window, irtt);
8010     }
8011 
8012     free(line);
8013     fclose(fp);
8014 
8015     return 0;
8016 }
8017 #endif
8018 
8019 #if defined(TARGET_SPARC)
8020 static int open_cpuinfo(void *cpu_env, int fd)
8021 {
8022     dprintf(fd, "type\t\t: sun4u\n");
8023     return 0;
8024 }
8025 #endif
8026 
8027 #if defined(TARGET_HPPA)
8028 static int open_cpuinfo(void *cpu_env, int fd)
8029 {
8030     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8031     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8032     dprintf(fd, "capabilities\t: os32\n");
8033     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8034     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8035     return 0;
8036 }
8037 #endif
8038 
8039 #if defined(TARGET_M68K)
8040 static int open_hardware(void *cpu_env, int fd)
8041 {
8042     dprintf(fd, "Model:\t\tqemu-m68k\n");
8043     return 0;
8044 }
8045 #endif
8046 
8047 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8048 {
8049     struct fake_open {
8050         const char *filename;
8051         int (*fill)(void *cpu_env, int fd);
8052         int (*cmp)(const char *s1, const char *s2);
8053     };
8054     const struct fake_open *fake_open;
8055     static const struct fake_open fakes[] = {
8056         { "maps", open_self_maps, is_proc_myself },
8057         { "stat", open_self_stat, is_proc_myself },
8058         { "auxv", open_self_auxv, is_proc_myself },
8059         { "cmdline", open_self_cmdline, is_proc_myself },
8060 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8061         { "/proc/net/route", open_net_route, is_proc },
8062 #endif
8063 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8064         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8065 #endif
8066 #if defined(TARGET_M68K)
8067         { "/proc/hardware", open_hardware, is_proc },
8068 #endif
8069         { NULL, NULL, NULL }
8070     };
8071 
8072     if (is_proc_myself(pathname, "exe")) {
8073         int execfd = qemu_getauxval(AT_EXECFD);
8074         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8075     }
8076 
8077     for (fake_open = fakes; fake_open->filename; fake_open++) {
8078         if (fake_open->cmp(pathname, fake_open->filename)) {
8079             break;
8080         }
8081     }
8082 
8083     if (fake_open->filename) {
8084         const char *tmpdir;
8085         char filename[PATH_MAX];
8086         int fd, r;
8087 
8088         /* create a temporary file to hold the synthesized /proc contents */
8089         tmpdir = getenv("TMPDIR");
8090         if (!tmpdir)
8091             tmpdir = "/tmp";
8092         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8093         fd = mkstemp(filename);
8094         if (fd < 0) {
8095             return fd;
8096         }
8097         unlink(filename);
8098 
8099         if ((r = fake_open->fill(cpu_env, fd))) {
8100             int e = errno;
8101             close(fd);
8102             errno = e;
8103             return r;
8104         }
8105         lseek(fd, 0, SEEK_SET);
8106 
8107         return fd;
8108     }
8109 
8110     return safe_openat(dirfd, path(pathname), flags, mode);
8111 }
8112 
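/*
 * Editor's illustrative sketch (not part of the original source): when the
 * guest opens one of the faked paths, do_openat() never opens the host file.
 * It creates an unlinked temporary file under $TMPDIR (default /tmp), fills
 * it with the matching open_self_*()/open_*() generator, rewinds it, and
 * hands that fd back to the guest:
 *
 *     fd = do_openat(cpu_env, AT_FDCWD, "/proc/self/maps", O_RDONLY, 0);
 *     // fd now refers to a synthesized maps file, not the host's own
 */
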
8113 #define TIMER_MAGIC 0x0caf0000
8114 #define TIMER_MAGIC_MASK 0xffff0000
8115 
8116 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8117 static target_timer_t get_timer_id(abi_long arg)
8118 {
8119     target_timer_t timerid = arg;
8120 
8121     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8122         return -TARGET_EINVAL;
8123     }
8124 
8125     timerid &= 0xffff;
8126 
8127     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8128         return -TARGET_EINVAL;
8129     }
8130 
8131     return timerid;
8132 }
8133 
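/*
 * Editor's illustrative example (not part of the original source): timer IDs
 * handed to the guest embed TIMER_MAGIC in the upper 16 bits, so internal
 * slot 3 is exposed as 0x0caf0003, and get_timer_id() reverses that:
 *
 *     get_timer_id(0x0caf0003) -> 3                (if slot 3 is valid)
 *     get_timer_id(0x00000003) -> -TARGET_EINVAL   (magic bits missing)
 */
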
8134 static int target_to_host_cpu_mask(unsigned long *host_mask,
8135                                    size_t host_size,
8136                                    abi_ulong target_addr,
8137                                    size_t target_size)
8138 {
8139     unsigned target_bits = sizeof(abi_ulong) * 8;
8140     unsigned host_bits = sizeof(*host_mask) * 8;
8141     abi_ulong *target_mask;
8142     unsigned i, j;
8143 
8144     assert(host_size >= target_size);
8145 
8146     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8147     if (!target_mask) {
8148         return -TARGET_EFAULT;
8149     }
8150     memset(host_mask, 0, host_size);
8151 
8152     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8153         unsigned bit = i * target_bits;
8154         abi_ulong val;
8155 
8156         __get_user(val, &target_mask[i]);
8157         for (j = 0; j < target_bits; j++, bit++) {
8158             if (val & (1UL << j)) {
8159                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8160             }
8161         }
8162     }
8163 
8164     unlock_user(target_mask, target_addr, 0);
8165     return 0;
8166 }
8167 
8168 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8169                                    size_t host_size,
8170                                    abi_ulong target_addr,
8171                                    size_t target_size)
8172 {
8173     unsigned target_bits = sizeof(abi_ulong) * 8;
8174     unsigned host_bits = sizeof(*host_mask) * 8;
8175     abi_ulong *target_mask;
8176     unsigned i, j;
8177 
8178     assert(host_size >= target_size);
8179 
8180     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8181     if (!target_mask) {
8182         return -TARGET_EFAULT;
8183     }
8184 
8185     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8186         unsigned bit = i * target_bits;
8187         abi_ulong val = 0;
8188 
8189         for (j = 0; j < target_bits; j++, bit++) {
8190             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8191                 val |= 1UL << j;
8192             }
8193         }
8194         __put_user(val, &target_mask[i]);
8195     }
8196 
8197     unlock_user(target_mask, target_addr, target_size);
8198     return 0;
8199 }
8200 
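/*
 * Editor's illustrative example (not part of the original source): with a
 * 32-bit abi_ulong on the target and a 64-bit unsigned long on the host,
 * the two helpers above simply re-chunk the same flat CPU bitmap, e.g.
 *
 *     target words: { 0x00000001, 0x00000001 }
 *     host word:      0x0000000100000001
 */
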
8201 /* This is an internal helper for do_syscall that provides a single
8202  * return point, so that actions such as logging of syscall results
8203  * can be performed in one place.
8204  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8205  */
8206 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8207                             abi_long arg2, abi_long arg3, abi_long arg4,
8208                             abi_long arg5, abi_long arg6, abi_long arg7,
8209                             abi_long arg8)
8210 {
8211     CPUState *cpu = env_cpu(cpu_env);
8212     abi_long ret;
8213 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8214     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8215     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8216     || defined(TARGET_NR_statx)
8217     struct stat st;
8218 #endif
8219 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8220     || defined(TARGET_NR_fstatfs)
8221     struct statfs stfs;
8222 #endif
8223     void *p;
8224 
8225     switch(num) {
8226     case TARGET_NR_exit:
8227         /* In old applications this may be used to implement _exit(2).
8228            However, in threaded applications it is used for thread termination,
8229            and _exit_group is used for application termination.
8230            Do thread termination if we have more than one thread.  */
8231 
8232         if (block_signals()) {
8233             return -TARGET_ERESTARTSYS;
8234         }
8235 
8236         pthread_mutex_lock(&clone_lock);
8237 
8238         if (CPU_NEXT(first_cpu)) {
8239             TaskState *ts = cpu->opaque;
8240 
8241             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8242             object_unref(OBJECT(cpu));
8243             /*
8244              * At this point the CPU should be unrealized and removed
8245              * from cpu lists. We can clean-up the rest of the thread
8246              * data without the lock held.
8247              */
8248 
8249             pthread_mutex_unlock(&clone_lock);
8250 
8251             if (ts->child_tidptr) {
8252                 put_user_u32(0, ts->child_tidptr);
8253                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8254                           NULL, NULL, 0);
8255             }
8256             thread_cpu = NULL;
8257             g_free(ts);
8258             rcu_unregister_thread();
8259             pthread_exit(NULL);
8260         }
8261 
8262         pthread_mutex_unlock(&clone_lock);
8263         preexit_cleanup(cpu_env, arg1);
8264         _exit(arg1);
8265         return 0; /* avoid warning */
8266     case TARGET_NR_read:
8267         if (arg2 == 0 && arg3 == 0) {
8268             return get_errno(safe_read(arg1, 0, 0));
8269         } else {
8270             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8271                 return -TARGET_EFAULT;
8272             ret = get_errno(safe_read(arg1, p, arg3));
8273             if (ret >= 0 &&
8274                 fd_trans_host_to_target_data(arg1)) {
8275                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8276             }
8277             unlock_user(p, arg2, ret);
8278         }
8279         return ret;
8280     case TARGET_NR_write:
8281         if (arg2 == 0 && arg3 == 0) {
8282             return get_errno(safe_write(arg1, 0, 0));
8283         }
8284         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8285             return -TARGET_EFAULT;
8286         if (fd_trans_target_to_host_data(arg1)) {
8287             void *copy = g_malloc(arg3);
8288             memcpy(copy, p, arg3);
8289             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8290             if (ret >= 0) {
8291                 ret = get_errno(safe_write(arg1, copy, ret));
8292             }
8293             g_free(copy);
8294         } else {
8295             ret = get_errno(safe_write(arg1, p, arg3));
8296         }
8297         unlock_user(p, arg2, 0);
8298         return ret;
8299 
8300 #ifdef TARGET_NR_open
8301     case TARGET_NR_open:
8302         if (!(p = lock_user_string(arg1)))
8303             return -TARGET_EFAULT;
8304         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8305                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8306                                   arg3));
8307         fd_trans_unregister(ret);
8308         unlock_user(p, arg1, 0);
8309         return ret;
8310 #endif
8311     case TARGET_NR_openat:
8312         if (!(p = lock_user_string(arg2)))
8313             return -TARGET_EFAULT;
8314         ret = get_errno(do_openat(cpu_env, arg1, p,
8315                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8316                                   arg4));
8317         fd_trans_unregister(ret);
8318         unlock_user(p, arg2, 0);
8319         return ret;
8320 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8321     case TARGET_NR_name_to_handle_at:
8322         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8323         return ret;
8324 #endif
8325 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8326     case TARGET_NR_open_by_handle_at:
8327         ret = do_open_by_handle_at(arg1, arg2, arg3);
8328         fd_trans_unregister(ret);
8329         return ret;
8330 #endif
8331     case TARGET_NR_close:
8332         fd_trans_unregister(arg1);
8333         return get_errno(close(arg1));
8334 
8335     case TARGET_NR_brk:
8336         return do_brk(arg1);
8337 #ifdef TARGET_NR_fork
8338     case TARGET_NR_fork:
8339         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8340 #endif
8341 #ifdef TARGET_NR_waitpid
8342     case TARGET_NR_waitpid:
8343         {
8344             int status;
8345             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8346             if (!is_error(ret) && arg2 && ret
8347                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8348                 return -TARGET_EFAULT;
8349         }
8350         return ret;
8351 #endif
8352 #ifdef TARGET_NR_waitid
8353     case TARGET_NR_waitid:
8354         {
8355             siginfo_t info;
8356             info.si_pid = 0;
8357             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8358             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8359                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8360                     return -TARGET_EFAULT;
8361                 host_to_target_siginfo(p, &info);
8362                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8363             }
8364         }
8365         return ret;
8366 #endif
8367 #ifdef TARGET_NR_creat /* not on alpha */
8368     case TARGET_NR_creat:
8369         if (!(p = lock_user_string(arg1)))
8370             return -TARGET_EFAULT;
8371         ret = get_errno(creat(p, arg2));
8372         fd_trans_unregister(ret);
8373         unlock_user(p, arg1, 0);
8374         return ret;
8375 #endif
8376 #ifdef TARGET_NR_link
8377     case TARGET_NR_link:
8378         {
8379             void * p2;
8380             p = lock_user_string(arg1);
8381             p2 = lock_user_string(arg2);
8382             if (!p || !p2)
8383                 ret = -TARGET_EFAULT;
8384             else
8385                 ret = get_errno(link(p, p2));
8386             unlock_user(p2, arg2, 0);
8387             unlock_user(p, arg1, 0);
8388         }
8389         return ret;
8390 #endif
8391 #if defined(TARGET_NR_linkat)
8392     case TARGET_NR_linkat:
8393         {
8394             void * p2 = NULL;
8395             if (!arg2 || !arg4)
8396                 return -TARGET_EFAULT;
8397             p  = lock_user_string(arg2);
8398             p2 = lock_user_string(arg4);
8399             if (!p || !p2)
8400                 ret = -TARGET_EFAULT;
8401             else
8402                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8403             unlock_user(p, arg2, 0);
8404             unlock_user(p2, arg4, 0);
8405         }
8406         return ret;
8407 #endif
8408 #ifdef TARGET_NR_unlink
8409     case TARGET_NR_unlink:
8410         if (!(p = lock_user_string(arg1)))
8411             return -TARGET_EFAULT;
8412         ret = get_errno(unlink(p));
8413         unlock_user(p, arg1, 0);
8414         return ret;
8415 #endif
8416 #if defined(TARGET_NR_unlinkat)
8417     case TARGET_NR_unlinkat:
8418         if (!(p = lock_user_string(arg2)))
8419             return -TARGET_EFAULT;
8420         ret = get_errno(unlinkat(arg1, p, arg3));
8421         unlock_user(p, arg2, 0);
8422         return ret;
8423 #endif
8424     case TARGET_NR_execve:
8425         {
8426             char **argp, **envp;
8427             int argc, envc;
8428             abi_ulong gp;
8429             abi_ulong guest_argp;
8430             abi_ulong guest_envp;
8431             abi_ulong addr;
8432             char **q;
8433             int total_size = 0;
8434 
8435             argc = 0;
8436             guest_argp = arg2;
8437             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8438                 if (get_user_ual(addr, gp))
8439                     return -TARGET_EFAULT;
8440                 if (!addr)
8441                     break;
8442                 argc++;
8443             }
8444             envc = 0;
8445             guest_envp = arg3;
8446             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8447                 if (get_user_ual(addr, gp))
8448                     return -TARGET_EFAULT;
8449                 if (!addr)
8450                     break;
8451                 envc++;
8452             }
8453 
8454             argp = g_new0(char *, argc + 1);
8455             envp = g_new0(char *, envc + 1);
8456 
8457             for (gp = guest_argp, q = argp; gp;
8458                   gp += sizeof(abi_ulong), q++) {
8459                 if (get_user_ual(addr, gp))
8460                     goto execve_efault;
8461                 if (!addr)
8462                     break;
8463                 if (!(*q = lock_user_string(addr)))
8464                     goto execve_efault;
8465                 total_size += strlen(*q) + 1;
8466             }
8467             *q = NULL;
8468 
8469             for (gp = guest_envp, q = envp; gp;
8470                   gp += sizeof(abi_ulong), q++) {
8471                 if (get_user_ual(addr, gp))
8472                     goto execve_efault;
8473                 if (!addr)
8474                     break;
8475                 if (!(*q = lock_user_string(addr)))
8476                     goto execve_efault;
8477                 total_size += strlen(*q) + 1;
8478             }
8479             *q = NULL;
8480 
8481             if (!(p = lock_user_string(arg1)))
8482                 goto execve_efault;
8483             /* Although execve() is not an interruptible syscall it is
8484              * a special case where we must use the safe_syscall wrapper:
8485              * if we allow a signal to happen before we make the host
8486              * syscall then we will 'lose' it, because at the point of
8487              * execve the process leaves QEMU's control. So we use the
8488              * safe syscall wrapper to ensure that we either take the
8489              * signal as a guest signal, or else it does not happen
8490              * before the execve completes and makes it the other
8491              * program's problem.
8492              */
8493             ret = get_errno(safe_execve(p, argp, envp));
8494             unlock_user(p, arg1, 0);
8495 
8496             goto execve_end;
8497 
8498         execve_efault:
8499             ret = -TARGET_EFAULT;
8500 
8501         execve_end:
8502             for (gp = guest_argp, q = argp; *q;
8503                   gp += sizeof(abi_ulong), q++) {
8504                 if (get_user_ual(addr, gp)
8505                     || !addr)
8506                     break;
8507                 unlock_user(*q, addr, 0);
8508             }
8509             for (gp = guest_envp, q = envp; *q;
8510                   gp += sizeof(abi_ulong), q++) {
8511                 if (get_user_ual(addr, gp)
8512                     || !addr)
8513                     break;
8514                 unlock_user(*q, addr, 0);
8515             }
8516 
8517             g_free(argp);
8518             g_free(envp);
8519         }
8520         return ret;
8521     case TARGET_NR_chdir:
8522         if (!(p = lock_user_string(arg1)))
8523             return -TARGET_EFAULT;
8524         ret = get_errno(chdir(p));
8525         unlock_user(p, arg1, 0);
8526         return ret;
8527 #ifdef TARGET_NR_time
8528     case TARGET_NR_time:
8529         {
8530             time_t host_time;
8531             ret = get_errno(time(&host_time));
8532             if (!is_error(ret)
8533                 && arg1
8534                 && put_user_sal(host_time, arg1))
8535                 return -TARGET_EFAULT;
8536         }
8537         return ret;
8538 #endif
8539 #ifdef TARGET_NR_mknod
8540     case TARGET_NR_mknod:
8541         if (!(p = lock_user_string(arg1)))
8542             return -TARGET_EFAULT;
8543         ret = get_errno(mknod(p, arg2, arg3));
8544         unlock_user(p, arg1, 0);
8545         return ret;
8546 #endif
8547 #if defined(TARGET_NR_mknodat)
8548     case TARGET_NR_mknodat:
8549         if (!(p = lock_user_string(arg2)))
8550             return -TARGET_EFAULT;
8551         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8552         unlock_user(p, arg2, 0);
8553         return ret;
8554 #endif
8555 #ifdef TARGET_NR_chmod
8556     case TARGET_NR_chmod:
8557         if (!(p = lock_user_string(arg1)))
8558             return -TARGET_EFAULT;
8559         ret = get_errno(chmod(p, arg2));
8560         unlock_user(p, arg1, 0);
8561         return ret;
8562 #endif
8563 #ifdef TARGET_NR_lseek
8564     case TARGET_NR_lseek:
8565         return get_errno(lseek(arg1, arg2, arg3));
8566 #endif
8567 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8568     /* Alpha specific */
8569     case TARGET_NR_getxpid:
8570         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8571         return get_errno(getpid());
8572 #endif
8573 #ifdef TARGET_NR_getpid
8574     case TARGET_NR_getpid:
8575         return get_errno(getpid());
8576 #endif
8577     case TARGET_NR_mount:
8578         {
8579             /* need to look at the data field */
8580             void *p2, *p3;
8581 
8582             if (arg1) {
8583                 p = lock_user_string(arg1);
8584                 if (!p) {
8585                     return -TARGET_EFAULT;
8586                 }
8587             } else {
8588                 p = NULL;
8589             }
8590 
8591             p2 = lock_user_string(arg2);
8592             if (!p2) {
8593                 if (arg1) {
8594                     unlock_user(p, arg1, 0);
8595                 }
8596                 return -TARGET_EFAULT;
8597             }
8598 
8599             if (arg3) {
8600                 p3 = lock_user_string(arg3);
8601                 if (!p3) {
8602                     if (arg1) {
8603                         unlock_user(p, arg1, 0);
8604                     }
8605                     unlock_user(p2, arg2, 0);
8606                     return -TARGET_EFAULT;
8607                 }
8608             } else {
8609                 p3 = NULL;
8610             }
8611 
8612             /* FIXME - arg5 should be locked, but it isn't clear how to
8613              * do that since it's not guaranteed to be a NULL-terminated
8614              * string.
8615              */
8616             if (!arg5) {
8617                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8618             } else {
8619                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8620             }
8621             ret = get_errno(ret);
8622 
8623             if (arg1) {
8624                 unlock_user(p, arg1, 0);
8625             }
8626             unlock_user(p2, arg2, 0);
8627             if (arg3) {
8628                 unlock_user(p3, arg3, 0);
8629             }
8630         }
8631         return ret;
8632 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8633 #if defined(TARGET_NR_umount)
8634     case TARGET_NR_umount:
8635 #endif
8636 #if defined(TARGET_NR_oldumount)
8637     case TARGET_NR_oldumount:
8638 #endif
8639         if (!(p = lock_user_string(arg1)))
8640             return -TARGET_EFAULT;
8641         ret = get_errno(umount(p));
8642         unlock_user(p, arg1, 0);
8643         return ret;
8644 #endif
8645 #ifdef TARGET_NR_stime /* not on alpha */
8646     case TARGET_NR_stime:
8647         {
8648             struct timespec ts;
8649             ts.tv_nsec = 0;
8650             if (get_user_sal(ts.tv_sec, arg1)) {
8651                 return -TARGET_EFAULT;
8652             }
8653             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8654         }
8655 #endif
8656 #ifdef TARGET_NR_alarm /* not on alpha */
8657     case TARGET_NR_alarm:
8658         return alarm(arg1);
8659 #endif
8660 #ifdef TARGET_NR_pause /* not on alpha */
8661     case TARGET_NR_pause:
8662         if (!block_signals()) {
8663             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8664         }
8665         return -TARGET_EINTR;
8666 #endif
8667 #ifdef TARGET_NR_utime
8668     case TARGET_NR_utime:
8669         {
8670             struct utimbuf tbuf, *host_tbuf;
8671             struct target_utimbuf *target_tbuf;
8672             if (arg2) {
8673                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8674                     return -TARGET_EFAULT;
8675                 tbuf.actime = tswapal(target_tbuf->actime);
8676                 tbuf.modtime = tswapal(target_tbuf->modtime);
8677                 unlock_user_struct(target_tbuf, arg2, 0);
8678                 host_tbuf = &tbuf;
8679             } else {
8680                 host_tbuf = NULL;
8681             }
8682             if (!(p = lock_user_string(arg1)))
8683                 return -TARGET_EFAULT;
8684             ret = get_errno(utime(p, host_tbuf));
8685             unlock_user(p, arg1, 0);
8686         }
8687         return ret;
8688 #endif
8689 #ifdef TARGET_NR_utimes
8690     case TARGET_NR_utimes:
8691         {
8692             struct timeval *tvp, tv[2];
8693             if (arg2) {
8694                 if (copy_from_user_timeval(&tv[0], arg2)
8695                     || copy_from_user_timeval(&tv[1],
8696                                               arg2 + sizeof(struct target_timeval)))
8697                     return -TARGET_EFAULT;
8698                 tvp = tv;
8699             } else {
8700                 tvp = NULL;
8701             }
8702             if (!(p = lock_user_string(arg1)))
8703                 return -TARGET_EFAULT;
8704             ret = get_errno(utimes(p, tvp));
8705             unlock_user(p, arg1, 0);
8706         }
8707         return ret;
8708 #endif
8709 #if defined(TARGET_NR_futimesat)
8710     case TARGET_NR_futimesat:
8711         {
8712             struct timeval *tvp, tv[2];
8713             if (arg3) {
8714                 if (copy_from_user_timeval(&tv[0], arg3)
8715                     || copy_from_user_timeval(&tv[1],
8716                                               arg3 + sizeof(struct target_timeval)))
8717                     return -TARGET_EFAULT;
8718                 tvp = tv;
8719             } else {
8720                 tvp = NULL;
8721             }
8722             if (!(p = lock_user_string(arg2))) {
8723                 return -TARGET_EFAULT;
8724             }
8725             ret = get_errno(futimesat(arg1, path(p), tvp));
8726             unlock_user(p, arg2, 0);
8727         }
8728         return ret;
8729 #endif
8730 #ifdef TARGET_NR_access
8731     case TARGET_NR_access:
8732         if (!(p = lock_user_string(arg1))) {
8733             return -TARGET_EFAULT;
8734         }
8735         ret = get_errno(access(path(p), arg2));
8736         unlock_user(p, arg1, 0);
8737         return ret;
8738 #endif
8739 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8740     case TARGET_NR_faccessat:
8741         if (!(p = lock_user_string(arg2))) {
8742             return -TARGET_EFAULT;
8743         }
8744         ret = get_errno(faccessat(arg1, p, arg3, 0));
8745         unlock_user(p, arg2, 0);
8746         return ret;
8747 #endif
8748 #ifdef TARGET_NR_nice /* not on alpha */
8749     case TARGET_NR_nice:
8750         return get_errno(nice(arg1));
8751 #endif
8752     case TARGET_NR_sync:
8753         sync();
8754         return 0;
8755 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8756     case TARGET_NR_syncfs:
8757         return get_errno(syncfs(arg1));
8758 #endif
8759     case TARGET_NR_kill:
8760         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8761 #ifdef TARGET_NR_rename
8762     case TARGET_NR_rename:
8763         {
8764             void *p2;
8765             p = lock_user_string(arg1);
8766             p2 = lock_user_string(arg2);
8767             if (!p || !p2)
8768                 ret = -TARGET_EFAULT;
8769             else
8770                 ret = get_errno(rename(p, p2));
8771             unlock_user(p2, arg2, 0);
8772             unlock_user(p, arg1, 0);
8773         }
8774         return ret;
8775 #endif
8776 #if defined(TARGET_NR_renameat)
8777     case TARGET_NR_renameat:
8778         {
8779             void *p2;
8780             p  = lock_user_string(arg2);
8781             p2 = lock_user_string(arg4);
8782             if (!p || !p2)
8783                 ret = -TARGET_EFAULT;
8784             else
8785                 ret = get_errno(renameat(arg1, p, arg3, p2));
8786             unlock_user(p2, arg4, 0);
8787             unlock_user(p, arg2, 0);
8788         }
8789         return ret;
8790 #endif
8791 #if defined(TARGET_NR_renameat2)
8792     case TARGET_NR_renameat2:
8793         {
8794             void *p2;
8795             p  = lock_user_string(arg2);
8796             p2 = lock_user_string(arg4);
8797             if (!p || !p2) {
8798                 ret = -TARGET_EFAULT;
8799             } else {
8800                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8801             }
8802             unlock_user(p2, arg4, 0);
8803             unlock_user(p, arg2, 0);
8804         }
8805         return ret;
8806 #endif
8807 #ifdef TARGET_NR_mkdir
8808     case TARGET_NR_mkdir:
8809         if (!(p = lock_user_string(arg1)))
8810             return -TARGET_EFAULT;
8811         ret = get_errno(mkdir(p, arg2));
8812         unlock_user(p, arg1, 0);
8813         return ret;
8814 #endif
8815 #if defined(TARGET_NR_mkdirat)
8816     case TARGET_NR_mkdirat:
8817         if (!(p = lock_user_string(arg2)))
8818             return -TARGET_EFAULT;
8819         ret = get_errno(mkdirat(arg1, p, arg3));
8820         unlock_user(p, arg2, 0);
8821         return ret;
8822 #endif
8823 #ifdef TARGET_NR_rmdir
8824     case TARGET_NR_rmdir:
8825         if (!(p = lock_user_string(arg1)))
8826             return -TARGET_EFAULT;
8827         ret = get_errno(rmdir(p));
8828         unlock_user(p, arg1, 0);
8829         return ret;
8830 #endif
8831     case TARGET_NR_dup:
8832         ret = get_errno(dup(arg1));
8833         if (ret >= 0) {
8834             fd_trans_dup(arg1, ret);
8835         }
8836         return ret;
8837 #ifdef TARGET_NR_pipe
8838     case TARGET_NR_pipe:
8839         return do_pipe(cpu_env, arg1, 0, 0);
8840 #endif
8841 #ifdef TARGET_NR_pipe2
8842     case TARGET_NR_pipe2:
8843         return do_pipe(cpu_env, arg1,
8844                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8845 #endif
8846     case TARGET_NR_times:
8847         {
8848             struct target_tms *tmsp;
8849             struct tms tms;
8850             ret = get_errno(times(&tms));
8851             if (arg1) {
8852                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8853                 if (!tmsp)
8854                     return -TARGET_EFAULT;
8855                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8856                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8857                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8858                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8859             }
8860             if (!is_error(ret))
8861                 ret = host_to_target_clock_t(ret);
8862         }
8863         return ret;
8864     case TARGET_NR_acct:
8865         if (arg1 == 0) {
8866             ret = get_errno(acct(NULL));
8867         } else {
8868             if (!(p = lock_user_string(arg1))) {
8869                 return -TARGET_EFAULT;
8870             }
8871             ret = get_errno(acct(path(p)));
8872             unlock_user(p, arg1, 0);
8873         }
8874         return ret;
8875 #ifdef TARGET_NR_umount2
8876     case TARGET_NR_umount2:
8877         if (!(p = lock_user_string(arg1)))
8878             return -TARGET_EFAULT;
8879         ret = get_errno(umount2(p, arg2));
8880         unlock_user(p, arg1, 0);
8881         return ret;
8882 #endif
8883     case TARGET_NR_ioctl:
8884         return do_ioctl(arg1, arg2, arg3);
8885 #ifdef TARGET_NR_fcntl
8886     case TARGET_NR_fcntl:
8887         return do_fcntl(arg1, arg2, arg3);
8888 #endif
8889     case TARGET_NR_setpgid:
8890         return get_errno(setpgid(arg1, arg2));
8891     case TARGET_NR_umask:
8892         return get_errno(umask(arg1));
8893     case TARGET_NR_chroot:
8894         if (!(p = lock_user_string(arg1)))
8895             return -TARGET_EFAULT;
8896         ret = get_errno(chroot(p));
8897         unlock_user(p, arg1, 0);
8898         return ret;
8899 #ifdef TARGET_NR_dup2
8900     case TARGET_NR_dup2:
8901         ret = get_errno(dup2(arg1, arg2));
8902         if (ret >= 0) {
8903             fd_trans_dup(arg1, arg2);
8904         }
8905         return ret;
8906 #endif
8907 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8908     case TARGET_NR_dup3:
8909     {
8910         int host_flags;
8911 
8912         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8913             return -TARGET_EINVAL;
8914         }
8915         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8916         ret = get_errno(dup3(arg1, arg2, host_flags));
8917         if (ret >= 0) {
8918             fd_trans_dup(arg1, arg2);
8919         }
8920         return ret;
8921     }
8922 #endif
8923 #ifdef TARGET_NR_getppid /* not on alpha */
8924     case TARGET_NR_getppid:
8925         return get_errno(getppid());
8926 #endif
8927 #ifdef TARGET_NR_getpgrp
8928     case TARGET_NR_getpgrp:
8929         return get_errno(getpgrp());
8930 #endif
8931     case TARGET_NR_setsid:
8932         return get_errno(setsid());
8933 #ifdef TARGET_NR_sigaction
8934     case TARGET_NR_sigaction:
8935         {
8936 #if defined(TARGET_ALPHA)
8937             struct target_sigaction act, oact, *pact = 0;
8938             struct target_old_sigaction *old_act;
8939             if (arg2) {
8940                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8941                     return -TARGET_EFAULT;
8942                 act._sa_handler = old_act->_sa_handler;
8943                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8944                 act.sa_flags = old_act->sa_flags;
8945                 act.sa_restorer = 0;
8946                 unlock_user_struct(old_act, arg2, 0);
8947                 pact = &act;
8948             }
8949             ret = get_errno(do_sigaction(arg1, pact, &oact));
8950             if (!is_error(ret) && arg3) {
8951                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8952                     return -TARGET_EFAULT;
8953                 old_act->_sa_handler = oact._sa_handler;
8954                 old_act->sa_mask = oact.sa_mask.sig[0];
8955                 old_act->sa_flags = oact.sa_flags;
8956                 unlock_user_struct(old_act, arg3, 1);
8957             }
8958 #elif defined(TARGET_MIPS)
8959             struct target_sigaction act, oact, *pact, *old_act;
8960 
8961             if (arg2) {
8962                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8963                     return -TARGET_EFAULT;
8964                 act._sa_handler = old_act->_sa_handler;
8965                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8966                 act.sa_flags = old_act->sa_flags;
8967                 unlock_user_struct(old_act, arg2, 0);
8968                 pact = &act;
8969             } else {
8970                 pact = NULL;
8971             }
8972 
8973             ret = get_errno(do_sigaction(arg1, pact, &oact));
8974 
8975             if (!is_error(ret) && arg3) {
8976                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8977                     return -TARGET_EFAULT;
8978                 old_act->_sa_handler = oact._sa_handler;
8979                 old_act->sa_flags = oact.sa_flags;
8980                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8981                 old_act->sa_mask.sig[1] = 0;
8982                 old_act->sa_mask.sig[2] = 0;
8983                 old_act->sa_mask.sig[3] = 0;
8984                 unlock_user_struct(old_act, arg3, 1);
8985             }
8986 #else
8987             struct target_old_sigaction *old_act;
8988             struct target_sigaction act, oact, *pact;
8989             if (arg2) {
8990                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8991                     return -TARGET_EFAULT;
8992                 act._sa_handler = old_act->_sa_handler;
8993                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8994                 act.sa_flags = old_act->sa_flags;
8995                 act.sa_restorer = old_act->sa_restorer;
8996 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8997                 act.ka_restorer = 0;
8998 #endif
8999                 unlock_user_struct(old_act, arg2, 0);
9000                 pact = &act;
9001             } else {
9002                 pact = NULL;
9003             }
9004             ret = get_errno(do_sigaction(arg1, pact, &oact));
9005             if (!is_error(ret) && arg3) {
9006                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9007                     return -TARGET_EFAULT;
9008                 old_act->_sa_handler = oact._sa_handler;
9009                 old_act->sa_mask = oact.sa_mask.sig[0];
9010                 old_act->sa_flags = oact.sa_flags;
9011                 old_act->sa_restorer = oact.sa_restorer;
9012                 unlock_user_struct(old_act, arg3, 1);
9013             }
9014 #endif
9015         }
9016         return ret;
9017 #endif
9018     case TARGET_NR_rt_sigaction:
9019         {
9020 #if defined(TARGET_ALPHA)
9021             /* For Alpha and SPARC this is a 5 argument syscall, with
9022              * a 'restorer' parameter which must be copied into the
9023              * sa_restorer field of the sigaction struct.
9024              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9025              * and arg5 is the sigsetsize.
9026              * Alpha also has a separate rt_sigaction struct that it uses
9027              * here; SPARC uses the usual sigaction struct.
9028              */
9029             struct target_rt_sigaction *rt_act;
9030             struct target_sigaction act, oact, *pact = 0;
9031 
9032             if (arg4 != sizeof(target_sigset_t)) {
9033                 return -TARGET_EINVAL;
9034             }
9035             if (arg2) {
9036                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9037                     return -TARGET_EFAULT;
9038                 act._sa_handler = rt_act->_sa_handler;
9039                 act.sa_mask = rt_act->sa_mask;
9040                 act.sa_flags = rt_act->sa_flags;
9041                 act.sa_restorer = arg5;
9042                 unlock_user_struct(rt_act, arg2, 0);
9043                 pact = &act;
9044             }
9045             ret = get_errno(do_sigaction(arg1, pact, &oact));
9046             if (!is_error(ret) && arg3) {
9047                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9048                     return -TARGET_EFAULT;
9049                 rt_act->_sa_handler = oact._sa_handler;
9050                 rt_act->sa_mask = oact.sa_mask;
9051                 rt_act->sa_flags = oact.sa_flags;
9052                 unlock_user_struct(rt_act, arg3, 1);
9053             }
9054 #else
9055 #ifdef TARGET_SPARC
9056             target_ulong restorer = arg4;
9057             target_ulong sigsetsize = arg5;
9058 #else
9059             target_ulong sigsetsize = arg4;
9060 #endif
9061             struct target_sigaction *act;
9062             struct target_sigaction *oact;
9063 
9064             if (sigsetsize != sizeof(target_sigset_t)) {
9065                 return -TARGET_EINVAL;
9066             }
9067             if (arg2) {
9068                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9069                     return -TARGET_EFAULT;
9070                 }
9071 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9072                 act->ka_restorer = restorer;
9073 #endif
9074             } else {
9075                 act = NULL;
9076             }
9077             if (arg3) {
9078                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9079                     ret = -TARGET_EFAULT;
9080                     goto rt_sigaction_fail;
9081                 }
9082             } else
9083                 oact = NULL;
9084             ret = get_errno(do_sigaction(arg1, act, oact));
9085 	rt_sigaction_fail:
9086             if (act)
9087                 unlock_user_struct(act, arg2, 0);
9088             if (oact)
9089                 unlock_user_struct(oact, arg3, 1);
9090 #endif
9091         }
9092         return ret;
9093 #ifdef TARGET_NR_sgetmask /* not on alpha */
9094     case TARGET_NR_sgetmask:
9095         {
9096             sigset_t cur_set;
9097             abi_ulong target_set;
9098             ret = do_sigprocmask(0, NULL, &cur_set);
9099             if (!ret) {
9100                 host_to_target_old_sigset(&target_set, &cur_set);
9101                 ret = target_set;
9102             }
9103         }
9104         return ret;
9105 #endif
9106 #ifdef TARGET_NR_ssetmask /* not on alpha */
9107     case TARGET_NR_ssetmask:
9108         {
9109             sigset_t set, oset;
9110             abi_ulong target_set = arg1;
9111             target_to_host_old_sigset(&set, &target_set);
9112             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9113             if (!ret) {
9114                 host_to_target_old_sigset(&target_set, &oset);
9115                 ret = target_set;
9116             }
9117         }
9118         return ret;
9119 #endif
9120 #ifdef TARGET_NR_sigprocmask
9121     case TARGET_NR_sigprocmask:
9122         {
9123 #if defined(TARGET_ALPHA)
9124             sigset_t set, oldset;
9125             abi_ulong mask;
9126             int how;
9127 
9128             switch (arg1) {
9129             case TARGET_SIG_BLOCK:
9130                 how = SIG_BLOCK;
9131                 break;
9132             case TARGET_SIG_UNBLOCK:
9133                 how = SIG_UNBLOCK;
9134                 break;
9135             case TARGET_SIG_SETMASK:
9136                 how = SIG_SETMASK;
9137                 break;
9138             default:
9139                 return -TARGET_EINVAL;
9140             }
9141             mask = arg2;
9142             target_to_host_old_sigset(&set, &mask);
9143 
9144             ret = do_sigprocmask(how, &set, &oldset);
9145             if (!is_error(ret)) {
9146                 host_to_target_old_sigset(&mask, &oldset);
9147                 ret = mask;
9148                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9149             }
9150 #else
9151             sigset_t set, oldset, *set_ptr;
9152             int how;
9153 
9154             if (arg2) {
9155                 switch (arg1) {
9156                 case TARGET_SIG_BLOCK:
9157                     how = SIG_BLOCK;
9158                     break;
9159                 case TARGET_SIG_UNBLOCK:
9160                     how = SIG_UNBLOCK;
9161                     break;
9162                 case TARGET_SIG_SETMASK:
9163                     how = SIG_SETMASK;
9164                     break;
9165                 default:
9166                     return -TARGET_EINVAL;
9167                 }
9168                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9169                     return -TARGET_EFAULT;
9170                 target_to_host_old_sigset(&set, p);
9171                 unlock_user(p, arg2, 0);
9172                 set_ptr = &set;
9173             } else {
9174                 how = 0;
9175                 set_ptr = NULL;
9176             }
9177             ret = do_sigprocmask(how, set_ptr, &oldset);
9178             if (!is_error(ret) && arg3) {
9179                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9180                     return -TARGET_EFAULT;
9181                 host_to_target_old_sigset(p, &oldset);
9182                 unlock_user(p, arg3, sizeof(target_sigset_t));
9183             }
9184 #endif
9185         }
9186         return ret;
9187 #endif
9188     case TARGET_NR_rt_sigprocmask:
9189         {
9190             int how = arg1;
9191             sigset_t set, oldset, *set_ptr;
9192 
9193             if (arg4 != sizeof(target_sigset_t)) {
9194                 return -TARGET_EINVAL;
9195             }
9196 
9197             if (arg2) {
9198                 switch(how) {
9199                 case TARGET_SIG_BLOCK:
9200                     how = SIG_BLOCK;
9201                     break;
9202                 case TARGET_SIG_UNBLOCK:
9203                     how = SIG_UNBLOCK;
9204                     break;
9205                 case TARGET_SIG_SETMASK:
9206                     how = SIG_SETMASK;
9207                     break;
9208                 default:
9209                     return -TARGET_EINVAL;
9210                 }
9211                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9212                     return -TARGET_EFAULT;
9213                 target_to_host_sigset(&set, p);
9214                 unlock_user(p, arg2, 0);
9215                 set_ptr = &set;
9216             } else {
9217                 how = 0;
9218                 set_ptr = NULL;
9219             }
9220             ret = do_sigprocmask(how, set_ptr, &oldset);
9221             if (!is_error(ret) && arg3) {
9222                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9223                     return -TARGET_EFAULT;
9224                 host_to_target_sigset(p, &oldset);
9225                 unlock_user(p, arg3, sizeof(target_sigset_t));
9226             }
9227         }
9228         return ret;
9229 #ifdef TARGET_NR_sigpending
9230     case TARGET_NR_sigpending:
9231         {
9232             sigset_t set;
9233             ret = get_errno(sigpending(&set));
9234             if (!is_error(ret)) {
9235                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9236                     return -TARGET_EFAULT;
9237                 host_to_target_old_sigset(p, &set);
9238                 unlock_user(p, arg1, sizeof(target_sigset_t));
9239             }
9240         }
9241         return ret;
9242 #endif
9243     case TARGET_NR_rt_sigpending:
9244         {
9245             sigset_t set;
9246 
9247             /* Yes, this check is >, not != like most. We follow the kernel's
9248              * logic and it does it like this because it implements
9249              * NR_sigpending through the same code path, and in that case
9250              * the old_sigset_t is smaller in size.
9251              */
9252             if (arg2 > sizeof(target_sigset_t)) {
9253                 return -TARGET_EINVAL;
9254             }
9255 
9256             ret = get_errno(sigpending(&set));
9257             if (!is_error(ret)) {
9258                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9259                     return -TARGET_EFAULT;
9260                 host_to_target_sigset(p, &set);
9261                 unlock_user(p, arg1, sizeof(target_sigset_t));
9262             }
9263         }
9264         return ret;
9265 #ifdef TARGET_NR_sigsuspend
9266     case TARGET_NR_sigsuspend:
9267         {
9268             TaskState *ts = cpu->opaque;
9269 #if defined(TARGET_ALPHA)
9270             abi_ulong mask = arg1;
9271             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9272 #else
9273             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9274                 return -TARGET_EFAULT;
9275             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9276             unlock_user(p, arg1, 0);
9277 #endif
9278             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9279                                                SIGSET_T_SIZE));
9280             if (ret != -TARGET_ERESTARTSYS) {
9281                 ts->in_sigsuspend = 1;
9282             }
9283         }
9284         return ret;
9285 #endif
9286     case TARGET_NR_rt_sigsuspend:
9287         {
9288             TaskState *ts = cpu->opaque;
9289 
9290             if (arg2 != sizeof(target_sigset_t)) {
9291                 return -TARGET_EINVAL;
9292             }
9293             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9294                 return -TARGET_EFAULT;
9295             target_to_host_sigset(&ts->sigsuspend_mask, p);
9296             unlock_user(p, arg1, 0);
9297             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9298                                                SIGSET_T_SIZE));
9299             if (ret != -TARGET_ERESTARTSYS) {
9300                 ts->in_sigsuspend = 1;
9301             }
9302         }
9303         return ret;
9304 #ifdef TARGET_NR_rt_sigtimedwait
9305     case TARGET_NR_rt_sigtimedwait:
9306         {
9307             sigset_t set;
9308             struct timespec uts, *puts;
9309             siginfo_t uinfo;
9310 
9311             if (arg4 != sizeof(target_sigset_t)) {
9312                 return -TARGET_EINVAL;
9313             }
9314 
9315             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9316                 return -TARGET_EFAULT;
9317             target_to_host_sigset(&set, p);
9318             unlock_user(p, arg1, 0);
9319             if (arg3) {
9320                 puts = &uts;
9321                 if (target_to_host_timespec(puts, arg3)) {
9322                     return -TARGET_EFAULT;
9323                 }
9324             } else {
9325                 puts = NULL;
9326             }
9327             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9328                                                  SIGSET_T_SIZE));
9329             if (!is_error(ret)) {
9330                 if (arg2) {
9331                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9332                                   0);
9333                     if (!p) {
9334                         return -TARGET_EFAULT;
9335                     }
9336                     host_to_target_siginfo(p, &uinfo);
9337                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9338                 }
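                /* Convert the host signal number returned on success
                 * to the target's numbering. */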
9339                 ret = host_to_target_signal(ret);
9340             }
9341         }
9342         return ret;
9343 #endif
9344 #ifdef TARGET_NR_rt_sigtimedwait_time64
9345     case TARGET_NR_rt_sigtimedwait_time64:
9346         {
9347             sigset_t set;
9348             struct timespec uts, *puts;
9349             siginfo_t uinfo;
9350 
9351             if (arg4 != sizeof(target_sigset_t)) {
9352                 return -TARGET_EINVAL;
9353             }
9354 
9355             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9356             if (!p) {
9357                 return -TARGET_EFAULT;
9358             }
9359             target_to_host_sigset(&set, p);
9360             unlock_user(p, arg1, 0);
9361             if (arg3) {
9362                 puts = &uts;
9363                 if (target_to_host_timespec64(puts, arg3)) {
9364                     return -TARGET_EFAULT;
9365                 }
9366             } else {
9367                 puts = NULL;
9368             }
9369             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9370                                                  SIGSET_T_SIZE));
9371             if (!is_error(ret)) {
9372                 if (arg2) {
9373                     p = lock_user(VERIFY_WRITE, arg2,
9374                                   sizeof(target_siginfo_t), 0);
9375                     if (!p) {
9376                         return -TARGET_EFAULT;
9377                     }
9378                     host_to_target_siginfo(p, &uinfo);
9379                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9380                 }
9381                 ret = host_to_target_signal(ret);
9382             }
9383         }
9384         return ret;
9385 #endif
9386     case TARGET_NR_rt_sigqueueinfo:
9387         {
9388             siginfo_t uinfo;
9389 
9390             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9391             if (!p) {
9392                 return -TARGET_EFAULT;
9393             }
9394             target_to_host_siginfo(&uinfo, p);
9395             unlock_user(p, arg3, 0);
9396             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9397         }
9398         return ret;
9399     case TARGET_NR_rt_tgsigqueueinfo:
9400         {
9401             siginfo_t uinfo;
9402 
9403             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9404             if (!p) {
9405                 return -TARGET_EFAULT;
9406             }
9407             target_to_host_siginfo(&uinfo, p);
9408             unlock_user(p, arg4, 0);
9409             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9410         }
9411         return ret;
9412 #ifdef TARGET_NR_sigreturn
9413     case TARGET_NR_sigreturn:
9414         if (block_signals()) {
9415             return -TARGET_ERESTARTSYS;
9416         }
9417         return do_sigreturn(cpu_env);
9418 #endif
9419     case TARGET_NR_rt_sigreturn:
9420         if (block_signals()) {
9421             return -TARGET_ERESTARTSYS;
9422         }
9423         return do_rt_sigreturn(cpu_env);
9424     case TARGET_NR_sethostname:
9425         if (!(p = lock_user_string(arg1)))
9426             return -TARGET_EFAULT;
9427         ret = get_errno(sethostname(p, arg2));
9428         unlock_user(p, arg1, 0);
9429         return ret;
9430 #ifdef TARGET_NR_setrlimit
9431     case TARGET_NR_setrlimit:
9432         {
9433             int resource = target_to_host_resource(arg1);
9434             struct target_rlimit *target_rlim;
9435             struct rlimit rlim;
9436             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9437                 return -TARGET_EFAULT;
9438             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9439             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9440             unlock_user_struct(target_rlim, arg2, 0);
9441             /*
9442              * If we just passed through resource limit settings for memory then
9443              * they would also apply to QEMU's own allocations, and QEMU will
9444              * crash or hang or die if its allocations fail. Ideally we would
9445              * track the guest allocations in QEMU and apply the limits ourselves.
9446              * For now, just tell the guest the call succeeded but don't actually
9447              * limit anything.
9448              */
9449             if (resource != RLIMIT_AS &&
9450                 resource != RLIMIT_DATA &&
9451                 resource != RLIMIT_STACK) {
9452                 return get_errno(setrlimit(resource, &rlim));
9453             } else {
9454                 return 0;
9455             }
9456         }
9457 #endif
9458 #ifdef TARGET_NR_getrlimit
9459     case TARGET_NR_getrlimit:
9460         {
9461             int resource = target_to_host_resource(arg1);
9462             struct target_rlimit *target_rlim;
9463             struct rlimit rlim;
9464 
9465             ret = get_errno(getrlimit(resource, &rlim));
9466             if (!is_error(ret)) {
9467                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9468                     return -TARGET_EFAULT;
9469                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9470                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9471                 unlock_user_struct(target_rlim, arg2, 1);
9472             }
9473         }
9474         return ret;
9475 #endif
9476     case TARGET_NR_getrusage:
9477         {
9478             struct rusage rusage;
9479             ret = get_errno(getrusage(arg1, &rusage));
9480             if (!is_error(ret)) {
9481                 ret = host_to_target_rusage(arg2, &rusage);
9482             }
9483         }
9484         return ret;
9485 #if defined(TARGET_NR_gettimeofday)
9486     case TARGET_NR_gettimeofday:
9487         {
9488             struct timeval tv;
9489             struct timezone tz;
9490 
9491             ret = get_errno(gettimeofday(&tv, &tz));
9492             if (!is_error(ret)) {
9493                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9494                     return -TARGET_EFAULT;
9495                 }
9496                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9497                     return -TARGET_EFAULT;
9498                 }
9499             }
9500         }
9501         return ret;
9502 #endif
9503 #if defined(TARGET_NR_settimeofday)
9504     case TARGET_NR_settimeofday:
9505         {
9506             struct timeval tv, *ptv = NULL;
9507             struct timezone tz, *ptz = NULL;
9508 
9509             if (arg1) {
9510                 if (copy_from_user_timeval(&tv, arg1)) {
9511                     return -TARGET_EFAULT;
9512                 }
9513                 ptv = &tv;
9514             }
9515 
9516             if (arg2) {
9517                 if (copy_from_user_timezone(&tz, arg2)) {
9518                     return -TARGET_EFAULT;
9519                 }
9520                 ptz = &tz;
9521             }
9522 
9523             return get_errno(settimeofday(ptv, ptz));
9524         }
9525 #endif
9526 #if defined(TARGET_NR_select)
9527     case TARGET_NR_select:
9528 #if defined(TARGET_WANT_NI_OLD_SELECT)
9529         /* Some architectures used to have old_select here
9530          * but now return ENOSYS for it.
9531          */
9532         ret = -TARGET_ENOSYS;
9533 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9534         ret = do_old_select(arg1);
9535 #else
9536         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9537 #endif
9538         return ret;
9539 #endif
9540 #ifdef TARGET_NR_pselect6
9541     case TARGET_NR_pselect6:
9542         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9543 #endif
9544 #ifdef TARGET_NR_pselect6_time64
9545     case TARGET_NR_pselect6_time64:
9546         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9547 #endif
9548 #ifdef TARGET_NR_symlink
9549     case TARGET_NR_symlink:
9550         {
9551             void *p2;
9552             p = lock_user_string(arg1);
9553             p2 = lock_user_string(arg2);
9554             if (!p || !p2)
9555                 ret = -TARGET_EFAULT;
9556             else
9557                 ret = get_errno(symlink(p, p2));
9558             unlock_user(p2, arg2, 0);
9559             unlock_user(p, arg1, 0);
9560         }
9561         return ret;
9562 #endif
9563 #if defined(TARGET_NR_symlinkat)
9564     case TARGET_NR_symlinkat:
9565         {
9566             void *p2;
9567             p  = lock_user_string(arg1);
9568             p2 = lock_user_string(arg3);
9569             if (!p || !p2)
9570                 ret = -TARGET_EFAULT;
9571             else
9572                 ret = get_errno(symlinkat(p, arg2, p2));
9573             unlock_user(p2, arg3, 0);
9574             unlock_user(p, arg1, 0);
9575         }
9576         return ret;
9577 #endif
9578 #ifdef TARGET_NR_readlink
9579     case TARGET_NR_readlink:
9580         {
9581             void *p2;
9582             p = lock_user_string(arg1);
9583             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9584             if (!p || !p2) {
9585                 ret = -TARGET_EFAULT;
9586             } else if (!arg3) {
9587                 /* Short circuit this for the magic exe check. */
9588                 ret = -TARGET_EINVAL;
9589             } else if (is_proc_myself((const char *)p, "exe")) {
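                /* Reading /proc/self/exe: report the path of the emulated
                 * binary rather than the QEMU executable itself. */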
9590                 char real[PATH_MAX], *temp;
9591                 temp = realpath(exec_path, real);
9592                 /* Return value is # of bytes that we wrote to the buffer. */
9593                 if (temp == NULL) {
9594                     ret = get_errno(-1);
9595                 } else {
9596                     /* Don't worry about sign mismatch as earlier mapping
9597                      * logic would have thrown a bad address error. */
9598                     ret = MIN(strlen(real), arg3);
9599                     /* We cannot NUL terminate the string. */
9600                     memcpy(p2, real, ret);
9601                 }
9602             } else {
9603                 ret = get_errno(readlink(path(p), p2, arg3));
9604             }
9605             unlock_user(p2, arg2, ret);
9606             unlock_user(p, arg1, 0);
9607         }
9608         return ret;
9609 #endif
9610 #if defined(TARGET_NR_readlinkat)
9611     case TARGET_NR_readlinkat:
9612         {
9613             void *p2;
9614             p  = lock_user_string(arg2);
9615             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9616             if (!p || !p2) {
9617                 ret = -TARGET_EFAULT;
9618             } else if (is_proc_myself((const char *)p, "exe")) {
9619                 char real[PATH_MAX], *temp;
9620                 temp = realpath(exec_path, real);
9621                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9622                 snprintf((char *)p2, arg4, "%s", real);
9623             } else {
9624                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9625             }
9626             unlock_user(p2, arg3, ret);
9627             unlock_user(p, arg2, 0);
9628         }
9629         return ret;
9630 #endif
9631 #ifdef TARGET_NR_swapon
9632     case TARGET_NR_swapon:
9633         if (!(p = lock_user_string(arg1)))
9634             return -TARGET_EFAULT;
9635         ret = get_errno(swapon(p, arg2));
9636         unlock_user(p, arg1, 0);
9637         return ret;
9638 #endif
9639     case TARGET_NR_reboot:
9640         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9641            /* arg4 (the command string) is only valid for LINUX_REBOOT_CMD_RESTART2 */
9642            p = lock_user_string(arg4);
9643            if (!p) {
9644                return -TARGET_EFAULT;
9645            }
9646            ret = get_errno(reboot(arg1, arg2, arg3, p));
9647            unlock_user(p, arg4, 0);
9648         } else {
9649            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9650         }
9651         return ret;
9652 #ifdef TARGET_NR_mmap
9653     case TARGET_NR_mmap:
9654 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9655     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9656     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9657     || defined(TARGET_S390X)
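        /* These targets use the old mmap() convention: arg1 points to a
         * block of six arguments in guest memory. */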
9658         {
9659             abi_ulong *v;
9660             abi_ulong v1, v2, v3, v4, v5, v6;
9661             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9662                 return -TARGET_EFAULT;
9663             v1 = tswapal(v[0]);
9664             v2 = tswapal(v[1]);
9665             v3 = tswapal(v[2]);
9666             v4 = tswapal(v[3]);
9667             v5 = tswapal(v[4]);
9668             v6 = tswapal(v[5]);
9669             unlock_user(v, arg1, 0);
9670             ret = get_errno(target_mmap(v1, v2, v3,
9671                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9672                                         v5, v6));
9673         }
9674 #else
9675         ret = get_errno(target_mmap(arg1, arg2, arg3,
9676                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9677                                     arg5,
9678                                     arg6));
9679 #endif
9680         return ret;
9681 #endif
9682 #ifdef TARGET_NR_mmap2
9683     case TARGET_NR_mmap2:
9684 #ifndef MMAP_SHIFT
9685 #define MMAP_SHIFT 12
9686 #endif
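        /* mmap2 passes the file offset in MMAP_SHIFT-sized (4096-byte by
         * default) units rather than in bytes. */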
9687         ret = target_mmap(arg1, arg2, arg3,
9688                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9689                           arg5, arg6 << MMAP_SHIFT);
9690         return get_errno(ret);
9691 #endif
9692     case TARGET_NR_munmap:
9693         return get_errno(target_munmap(arg1, arg2));
9694     case TARGET_NR_mprotect:
9695         {
9696             TaskState *ts = cpu->opaque;
9697             /* Special hack to detect libc making the stack executable.  */
9698             if ((arg3 & PROT_GROWSDOWN)
9699                 && arg1 >= ts->info->stack_limit
9700                 && arg1 <= ts->info->start_stack) {
9701                 arg3 &= ~PROT_GROWSDOWN;
9702                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9703                 arg1 = ts->info->stack_limit;
9704             }
9705         }
9706         return get_errno(target_mprotect(arg1, arg2, arg3));
9707 #ifdef TARGET_NR_mremap
9708     case TARGET_NR_mremap:
9709         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9710 #endif
9711         /* ??? msync/mlock/munlock are broken for softmmu.  */
9712 #ifdef TARGET_NR_msync
9713     case TARGET_NR_msync:
9714         return get_errno(msync(g2h(arg1), arg2, arg3));
9715 #endif
9716 #ifdef TARGET_NR_mlock
9717     case TARGET_NR_mlock:
9718         return get_errno(mlock(g2h(arg1), arg2));
9719 #endif
9720 #ifdef TARGET_NR_munlock
9721     case TARGET_NR_munlock:
9722         return get_errno(munlock(g2h(arg1), arg2));
9723 #endif
9724 #ifdef TARGET_NR_mlockall
9725     case TARGET_NR_mlockall:
9726         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9727 #endif
9728 #ifdef TARGET_NR_munlockall
9729     case TARGET_NR_munlockall:
9730         return get_errno(munlockall());
9731 #endif
9732 #ifdef TARGET_NR_truncate
9733     case TARGET_NR_truncate:
9734         if (!(p = lock_user_string(arg1)))
9735             return -TARGET_EFAULT;
9736         ret = get_errno(truncate(p, arg2));
9737         unlock_user(p, arg1, 0);
9738         return ret;
9739 #endif
9740 #ifdef TARGET_NR_ftruncate
9741     case TARGET_NR_ftruncate:
9742         return get_errno(ftruncate(arg1, arg2));
9743 #endif
9744     case TARGET_NR_fchmod:
9745         return get_errno(fchmod(arg1, arg2));
9746 #if defined(TARGET_NR_fchmodat)
9747     case TARGET_NR_fchmodat:
9748         if (!(p = lock_user_string(arg2)))
9749             return -TARGET_EFAULT;
9750         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9751         unlock_user(p, arg2, 0);
9752         return ret;
9753 #endif
9754     case TARGET_NR_getpriority:
9755         /* Note that negative values are valid for getpriority, so we must
9756            differentiate based on errno settings.  */
9757         errno = 0;
9758         ret = getpriority(arg1, arg2);
9759         if (ret == -1 && errno != 0) {
9760             return -host_to_target_errno(errno);
9761         }
9762 #ifdef TARGET_ALPHA
9763         /* Return value is the unbiased priority.  Signal no error.  */
9764         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9765 #else
9766         /* Return value is a biased priority to avoid negative numbers.  */
9767         ret = 20 - ret;
9768 #endif
9769         return ret;
9770     case TARGET_NR_setpriority:
9771         return get_errno(setpriority(arg1, arg2, arg3));
9772 #ifdef TARGET_NR_statfs
9773     case TARGET_NR_statfs:
9774         if (!(p = lock_user_string(arg1))) {
9775             return -TARGET_EFAULT;
9776         }
9777         ret = get_errno(statfs(path(p), &stfs));
9778         unlock_user(p, arg1, 0);
9779     convert_statfs:
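        /* goto target shared with TARGET_NR_fstatfs: copy the host statfs
         * back to the guest in the target layout. */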
9780         if (!is_error(ret)) {
9781             struct target_statfs *target_stfs;
9782 
9783             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9784                 return -TARGET_EFAULT;
9785             __put_user(stfs.f_type, &target_stfs->f_type);
9786             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9787             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9788             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9789             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9790             __put_user(stfs.f_files, &target_stfs->f_files);
9791             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9792             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9793             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9794             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9795             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9796 #ifdef _STATFS_F_FLAGS
9797             __put_user(stfs.f_flags, &target_stfs->f_flags);
9798 #else
9799             __put_user(0, &target_stfs->f_flags);
9800 #endif
9801             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9802             unlock_user_struct(target_stfs, arg2, 1);
9803         }
9804         return ret;
9805 #endif
9806 #ifdef TARGET_NR_fstatfs
9807     case TARGET_NR_fstatfs:
9808         ret = get_errno(fstatfs(arg1, &stfs));
9809         goto convert_statfs;
9810 #endif
9811 #ifdef TARGET_NR_statfs64
9812     case TARGET_NR_statfs64:
9813         if (!(p = lock_user_string(arg1))) {
9814             return -TARGET_EFAULT;
9815         }
9816         ret = get_errno(statfs(path(p), &stfs));
9817         unlock_user(p, arg1, 0);
9818     convert_statfs64:
9819         if (!is_error(ret)) {
9820             struct target_statfs64 *target_stfs;
9821 
9822             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9823                 return -TARGET_EFAULT;
9824             __put_user(stfs.f_type, &target_stfs->f_type);
9825             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9826             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9827             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9828             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9829             __put_user(stfs.f_files, &target_stfs->f_files);
9830             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9831             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9832             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9833             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9834             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9835 #ifdef _STATFS_F_FLAGS
9836             __put_user(stfs.f_flags, &target_stfs->f_flags);
9837 #else
9838             __put_user(0, &target_stfs->f_flags);
9839 #endif
9840             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9841             unlock_user_struct(target_stfs, arg3, 1);
9842         }
9843         return ret;
9844     case TARGET_NR_fstatfs64:
9845         ret = get_errno(fstatfs(arg1, &stfs));
9846         goto convert_statfs64;
9847 #endif
9848 #ifdef TARGET_NR_socketcall
9849     case TARGET_NR_socketcall:
9850         return do_socketcall(arg1, arg2);
9851 #endif
9852 #ifdef TARGET_NR_accept
9853     case TARGET_NR_accept:
9854         return do_accept4(arg1, arg2, arg3, 0);
9855 #endif
9856 #ifdef TARGET_NR_accept4
9857     case TARGET_NR_accept4:
9858         return do_accept4(arg1, arg2, arg3, arg4);
9859 #endif
9860 #ifdef TARGET_NR_bind
9861     case TARGET_NR_bind:
9862         return do_bind(arg1, arg2, arg3);
9863 #endif
9864 #ifdef TARGET_NR_connect
9865     case TARGET_NR_connect:
9866         return do_connect(arg1, arg2, arg3);
9867 #endif
9868 #ifdef TARGET_NR_getpeername
9869     case TARGET_NR_getpeername:
9870         return do_getpeername(arg1, arg2, arg3);
9871 #endif
9872 #ifdef TARGET_NR_getsockname
9873     case TARGET_NR_getsockname:
9874         return do_getsockname(arg1, arg2, arg3);
9875 #endif
9876 #ifdef TARGET_NR_getsockopt
9877     case TARGET_NR_getsockopt:
9878         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9879 #endif
9880 #ifdef TARGET_NR_listen
9881     case TARGET_NR_listen:
9882         return get_errno(listen(arg1, arg2));
9883 #endif
9884 #ifdef TARGET_NR_recv
9885     case TARGET_NR_recv:
9886         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9887 #endif
9888 #ifdef TARGET_NR_recvfrom
9889     case TARGET_NR_recvfrom:
9890         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9891 #endif
9892 #ifdef TARGET_NR_recvmsg
9893     case TARGET_NR_recvmsg:
9894         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9895 #endif
9896 #ifdef TARGET_NR_send
9897     case TARGET_NR_send:
9898         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9899 #endif
9900 #ifdef TARGET_NR_sendmsg
9901     case TARGET_NR_sendmsg:
9902         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9903 #endif
9904 #ifdef TARGET_NR_sendmmsg
9905     case TARGET_NR_sendmmsg:
9906         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9907 #endif
9908 #ifdef TARGET_NR_recvmmsg
9909     case TARGET_NR_recvmmsg:
9910         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9911 #endif
9912 #ifdef TARGET_NR_sendto
9913     case TARGET_NR_sendto:
9914         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9915 #endif
9916 #ifdef TARGET_NR_shutdown
9917     case TARGET_NR_shutdown:
9918         return get_errno(shutdown(arg1, arg2));
9919 #endif
9920 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9921     case TARGET_NR_getrandom:
9922         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9923         if (!p) {
9924             return -TARGET_EFAULT;
9925         }
9926         ret = get_errno(getrandom(p, arg2, arg3));
9927         unlock_user(p, arg1, ret);
9928         return ret;
9929 #endif
9930 #ifdef TARGET_NR_socket
9931     case TARGET_NR_socket:
9932         return do_socket(arg1, arg2, arg3);
9933 #endif
9934 #ifdef TARGET_NR_socketpair
9935     case TARGET_NR_socketpair:
9936         return do_socketpair(arg1, arg2, arg3, arg4);
9937 #endif
9938 #ifdef TARGET_NR_setsockopt
9939     case TARGET_NR_setsockopt:
9940         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9941 #endif
9942 #if defined(TARGET_NR_syslog)
9943     case TARGET_NR_syslog:
9944         {
9945             int len = arg3;   /* arg2 is the guest buffer, arg3 its length */
9946 
9947             switch (arg1) {
9948             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9949             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9950             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9951             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9952             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9953             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9954             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9955             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9956                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9957             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9958             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9959             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9960                 {
9961                     if (len < 0) {
9962                         return -TARGET_EINVAL;
9963                     }
9964                     if (len == 0) {
9965                         return 0;
9966                     }
9967                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9968                     if (!p) {
9969                         return -TARGET_EFAULT;
9970                     }
9971                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9972                     unlock_user(p, arg2, arg3);
9973                 }
9974                 return ret;
9975             default:
9976                 return -TARGET_EINVAL;
9977             }
9978         }
9979         break;
9980 #endif
9981     case TARGET_NR_setitimer:
9982         {
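            /* A target itimerval is two consecutive target_timevals:
             * it_interval followed by it_value. */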
9983             struct itimerval value, ovalue, *pvalue;
9984 
9985             if (arg2) {
9986                 pvalue = &value;
9987                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9988                     || copy_from_user_timeval(&pvalue->it_value,
9989                                               arg2 + sizeof(struct target_timeval)))
9990                     return -TARGET_EFAULT;
9991             } else {
9992                 pvalue = NULL;
9993             }
9994             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9995             if (!is_error(ret) && arg3) {
9996                 if (copy_to_user_timeval(arg3,
9997                                          &ovalue.it_interval)
9998                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9999                                             &ovalue.it_value))
10000                     return -TARGET_EFAULT;
10001             }
10002         }
10003         return ret;
10004     case TARGET_NR_getitimer:
10005         {
10006             struct itimerval value;
10007 
10008             ret = get_errno(getitimer(arg1, &value));
10009             if (!is_error(ret) && arg2) {
10010                 if (copy_to_user_timeval(arg2,
10011                                          &value.it_interval)
10012                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10013                                             &value.it_value))
10014                     return -TARGET_EFAULT;
10015             }
10016         }
10017         return ret;
10018 #ifdef TARGET_NR_stat
10019     case TARGET_NR_stat:
10020         if (!(p = lock_user_string(arg1))) {
10021             return -TARGET_EFAULT;
10022         }
10023         ret = get_errno(stat(path(p), &st));
10024         unlock_user(p, arg1, 0);
10025         goto do_stat;
10026 #endif
10027 #ifdef TARGET_NR_lstat
10028     case TARGET_NR_lstat:
10029         if (!(p = lock_user_string(arg1))) {
10030             return -TARGET_EFAULT;
10031         }
10032         ret = get_errno(lstat(path(p), &st));
10033         unlock_user(p, arg1, 0);
10034         goto do_stat;
10035 #endif
10036 #ifdef TARGET_NR_fstat
10037     case TARGET_NR_fstat:
10038         {
10039             ret = get_errno(fstat(arg1, &st));
10040 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10041         do_stat:
10042 #endif
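            /* goto target shared with TARGET_NR_stat and TARGET_NR_lstat:
             * copy the host struct stat back to the guest. */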
10043             if (!is_error(ret)) {
10044                 struct target_stat *target_st;
10045 
10046                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10047                     return -TARGET_EFAULT;
10048                 memset(target_st, 0, sizeof(*target_st));
10049                 __put_user(st.st_dev, &target_st->st_dev);
10050                 __put_user(st.st_ino, &target_st->st_ino);
10051                 __put_user(st.st_mode, &target_st->st_mode);
10052                 __put_user(st.st_uid, &target_st->st_uid);
10053                 __put_user(st.st_gid, &target_st->st_gid);
10054                 __put_user(st.st_nlink, &target_st->st_nlink);
10055                 __put_user(st.st_rdev, &target_st->st_rdev);
10056                 __put_user(st.st_size, &target_st->st_size);
10057                 __put_user(st.st_blksize, &target_st->st_blksize);
10058                 __put_user(st.st_blocks, &target_st->st_blocks);
10059                 __put_user(st.st_atime, &target_st->target_st_atime);
10060                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10061                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10062 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10063     defined(TARGET_STAT_HAVE_NSEC)
10064                 __put_user(st.st_atim.tv_nsec,
10065                            &target_st->target_st_atime_nsec);
10066                 __put_user(st.st_mtim.tv_nsec,
10067                            &target_st->target_st_mtime_nsec);
10068                 __put_user(st.st_ctim.tv_nsec,
10069                            &target_st->target_st_ctime_nsec);
10070 #endif
10071                 unlock_user_struct(target_st, arg2, 1);
10072             }
10073         }
10074         return ret;
10075 #endif
10076     case TARGET_NR_vhangup:
10077         return get_errno(vhangup());
10078 #ifdef TARGET_NR_syscall
10079     case TARGET_NR_syscall:
10080         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10081                           arg6, arg7, arg8, 0);
10082 #endif
10083 #if defined(TARGET_NR_wait4)
10084     case TARGET_NR_wait4:
10085         {
10086             int status;
10087             abi_long status_ptr = arg2;
10088             struct rusage rusage, *rusage_ptr;
10089             abi_ulong target_rusage = arg4;
10090             abi_long rusage_err;
10091             if (target_rusage)
10092                 rusage_ptr = &rusage;
10093             else
10094                 rusage_ptr = NULL;
10095             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10096             if (!is_error(ret)) {
10097                 if (status_ptr && ret) {
10098                     status = host_to_target_waitstatus(status);
10099                     if (put_user_s32(status, status_ptr))
10100                         return -TARGET_EFAULT;
10101                 }
10102                 if (target_rusage) {
10103                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10104                     if (rusage_err) {
10105                         ret = rusage_err;
10106                     }
10107                 }
10108             }
10109         }
10110         return ret;
10111 #endif
10112 #ifdef TARGET_NR_swapoff
10113     case TARGET_NR_swapoff:
10114         if (!(p = lock_user_string(arg1)))
10115             return -TARGET_EFAULT;
10116         ret = get_errno(swapoff(p));
10117         unlock_user(p, arg1, 0);
10118         return ret;
10119 #endif
10120     case TARGET_NR_sysinfo:
10121         {
10122             struct target_sysinfo *target_value;
10123             struct sysinfo value;
10124             ret = get_errno(sysinfo(&value));
10125             if (!is_error(ret) && arg1)
10126             {
10127                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10128                     return -TARGET_EFAULT;
10129                 __put_user(value.uptime, &target_value->uptime);
10130                 __put_user(value.loads[0], &target_value->loads[0]);
10131                 __put_user(value.loads[1], &target_value->loads[1]);
10132                 __put_user(value.loads[2], &target_value->loads[2]);
10133                 __put_user(value.totalram, &target_value->totalram);
10134                 __put_user(value.freeram, &target_value->freeram);
10135                 __put_user(value.sharedram, &target_value->sharedram);
10136                 __put_user(value.bufferram, &target_value->bufferram);
10137                 __put_user(value.totalswap, &target_value->totalswap);
10138                 __put_user(value.freeswap, &target_value->freeswap);
10139                 __put_user(value.procs, &target_value->procs);
10140                 __put_user(value.totalhigh, &target_value->totalhigh);
10141                 __put_user(value.freehigh, &target_value->freehigh);
10142                 __put_user(value.mem_unit, &target_value->mem_unit);
10143                 unlock_user_struct(target_value, arg1, 1);
10144             }
10145         }
10146         return ret;
10147 #ifdef TARGET_NR_ipc
10148     case TARGET_NR_ipc:
10149         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10150 #endif
10151 #ifdef TARGET_NR_semget
10152     case TARGET_NR_semget:
10153         return get_errno(semget(arg1, arg2, arg3));
10154 #endif
10155 #ifdef TARGET_NR_semop
10156     case TARGET_NR_semop:
10157         return do_semtimedop(arg1, arg2, arg3, 0, false);
10158 #endif
10159 #ifdef TARGET_NR_semtimedop
10160     case TARGET_NR_semtimedop:
10161         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10162 #endif
10163 #ifdef TARGET_NR_semtimedop_time64
10164     case TARGET_NR_semtimedop_time64:
10165         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10166 #endif
10167 #ifdef TARGET_NR_semctl
10168     case TARGET_NR_semctl:
10169         return do_semctl(arg1, arg2, arg3, arg4);
10170 #endif
10171 #ifdef TARGET_NR_msgctl
10172     case TARGET_NR_msgctl:
10173         return do_msgctl(arg1, arg2, arg3);
10174 #endif
10175 #ifdef TARGET_NR_msgget
10176     case TARGET_NR_msgget:
10177         return get_errno(msgget(arg1, arg2));
10178 #endif
10179 #ifdef TARGET_NR_msgrcv
10180     case TARGET_NR_msgrcv:
10181         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10182 #endif
10183 #ifdef TARGET_NR_msgsnd
10184     case TARGET_NR_msgsnd:
10185         return do_msgsnd(arg1, arg2, arg3, arg4);
10186 #endif
10187 #ifdef TARGET_NR_shmget
10188     case TARGET_NR_shmget:
10189         return get_errno(shmget(arg1, arg2, arg3));
10190 #endif
10191 #ifdef TARGET_NR_shmctl
10192     case TARGET_NR_shmctl:
10193         return do_shmctl(arg1, arg2, arg3);
10194 #endif
10195 #ifdef TARGET_NR_shmat
10196     case TARGET_NR_shmat:
10197         return do_shmat(cpu_env, arg1, arg2, arg3);
10198 #endif
10199 #ifdef TARGET_NR_shmdt
10200     case TARGET_NR_shmdt:
10201         return do_shmdt(arg1);
10202 #endif
10203     case TARGET_NR_fsync:
10204         return get_errno(fsync(arg1));
10205     case TARGET_NR_clone:
10206         /* Linux manages to have three different orderings for its
10207          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10208          * match the kernel's CONFIG_CLONE_* settings.
10209          * Microblaze is further special in that it uses a sixth
10210          * implicit argument to clone for the TLS pointer.
10211          */
10212 #if defined(TARGET_MICROBLAZE)
10213         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10214 #elif defined(TARGET_CLONE_BACKWARDS)
10215         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10216 #elif defined(TARGET_CLONE_BACKWARDS2)
10217         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10218 #else
10219         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10220 #endif
10221         return ret;
10222 #ifdef __NR_exit_group
10223         /* new thread calls */
10224     case TARGET_NR_exit_group:
10225         preexit_cleanup(cpu_env, arg1);
10226         return get_errno(exit_group(arg1));
10227 #endif
10228     case TARGET_NR_setdomainname:
10229         if (!(p = lock_user_string(arg1)))
10230             return -TARGET_EFAULT;
10231         ret = get_errno(setdomainname(p, arg2));
10232         unlock_user(p, arg1, 0);
10233         return ret;
10234     case TARGET_NR_uname:
10235         /* no need to transcode because we use the linux syscall */
10236         {
10237             struct new_utsname * buf;
10238 
10239             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10240                 return -TARGET_EFAULT;
10241             ret = get_errno(sys_uname(buf));
10242             if (!is_error(ret)) {
10243                 /* Overwrite the native machine name with whatever is being
10244                    emulated. */
10245                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10246                           sizeof(buf->machine));
10247                 /* Allow the user to override the reported release.  */
10248                 if (qemu_uname_release && *qemu_uname_release) {
10249                     g_strlcpy(buf->release, qemu_uname_release,
10250                               sizeof(buf->release));
10251                 }
10252             }
10253             unlock_user_struct(buf, arg1, 1);
10254         }
10255         return ret;
10256 #ifdef TARGET_I386
10257     case TARGET_NR_modify_ldt:
10258         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10259 #if !defined(TARGET_X86_64)
10260     case TARGET_NR_vm86:
10261         return do_vm86(cpu_env, arg1, arg2);
10262 #endif
10263 #endif
10264 #if defined(TARGET_NR_adjtimex)
10265     case TARGET_NR_adjtimex:
10266         {
10267             struct timex host_buf;
10268 
10269             if (target_to_host_timex(&host_buf, arg1) != 0) {
10270                 return -TARGET_EFAULT;
10271             }
10272             ret = get_errno(adjtimex(&host_buf));
10273             if (!is_error(ret)) {
10274                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10275                     return -TARGET_EFAULT;
10276                 }
10277             }
10278         }
10279         return ret;
10280 #endif
10281 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10282     case TARGET_NR_clock_adjtime:
10283         {
10284             struct timex htx, *phtx = &htx;
10285 
10286             if (target_to_host_timex(phtx, arg2) != 0) {
10287                 return -TARGET_EFAULT;
10288             }
10289             ret = get_errno(clock_adjtime(arg1, phtx));
10290             if (!is_error(ret) && phtx) {
10291                 if (host_to_target_timex(arg2, phtx) != 0) {
10292                     return -TARGET_EFAULT;
10293                 }
10294             }
10295         }
10296         return ret;
10297 #endif
10298 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10299     case TARGET_NR_clock_adjtime64:
10300         {
10301             struct timex htx;
10302 
10303             if (target_to_host_timex64(&htx, arg2) != 0) {
10304                 return -TARGET_EFAULT;
10305             }
10306             ret = get_errno(clock_adjtime(arg1, &htx));
10307             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10308                     return -TARGET_EFAULT;
10309             }
10310         }
10311         return ret;
10312 #endif
10313     case TARGET_NR_getpgid:
10314         return get_errno(getpgid(arg1));
10315     case TARGET_NR_fchdir:
10316         return get_errno(fchdir(arg1));
10317     case TARGET_NR_personality:
10318         return get_errno(personality(arg1));
10319 #ifdef TARGET_NR__llseek /* Not on alpha */
10320     case TARGET_NR__llseek:
10321         {
10322             int64_t res;
10323 #if !defined(__NR_llseek)
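            /* The host has no llseek syscall (64-bit host): combine the
             * two 32-bit halves and call lseek() directly. */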
10324             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10325             if (res == -1) {
10326                 ret = get_errno(res);
10327             } else {
10328                 ret = 0;
10329             }
10330 #else
10331             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10332 #endif
10333             if ((ret == 0) && put_user_s64(res, arg4)) {
10334                 return -TARGET_EFAULT;
10335             }
10336         }
10337         return ret;
10338 #endif
10339 #ifdef TARGET_NR_getdents
10340     case TARGET_NR_getdents:
10341 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10342 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
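        /* 32-bit guest on a 64-bit host: the host linux_dirent layout
         * differs from the target's, so read into a bounce buffer and
         * convert each record into the user-supplied buffer. */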
10343         {
10344             struct target_dirent *target_dirp;
10345             struct linux_dirent *dirp;
10346             abi_long count = arg3;
10347 
10348             dirp = g_try_malloc(count);
10349             if (!dirp) {
10350                 return -TARGET_ENOMEM;
10351             }
10352 
10353             ret = get_errno(sys_getdents(arg1, dirp, count));
10354             if (!is_error(ret)) {
10355                 struct linux_dirent *de;
10356                 struct target_dirent *tde;
10357                 int len = ret;
10358                 int reclen, treclen;
10359                 int count1, tnamelen;
10360 
10361                 count1 = 0;
10362                 de = dirp;
10363                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10364                     return -TARGET_EFAULT;
10365                 tde = target_dirp;
10366                 while (len > 0) {
10367                     reclen = de->d_reclen;
10368                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10369                     assert(tnamelen >= 0);
10370                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10371                     assert(count1 + treclen <= count);
10372                     tde->d_reclen = tswap16(treclen);
10373                     tde->d_ino = tswapal(de->d_ino);
10374                     tde->d_off = tswapal(de->d_off);
10375                     memcpy(tde->d_name, de->d_name, tnamelen);
10376                     de = (struct linux_dirent *)((char *)de + reclen);
10377                     len -= reclen;
10378                     tde = (struct target_dirent *)((char *)tde + treclen);
10379                     count1 += treclen;
10380                 }
10381                 ret = count1;
10382                 unlock_user(target_dirp, arg2, ret);
10383             }
10384             g_free(dirp);
10385         }
10386 #else
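        /* Host and target linux_dirent layouts match: only the fields
         * need to be byte-swapped, which can be done in place. */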
10387         {
10388             struct linux_dirent *dirp;
10389             abi_long count = arg3;
10390 
10391             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10392                 return -TARGET_EFAULT;
10393             ret = get_errno(sys_getdents(arg1, dirp, count));
10394             if (!is_error(ret)) {
10395                 struct linux_dirent *de;
10396                 int len = ret;
10397                 int reclen;
10398                 de = dirp;
10399                 while (len > 0) {
10400                     reclen = de->d_reclen;
10401                     if (reclen > len)
10402                         break;
10403                     de->d_reclen = tswap16(reclen);
10404                     tswapls(&de->d_ino);
10405                     tswapls(&de->d_off);
10406                     de = (struct linux_dirent *)((char *)de + reclen);
10407                     len -= reclen;
10408                 }
10409             }
10410             unlock_user(dirp, arg2, ret);
10411         }
10412 #endif
10413 #else
10414         /* Implement getdents in terms of getdents64 */
10415         {
10416             struct linux_dirent64 *dirp;
10417             abi_long count = arg3;
10418 
10419             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10420             if (!dirp) {
10421                 return -TARGET_EFAULT;
10422             }
10423             ret = get_errno(sys_getdents64(arg1, dirp, count));
10424             if (!is_error(ret)) {
10425                 /* Convert the dirent64 structs to target dirent.  We do this
10426                  * in-place, since we can guarantee that a target_dirent is no
10427                  * larger than a dirent64; however this means we have to be
10428                  * careful to read everything before writing in the new format.
10429                  */
10430                 struct linux_dirent64 *de;
10431                 struct target_dirent *tde;
10432                 int len = ret;
10433                 int tlen = 0;
10434 
10435                 de = dirp;
10436                 tde = (struct target_dirent *)dirp;
10437                 while (len > 0) {
10438                     int namelen, treclen;
10439                     int reclen = de->d_reclen;
10440                     uint64_t ino = de->d_ino;
10441                     int64_t off = de->d_off;
10442                     uint8_t type = de->d_type;
10443 
10444                     namelen = strlen(de->d_name);
10445                     treclen = offsetof(struct target_dirent, d_name)
10446                         + namelen + 2;
10447                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10448 
10449                     memmove(tde->d_name, de->d_name, namelen + 1);
10450                     tde->d_ino = tswapal(ino);
10451                     tde->d_off = tswapal(off);
10452                     tde->d_reclen = tswap16(treclen);
10453                     /* The target_dirent type is in what was formerly a padding
10454                      * byte at the end of the structure:
10455                      */
10456                     *(((char *)tde) + treclen - 1) = type;
10457 
10458                     de = (struct linux_dirent64 *)((char *)de + reclen);
10459                     tde = (struct target_dirent *)((char *)tde + treclen);
10460                     len -= reclen;
10461                     tlen += treclen;
10462                 }
10463                 ret = tlen;
10464             }
10465             unlock_user(dirp, arg2, ret);
10466         }
10467 #endif
10468         return ret;
10469 #endif /* TARGET_NR_getdents */
10470 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10471     case TARGET_NR_getdents64:
10472         {
10473             struct linux_dirent64 *dirp;
10474             abi_long count = arg3;
10475             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10476                 return -TARGET_EFAULT;
10477             ret = get_errno(sys_getdents64(arg1, dirp, count));
10478             if (!is_error(ret)) {
10479                 struct linux_dirent64 *de;
10480                 int len = ret;
10481                 int reclen;
10482                 de = dirp;
10483                 while (len > 0) {
10484                     reclen = de->d_reclen;
10485                     if (reclen > len)
10486                         break;
10487                     de->d_reclen = tswap16(reclen);
10488                     tswap64s((uint64_t *)&de->d_ino);
10489                     tswap64s((uint64_t *)&de->d_off);
10490                     de = (struct linux_dirent64 *)((char *)de + reclen);
10491                     len -= reclen;
10492                 }
10493             }
10494             unlock_user(dirp, arg2, ret);
10495         }
10496         return ret;
10497 #endif /* TARGET_NR_getdents64 */
10498 #if defined(TARGET_NR__newselect)
10499     case TARGET_NR__newselect:
10500         return do_select(arg1, arg2, arg3, arg4, arg5);
10501 #endif
10502 #ifdef TARGET_NR_poll
10503     case TARGET_NR_poll:
10504         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10505 #endif
10506 #ifdef TARGET_NR_ppoll
10507     case TARGET_NR_ppoll:
10508         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10509 #endif
10510 #ifdef TARGET_NR_ppoll_time64
10511     case TARGET_NR_ppoll_time64:
10512         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10513 #endif
10514     case TARGET_NR_flock:
10515         /* NOTE: the flock constant seems to be the same for every
10516            Linux platform */
10517         return get_errno(safe_flock(arg1, arg2));
10518     case TARGET_NR_readv:
10519         {
10520             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10521             if (vec != NULL) {
10522                 ret = get_errno(safe_readv(arg1, vec, arg3));
10523                 unlock_iovec(vec, arg2, arg3, 1);
10524             } else {
10525                 ret = -host_to_target_errno(errno);
10526             }
10527         }
10528         return ret;
10529     case TARGET_NR_writev:
10530         {
10531             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10532             if (vec != NULL) {
10533                 ret = get_errno(safe_writev(arg1, vec, arg3));
10534                 unlock_iovec(vec, arg2, arg3, 0);
10535             } else {
10536                 ret = -host_to_target_errno(errno);
10537             }
10538         }
10539         return ret;
10540 #if defined(TARGET_NR_preadv)
10541     case TARGET_NR_preadv:
10542         {
10543             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10544             if (vec != NULL) {
10545                 unsigned long low, high;
10546 
10547                 target_to_host_low_high(arg4, arg5, &low, &high);
10548                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10549                 unlock_iovec(vec, arg2, arg3, 1);
10550             } else {
10551                 ret = -host_to_target_errno(errno);
10552             }
10553         }
10554         return ret;
10555 #endif
10556 #if defined(TARGET_NR_pwritev)
10557     case TARGET_NR_pwritev:
10558         {
10559             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10560             if (vec != NULL) {
10561                 unsigned long low, high;
10562 
10563                 target_to_host_low_high(arg4, arg5, &low, &high);
10564                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10565                 unlock_iovec(vec, arg2, arg3, 0);
10566             } else {
10567                 ret = -host_to_target_errno(errno);
10568             }
10569         }
10570         return ret;
10571 #endif
10572     case TARGET_NR_getsid:
10573         return get_errno(getsid(arg1));
10574 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10575     case TARGET_NR_fdatasync:
10576         return get_errno(fdatasync(arg1));
10577 #endif
10578     case TARGET_NR_sched_getaffinity:
10579         {
10580             unsigned int mask_size;
10581             unsigned long *mask;
10582 
10583             /*
             * sched_getaffinity needs multiples of ulong, so we need to take
             * care of mismatches between the target and host ulong sizes.
10586              */
10587             if (arg2 & (sizeof(abi_ulong) - 1)) {
10588                 return -TARGET_EINVAL;
10589             }
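            /* Round the buffer size up to a whole number of host longs. */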
10590             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10591 
10592             mask = alloca(mask_size);
10593             memset(mask, 0, mask_size);
10594             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10595 
10596             if (!is_error(ret)) {
10597                 if (ret > arg2) {
                    /*
                     * The kernel returned more data than will fit in the
                     * caller's buffer.  This only happens if
                     * sizeof(abi_long) < sizeof(long) and the caller passed
                     * us a buffer holding an odd number of abi_longs.  If the
                     * host kernel is actually using the extra 4 bytes then
                     * fail with EINVAL; otherwise we can just ignore them and
                     * only copy the interesting part.
                     */
10605                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10606                     if (numcpus > arg2 * 8) {
10607                         return -TARGET_EINVAL;
10608                     }
10609                     ret = arg2;
10610                 }
10611 
10612                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10613                     return -TARGET_EFAULT;
10614                 }
10615             }
10616         }
10617         return ret;
10618     case TARGET_NR_sched_setaffinity:
10619         {
10620             unsigned int mask_size;
10621             unsigned long *mask;
10622 
10623             /*
             * sched_setaffinity needs multiples of ulong, so we need to take
             * care of mismatches between the target and host ulong sizes.
10626              */
10627             if (arg2 & (sizeof(abi_ulong) - 1)) {
10628                 return -TARGET_EINVAL;
10629             }
10630             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10631             mask = alloca(mask_size);
10632 
10633             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10634             if (ret) {
10635                 return ret;
10636             }
10637 
10638             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10639         }
10640     case TARGET_NR_getcpu:
10641         {
10642             unsigned cpu, node;
10643             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10644                                        arg2 ? &node : NULL,
10645                                        NULL));
10646             if (is_error(ret)) {
10647                 return ret;
10648             }
10649             if (arg1 && put_user_u32(cpu, arg1)) {
10650                 return -TARGET_EFAULT;
10651             }
10652             if (arg2 && put_user_u32(node, arg2)) {
10653                 return -TARGET_EFAULT;
10654             }
10655         }
10656         return ret;
10657     case TARGET_NR_sched_setparam:
10658         {
10659             struct sched_param *target_schp;
10660             struct sched_param schp;
10661 
10662             if (arg2 == 0) {
10663                 return -TARGET_EINVAL;
10664             }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
10667             schp.sched_priority = tswap32(target_schp->sched_priority);
10668             unlock_user_struct(target_schp, arg2, 0);
10669             return get_errno(sched_setparam(arg1, &schp));
10670         }
10671     case TARGET_NR_sched_getparam:
10672         {
10673             struct sched_param *target_schp;
10674             struct sched_param schp;
10675 
10676             if (arg2 == 0) {
10677                 return -TARGET_EINVAL;
10678             }
10679             ret = get_errno(sched_getparam(arg1, &schp));
10680             if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
10683                 target_schp->sched_priority = tswap32(schp.sched_priority);
10684                 unlock_user_struct(target_schp, arg2, 1);
10685             }
10686         }
10687         return ret;
10688     case TARGET_NR_sched_setscheduler:
10689         {
10690             struct sched_param *target_schp;
10691             struct sched_param schp;
10692             if (arg3 == 0) {
10693                 return -TARGET_EINVAL;
10694             }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
10697             schp.sched_priority = tswap32(target_schp->sched_priority);
10698             unlock_user_struct(target_schp, arg3, 0);
10699             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10700         }
10701     case TARGET_NR_sched_getscheduler:
10702         return get_errno(sched_getscheduler(arg1));
10703     case TARGET_NR_sched_yield:
10704         return get_errno(sched_yield());
10705     case TARGET_NR_sched_get_priority_max:
10706         return get_errno(sched_get_priority_max(arg1));
10707     case TARGET_NR_sched_get_priority_min:
10708         return get_errno(sched_get_priority_min(arg1));
10709 #ifdef TARGET_NR_sched_rr_get_interval
10710     case TARGET_NR_sched_rr_get_interval:
10711         {
10712             struct timespec ts;
10713             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10714             if (!is_error(ret)) {
10715                 ret = host_to_target_timespec(arg2, &ts);
10716             }
10717         }
10718         return ret;
10719 #endif
10720 #ifdef TARGET_NR_sched_rr_get_interval_time64
10721     case TARGET_NR_sched_rr_get_interval_time64:
10722         {
10723             struct timespec ts;
10724             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10725             if (!is_error(ret)) {
10726                 ret = host_to_target_timespec64(arg2, &ts);
10727             }
10728         }
10729         return ret;
10730 #endif
10731 #if defined(TARGET_NR_nanosleep)
10732     case TARGET_NR_nanosleep:
10733         {
10734             struct timespec req, rem;
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
10740         }
10741         return ret;
10742 #endif
10743     case TARGET_NR_prctl:
10744         switch (arg1) {
10745         case PR_GET_PDEATHSIG:
10746         {
10747             int deathsig;
10748             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10749             if (!is_error(ret) && arg2
10750                 && put_user_s32(deathsig, arg2)) {
10751                 return -TARGET_EFAULT;
10752             }
10753             return ret;
10754         }
10755 #ifdef PR_GET_NAME
10756         case PR_GET_NAME:
10757         {
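            /* The task comm name is at most 16 bytes, including the NUL. */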
10758             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10759             if (!name) {
10760                 return -TARGET_EFAULT;
10761             }
10762             ret = get_errno(prctl(arg1, (unsigned long)name,
10763                                   arg3, arg4, arg5));
10764             unlock_user(name, arg2, 16);
10765             return ret;
10766         }
10767         case PR_SET_NAME:
10768         {
10769             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10770             if (!name) {
10771                 return -TARGET_EFAULT;
10772             }
10773             ret = get_errno(prctl(arg1, (unsigned long)name,
10774                                   arg3, arg4, arg5));
10775             unlock_user(name, arg2, 0);
10776             return ret;
10777         }
10778 #endif
10779 #ifdef TARGET_MIPS
10780         case TARGET_PR_GET_FP_MODE:
10781         {
10782             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10783             ret = 0;
10784             if (env->CP0_Status & (1 << CP0St_FR)) {
10785                 ret |= TARGET_PR_FP_MODE_FR;
10786             }
10787             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10788                 ret |= TARGET_PR_FP_MODE_FRE;
10789             }
10790             return ret;
10791         }
10792         case TARGET_PR_SET_FP_MODE:
10793         {
10794             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10795             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10796             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10797             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10798             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10799 
10800             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10801                                             TARGET_PR_FP_MODE_FRE;
10802 
10803             /* If nothing to change, return right away, successfully.  */
10804             if (old_fr == new_fr && old_fre == new_fre) {
10805                 return 0;
10806             }
10807             /* Check the value is valid */
10808             if (arg2 & ~known_bits) {
10809                 return -TARGET_EOPNOTSUPP;
10810             }
10811             /* Setting FRE without FR is not supported.  */
10812             if (new_fre && !new_fr) {
10813                 return -TARGET_EOPNOTSUPP;
10814             }
10815             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10816                 /* FR1 is not supported */
10817                 return -TARGET_EOPNOTSUPP;
10818             }
10819             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10820                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10821                 /* cannot set FR=0 */
10822                 return -TARGET_EOPNOTSUPP;
10823             }
10824             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10825                 /* Cannot set FRE=1 */
10826                 return -TARGET_EOPNOTSUPP;
10827             }
10828 
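            /*
             * Repack the FP registers: when FR changes, the odd-numbered
             * single-precision values move between the low word of
             * fpr[i + 1] (the FR=0 layout) and the high word of fpr[i]
             * (the FR=1 layout).
             */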
10829             int i;
10830             fpr_t *fpr = env->active_fpu.fpr;
10831             for (i = 0; i < 32 ; i += 2) {
10832                 if (!old_fr && new_fr) {
10833                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10834                 } else if (old_fr && !new_fr) {
10835                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10836                 }
10837             }
10838 
10839             if (new_fr) {
10840                 env->CP0_Status |= (1 << CP0St_FR);
10841                 env->hflags |= MIPS_HFLAG_F64;
10842             } else {
10843                 env->CP0_Status &= ~(1 << CP0St_FR);
10844                 env->hflags &= ~MIPS_HFLAG_F64;
10845             }
10846             if (new_fre) {
10847                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10848                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10849                     env->hflags |= MIPS_HFLAG_FRE;
10850                 }
10851             } else {
10852                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10853                 env->hflags &= ~MIPS_HFLAG_FRE;
10854             }
10855 
10856             return 0;
10857         }
10858 #endif /* MIPS */
10859 #ifdef TARGET_AARCH64
10860         case TARGET_PR_SVE_SET_VL:
10861             /*
10862              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10863              * PR_SVE_VL_INHERIT.  Note the kernel definition
10864              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10865              * even though the current architectural maximum is VQ=16.
10866              */
10867             ret = -TARGET_EINVAL;
10868             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10869                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10870                 CPUARMState *env = cpu_env;
10871                 ARMCPU *cpu = env_archcpu(env);
10872                 uint32_t vq, old_vq;
10873 
10874                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
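                /*
                 * arg2 is the requested vector length in bytes; convert it
                 * to quadwords and clamp it to what this CPU supports.
                 */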
10875                 vq = MAX(arg2 / 16, 1);
10876                 vq = MIN(vq, cpu->sve_max_vq);
10877 
10878                 if (vq < old_vq) {
10879                     aarch64_sve_narrow_vq(env, vq);
10880                 }
10881                 env->vfp.zcr_el[1] = vq - 1;
10882                 arm_rebuild_hflags(env);
10883                 ret = vq * 16;
10884             }
10885             return ret;
10886         case TARGET_PR_SVE_GET_VL:
10887             ret = -TARGET_EINVAL;
10888             {
10889                 ARMCPU *cpu = env_archcpu(cpu_env);
10890                 if (cpu_isar_feature(aa64_sve, cpu)) {
10891                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10892                 }
10893             }
10894             return ret;
10895         case TARGET_PR_PAC_RESET_KEYS:
10896             {
10897                 CPUARMState *env = cpu_env;
10898                 ARMCPU *cpu = env_archcpu(env);
10899 
10900                 if (arg3 || arg4 || arg5) {
10901                     return -TARGET_EINVAL;
10902                 }
10903                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10904                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10905                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10906                                TARGET_PR_PAC_APGAKEY);
10907                     int ret = 0;
10908                     Error *err = NULL;
10909 
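                    /* arg2 == 0 means reset all of the keys. */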
10910                     if (arg2 == 0) {
10911                         arg2 = all;
10912                     } else if (arg2 & ~all) {
10913                         return -TARGET_EINVAL;
10914                     }
10915                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10916                         ret |= qemu_guest_getrandom(&env->keys.apia,
10917                                                     sizeof(ARMPACKey), &err);
10918                     }
10919                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10920                         ret |= qemu_guest_getrandom(&env->keys.apib,
10921                                                     sizeof(ARMPACKey), &err);
10922                     }
10923                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10924                         ret |= qemu_guest_getrandom(&env->keys.apda,
10925                                                     sizeof(ARMPACKey), &err);
10926                     }
10927                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10928                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10929                                                     sizeof(ARMPACKey), &err);
10930                     }
10931                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10932                         ret |= qemu_guest_getrandom(&env->keys.apga,
10933                                                     sizeof(ARMPACKey), &err);
10934                     }
10935                     if (ret != 0) {
10936                         /*
10937                          * Some unknown failure in the crypto.  The best
10938                          * we can do is log it and fail the syscall.
10939                          * The real syscall cannot fail this way.
10940                          */
10941                         qemu_log_mask(LOG_UNIMP,
10942                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10943                                       error_get_pretty(err));
10944                         error_free(err);
10945                         return -TARGET_EIO;
10946                     }
10947                     return 0;
10948                 }
10949             }
10950             return -TARGET_EINVAL;
10951 #endif /* AARCH64 */
10952         case PR_GET_SECCOMP:
10953         case PR_SET_SECCOMP:
            /*
             * Disable seccomp to prevent the target from disabling syscalls
             * that we need.
             */
10956             return -TARGET_EINVAL;
10957         default:
10958             /* Most prctl options have no pointer arguments */
10959             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10960         }
10961         break;
10962 #ifdef TARGET_NR_arch_prctl
10963     case TARGET_NR_arch_prctl:
10964         return do_arch_prctl(cpu_env, arg1, arg2);
10965 #endif
10966 #ifdef TARGET_NR_pread64
10967     case TARGET_NR_pread64:
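        /*
         * On targets that pass 64-bit syscall arguments in aligned register
         * pairs, the offset pair starts one argument later.
         */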
10968         if (regpairs_aligned(cpu_env, num)) {
10969             arg4 = arg5;
10970             arg5 = arg6;
10971         }
10972         if (arg2 == 0 && arg3 == 0) {
10973             /* Special-case NULL buffer and zero length, which should succeed */
10974             p = 0;
10975         } else {
10976             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10977             if (!p) {
10978                 return -TARGET_EFAULT;
10979             }
10980         }
10981         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10982         unlock_user(p, arg2, ret);
10983         return ret;
10984     case TARGET_NR_pwrite64:
10985         if (regpairs_aligned(cpu_env, num)) {
10986             arg4 = arg5;
10987             arg5 = arg6;
10988         }
10989         if (arg2 == 0 && arg3 == 0) {
10990             /* Special-case NULL buffer and zero length, which should succeed */
10991             p = 0;
10992         } else {
10993             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10994             if (!p) {
10995                 return -TARGET_EFAULT;
10996             }
10997         }
10998         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10999         unlock_user(p, arg2, 0);
11000         return ret;
11001 #endif
11002     case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) {
            return -TARGET_EFAULT;
        }
11005         ret = get_errno(sys_getcwd1(p, arg2));
11006         unlock_user(p, arg1, ret);
11007         return ret;
11008     case TARGET_NR_capget:
11009     case TARGET_NR_capset:
11010     {
11011         struct target_user_cap_header *target_header;
11012         struct target_user_cap_data *target_data = NULL;
11013         struct __user_cap_header_struct header;
11014         struct __user_cap_data_struct data[2];
11015         struct __user_cap_data_struct *dataptr = NULL;
11016         int i, target_datalen;
11017         int data_items = 1;
11018 
11019         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11020             return -TARGET_EFAULT;
11021         }
11022         header.version = tswap32(target_header->version);
11023         header.pid = tswap32(target_header->pid);
11024 
11025         if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Versions 2 and up take a pointer to two user_data structs. */
11027             data_items = 2;
11028         }
11029 
11030         target_datalen = sizeof(*target_data) * data_items;
11031 
11032         if (arg2) {
11033             if (num == TARGET_NR_capget) {
11034                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11035             } else {
11036                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11037             }
11038             if (!target_data) {
11039                 unlock_user_struct(target_header, arg1, 0);
11040                 return -TARGET_EFAULT;
11041             }
11042 
11043             if (num == TARGET_NR_capset) {
11044                 for (i = 0; i < data_items; i++) {
11045                     data[i].effective = tswap32(target_data[i].effective);
11046                     data[i].permitted = tswap32(target_data[i].permitted);
11047                     data[i].inheritable = tswap32(target_data[i].inheritable);
11048                 }
11049             }
11050 
11051             dataptr = data;
11052         }
11053 
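        /*
         * dataptr stays NULL when the guest passed no data buffer; the
         * kernel accepts this (e.g. capget can be used just to probe the
         * preferred version).
         */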
11054         if (num == TARGET_NR_capget) {
11055             ret = get_errno(capget(&header, dataptr));
11056         } else {
11057             ret = get_errno(capset(&header, dataptr));
11058         }
11059 
11060         /* The kernel always updates version for both capget and capset */
11061         target_header->version = tswap32(header.version);
11062         unlock_user_struct(target_header, arg1, 1);
11063 
11064         if (arg2) {
11065             if (num == TARGET_NR_capget) {
11066                 for (i = 0; i < data_items; i++) {
11067                     target_data[i].effective = tswap32(data[i].effective);
11068                     target_data[i].permitted = tswap32(data[i].permitted);
11069                     target_data[i].inheritable = tswap32(data[i].inheritable);
11070                 }
11071                 unlock_user(target_data, arg2, target_datalen);
11072             } else {
11073                 unlock_user(target_data, arg2, 0);
11074             }
11075         }
11076         return ret;
11077     }
11078     case TARGET_NR_sigaltstack:
11079         return do_sigaltstack(arg1, arg2,
11080                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11081 
11082 #ifdef CONFIG_SENDFILE
11083 #ifdef TARGET_NR_sendfile
11084     case TARGET_NR_sendfile:
11085     {
11086         off_t *offp = NULL;
11087         off_t off;
11088         if (arg3) {
11089             ret = get_user_sal(off, arg3);
11090             if (is_error(ret)) {
11091                 return ret;
11092             }
11093             offp = &off;
11094         }
11095         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11096         if (!is_error(ret) && arg3) {
11097             abi_long ret2 = put_user_sal(off, arg3);
11098             if (is_error(ret2)) {
11099                 ret = ret2;
11100             }
11101         }
11102         return ret;
11103     }
11104 #endif
11105 #ifdef TARGET_NR_sendfile64
11106     case TARGET_NR_sendfile64:
11107     {
11108         off_t *offp = NULL;
11109         off_t off;
11110         if (arg3) {
11111             ret = get_user_s64(off, arg3);
11112             if (is_error(ret)) {
11113                 return ret;
11114             }
11115             offp = &off;
11116         }
11117         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11118         if (!is_error(ret) && arg3) {
11119             abi_long ret2 = put_user_s64(off, arg3);
11120             if (is_error(ret2)) {
11121                 ret = ret2;
11122             }
11123         }
11124         return ret;
11125     }
11126 #endif
11127 #endif
11128 #ifdef TARGET_NR_vfork
11129     case TARGET_NR_vfork:
11130         return get_errno(do_fork(cpu_env,
11131                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11132                          0, 0, 0, 0));
11133 #endif
11134 #ifdef TARGET_NR_ugetrlimit
11135     case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
11150 #endif
11151 #ifdef TARGET_NR_truncate64
11152     case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11156         unlock_user(p, arg1, 0);
11157         return ret;
11158 #endif
11159 #ifdef TARGET_NR_ftruncate64
11160     case TARGET_NR_ftruncate64:
11161         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11162 #endif
11163 #ifdef TARGET_NR_stat64
11164     case TARGET_NR_stat64:
11165         if (!(p = lock_user_string(arg1))) {
11166             return -TARGET_EFAULT;
11167         }
11168         ret = get_errno(stat(path(p), &st));
11169         unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
11172         return ret;
11173 #endif
11174 #ifdef TARGET_NR_lstat64
11175     case TARGET_NR_lstat64:
11176         if (!(p = lock_user_string(arg1))) {
11177             return -TARGET_EFAULT;
11178         }
11179         ret = get_errno(lstat(path(p), &st));
11180         unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
11183         return ret;
11184 #endif
11185 #ifdef TARGET_NR_fstat64
11186     case TARGET_NR_fstat64:
11187         ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
11190         return ret;
11191 #endif
11192 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11193 #ifdef TARGET_NR_fstatat64
11194     case TARGET_NR_fstatat64:
11195 #endif
11196 #ifdef TARGET_NR_newfstatat
11197     case TARGET_NR_newfstatat:
11198 #endif
11199         if (!(p = lock_user_string(arg2))) {
11200             return -TARGET_EFAULT;
11201         }
11202         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11203         unlock_user(p, arg2, 0);
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        }
11206         return ret;
11207 #endif
11208 #if defined(TARGET_NR_statx)
11209     case TARGET_NR_statx:
11210         {
11211             struct target_statx *target_stx;
11212             int dirfd = arg1;
11213             int flags = arg3;
11214 
11215             p = lock_user_string(arg2);
11216             if (p == NULL) {
11217                 return -TARGET_EFAULT;
11218             }
11219 #if defined(__NR_statx)
11220             {
11221                 /*
11222                  * It is assumed that struct statx is architecture independent.
11223                  */
11224                 struct target_statx host_stx;
11225                 int mask = arg4;
11226 
11227                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11228                 if (!is_error(ret)) {
11229                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11230                         unlock_user(p, arg2, 0);
11231                         return -TARGET_EFAULT;
11232                     }
11233                 }
11234 
11235                 if (ret != -TARGET_ENOSYS) {
11236                     unlock_user(p, arg2, 0);
11237                     return ret;
11238                 }
11239             }
11240 #endif
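            /*
             * Either the host lacks statx() or it returned ENOSYS; fall
             * back to fstatat() and fill in the fields we can.
             */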
11241             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11242             unlock_user(p, arg2, 0);
11243 
11244             if (!is_error(ret)) {
11245                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11246                     return -TARGET_EFAULT;
11247                 }
11248                 memset(target_stx, 0, sizeof(*target_stx));
11249                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11250                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11251                 __put_user(st.st_ino, &target_stx->stx_ino);
11252                 __put_user(st.st_mode, &target_stx->stx_mode);
11253                 __put_user(st.st_uid, &target_stx->stx_uid);
11254                 __put_user(st.st_gid, &target_stx->stx_gid);
11255                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11256                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11257                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11258                 __put_user(st.st_size, &target_stx->stx_size);
11259                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11260                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11261                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11262                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11263                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11264                 unlock_user_struct(target_stx, arg5, 1);
11265             }
11266         }
11267         return ret;
11268 #endif
11269 #ifdef TARGET_NR_lchown
11270     case TARGET_NR_lchown:
11271         if (!(p = lock_user_string(arg1)))
11272             return -TARGET_EFAULT;
11273         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11274         unlock_user(p, arg1, 0);
11275         return ret;
11276 #endif
11277 #ifdef TARGET_NR_getuid
11278     case TARGET_NR_getuid:
11279         return get_errno(high2lowuid(getuid()));
11280 #endif
11281 #ifdef TARGET_NR_getgid
11282     case TARGET_NR_getgid:
11283         return get_errno(high2lowgid(getgid()));
11284 #endif
11285 #ifdef TARGET_NR_geteuid
11286     case TARGET_NR_geteuid:
11287         return get_errno(high2lowuid(geteuid()));
11288 #endif
11289 #ifdef TARGET_NR_getegid
11290     case TARGET_NR_getegid:
11291         return get_errno(high2lowgid(getegid()));
11292 #endif
11293     case TARGET_NR_setreuid:
11294         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11295     case TARGET_NR_setregid:
11296         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11297     case TARGET_NR_getgroups:
11298         {
11299             int gidsetsize = arg1;
11300             target_id *target_grouplist;
11301             gid_t *grouplist;
11302             int i;
11303 
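            /* A zero gidsetsize just queries the number of groups. */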
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
11315             }
11316         }
11317         return ret;
11318     case TARGET_NR_setgroups:
11319         {
11320             int gidsetsize = arg1;
11321             target_id *target_grouplist;
11322             gid_t *grouplist = NULL;
11323             int i;
11324             if (gidsetsize) {
11325                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11326                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11327                 if (!target_grouplist) {
11328                     return -TARGET_EFAULT;
11329                 }
11330                 for (i = 0; i < gidsetsize; i++) {
11331                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11332                 }
11333                 unlock_user(target_grouplist, arg2, 0);
11334             }
11335             return get_errno(setgroups(gidsetsize, grouplist));
11336         }
11337     case TARGET_NR_fchown:
11338         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11339 #if defined(TARGET_NR_fchownat)
11340     case TARGET_NR_fchownat:
11341         if (!(p = lock_user_string(arg2)))
11342             return -TARGET_EFAULT;
11343         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11344                                  low2highgid(arg4), arg5));
11345         unlock_user(p, arg2, 0);
11346         return ret;
11347 #endif
11348 #ifdef TARGET_NR_setresuid
11349     case TARGET_NR_setresuid:
11350         return get_errno(sys_setresuid(low2highuid(arg1),
11351                                        low2highuid(arg2),
11352                                        low2highuid(arg3)));
11353 #endif
11354 #ifdef TARGET_NR_getresuid
11355     case TARGET_NR_getresuid:
11356         {
11357             uid_t ruid, euid, suid;
11358             ret = get_errno(getresuid(&ruid, &euid, &suid));
11359             if (!is_error(ret)) {
11360                 if (put_user_id(high2lowuid(ruid), arg1)
11361                     || put_user_id(high2lowuid(euid), arg2)
11362                     || put_user_id(high2lowuid(suid), arg3))
11363                     return -TARGET_EFAULT;
11364             }
11365         }
11366         return ret;
11367 #endif
11368 #ifdef TARGET_NR_getresgid
11369     case TARGET_NR_setresgid:
11370         return get_errno(sys_setresgid(low2highgid(arg1),
11371                                        low2highgid(arg2),
11372                                        low2highgid(arg3)));
11373 #endif
11374 #ifdef TARGET_NR_getresgid
11375     case TARGET_NR_getresgid:
11376         {
11377             gid_t rgid, egid, sgid;
11378             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11379             if (!is_error(ret)) {
11380                 if (put_user_id(high2lowgid(rgid), arg1)
11381                     || put_user_id(high2lowgid(egid), arg2)
11382                     || put_user_id(high2lowgid(sgid), arg3))
11383                     return -TARGET_EFAULT;
11384             }
11385         }
11386         return ret;
11387 #endif
11388 #ifdef TARGET_NR_chown
11389     case TARGET_NR_chown:
11390         if (!(p = lock_user_string(arg1)))
11391             return -TARGET_EFAULT;
11392         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11393         unlock_user(p, arg1, 0);
11394         return ret;
11395 #endif
11396     case TARGET_NR_setuid:
11397         return get_errno(sys_setuid(low2highuid(arg1)));
11398     case TARGET_NR_setgid:
11399         return get_errno(sys_setgid(low2highgid(arg1)));
11400     case TARGET_NR_setfsuid:
11401         return get_errno(setfsuid(arg1));
11402     case TARGET_NR_setfsgid:
11403         return get_errno(setfsgid(arg1));
11404 
11405 #ifdef TARGET_NR_lchown32
11406     case TARGET_NR_lchown32:
11407         if (!(p = lock_user_string(arg1)))
11408             return -TARGET_EFAULT;
11409         ret = get_errno(lchown(p, arg2, arg3));
11410         unlock_user(p, arg1, 0);
11411         return ret;
11412 #endif
11413 #ifdef TARGET_NR_getuid32
11414     case TARGET_NR_getuid32:
11415         return get_errno(getuid());
11416 #endif
11417 
11418 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
11426         return get_errno(getuid());
11427 #endif
11428 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
11436         return get_errno(getgid());
11437 #endif
11438 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11439     /* Alpha specific */
11440     case TARGET_NR_osf_getsysinfo:
11441         ret = -TARGET_EOPNOTSUPP;
11442         switch (arg1) {
11443           case TARGET_GSI_IEEE_FP_CONTROL:
11444             {
11445                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11446                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11447 
11448                 swcr &= ~SWCR_STATUS_MASK;
11449                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11450 
                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
11453                 ret = 0;
11454             }
11455             break;
11456 
11457           /* case GSI_IEEE_STATE_AT_SIGNAL:
11458              -- Not implemented in linux kernel.
11459              case GSI_UACPROC:
11460              -- Retrieves current unaligned access state; not much used.
11461              case GSI_PROC_TYPE:
11462              -- Retrieves implver information; surely not used.
11463              case GSI_GET_HWRPB:
11464              -- Grabs a copy of the HWRPB; surely not used.
11465           */
11466         }
11467         return ret;
11468 #endif
11469 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11470     /* Alpha specific */
11471     case TARGET_NR_osf_setsysinfo:
11472         ret = -TARGET_EOPNOTSUPP;
11473         switch (arg1) {
11474           case TARGET_SSI_IEEE_FP_CONTROL:
11475             {
11476                 uint64_t swcr, fpcr;
11477 
11478                 if (get_user_u64 (swcr, arg2)) {
11479                     return -TARGET_EFAULT;
11480                 }
11481 
11482                 /*
11483                  * The kernel calls swcr_update_status to update the
11484                  * status bits from the fpcr at every point that it
11485                  * could be queried.  Therefore, we store the status
11486                  * bits only in FPCR.
11487                  */
11488                 ((CPUAlphaState *)cpu_env)->swcr
11489                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11490 
11491                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11492                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11493                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11494                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11495                 ret = 0;
11496             }
11497             break;
11498 
11499           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11500             {
11501                 uint64_t exc, fpcr, fex;
11502 
11503                 if (get_user_u64(exc, arg2)) {
11504                     return -TARGET_EFAULT;
11505                 }
11506                 exc &= SWCR_STATUS_MASK;
11507                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11508 
11509                 /* Old exceptions are not signaled.  */
11510                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11511                 fex = exc & ~fex;
11512                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11513                 fex &= ((CPUArchState *)cpu_env)->swcr;
11514 
11515                 /* Update the hardware fpcr.  */
11516                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11517                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11518 
11519                 if (fex) {
11520                     int si_code = TARGET_FPE_FLTUNK;
11521                     target_siginfo_t info;
11522 
11523                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11524                         si_code = TARGET_FPE_FLTUND;
11525                     }
11526                     if (fex & SWCR_TRAP_ENABLE_INE) {
11527                         si_code = TARGET_FPE_FLTRES;
11528                     }
11529                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11530                         si_code = TARGET_FPE_FLTUND;
11531                     }
11532                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11533                         si_code = TARGET_FPE_FLTOVF;
11534                     }
11535                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11536                         si_code = TARGET_FPE_FLTDIV;
11537                     }
11538                     if (fex & SWCR_TRAP_ENABLE_INV) {
11539                         si_code = TARGET_FPE_FLTINV;
11540                     }
11541 
11542                     info.si_signo = SIGFPE;
11543                     info.si_errno = 0;
11544                     info.si_code = si_code;
11545                     info._sifields._sigfault._addr
11546                         = ((CPUArchState *)cpu_env)->pc;
11547                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11548                                  QEMU_SI_FAULT, &info);
11549                 }
11550                 ret = 0;
11551             }
11552             break;
11553 
11554           /* case SSI_NVPAIRS:
11555              -- Used with SSIN_UACPROC to enable unaligned accesses.
11556              case SSI_IEEE_STATE_AT_SIGNAL:
11557              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11558              -- Not implemented in linux kernel
11559           */
11560         }
11561         return ret;
11562 #endif
11563 #ifdef TARGET_NR_osf_sigprocmask
11564     /* Alpha specific.  */
11565     case TARGET_NR_osf_sigprocmask:
11566         {
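            /*
             * The old OSF sigprocmask takes the mask directly in a register
             * and returns the previous mask as the syscall result.
             */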
11567             abi_ulong mask;
11568             int how;
11569             sigset_t set, oldset;
11570 
            switch (arg1) {
11572             case TARGET_SIG_BLOCK:
11573                 how = SIG_BLOCK;
11574                 break;
11575             case TARGET_SIG_UNBLOCK:
11576                 how = SIG_UNBLOCK;
11577                 break;
11578             case TARGET_SIG_SETMASK:
11579                 how = SIG_SETMASK;
11580                 break;
11581             default:
11582                 return -TARGET_EINVAL;
11583             }
11584             mask = arg2;
11585             target_to_host_old_sigset(&set, &mask);
11586             ret = do_sigprocmask(how, &set, &oldset);
11587             if (!ret) {
11588                 host_to_target_old_sigset(&mask, &oldset);
11589                 ret = mask;
11590             }
11591         }
11592         return ret;
11593 #endif
11594 
11595 #ifdef TARGET_NR_getgid32
11596     case TARGET_NR_getgid32:
11597         return get_errno(getgid());
11598 #endif
11599 #ifdef TARGET_NR_geteuid32
11600     case TARGET_NR_geteuid32:
11601         return get_errno(geteuid());
11602 #endif
11603 #ifdef TARGET_NR_getegid32
11604     case TARGET_NR_getegid32:
11605         return get_errno(getegid());
11606 #endif
11607 #ifdef TARGET_NR_setreuid32
11608     case TARGET_NR_setreuid32:
11609         return get_errno(setreuid(arg1, arg2));
11610 #endif
11611 #ifdef TARGET_NR_setregid32
11612     case TARGET_NR_setregid32:
11613         return get_errno(setregid(arg1, arg2));
11614 #endif
11615 #ifdef TARGET_NR_getgroups32
11616     case TARGET_NR_getgroups32:
11617         {
11618             int gidsetsize = arg1;
11619             uint32_t *target_grouplist;
11620             gid_t *grouplist;
11621             int i;
11622 
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
11635             }
11636         }
11637         return ret;
11638 #endif
11639 #ifdef TARGET_NR_setgroups32
11640     case TARGET_NR_setgroups32:
11641         {
11642             int gidsetsize = arg1;
11643             uint32_t *target_grouplist;
11644             gid_t *grouplist;
11645             int i;
11646 
11647             grouplist = alloca(gidsetsize * sizeof(gid_t));
11648             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11649             if (!target_grouplist) {
11650                 return -TARGET_EFAULT;
11651             }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
11654             unlock_user(target_grouplist, arg2, 0);
11655             return get_errno(setgroups(gidsetsize, grouplist));
11656         }
11657 #endif
11658 #ifdef TARGET_NR_fchown32
11659     case TARGET_NR_fchown32:
11660         return get_errno(fchown(arg1, arg2, arg3));
11661 #endif
11662 #ifdef TARGET_NR_setresuid32
11663     case TARGET_NR_setresuid32:
11664         return get_errno(sys_setresuid(arg1, arg2, arg3));
11665 #endif
11666 #ifdef TARGET_NR_getresuid32
11667     case TARGET_NR_getresuid32:
11668         {
11669             uid_t ruid, euid, suid;
11670             ret = get_errno(getresuid(&ruid, &euid, &suid));
11671             if (!is_error(ret)) {
11672                 if (put_user_u32(ruid, arg1)
11673                     || put_user_u32(euid, arg2)
11674                     || put_user_u32(suid, arg3))
11675                     return -TARGET_EFAULT;
11676             }
11677         }
11678         return ret;
11679 #endif
11680 #ifdef TARGET_NR_setresgid32
11681     case TARGET_NR_setresgid32:
11682         return get_errno(sys_setresgid(arg1, arg2, arg3));
11683 #endif
11684 #ifdef TARGET_NR_getresgid32
11685     case TARGET_NR_getresgid32:
11686         {
11687             gid_t rgid, egid, sgid;
11688             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11689             if (!is_error(ret)) {
11690                 if (put_user_u32(rgid, arg1)
11691                     || put_user_u32(egid, arg2)
11692                     || put_user_u32(sgid, arg3))
11693                     return -TARGET_EFAULT;
11694             }
11695         }
11696         return ret;
11697 #endif
11698 #ifdef TARGET_NR_chown32
11699     case TARGET_NR_chown32:
11700         if (!(p = lock_user_string(arg1)))
11701             return -TARGET_EFAULT;
11702         ret = get_errno(chown(p, arg2, arg3));
11703         unlock_user(p, arg1, 0);
11704         return ret;
11705 #endif
11706 #ifdef TARGET_NR_setuid32
11707     case TARGET_NR_setuid32:
11708         return get_errno(sys_setuid(arg1));
11709 #endif
11710 #ifdef TARGET_NR_setgid32
11711     case TARGET_NR_setgid32:
11712         return get_errno(sys_setgid(arg1));
11713 #endif
11714 #ifdef TARGET_NR_setfsuid32
11715     case TARGET_NR_setfsuid32:
11716         return get_errno(setfsuid(arg1));
11717 #endif
11718 #ifdef TARGET_NR_setfsgid32
11719     case TARGET_NR_setfsgid32:
11720         return get_errno(setfsgid(arg1));
11721 #endif
11722 #ifdef TARGET_NR_mincore
11723     case TARGET_NR_mincore:
11724         {
11725             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11726             if (!a) {
11727                 return -TARGET_ENOMEM;
11728             }
11729             p = lock_user_string(arg3);
11730             if (!p) {
11731                 ret = -TARGET_EFAULT;
11732             } else {
11733                 ret = get_errno(mincore(a, arg2, p));
11734                 unlock_user(p, arg3, ret);
11735             }
11736             unlock_user(a, arg1, 0);
11737         }
11738         return ret;
11739 #endif
11740 #ifdef TARGET_NR_arm_fadvise64_64
11741     case TARGET_NR_arm_fadvise64_64:
        /*
         * arm_fadvise64_64 looks like fadvise64_64, but with a different
         * argument order: fd, advice, offset, len rather than the usual
         * fd, offset, len, advice.  Note that offset and len are both
         * 64-bit, so they appear as pairs of 32-bit registers.
         */
11748         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11749                             target_offset64(arg5, arg6), arg2);
11750         return -host_to_target_errno(ret);
11751 #endif
11752 
11753 #if TARGET_ABI_BITS == 32
11754 
11755 #ifdef TARGET_NR_fadvise64_64
11756     case TARGET_NR_fadvise64_64:
11757 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11758         /* 6 args: fd, advice, offset (high, low), len (high, low) */
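        /*
         * Rotate the advice argument from position 2 to position 6 so the
         * code below sees the usual fd, offset, len, advice order.
         */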
11759         ret = arg2;
11760         arg2 = arg3;
11761         arg3 = arg4;
11762         arg4 = arg5;
11763         arg5 = arg6;
11764         arg6 = ret;
11765 #else
11766         /* 6 args: fd, offset (high, low), len (high, low), advice */
11767         if (regpairs_aligned(cpu_env, num)) {
11768             /* offset is in (3,4), len in (5,6) and advice in 7 */
11769             arg2 = arg3;
11770             arg3 = arg4;
11771             arg4 = arg5;
11772             arg5 = arg6;
11773             arg6 = arg7;
11774         }
11775 #endif
11776         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11777                             target_offset64(arg4, arg5), arg6);
11778         return -host_to_target_errno(ret);
11779 #endif
11780 
11781 #ifdef TARGET_NR_fadvise64
11782     case TARGET_NR_fadvise64:
11783         /* 5 args: fd, offset (high, low), len, advice */
11784         if (regpairs_aligned(cpu_env, num)) {
11785             /* offset is in (3,4), len in 5 and advice in 6 */
11786             arg2 = arg3;
11787             arg3 = arg4;
11788             arg4 = arg5;
11789             arg5 = arg6;
11790         }
11791         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11792         return -host_to_target_errno(ret);
11793 #endif
11794 
11795 #else /* not a 32-bit ABI */
11796 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11797 #ifdef TARGET_NR_fadvise64_64
11798     case TARGET_NR_fadvise64_64:
11799 #endif
11800 #ifdef TARGET_NR_fadvise64
11801     case TARGET_NR_fadvise64:
11802 #endif
11803 #ifdef TARGET_S390X
11804         switch (arg4) {
11805         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11806         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11807         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11808         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11809         default: break;
11810         }
11811 #endif
11812         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11813 #endif
11814 #endif /* end of 64-bit ABI fadvise handling */
11815 
11816 #ifdef TARGET_NR_madvise
11817     case TARGET_NR_madvise:
        /*
         * A straight passthrough may not be safe because QEMU sometimes
         * turns private file-backed mappings into anonymous mappings;
         * that would break MADV_DONTNEED.  Since madvise is only a hint,
         * ignoring it and returning success is ok.
         */
11822         return 0;
11823 #endif
11824 #ifdef TARGET_NR_fcntl64
11825     case TARGET_NR_fcntl64:
11826     {
11827         int cmd;
11828         struct flock64 fl;
11829         from_flock64_fn *copyfrom = copy_from_user_flock64;
11830         to_flock64_fn *copyto = copy_to_user_flock64;
11831 
11832 #ifdef TARGET_ARM
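        /*
         * The ARM OABI lays out struct flock64 without the extra alignment
         * padding that the EABI requires, so old-ABI guests need the OABI
         * copy helpers.
         */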
11833         if (!((CPUARMState *)cpu_env)->eabi) {
11834             copyfrom = copy_from_user_oabi_flock64;
11835             copyto = copy_to_user_oabi_flock64;
11836         }
11837 #endif
11838 
11839         cmd = target_to_host_fcntl_cmd(arg2);
11840         if (cmd == -TARGET_EINVAL) {
11841             return cmd;
11842         }
11843 
        switch (arg2) {
11845         case TARGET_F_GETLK64:
11846             ret = copyfrom(&fl, arg3);
11847             if (ret) {
11848                 break;
11849             }
11850             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11851             if (ret == 0) {
11852                 ret = copyto(arg3, &fl);
11853             }
            break;
11855 
11856         case TARGET_F_SETLK64:
11857         case TARGET_F_SETLKW64:
11858             ret = copyfrom(&fl, arg3);
11859             if (ret) {
11860                 break;
11861             }
11862             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
11864         default:
11865             ret = do_fcntl(arg1, arg2, arg3);
11866             break;
11867         }
11868         return ret;
11869     }
11870 #endif
11871 #ifdef TARGET_NR_cacheflush
11872     case TARGET_NR_cacheflush:
11873         /* self-modifying code is handled automatically, so nothing needed */
11874         return 0;
11875 #endif
11876 #ifdef TARGET_NR_getpagesize
11877     case TARGET_NR_getpagesize:
11878         return TARGET_PAGE_SIZE;
11879 #endif
11880     case TARGET_NR_gettid:
11881         return get_errno(sys_gettid());
11882 #ifdef TARGET_NR_readahead
11883     case TARGET_NR_readahead:
11884 #if TARGET_ABI_BITS == 32
11885         if (regpairs_aligned(cpu_env, num)) {
11886             arg2 = arg3;
11887             arg3 = arg4;
11888             arg4 = arg5;
11889         }
11890         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11891 #else
11892         ret = get_errno(readahead(arg1, arg2, arg3));
11893 #endif
11894         return ret;
11895 #endif
11896 #ifdef CONFIG_ATTR
11897 #ifdef TARGET_NR_setxattr
11898     case TARGET_NR_listxattr:
11899     case TARGET_NR_llistxattr:
11900     {
11901         void *p, *b = 0;
11902         if (arg2) {
11903             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11904             if (!b) {
11905                 return -TARGET_EFAULT;
11906             }
11907         }
11908         p = lock_user_string(arg1);
11909         if (p) {
11910             if (num == TARGET_NR_listxattr) {
11911                 ret = get_errno(listxattr(p, b, arg3));
11912             } else {
11913                 ret = get_errno(llistxattr(p, b, arg3));
11914             }
11915         } else {
11916             ret = -TARGET_EFAULT;
11917         }
11918         unlock_user(p, arg1, 0);
11919         unlock_user(b, arg2, arg3);
11920         return ret;
11921     }
11922     case TARGET_NR_flistxattr:
11923     {
11924         void *b = 0;
11925         if (arg2) {
11926             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11927             if (!b) {
11928                 return -TARGET_EFAULT;
11929             }
11930         }
11931         ret = get_errno(flistxattr(arg1, b, arg3));
11932         unlock_user(b, arg2, arg3);
11933         return ret;
11934     }
11935     case TARGET_NR_setxattr:
11936     case TARGET_NR_lsetxattr:
11937         {
11938             void *p, *n, *v = 0;
11939             if (arg3) {
11940                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11941                 if (!v) {
11942                     return -TARGET_EFAULT;
11943                 }
11944             }
11945             p = lock_user_string(arg1);
11946             n = lock_user_string(arg2);
11947             if (p && n) {
11948                 if (num == TARGET_NR_setxattr) {
11949                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11950                 } else {
11951                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11952                 }
11953             } else {
11954                 ret = -TARGET_EFAULT;
11955             }
11956             unlock_user(p, arg1, 0);
11957             unlock_user(n, arg2, 0);
11958             unlock_user(v, arg3, 0);
11959         }
11960         return ret;
11961     case TARGET_NR_fsetxattr:
11962         {
11963             void *n, *v = 0;
11964             if (arg3) {
11965                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11966                 if (!v) {
11967                     return -TARGET_EFAULT;
11968                 }
11969             }
11970             n = lock_user_string(arg2);
11971             if (n) {
11972                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11973             } else {
11974                 ret = -TARGET_EFAULT;
11975             }
11976             unlock_user(n, arg2, 0);
11977             unlock_user(v, arg3, 0);
11978         }
11979         return ret;
11980     case TARGET_NR_getxattr:
11981     case TARGET_NR_lgetxattr:
11982         {
11983             void *p, *n, *v = 0;
11984             if (arg3) {
11985                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11986                 if (!v) {
11987                     return -TARGET_EFAULT;
11988                 }
11989             }
11990             p = lock_user_string(arg1);
11991             n = lock_user_string(arg2);
11992             if (p && n) {
11993                 if (num == TARGET_NR_getxattr) {
11994                     ret = get_errno(getxattr(p, n, v, arg4));
11995                 } else {
11996                     ret = get_errno(lgetxattr(p, n, v, arg4));
11997                 }
11998             } else {
11999                 ret = -TARGET_EFAULT;
12000             }
12001             unlock_user(p, arg1, 0);
12002             unlock_user(n, arg2, 0);
12003             unlock_user(v, arg3, arg4);
12004         }
12005         return ret;
12006     case TARGET_NR_fgetxattr:
12007         {
12008             void *n, *v = 0;
12009             if (arg3) {
12010                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12011                 if (!v) {
12012                     return -TARGET_EFAULT;
12013                 }
12014             }
12015             n = lock_user_string(arg2);
12016             if (n) {
12017                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12018             } else {
12019                 ret = -TARGET_EFAULT;
12020             }
12021             unlock_user(n, arg2, 0);
12022             unlock_user(v, arg3, arg4);
12023         }
12024         return ret;
12025     case TARGET_NR_removexattr:
12026     case TARGET_NR_lremovexattr:
12027         {
12028             void *p, *n;
12029             p = lock_user_string(arg1);
12030             n = lock_user_string(arg2);
12031             if (p && n) {
12032                 if (num == TARGET_NR_removexattr) {
12033                     ret = get_errno(removexattr(p, n));
12034                 } else {
12035                     ret = get_errno(lremovexattr(p, n));
12036                 }
12037             } else {
12038                 ret = -TARGET_EFAULT;
12039             }
12040             unlock_user(p, arg1, 0);
12041             unlock_user(n, arg2, 0);
12042         }
12043         return ret;
12044     case TARGET_NR_fremovexattr:
12045         {
12046             void *n;
12047             n = lock_user_string(arg2);
12048             if (n) {
12049                 ret = get_errno(fremovexattr(arg1, n));
12050             } else {
12051                 ret = -TARGET_EFAULT;
12052             }
12053             unlock_user(n, arg2, 0);
12054         }
12055         return ret;
12056 #endif
12057 #endif /* CONFIG_ATTR */
12058 #ifdef TARGET_NR_set_thread_area
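          /*
           * There is no generic implementation of set_thread_area; each
           * target stores the new TLS pointer in its own per-arch location
           * (CP0 UserLocal on MIPS, PR_PID on CRIS, a TLS descriptor via
           * do_set_thread_area() on 32-bit x86, the TaskState on m68k).
           */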
12059     case TARGET_NR_set_thread_area:
12060 #if defined(TARGET_MIPS)
12061       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12062       return 0;
12063 #elif defined(TARGET_CRIS)
12064       if (arg1 & 0xff) {
12065           ret = -TARGET_EINVAL;
12066       } else {
12067           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12068           ret = 0;
12069       }
12070       return ret;
12071 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12072       return do_set_thread_area(cpu_env, arg1);
12073 #elif defined(TARGET_M68K)
12074       {
12075           TaskState *ts = cpu->opaque;
12076           ts->tp_value = arg1;
12077           return 0;
12078       }
12079 #else
12080       return -TARGET_ENOSYS;
12081 #endif
12082 #endif
12083 #ifdef TARGET_NR_get_thread_area
12084     case TARGET_NR_get_thread_area:
12085 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12086         return do_get_thread_area(cpu_env, arg1);
12087 #elif defined(TARGET_M68K)
12088         {
12089             TaskState *ts = cpu->opaque;
12090             return ts->tp_value;
12091         }
12092 #else
12093         return -TARGET_ENOSYS;
12094 #endif
12095 #endif
12096 #ifdef TARGET_NR_getdomainname
12097     case TARGET_NR_getdomainname:
12098         return -TARGET_ENOSYS;
12099 #endif
12100 
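          /*
           * The *_time64 variants of the clock_* syscalls below differ only
           * in converting the 64-bit target timespec layout, which lets
           * 32-bit guests pass times beyond 2038; the host call is the same.
           */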
12101 #ifdef TARGET_NR_clock_settime
12102     case TARGET_NR_clock_settime:
12103     {
12104         struct timespec ts;
12105 
12106         ret = target_to_host_timespec(&ts, arg2);
12107         if (!is_error(ret)) {
12108             ret = get_errno(clock_settime(arg1, &ts));
12109         }
12110         return ret;
12111     }
12112 #endif
12113 #ifdef TARGET_NR_clock_settime64
12114     case TARGET_NR_clock_settime64:
12115     {
12116         struct timespec ts;
12117 
12118         ret = target_to_host_timespec64(&ts, arg2);
12119         if (!is_error(ret)) {
12120             ret = get_errno(clock_settime(arg1, &ts));
12121         }
12122         return ret;
12123     }
12124 #endif
12125 #ifdef TARGET_NR_clock_gettime
12126     case TARGET_NR_clock_gettime:
12127     {
12128         struct timespec ts;
12129         ret = get_errno(clock_gettime(arg1, &ts));
12130         if (!is_error(ret)) {
12131             ret = host_to_target_timespec(arg2, &ts);
12132         }
12133         return ret;
12134     }
12135 #endif
12136 #ifdef TARGET_NR_clock_gettime64
12137     case TARGET_NR_clock_gettime64:
12138     {
12139         struct timespec ts;
12140         ret = get_errno(clock_gettime(arg1, &ts));
12141         if (!is_error(ret)) {
12142             ret = host_to_target_timespec64(arg2, &ts);
12143         }
12144         return ret;
12145     }
12146 #endif
12147 #ifdef TARGET_NR_clock_getres
12148     case TARGET_NR_clock_getres:
12149     {
12150         struct timespec ts;
12151         ret = get_errno(clock_getres(arg1, &ts));
12152         if (!is_error(ret)) {
12153             host_to_target_timespec(arg2, &ts);
12154         }
12155         return ret;
12156     }
12157 #endif
12158 #ifdef TARGET_NR_clock_getres_time64
12159     case TARGET_NR_clock_getres_time64:
12160     {
12161         struct timespec ts;
12162         ret = get_errno(clock_getres(arg1, &ts));
12163         if (!is_error(ret)) {
12164             host_to_target_timespec64(arg2, &ts);
12165         }
12166         return ret;
12167     }
12168 #endif
12169 #ifdef TARGET_NR_clock_nanosleep
12170     case TARGET_NR_clock_nanosleep:
12171     {
12172         struct timespec ts;
12173         if (target_to_host_timespec(&ts, arg3)) {
12174             return -TARGET_EFAULT;
12175         }
12176         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12177                                              &ts, arg4 ? &ts : NULL));
12178         /*
12179          * If the call is interrupted by a signal handler it fails with
12180          * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12181          * the remaining unslept time is written back to arg4.
12182          */
12183         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12184             host_to_target_timespec(arg4, &ts)) {
12185               return -TARGET_EFAULT;
12186         }
12187 
12188         return ret;
12189     }
12190 #endif
12191 #ifdef TARGET_NR_clock_nanosleep_time64
12192     case TARGET_NR_clock_nanosleep_time64:
12193     {
12194         struct timespec ts;
12195 
12196         if (target_to_host_timespec64(&ts, arg3)) {
12197             return -TARGET_EFAULT;
12198         }
12199 
12200         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12201                                              &ts, arg4 ? &ts : NULL));
12202 
12203         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12204             host_to_target_timespec64(arg4, &ts)) {
12205             return -TARGET_EFAULT;
12206         }
12207         return ret;
12208     }
12209 #endif
12210 
12211 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
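          /*
           * set_tid_address: hand the host kernel the guest address
           * (converted with g2h()) at which it should clear the child TID
           * and do a futex wakeup when this thread exits.
           */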
12212     case TARGET_NR_set_tid_address:
12213         return get_errno(set_tid_address((int *)g2h(arg1)));
12214 #endif
12215 
12216     case TARGET_NR_tkill:
12217         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12218 
12219     case TARGET_NR_tgkill:
12220         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12221                          target_to_host_signal(arg3)));
12222 
12223 #ifdef TARGET_NR_set_robust_list
12224     case TARGET_NR_set_robust_list:
12225     case TARGET_NR_get_robust_list:
12226         /* The ABI for supporting robust futexes has userspace pass
12227          * the kernel a pointer to a linked list which is updated by
12228          * userspace after the syscall; the list is walked by the kernel
12229          * when the thread exits. Since the linked list in QEMU guest
12230          * memory isn't a valid linked list for the host and we have
12231          * no way to reliably intercept the thread-death event, we can't
12232          * support these. Silently return ENOSYS so that guest userspace
12233          * falls back to a non-robust futex implementation (which should
12234          * be OK except in the corner case of the guest crashing while
12235          * holding a mutex that is shared with another process via
12236          * shared memory).
12237          */
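              /*
               * For reference, the user-space list head the host kernel would
               * walk (from <linux/futex.h>) looks roughly like:
               *     struct robust_list { struct robust_list *next; };
               *     struct robust_list_head {
               *         struct robust_list list;
               *         long futex_offset;
               *         struct robust_list *list_op_pending;
               *     };
               * Every pointer in it is a guest address, which is why the host
               * kernel cannot walk the guest's list for us.
               */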
12238         return -TARGET_ENOSYS;
12239 #endif
12240 
12241 #if defined(TARGET_NR_utimensat)
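          /*
           * utimensat: arg3, when non-zero, points to an array of two target
           * timespecs (access time, then modification time); a NULL array
           * means "set both to the current time".  A NULL pathname makes the
           * host call operate on the file referred to by the arg1 fd itself.
           */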
12242     case TARGET_NR_utimensat:
12243         {
12244             struct timespec *tsp, ts[2];
12245             if (!arg3) {
12246                 tsp = NULL;
12247             } else {
12248                 if (target_to_host_timespec(ts, arg3)) {
12249                     return -TARGET_EFAULT;
12250                 }
12251                 if (target_to_host_timespec(ts + 1, arg3 +
12252                                             sizeof(struct target_timespec))) {
12253                     return -TARGET_EFAULT;
12254                 }
12255                 tsp = ts;
12256             }
12257             if (!arg2) {
12258                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12259             } else {
12260                 if (!(p = lock_user_string(arg2))) {
12261                     return -TARGET_EFAULT;
12262                 }
12263                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12264                 unlock_user(p, arg2, 0);
12265             }
12266         }
12267         return ret;
12268 #endif
12269 #ifdef TARGET_NR_utimensat_time64
12270     case TARGET_NR_utimensat_time64:
12271         {
12272             struct timespec *tsp, ts[2];
12273             if (!arg3) {
12274                 tsp = NULL;
12275             } else {
12276                 if (target_to_host_timespec64(ts, arg3)) {
12277                     return -TARGET_EFAULT;
12278                 }
12279                 if (target_to_host_timespec64(ts + 1, arg3 +
12280                                      sizeof(struct target__kernel_timespec))) {
12281                     return -TARGET_EFAULT;
12282                 }
12283                 tsp = ts;
12284             }
12285             if (!arg2) {
12286                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12287             } else {
12288                 p = lock_user_string(arg2);
12289                 if (!p) {
12290                     return -TARGET_EFAULT;
12291                 }
12292                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12293                 unlock_user(p, arg2, 0);
12294             }
12295         }
12296         return ret;
12297 #endif
12298 #ifdef TARGET_NR_futex
12299     case TARGET_NR_futex:
12300         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12301 #endif
12302 #ifdef TARGET_NR_futex_time64
12303     case TARGET_NR_futex_time64:
12304         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12305 #endif
12306 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12307     case TARGET_NR_inotify_init:
12308         ret = get_errno(sys_inotify_init());
12309         if (ret >= 0) {
12310             fd_trans_register(ret, &target_inotify_trans);
12311         }
12312         return ret;
12313 #endif
12314 #ifdef CONFIG_INOTIFY1
12315 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12316     case TARGET_NR_inotify_init1:
12317         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12318                                           fcntl_flags_tbl)));
12319         if (ret >= 0) {
12320             fd_trans_register(ret, &target_inotify_trans);
12321         }
12322         return ret;
12323 #endif
12324 #endif
12325 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12326     case TARGET_NR_inotify_add_watch:
12327         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12328         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12329         unlock_user(p, arg2, 0);
12330         return ret;
12331 #endif
12332 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12333     case TARGET_NR_inotify_rm_watch:
12334         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12335 #endif
12336 
12337 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12338     case TARGET_NR_mq_open:
12339         {
12340             struct mq_attr posix_mq_attr;
12341             struct mq_attr *pposix_mq_attr;
12342             int host_flags;
12343 
12344             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12345             pposix_mq_attr = NULL;
12346             if (arg4) {
12347                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12348                     return -TARGET_EFAULT;
12349                 }
12350                 pposix_mq_attr = &posix_mq_attr;
12351             }
12352             p = lock_user_string(arg1 - 1);
12353             if (!p) {
12354                 return -TARGET_EFAULT;
12355             }
12356             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12357             unlock_user(p, arg1, 0);
12358         }
12359         return ret;
12360 
12361     case TARGET_NR_mq_unlink:
12362         p = lock_user_string(arg1 - 1);
12363         if (!p) {
12364             return -TARGET_EFAULT;
12365         }
12366         ret = get_errno(mq_unlink(p));
12367         unlock_user(p, arg1, 0);
12368         return ret;
12369 
12370 #ifdef TARGET_NR_mq_timedsend
12371     case TARGET_NR_mq_timedsend:
12372         {
12373             struct timespec ts;
12374 
12375             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12376             if (arg5 != 0) {
12377                 if (target_to_host_timespec(&ts, arg5)) {
12378                     return -TARGET_EFAULT;
12379                 }
12380                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12381                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12382                     return -TARGET_EFAULT;
12383                 }
12384             } else {
12385                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12386             }
12387             unlock_user(p, arg2, arg3);
12388         }
12389         return ret;
12390 #endif
12391 #ifdef TARGET_NR_mq_timedsend_time64
12392     case TARGET_NR_mq_timedsend_time64:
12393         {
12394             struct timespec ts;
12395 
12396             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12397             if (arg5 != 0) {
12398                 if (target_to_host_timespec64(&ts, arg5)) {
12399                     return -TARGET_EFAULT;
12400                 }
12401                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12402                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12403                     return -TARGET_EFAULT;
12404                 }
12405             } else {
12406                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12407             }
12408             unlock_user(p, arg2, arg3);
12409         }
12410         return ret;
12411 #endif
12412 
12413 #ifdef TARGET_NR_mq_timedreceive
12414     case TARGET_NR_mq_timedreceive:
12415         {
12416             struct timespec ts;
12417             unsigned int prio;
12418 
12419             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12420             if (arg5 != 0) {
12421                 if (target_to_host_timespec(&ts, arg5)) {
12422                     return -TARGET_EFAULT;
12423                 }
12424                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12425                                                      &prio, &ts));
12426                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12427                     return -TARGET_EFAULT;
12428                 }
12429             } else {
12430                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12431                                                      &prio, NULL));
12432             }
12433             unlock_user(p, arg2, arg3);
12434             if (arg4 != 0) {
12435                 put_user_u32(prio, arg4);
                  }
12436         }
12437         return ret;
12438 #endif
12439 #ifdef TARGET_NR_mq_timedreceive_time64
12440     case TARGET_NR_mq_timedreceive_time64:
12441         {
12442             struct timespec ts;
12443             unsigned int prio;
12444 
12445             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12446             if (arg5 != 0) {
12447                 if (target_to_host_timespec64(&ts, arg5)) {
12448                     return -TARGET_EFAULT;
12449                 }
12450                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12451                                                      &prio, &ts));
12452                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12453                     return -TARGET_EFAULT;
12454                 }
12455             } else {
12456                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12457                                                      &prio, NULL));
12458             }
12459             unlock_user(p, arg2, arg3);
12460             if (arg4 != 0) {
12461                 put_user_u32(prio, arg4);
12462             }
12463         }
12464         return ret;
12465 #endif
12466 
12467     /* Not implemented for now... */
12468 /*     case TARGET_NR_mq_notify: */
12469 /*         break; */
12470 
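          /*
           * mq_getsetattr: when new attributes are supplied (arg2),
           * mq_setattr() already reports the previous attributes, so a
           * separate mq_getattr() is only needed for the pure "get" case.
           */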
12471     case TARGET_NR_mq_getsetattr:
12472         {
12473             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12474             ret = 0;
12475             if (arg2 != 0) {
12476                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12477                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12478                                            &posix_mq_attr_out));
12479             } else if (arg3 != 0) {
12480                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12481             }
12482             if (ret == 0 && arg3 != 0) {
12483                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12484             }
12485         }
12486         return ret;
12487 #endif
12488 
12489 #ifdef CONFIG_SPLICE
12490 #ifdef TARGET_NR_tee
12491     case TARGET_NR_tee:
12492         {
12493             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12494         }
12495         return ret;
12496 #endif
12497 #ifdef TARGET_NR_splice
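          /*
           * splice: the optional 64-bit in/out offsets live in guest memory;
           * copy them into host loff_t variables, pass pointers to the host
           * syscall, and write the updated offsets back afterwards.
           */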
12498     case TARGET_NR_splice:
12499         {
12500             loff_t loff_in, loff_out;
12501             loff_t *ploff_in = NULL, *ploff_out = NULL;
12502             if (arg2) {
12503                 if (get_user_u64(loff_in, arg2)) {
12504                     return -TARGET_EFAULT;
12505                 }
12506                 ploff_in = &loff_in;
12507             }
12508             if (arg4) {
12509                 if (get_user_u64(loff_out, arg4)) {
12510                     return -TARGET_EFAULT;
12511                 }
12512                 ploff_out = &loff_out;
12513             }
12514             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12515             if (arg2) {
12516                 if (put_user_u64(loff_in, arg2)) {
12517                     return -TARGET_EFAULT;
12518                 }
12519             }
12520             if (arg4) {
12521                 if (put_user_u64(loff_out, arg4)) {
12522                     return -TARGET_EFAULT;
12523                 }
12524             }
12525         }
12526         return ret;
12527 #endif
12528 #ifdef TARGET_NR_vmsplice
12529     case TARGET_NR_vmsplice:
12530         {
12531             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12532             if (vec != NULL) {
12533                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12534                 unlock_iovec(vec, arg2, arg3, 0);
12535             } else {
12536                 ret = -host_to_target_errno(errno);
12537             }
12538         }
12539         return ret;
12540 #endif
12541 #endif /* CONFIG_SPLICE */
12542 #ifdef CONFIG_EVENTFD
12543 #if defined(TARGET_NR_eventfd)
12544     case TARGET_NR_eventfd:
12545         ret = get_errno(eventfd(arg1, 0));
12546         if (ret >= 0) {
12547             fd_trans_register(ret, &target_eventfd_trans);
12548         }
12549         return ret;
12550 #endif
12551 #if defined(TARGET_NR_eventfd2)
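          /*
           * eventfd2: only the O_NONBLOCK and O_CLOEXEC bits need translating
           * from target to host values; any other flag bits are passed
           * through unchanged.
           */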
12552     case TARGET_NR_eventfd2:
12553     {
12554         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12555         if (arg2 & TARGET_O_NONBLOCK) {
12556             host_flags |= O_NONBLOCK;
12557         }
12558         if (arg2 & TARGET_O_CLOEXEC) {
12559             host_flags |= O_CLOEXEC;
12560         }
12561         ret = get_errno(eventfd(arg1, host_flags));
12562         if (ret >= 0) {
12563             fd_trans_register(ret, &target_eventfd_trans);
12564         }
12565         return ret;
12566     }
12567 #endif
12568 #endif /* CONFIG_EVENTFD  */
12569 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
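          /*
           * fallocate: on 32-bit ABIs the 64-bit offset and length each
           * arrive split across a pair of registers and are recombined with
           * target_offset64().
           */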
12570     case TARGET_NR_fallocate:
12571 #if TARGET_ABI_BITS == 32
12572         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12573                                   target_offset64(arg5, arg6)));
12574 #else
12575         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12576 #endif
12577         return ret;
12578 #endif
12579 #if defined(CONFIG_SYNC_FILE_RANGE)
12580 #if defined(TARGET_NR_sync_file_range)
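          /*
           * sync_file_range: 32-bit ABIs likewise split the 64-bit offset and
           * nbytes arguments across register pairs.  MIPS o32 pads 64-bit
           * arguments to even register pairs, which is why its argument
           * numbering is shifted by one relative to the other targets.
           */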
12581     case TARGET_NR_sync_file_range:
12582 #if TARGET_ABI_BITS == 32
12583 #if defined(TARGET_MIPS)
12584         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12585                                         target_offset64(arg5, arg6), arg7));
12586 #else
12587         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12588                                         target_offset64(arg4, arg5), arg6));
12589 #endif /* !TARGET_MIPS */
12590 #else
12591         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12592 #endif
12593         return ret;
12594 #endif
12595 #if defined(TARGET_NR_sync_file_range2) || \
12596     defined(TARGET_NR_arm_sync_file_range)
12597 #if defined(TARGET_NR_sync_file_range2)
12598     case TARGET_NR_sync_file_range2:
12599 #endif
12600 #if defined(TARGET_NR_arm_sync_file_range)
12601     case TARGET_NR_arm_sync_file_range:
12602 #endif
12603         /* This is like sync_file_range but the arguments are reordered */
12604 #if TARGET_ABI_BITS == 32
12605         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12606                                         target_offset64(arg5, arg6), arg2));
12607 #else
12608         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12609 #endif
12610         return ret;
12611 #endif
12612 #endif
12613 #if defined(TARGET_NR_signalfd4)
12614     case TARGET_NR_signalfd4:
12615         return do_signalfd4(arg1, arg2, arg4);
12616 #endif
12617 #if defined(TARGET_NR_signalfd)
12618     case TARGET_NR_signalfd:
12619         return do_signalfd4(arg1, arg2, 0);
12620 #endif
12621 #if defined(CONFIG_EPOLL)
12622 #if defined(TARGET_NR_epoll_create)
12623     case TARGET_NR_epoll_create:
12624         return get_errno(epoll_create(arg1));
12625 #endif
12626 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12627     case TARGET_NR_epoll_create1:
12628         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12629 #endif
12630 #if defined(TARGET_NR_epoll_ctl)
12631     case TARGET_NR_epoll_ctl:
12632     {
12633         struct epoll_event ep;
12634         struct epoll_event *epp = 0;
12635         if (arg4) {
12636             if (arg2 != EPOLL_CTL_DEL) {
12637                 struct target_epoll_event *target_ep;
12638                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12639                     return -TARGET_EFAULT;
12640                 }
12641                 ep.events = tswap32(target_ep->events);
12642                 /*
12643                  * The epoll_data_t union is just opaque data to the kernel,
12644                  * so we transfer all 64 bits across and need not worry what
12645                  * actual data type it is.
12646                  */
12647                 ep.data.u64 = tswap64(target_ep->data.u64);
12648                 unlock_user_struct(target_ep, arg4, 0);
12649             }
12650             /*
12651              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12652              * non-NULL pointer even though that argument is ignored, so
12653              * point the host at the local event structure in that case too.
12654              */
12655             epp = &ep;
12656         }
12657         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12658     }
12659 #endif
12660 
12661 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
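          /*
           * epoll_wait and epoll_pwait share this handler: the guest's
           * epoll_event array is bounced through a host-side buffer because
           * the structure layout and byte order can differ, and the event
           * count is capped at TARGET_EP_MAX_EVENTS.
           */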
12662 #if defined(TARGET_NR_epoll_wait)
12663     case TARGET_NR_epoll_wait:
12664 #endif
12665 #if defined(TARGET_NR_epoll_pwait)
12666     case TARGET_NR_epoll_pwait:
12667 #endif
12668     {
12669         struct target_epoll_event *target_ep;
12670         struct epoll_event *ep;
12671         int epfd = arg1;
12672         int maxevents = arg3;
12673         int timeout = arg4;
12674 
12675         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12676             return -TARGET_EINVAL;
12677         }
12678 
12679         target_ep = lock_user(VERIFY_WRITE, arg2,
12680                               maxevents * sizeof(struct target_epoll_event), 1);
12681         if (!target_ep) {
12682             return -TARGET_EFAULT;
12683         }
12684 
12685         ep = g_try_new(struct epoll_event, maxevents);
12686         if (!ep) {
12687             unlock_user(target_ep, arg2, 0);
12688             return -TARGET_ENOMEM;
12689         }
12690 
12691         switch (num) {
12692 #if defined(TARGET_NR_epoll_pwait)
12693         case TARGET_NR_epoll_pwait:
12694         {
12695             target_sigset_t *target_set;
12696             sigset_t _set, *set = &_set;
12697 
12698             if (arg5) {
12699                 if (arg6 != sizeof(target_sigset_t)) {
12700                     ret = -TARGET_EINVAL;
12701                     break;
12702                 }
12703 
12704                 target_set = lock_user(VERIFY_READ, arg5,
12705                                        sizeof(target_sigset_t), 1);
12706                 if (!target_set) {
12707                     ret = -TARGET_EFAULT;
12708                     break;
12709                 }
12710                 target_to_host_sigset(set, target_set);
12711                 unlock_user(target_set, arg5, 0);
12712             } else {
12713                 set = NULL;
12714             }
12715 
12716             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12717                                              set, SIGSET_T_SIZE));
12718             break;
12719         }
12720 #endif
12721 #if defined(TARGET_NR_epoll_wait)
12722         case TARGET_NR_epoll_wait:
12723             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12724                                              NULL, 0));
12725             break;
12726 #endif
12727         default:
12728             ret = -TARGET_ENOSYS;
12729         }
12730         if (!is_error(ret)) {
12731             int i;
12732             for (i = 0; i < ret; i++) {
12733                 target_ep[i].events = tswap32(ep[i].events);
12734                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12735             }
12736             unlock_user(target_ep, arg2,
12737                         ret * sizeof(struct target_epoll_event));
12738         } else {
12739             unlock_user(target_ep, arg2, 0);
12740         }
12741         g_free(ep);
12742         return ret;
12743     }
12744 #endif
12745 #endif
12746 #ifdef TARGET_NR_prlimit64
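          /*
           * Note that new limits for RLIMIT_AS/DATA/STACK are not forwarded
           * to the host (rnewp stays NULL for them), presumably so that a
           * guest-chosen limit cannot constrain QEMU's own host-side memory
           * use; the old values are still read back for the guest.
           */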
12747     case TARGET_NR_prlimit64:
12748     {
12749         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12750         struct target_rlimit64 *target_rnew, *target_rold;
12751         struct host_rlimit64 rnew, rold, *rnewp = 0;
12752         int resource = target_to_host_resource(arg2);
12753 
12754         if (arg3 && (resource != RLIMIT_AS &&
12755                      resource != RLIMIT_DATA &&
12756                      resource != RLIMIT_STACK)) {
12757             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12758                 return -TARGET_EFAULT;
12759             }
12760             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12761             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12762             unlock_user_struct(target_rnew, arg3, 0);
12763             rnewp = &rnew;
12764         }
12765 
12766         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12767         if (!is_error(ret) && arg4) {
12768             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12769                 return -TARGET_EFAULT;
12770             }
12771             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12772             target_rold->rlim_max = tswap64(rold.rlim_max);
12773             unlock_user_struct(target_rold, arg4, 1);
12774         }
12775         return ret;
12776     }
12777 #endif
12778 #ifdef TARGET_NR_gethostname
12779     case TARGET_NR_gethostname:
12780     {
12781         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12782         if (name) {
12783             ret = get_errno(gethostname(name, arg2));
12784             unlock_user(name, arg1, arg2);
12785         } else {
12786             ret = -TARGET_EFAULT;
12787         }
12788         return ret;
12789     }
12790 #endif
12791 #ifdef TARGET_NR_atomic_cmpxchg_32
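          /*
           * Guest kernel helper: compare the 32-bit word at guest address
           * arg6 with arg2 and, if they match, store arg1 there; the old
           * value is returned either way.  As the note below says, this
           * read-modify-write is not atomic with respect to other threads
           * without start_exclusive().
           */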
12792     case TARGET_NR_atomic_cmpxchg_32:
12793     {
12794         /* should use start_exclusive from main.c */
12795         abi_ulong mem_value;
12796         if (get_user_u32(mem_value, arg6)) {
12797             target_siginfo_t info;
12798             info.si_signo = SIGSEGV;
12799             info.si_errno = 0;
12800             info.si_code = TARGET_SEGV_MAPERR;
12801             info._sifields._sigfault._addr = arg6;
12802             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12803                          QEMU_SI_FAULT, &info);
12804             ret = 0xdeadbeef;
12806         }
12807         if (mem_value == arg2) {
12808             put_user_u32(arg1, arg6);
              }
12809         return mem_value;
12810     }
12811 #endif
12812 #ifdef TARGET_NR_atomic_barrier
12813     case TARGET_NR_atomic_barrier:
12814         /* Like the kernel implementation and the QEMU Arm
12815            barrier, treat this as a no-op. */
12816         return 0;
12817 #endif
12818 
12819 #ifdef TARGET_NR_timer_create
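          /*
           * POSIX timers: the guest-visible timer id encodes the index into
           * the g_posix_timers array together with TIMER_MAGIC; the handlers
           * below use get_timer_id() to validate the id and recover the
           * index.
           */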
12820     case TARGET_NR_timer_create:
12821     {
12822         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12823 
12824         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12825 
12826         int clkid = arg1;
12827         int timer_index = next_free_host_timer();
12828 
12829         if (timer_index < 0) {
12830             ret = -TARGET_EAGAIN;
12831         } else {
12832             timer_t *phtimer = g_posix_timers + timer_index;
12833 
12834             if (arg2) {
12835                 phost_sevp = &host_sevp;
12836                 ret = target_to_host_sigevent(phost_sevp, arg2);
12837                 if (ret != 0) {
12838                     return ret;
12839                 }
12840             }
12841 
12842             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12843             if (ret) {
12844                 phtimer = NULL;
12845             } else {
12846                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12847                     return -TARGET_EFAULT;
12848                 }
12849             }
12850         }
12851         return ret;
12852     }
12853 #endif
12854 
12855 #ifdef TARGET_NR_timer_settime
12856     case TARGET_NR_timer_settime:
12857     {
12858         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12859          * struct itimerspec *old_value */
12860         target_timer_t timerid = get_timer_id(arg1);
12861 
12862         if (timerid < 0) {
12863             ret = timerid;
12864         } else if (arg3 == 0) {
12865             ret = -TARGET_EINVAL;
12866         } else {
12867             timer_t htimer = g_posix_timers[timerid];
12868             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12869 
12870             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12871                 return -TARGET_EFAULT;
12872             }
12873             ret = get_errno(
12874                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12875             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12876                 return -TARGET_EFAULT;
12877             }
12878         }
12879         return ret;
12880     }
12881 #endif
12882 
12883 #ifdef TARGET_NR_timer_settime64
12884     case TARGET_NR_timer_settime64:
12885     {
12886         target_timer_t timerid = get_timer_id(arg1);
12887 
12888         if (timerid < 0) {
12889             ret = timerid;
12890         } else if (arg3 == 0) {
12891             ret = -TARGET_EINVAL;
12892         } else {
12893             timer_t htimer = g_posix_timers[timerid];
12894             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12895 
12896             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12897                 return -TARGET_EFAULT;
12898             }
12899             ret = get_errno(
12900                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12901             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12902                 return -TARGET_EFAULT;
12903             }
12904         }
12905         return ret;
12906     }
12907 #endif
12908 
12909 #ifdef TARGET_NR_timer_gettime
12910     case TARGET_NR_timer_gettime:
12911     {
12912         /* args: timer_t timerid, struct itimerspec *curr_value */
12913         target_timer_t timerid = get_timer_id(arg1);
12914 
12915         if (timerid < 0) {
12916             ret = timerid;
12917         } else if (!arg2) {
12918             ret = -TARGET_EFAULT;
12919         } else {
12920             timer_t htimer = g_posix_timers[timerid];
12921             struct itimerspec hspec;
12922             ret = get_errno(timer_gettime(htimer, &hspec));
12923 
12924             if (host_to_target_itimerspec(arg2, &hspec)) {
12925                 ret = -TARGET_EFAULT;
12926             }
12927         }
12928         return ret;
12929     }
12930 #endif
12931 
12932 #ifdef TARGET_NR_timer_gettime64
12933     case TARGET_NR_timer_gettime64:
12934     {
12935         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12936         target_timer_t timerid = get_timer_id(arg1);
12937 
12938         if (timerid < 0) {
12939             ret = timerid;
12940         } else if (!arg2) {
12941             ret = -TARGET_EFAULT;
12942         } else {
12943             timer_t htimer = g_posix_timers[timerid];
12944             struct itimerspec hspec;
12945             ret = get_errno(timer_gettime(htimer, &hspec));
12946 
12947             if (host_to_target_itimerspec64(arg2, &hspec)) {
12948                 ret = -TARGET_EFAULT;
12949             }
12950         }
12951         return ret;
12952     }
12953 #endif
12954 
12955 #ifdef TARGET_NR_timer_getoverrun
12956     case TARGET_NR_timer_getoverrun:
12957     {
12958         /* args: timer_t timerid */
12959         target_timer_t timerid = get_timer_id(arg1);
12960 
12961         if (timerid < 0) {
12962             ret = timerid;
12963         } else {
12964             timer_t htimer = g_posix_timers[timerid];
12965             ret = get_errno(timer_getoverrun(htimer));
12966         }
12967         return ret;
12968     }
12969 #endif
12970 
12971 #ifdef TARGET_NR_timer_delete
12972     case TARGET_NR_timer_delete:
12973     {
12974         /* args: timer_t timerid */
12975         target_timer_t timerid = get_timer_id(arg1);
12976 
12977         if (timerid < 0) {
12978             ret = timerid;
12979         } else {
12980             timer_t htimer = g_posix_timers[timerid];
12981             ret = get_errno(timer_delete(htimer));
12982             g_posix_timers[timerid] = 0;
12983         }
12984         return ret;
12985     }
12986 #endif
12987 
12988 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12989     case TARGET_NR_timerfd_create:
12990         return get_errno(timerfd_create(arg1,
12991                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12992 #endif
12993 
12994 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12995     case TARGET_NR_timerfd_gettime:
12996         {
12997             struct itimerspec its_curr;
12998 
12999             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13000 
13001             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13002                 return -TARGET_EFAULT;
13003             }
13004         }
13005         return ret;
13006 #endif
13007 
13008 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13009     case TARGET_NR_timerfd_gettime64:
13010         {
13011             struct itimerspec its_curr;
13012 
13013             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13014 
13015             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13016                 return -TARGET_EFAULT;
13017             }
13018         }
13019         return ret;
13020 #endif
13021 
13022 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13023     case TARGET_NR_timerfd_settime:
13024         {
13025             struct itimerspec its_new, its_old, *p_new;
13026 
13027             if (arg3) {
13028                 if (target_to_host_itimerspec(&its_new, arg3)) {
13029                     return -TARGET_EFAULT;
13030                 }
13031                 p_new = &its_new;
13032             } else {
13033                 p_new = NULL;
13034             }
13035 
13036             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13037 
13038             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13039                 return -TARGET_EFAULT;
13040             }
13041         }
13042         return ret;
13043 #endif
13044 
13045 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13046     case TARGET_NR_timerfd_settime64:
13047         {
13048             struct itimerspec its_new, its_old, *p_new;
13049 
13050             if (arg3) {
13051                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13052                     return -TARGET_EFAULT;
13053                 }
13054                 p_new = &its_new;
13055             } else {
13056                 p_new = NULL;
13057             }
13058 
13059             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13060 
13061             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13062                 return -TARGET_EFAULT;
13063             }
13064         }
13065         return ret;
13066 #endif
13067 
13068 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13069     case TARGET_NR_ioprio_get:
13070         return get_errno(ioprio_get(arg1, arg2));
13071 #endif
13072 
13073 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13074     case TARGET_NR_ioprio_set:
13075         return get_errno(ioprio_set(arg1, arg2, arg3));
13076 #endif
13077 
13078 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13079     case TARGET_NR_setns:
13080         return get_errno(setns(arg1, arg2));
13081 #endif
13082 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13083     case TARGET_NR_unshare:
13084         return get_errno(unshare(arg1));
13085 #endif
13086 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13087     case TARGET_NR_kcmp:
13088         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13089 #endif
13090 #ifdef TARGET_NR_swapcontext
13091     case TARGET_NR_swapcontext:
13092         /* PowerPC specific.  */
13093         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13094 #endif
13095 #ifdef TARGET_NR_memfd_create
13096     case TARGET_NR_memfd_create:
13097         p = lock_user_string(arg1);
13098         if (!p) {
13099             return -TARGET_EFAULT;
13100         }
13101         ret = get_errno(memfd_create(p, arg2));
13102         fd_trans_unregister(ret);
13103         unlock_user(p, arg1, 0);
13104         return ret;
13105 #endif
13106 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13107     case TARGET_NR_membarrier:
13108         return get_errno(membarrier(arg1, arg2));
13109 #endif
13110 
13111 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
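          /*
           * copy_file_range: as with splice, the optional in/out offsets live
           * in guest memory; they are copied in before the call and written
           * back only when some bytes were actually copied.
           */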
13112     case TARGET_NR_copy_file_range:
13113         {
13114             loff_t inoff, outoff;
13115             loff_t *pinoff = NULL, *poutoff = NULL;
13116 
13117             if (arg2) {
13118                 if (get_user_u64(inoff, arg2)) {
13119                     return -TARGET_EFAULT;
13120                 }
13121                 pinoff = &inoff;
13122             }
13123             if (arg4) {
13124                 if (get_user_u64(outoff, arg4)) {
13125                     return -TARGET_EFAULT;
13126                 }
13127                 poutoff = &outoff;
13128             }
13129             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13130                                                  arg5, arg6));
13131             if (!is_error(ret) && ret > 0) {
13132                 if (arg2) {
13133                     if (put_user_u64(inoff, arg2)) {
13134                         return -TARGET_EFAULT;
13135                     }
13136                 }
13137                 if (arg4) {
13138                     if (put_user_u64(outoff, arg4)) {
13139                         return -TARGET_EFAULT;
13140                     }
13141                 }
13142             }
13143         }
13144         return ret;
13145 #endif
13146 
13147     default:
13148         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13149         return -TARGET_ENOSYS;
13150     }
13151     return ret;
13152 }
13153 
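      /*
       * do_syscall() is the externally visible entry point: it wraps
       * do_syscall1() above with record/replay bookkeeping and, when -strace
       * logging is enabled, prints the call and its return value.
       */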
13154 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13155                     abi_long arg2, abi_long arg3, abi_long arg4,
13156                     abi_long arg5, abi_long arg6, abi_long arg7,
13157                     abi_long arg8)
13158 {
13159     CPUState *cpu = env_cpu(cpu_env);
13160     abi_long ret;
13161 
13162 #ifdef DEBUG_ERESTARTSYS
13163     /* Debug-only code for exercising the syscall-restart code paths
13164      * in the per-architecture cpu main loops: restart every syscall
13165      * the guest makes once before letting it through.
13166      */
13167     {
13168         static bool flag;
13169         flag = !flag;
13170         if (flag) {
13171             return -TARGET_ERESTARTSYS;
13172         }
13173     }
13174 #endif
13175 
13176     record_syscall_start(cpu, num, arg1,
13177                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13178 
13179     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13180         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13181     }
13182 
13183     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13184                       arg5, arg6, arg7, arg8);
13185 
13186     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13187         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13188                           arg3, arg4, arg5, arg6);
13189     }
13190 
13191     record_syscall_return(cpu, num, ret);
13192     return ret;
13193 }
13194