xref: /qemu/linux-user/syscall.c (revision 49f95221)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
259 
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_statx __NR_statx
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we emulate getdents with getdents if the host has it.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
303 
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address,int *,tidptr)
325 #endif
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
328           const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
332           const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #endif
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 /* sched_attr is not defined in glibc */
341 struct sched_attr {
342     uint32_t size;
343     uint32_t sched_policy;
344     uint64_t sched_flags;
345     int32_t sched_nice;
346     uint32_t sched_priority;
347     uint64_t sched_runtime;
348     uint64_t sched_deadline;
349     uint64_t sched_period;
350     uint32_t sched_util_min;
351     uint32_t sched_util_max;
352 };
353 #define __NR_sys_sched_getattr __NR_sched_getattr
354 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
355           unsigned int, size, unsigned int, flags);
356 #define __NR_sys_sched_setattr __NR_sched_setattr
357 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
358           unsigned int, flags);
359 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
360 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
361 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
362 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
363           const struct sched_param *, param);
364 #define __NR_sys_sched_getparam __NR_sched_getparam
365 _syscall2(int, sys_sched_getparam, pid_t, pid,
366           struct sched_param *, param);
367 #define __NR_sys_sched_setparam __NR_sched_setparam
368 _syscall2(int, sys_sched_setparam, pid_t, pid,
369           const struct sched_param *, param);
370 #define __NR_sys_getcpu __NR_getcpu
371 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
372 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
373           void *, arg);
374 _syscall2(int, capget, struct __user_cap_header_struct *, header,
375           struct __user_cap_data_struct *, data);
376 _syscall2(int, capset, struct __user_cap_header_struct *, header,
377           struct __user_cap_data_struct *, data);
378 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
379 _syscall2(int, ioprio_get, int, which, int, who)
380 #endif
381 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
382 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
383 #endif
384 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
385 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
386 #endif
387 
388 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
389 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
390           unsigned long, idx1, unsigned long, idx2)
391 #endif
392 
393 /*
394  * It is assumed that struct statx is architecture independent.
395  */
396 #if defined(TARGET_NR_statx) && defined(__NR_statx)
397 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
398           unsigned int, mask, struct target_statx *, statxbuf)
399 #endif
400 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
401 _syscall2(int, membarrier, int, cmd, int, flags)
402 #endif
403 
404 static const bitmask_transtbl fcntl_flags_tbl[] = {
405   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
406   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
407   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
408   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
409   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
410   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
411   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
412   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
413   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
414   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
415   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
416   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
417   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
418 #if defined(O_DIRECT)
419   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
420 #endif
421 #if defined(O_NOATIME)
422   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
423 #endif
424 #if defined(O_CLOEXEC)
425   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
426 #endif
427 #if defined(O_PATH)
428   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
429 #endif
430 #if defined(O_TMPFILE)
431   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
432 #endif
433   /* Don't terminate the list prematurely on 64-bit host+guest.  */
434 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
435   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
436 #endif
437   { 0, 0, 0, 0 }
438 };
439 
440 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
441 
442 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
443 #if defined(__NR_utimensat)
444 #define __NR_sys_utimensat __NR_utimensat
445 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
446           const struct timespec *,tsp,int,flags)
447 #else
448 static int sys_utimensat(int dirfd, const char *pathname,
449                          const struct timespec times[2], int flags)
450 {
451     errno = ENOSYS;
452     return -1;
453 }
454 #endif
455 #endif /* TARGET_NR_utimensat */
456 
457 #ifdef TARGET_NR_renameat2
458 #if defined(__NR_renameat2)
459 #define __NR_sys_renameat2 __NR_renameat2
460 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
461           const char *, new, unsigned int, flags)
462 #else
463 static int sys_renameat2(int oldfd, const char *old,
464                          int newfd, const char *new, int flags)
465 {
466     if (flags == 0) {
467         return renameat(oldfd, old, newfd, new);
468     }
469     errno = ENOSYS;
470     return -1;
471 }
472 #endif
473 #endif /* TARGET_NR_renameat2 */
474 
475 #ifdef CONFIG_INOTIFY
476 #include <sys/inotify.h>
477 #else
478 /* Userspace can usually survive runtime without inotify */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY  */
484 
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
488 #endif
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not be that used by the underlying syscall */
491 struct host_rlimit64 {
492     uint64_t rlim_cur;
493     uint64_t rlim_max;
494 };
495 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
496           const struct host_rlimit64 *, new_limit,
497           struct host_rlimit64 *, old_limit)
498 #endif
499 
500 
501 #if defined(TARGET_NR_timer_create)
502 /* Maximum of 32 active POSIX timers allowed at any one time. */
503 static timer_t g_posix_timers[32] = { 0, } ;
504 
505 static inline int next_free_host_timer(void)
506 {
507     int k ;
508     /* FIXME: Does finding the next free slot require a lock? */
509     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
510         if (g_posix_timers[k] == 0) {
511             g_posix_timers[k] = (timer_t) 1;
512             return k;
513         }
514     }
515     return -1;
516 }
517 #endif
518 
519 static inline int host_to_target_errno(int host_errno)
520 {
521     switch (host_errno) {
522 #define E(X)  case X: return TARGET_##X;
523 #include "errnos.c.inc"
524 #undef E
525     default:
526         return host_errno;
527     }
528 }
529 
530 static inline int target_to_host_errno(int target_errno)
531 {
532     switch (target_errno) {
533 #define E(X)  case TARGET_##X: return X;
534 #include "errnos.c.inc"
535 #undef E
536     default:
537         return target_errno;
538     }
539 }
540 
541 static inline abi_long get_errno(abi_long ret)
542 {
543     if (ret == -1)
544         return -host_to_target_errno(errno);
545     else
546         return ret;
547 }
548 
549 const char *target_strerror(int err)
550 {
551     if (err == QEMU_ERESTARTSYS) {
552         return "To be restarted";
553     }
554     if (err == QEMU_ESIGRETURN) {
555         return "Successful exit from sigreturn";
556     }
557 
558     return strerror(target_to_host_errno(err));
559 }
560 
561 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
562 {
563     int i;
564     uint8_t b;
565     if (usize <= ksize) {
566         return 1;
567     }
568     for (i = ksize; i < usize; i++) {
569         if (get_user_u8(b, addr + i)) {
570             return -TARGET_EFAULT;
571         }
572         if (b != 0) {
573             return 0;
574         }
575     }
576     return 1;
577 }
578 
579 #define safe_syscall0(type, name) \
580 static type safe_##name(void) \
581 { \
582     return safe_syscall(__NR_##name); \
583 }
584 
585 #define safe_syscall1(type, name, type1, arg1) \
586 static type safe_##name(type1 arg1) \
587 { \
588     return safe_syscall(__NR_##name, arg1); \
589 }
590 
591 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
592 static type safe_##name(type1 arg1, type2 arg2) \
593 { \
594     return safe_syscall(__NR_##name, arg1, arg2); \
595 }
596 
597 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
598 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
599 { \
600     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
601 }
602 
603 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
604     type4, arg4) \
605 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
606 { \
607     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
608 }
609 
610 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
611     type4, arg4, type5, arg5) \
612 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
613     type5 arg5) \
614 { \
615     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
616 }
617 
618 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
619     type4, arg4, type5, arg5, type6, arg6) \
620 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
621     type5 arg5, type6 arg6) \
622 { \
623     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
624 }
625 
626 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
627 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
628 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
629               int, flags, mode_t, mode)
630 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
631 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
632               struct rusage *, rusage)
633 #endif
634 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
635               int, options, struct rusage *, rusage)
636 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
637 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
638     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
639 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
640               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
641 #endif
642 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
643 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
644               struct timespec *, tsp, const sigset_t *, sigmask,
645               size_t, sigsetsize)
646 #endif
647 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
648               int, maxevents, int, timeout, const sigset_t *, sigmask,
649               size_t, sigsetsize)
650 #if defined(__NR_futex)
651 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
652               const struct timespec *,timeout,int *,uaddr2,int,val3)
653 #endif
654 #if defined(__NR_futex_time64)
655 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
656               const struct timespec *,timeout,int *,uaddr2,int,val3)
657 #endif
658 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
659 safe_syscall2(int, kill, pid_t, pid, int, sig)
660 safe_syscall2(int, tkill, int, tid, int, sig)
661 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
662 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
663 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
664 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
665               unsigned long, pos_l, unsigned long, pos_h)
666 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
667               unsigned long, pos_l, unsigned long, pos_h)
668 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
669               socklen_t, addrlen)
670 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
671               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
672 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
673               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
674 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
675 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
676 safe_syscall2(int, flock, int, fd, int, operation)
677 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
678 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
679               const struct timespec *, uts, size_t, sigsetsize)
680 #endif
681 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
682               int, flags)
683 #if defined(TARGET_NR_nanosleep)
684 safe_syscall2(int, nanosleep, const struct timespec *, req,
685               struct timespec *, rem)
686 #endif
687 #if defined(TARGET_NR_clock_nanosleep) || \
688     defined(TARGET_NR_clock_nanosleep_time64)
689 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
690               const struct timespec *, req, struct timespec *, rem)
691 #endif
692 #ifdef __NR_ipc
693 #ifdef __s390x__
694 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
695               void *, ptr)
696 #else
697 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
698               void *, ptr, long, fifth)
699 #endif
700 #endif
701 #ifdef __NR_msgsnd
702 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
703               int, flags)
704 #endif
705 #ifdef __NR_msgrcv
706 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
707               long, msgtype, int, flags)
708 #endif
709 #ifdef __NR_semtimedop
710 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
711               unsigned, nsops, const struct timespec *, timeout)
712 #endif
713 #if defined(TARGET_NR_mq_timedsend) || \
714     defined(TARGET_NR_mq_timedsend_time64)
715 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
716               size_t, len, unsigned, prio, const struct timespec *, timeout)
717 #endif
718 #if defined(TARGET_NR_mq_timedreceive) || \
719     defined(TARGET_NR_mq_timedreceive_time64)
720 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
721               size_t, len, unsigned *, prio, const struct timespec *, timeout)
722 #endif
723 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
724 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
725               int, outfd, loff_t *, poutoff, size_t, length,
726               unsigned int, flags)
727 #endif
728 
729 /* We do ioctl like this rather than via safe_syscall3 to preserve the
730  * "third argument might be integer or pointer or not present" behaviour of
731  * the libc function.
732  */
733 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
734 /* Similarly for fcntl. Note that callers must always:
735  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
736  *  use the flock64 struct rather than unsuffixed flock
737  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
738  */
739 #ifdef __NR_fcntl64
740 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
741 #else
742 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
743 #endif
744 
745 static inline int host_to_target_sock_type(int host_type)
746 {
747     int target_type;
748 
749     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
750     case SOCK_DGRAM:
751         target_type = TARGET_SOCK_DGRAM;
752         break;
753     case SOCK_STREAM:
754         target_type = TARGET_SOCK_STREAM;
755         break;
756     default:
757         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
758         break;
759     }
760 
761 #if defined(SOCK_CLOEXEC)
762     if (host_type & SOCK_CLOEXEC) {
763         target_type |= TARGET_SOCK_CLOEXEC;
764     }
765 #endif
766 
767 #if defined(SOCK_NONBLOCK)
768     if (host_type & SOCK_NONBLOCK) {
769         target_type |= TARGET_SOCK_NONBLOCK;
770     }
771 #endif
772 
773     return target_type;
774 }
775 
776 static abi_ulong target_brk;
777 static abi_ulong target_original_brk;
778 static abi_ulong brk_page;
779 
780 void target_set_brk(abi_ulong new_brk)
781 {
782     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
783     brk_page = HOST_PAGE_ALIGN(target_brk);
784 }
785 
786 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
787 #define DEBUGF_BRK(message, args...)
788 
789 /* do_brk() must return target values and target errnos. */
790 abi_long do_brk(abi_ulong new_brk)
791 {
792     abi_long mapped_addr;
793     abi_ulong new_alloc_size;
794 
795     /* brk pointers are always untagged */
796 
797     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
798 
799     if (!new_brk) {
800         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
801         return target_brk;
802     }
803     if (new_brk < target_original_brk) {
804         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
805                    target_brk);
806         return target_brk;
807     }
808 
809     /* If the new brk is less than the highest page reserved to the
810      * target heap allocation, set it and we're almost done...  */
811     if (new_brk <= brk_page) {
812         /* Heap contents are initialized to zero, as for anonymous
813          * mapped pages.  */
814         if (new_brk > target_brk) {
815             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
816         }
817 	target_brk = new_brk;
818         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
819 	return target_brk;
820     }
821 
822     /* We need to allocate more memory after the brk... Note that
823      * we don't use MAP_FIXED because that will map over the top of
824      * any existing mapping (like the one with the host libc or qemu
825      * itself); instead we treat "mapped but at wrong address" as
826      * a failure and unmap again.
827      */
828     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
829     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
830                                         PROT_READ|PROT_WRITE,
831                                         MAP_ANON|MAP_PRIVATE, 0, 0));
832 
833     if (mapped_addr == brk_page) {
834         /* Heap contents are initialized to zero, as for anonymous
835          * mapped pages.  Technically the new pages are already
836          * initialized to zero since they *are* anonymous mapped
837          * pages, however we have to take care with the contents that
838          * come from the remaining part of the previous page: it may
839          * contains garbage data due to a previous heap usage (grown
840          * then shrunken).  */
841         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
842 
843         target_brk = new_brk;
844         brk_page = HOST_PAGE_ALIGN(target_brk);
845         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
846             target_brk);
847         return target_brk;
848     } else if (mapped_addr != -1) {
849         /* Mapped but at wrong address, meaning there wasn't actually
850          * enough space for this brk.
851          */
852         target_munmap(mapped_addr, new_alloc_size);
853         mapped_addr = -1;
854         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
855     }
856     else {
857         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
858     }
859 
860 #if defined(TARGET_ALPHA)
861     /* We (partially) emulate OSF/1 on Alpha, which requires we
862        return a proper errno, not an unchanged brk value.  */
863     return -TARGET_ENOMEM;
864 #endif
865     /* For everything else, return the previous break. */
866     return target_brk;
867 }
868 
869 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
870     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
871 static inline abi_long copy_from_user_fdset(fd_set *fds,
872                                             abi_ulong target_fds_addr,
873                                             int n)
874 {
875     int i, nw, j, k;
876     abi_ulong b, *target_fds;
877 
878     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
879     if (!(target_fds = lock_user(VERIFY_READ,
880                                  target_fds_addr,
881                                  sizeof(abi_ulong) * nw,
882                                  1)))
883         return -TARGET_EFAULT;
884 
885     FD_ZERO(fds);
886     k = 0;
887     for (i = 0; i < nw; i++) {
888         /* grab the abi_ulong */
889         __get_user(b, &target_fds[i]);
890         for (j = 0; j < TARGET_ABI_BITS; j++) {
891             /* check the bit inside the abi_ulong */
892             if ((b >> j) & 1)
893                 FD_SET(k, fds);
894             k++;
895         }
896     }
897 
898     unlock_user(target_fds, target_fds_addr, 0);
899 
900     return 0;
901 }
902 
903 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
904                                                  abi_ulong target_fds_addr,
905                                                  int n)
906 {
907     if (target_fds_addr) {
908         if (copy_from_user_fdset(fds, target_fds_addr, n))
909             return -TARGET_EFAULT;
910         *fds_ptr = fds;
911     } else {
912         *fds_ptr = NULL;
913     }
914     return 0;
915 }
916 
917 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
918                                           const fd_set *fds,
919                                           int n)
920 {
921     int i, nw, j, k;
922     abi_long v;
923     abi_ulong *target_fds;
924 
925     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
926     if (!(target_fds = lock_user(VERIFY_WRITE,
927                                  target_fds_addr,
928                                  sizeof(abi_ulong) * nw,
929                                  0)))
930         return -TARGET_EFAULT;
931 
932     k = 0;
933     for (i = 0; i < nw; i++) {
934         v = 0;
935         for (j = 0; j < TARGET_ABI_BITS; j++) {
936             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
937             k++;
938         }
939         __put_user(v, &target_fds[i]);
940     }
941 
942     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
943 
944     return 0;
945 }
946 #endif
947 
948 #if defined(__alpha__)
949 #define HOST_HZ 1024
950 #else
951 #define HOST_HZ 100
952 #endif
953 
954 static inline abi_long host_to_target_clock_t(long ticks)
955 {
956 #if HOST_HZ == TARGET_HZ
957     return ticks;
958 #else
959     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
960 #endif
961 }
962 
963 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
964                                              const struct rusage *rusage)
965 {
966     struct target_rusage *target_rusage;
967 
968     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
969         return -TARGET_EFAULT;
970     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
971     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
972     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
973     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
974     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
975     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
976     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
977     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
978     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
979     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
980     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
981     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
982     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
983     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
984     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
985     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
986     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
987     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
988     unlock_user_struct(target_rusage, target_addr, 1);
989 
990     return 0;
991 }
992 
993 #ifdef TARGET_NR_setrlimit
994 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
995 {
996     abi_ulong target_rlim_swap;
997     rlim_t result;
998 
999     target_rlim_swap = tswapal(target_rlim);
1000     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1001         return RLIM_INFINITY;
1002 
1003     result = target_rlim_swap;
1004     if (target_rlim_swap != (rlim_t)result)
1005         return RLIM_INFINITY;
1006 
1007     return result;
1008 }
1009 #endif
1010 
1011 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1012 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1013 {
1014     abi_ulong target_rlim_swap;
1015     abi_ulong result;
1016 
1017     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1018         target_rlim_swap = TARGET_RLIM_INFINITY;
1019     else
1020         target_rlim_swap = rlim;
1021     result = tswapal(target_rlim_swap);
1022 
1023     return result;
1024 }
1025 #endif
1026 
1027 static inline int target_to_host_resource(int code)
1028 {
1029     switch (code) {
1030     case TARGET_RLIMIT_AS:
1031         return RLIMIT_AS;
1032     case TARGET_RLIMIT_CORE:
1033         return RLIMIT_CORE;
1034     case TARGET_RLIMIT_CPU:
1035         return RLIMIT_CPU;
1036     case TARGET_RLIMIT_DATA:
1037         return RLIMIT_DATA;
1038     case TARGET_RLIMIT_FSIZE:
1039         return RLIMIT_FSIZE;
1040     case TARGET_RLIMIT_LOCKS:
1041         return RLIMIT_LOCKS;
1042     case TARGET_RLIMIT_MEMLOCK:
1043         return RLIMIT_MEMLOCK;
1044     case TARGET_RLIMIT_MSGQUEUE:
1045         return RLIMIT_MSGQUEUE;
1046     case TARGET_RLIMIT_NICE:
1047         return RLIMIT_NICE;
1048     case TARGET_RLIMIT_NOFILE:
1049         return RLIMIT_NOFILE;
1050     case TARGET_RLIMIT_NPROC:
1051         return RLIMIT_NPROC;
1052     case TARGET_RLIMIT_RSS:
1053         return RLIMIT_RSS;
1054     case TARGET_RLIMIT_RTPRIO:
1055         return RLIMIT_RTPRIO;
1056     case TARGET_RLIMIT_RTTIME:
1057         return RLIMIT_RTTIME;
1058     case TARGET_RLIMIT_SIGPENDING:
1059         return RLIMIT_SIGPENDING;
1060     case TARGET_RLIMIT_STACK:
1061         return RLIMIT_STACK;
1062     default:
1063         return code;
1064     }
1065 }
1066 
1067 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1068                                               abi_ulong target_tv_addr)
1069 {
1070     struct target_timeval *target_tv;
1071 
1072     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1073         return -TARGET_EFAULT;
1074     }
1075 
1076     __get_user(tv->tv_sec, &target_tv->tv_sec);
1077     __get_user(tv->tv_usec, &target_tv->tv_usec);
1078 
1079     unlock_user_struct(target_tv, target_tv_addr, 0);
1080 
1081     return 0;
1082 }
1083 
1084 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1085                                             const struct timeval *tv)
1086 {
1087     struct target_timeval *target_tv;
1088 
1089     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1090         return -TARGET_EFAULT;
1091     }
1092 
1093     __put_user(tv->tv_sec, &target_tv->tv_sec);
1094     __put_user(tv->tv_usec, &target_tv->tv_usec);
1095 
1096     unlock_user_struct(target_tv, target_tv_addr, 1);
1097 
1098     return 0;
1099 }
1100 
1101 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1102 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1103                                                 abi_ulong target_tv_addr)
1104 {
1105     struct target__kernel_sock_timeval *target_tv;
1106 
1107     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1108         return -TARGET_EFAULT;
1109     }
1110 
1111     __get_user(tv->tv_sec, &target_tv->tv_sec);
1112     __get_user(tv->tv_usec, &target_tv->tv_usec);
1113 
1114     unlock_user_struct(target_tv, target_tv_addr, 0);
1115 
1116     return 0;
1117 }
1118 #endif
1119 
1120 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1121                                               const struct timeval *tv)
1122 {
1123     struct target__kernel_sock_timeval *target_tv;
1124 
1125     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1126         return -TARGET_EFAULT;
1127     }
1128 
1129     __put_user(tv->tv_sec, &target_tv->tv_sec);
1130     __put_user(tv->tv_usec, &target_tv->tv_usec);
1131 
1132     unlock_user_struct(target_tv, target_tv_addr, 1);
1133 
1134     return 0;
1135 }
1136 
1137 #if defined(TARGET_NR_futex) || \
1138     defined(TARGET_NR_rt_sigtimedwait) || \
1139     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1140     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1141     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1142     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1143     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1144     defined(TARGET_NR_timer_settime) || \
1145     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1146 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1147                                                abi_ulong target_addr)
1148 {
1149     struct target_timespec *target_ts;
1150 
1151     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1152         return -TARGET_EFAULT;
1153     }
1154     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1155     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1156     unlock_user_struct(target_ts, target_addr, 0);
1157     return 0;
1158 }
1159 #endif
1160 
1161 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1162     defined(TARGET_NR_timer_settime64) || \
1163     defined(TARGET_NR_mq_timedsend_time64) || \
1164     defined(TARGET_NR_mq_timedreceive_time64) || \
1165     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1166     defined(TARGET_NR_clock_nanosleep_time64) || \
1167     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1168     defined(TARGET_NR_utimensat) || \
1169     defined(TARGET_NR_utimensat_time64) || \
1170     defined(TARGET_NR_semtimedop_time64) || \
1171     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1172 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1173                                                  abi_ulong target_addr)
1174 {
1175     struct target__kernel_timespec *target_ts;
1176 
1177     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1178         return -TARGET_EFAULT;
1179     }
1180     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1181     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1182     /* in 32bit mode, this drops the padding */
1183     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1184     unlock_user_struct(target_ts, target_addr, 0);
1185     return 0;
1186 }
1187 #endif
1188 
1189 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1190                                                struct timespec *host_ts)
1191 {
1192     struct target_timespec *target_ts;
1193 
1194     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1195         return -TARGET_EFAULT;
1196     }
1197     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1198     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1199     unlock_user_struct(target_ts, target_addr, 1);
1200     return 0;
1201 }
1202 
1203 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1204                                                  struct timespec *host_ts)
1205 {
1206     struct target__kernel_timespec *target_ts;
1207 
1208     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1209         return -TARGET_EFAULT;
1210     }
1211     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1212     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1213     unlock_user_struct(target_ts, target_addr, 1);
1214     return 0;
1215 }
1216 
1217 #if defined(TARGET_NR_gettimeofday)
1218 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1219                                              struct timezone *tz)
1220 {
1221     struct target_timezone *target_tz;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1224         return -TARGET_EFAULT;
1225     }
1226 
1227     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1228     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1229 
1230     unlock_user_struct(target_tz, target_tz_addr, 1);
1231 
1232     return 0;
1233 }
1234 #endif
1235 
1236 #if defined(TARGET_NR_settimeofday)
1237 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1238                                                abi_ulong target_tz_addr)
1239 {
1240     struct target_timezone *target_tz;
1241 
1242     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1243         return -TARGET_EFAULT;
1244     }
1245 
1246     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1247     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1248 
1249     unlock_user_struct(target_tz, target_tz_addr, 0);
1250 
1251     return 0;
1252 }
1253 #endif
1254 
1255 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1256 #include <mqueue.h>
1257 
1258 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1259                                               abi_ulong target_mq_attr_addr)
1260 {
1261     struct target_mq_attr *target_mq_attr;
1262 
1263     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1264                           target_mq_attr_addr, 1))
1265         return -TARGET_EFAULT;
1266 
1267     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1268     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1269     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1270     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1271 
1272     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1273 
1274     return 0;
1275 }
1276 
1277 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1278                                             const struct mq_attr *attr)
1279 {
1280     struct target_mq_attr *target_mq_attr;
1281 
1282     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1283                           target_mq_attr_addr, 0))
1284         return -TARGET_EFAULT;
1285 
1286     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1287     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1288     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1289     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1290 
1291     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1292 
1293     return 0;
1294 }
1295 #endif
1296 
1297 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1298 /* do_select() must return target values and target errnos. */
1299 static abi_long do_select(int n,
1300                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1301                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1302 {
1303     fd_set rfds, wfds, efds;
1304     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1305     struct timeval tv;
1306     struct timespec ts, *ts_ptr;
1307     abi_long ret;
1308 
1309     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1310     if (ret) {
1311         return ret;
1312     }
1313     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1314     if (ret) {
1315         return ret;
1316     }
1317     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1318     if (ret) {
1319         return ret;
1320     }
1321 
1322     if (target_tv_addr) {
1323         if (copy_from_user_timeval(&tv, target_tv_addr))
1324             return -TARGET_EFAULT;
1325         ts.tv_sec = tv.tv_sec;
1326         ts.tv_nsec = tv.tv_usec * 1000;
1327         ts_ptr = &ts;
1328     } else {
1329         ts_ptr = NULL;
1330     }
1331 
1332     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1333                                   ts_ptr, NULL));
1334 
1335     if (!is_error(ret)) {
1336         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1337             return -TARGET_EFAULT;
1338         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1339             return -TARGET_EFAULT;
1340         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1341             return -TARGET_EFAULT;
1342 
1343         if (target_tv_addr) {
1344             tv.tv_sec = ts.tv_sec;
1345             tv.tv_usec = ts.tv_nsec / 1000;
1346             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1347                 return -TARGET_EFAULT;
1348             }
1349         }
1350     }
1351 
1352     return ret;
1353 }
1354 
1355 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1356 static abi_long do_old_select(abi_ulong arg1)
1357 {
1358     struct target_sel_arg_struct *sel;
1359     abi_ulong inp, outp, exp, tvp;
1360     long nsel;
1361 
1362     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1363         return -TARGET_EFAULT;
1364     }
1365 
1366     nsel = tswapal(sel->n);
1367     inp = tswapal(sel->inp);
1368     outp = tswapal(sel->outp);
1369     exp = tswapal(sel->exp);
1370     tvp = tswapal(sel->tvp);
1371 
1372     unlock_user_struct(sel, arg1, 0);
1373 
1374     return do_select(nsel, inp, outp, exp, tvp);
1375 }
1376 #endif
1377 #endif
1378 
1379 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1380 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1381                             abi_long arg4, abi_long arg5, abi_long arg6,
1382                             bool time64)
1383 {
1384     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1385     fd_set rfds, wfds, efds;
1386     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1387     struct timespec ts, *ts_ptr;
1388     abi_long ret;
1389 
1390     /*
1391      * The 6th arg is actually two args smashed together,
1392      * so we cannot use the C library.
1393      */
1394     struct {
1395         sigset_t *set;
1396         size_t size;
1397     } sig, *sig_ptr;
1398 
1399     abi_ulong arg_sigset, arg_sigsize, *arg7;
1400 
1401     n = arg1;
1402     rfd_addr = arg2;
1403     wfd_addr = arg3;
1404     efd_addr = arg4;
1405     ts_addr = arg5;
1406 
1407     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1408     if (ret) {
1409         return ret;
1410     }
1411     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1412     if (ret) {
1413         return ret;
1414     }
1415     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1416     if (ret) {
1417         return ret;
1418     }
1419 
1420     /*
1421      * This takes a timespec, and not a timeval, so we cannot
1422      * use the do_select() helper ...
1423      */
1424     if (ts_addr) {
1425         if (time64) {
1426             if (target_to_host_timespec64(&ts, ts_addr)) {
1427                 return -TARGET_EFAULT;
1428             }
1429         } else {
1430             if (target_to_host_timespec(&ts, ts_addr)) {
1431                 return -TARGET_EFAULT;
1432             }
1433         }
1434             ts_ptr = &ts;
1435     } else {
1436         ts_ptr = NULL;
1437     }
1438 
1439     /* Extract the two packed args for the sigset */
1440     sig_ptr = NULL;
1441     if (arg6) {
1442         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1443         if (!arg7) {
1444             return -TARGET_EFAULT;
1445         }
1446         arg_sigset = tswapal(arg7[0]);
1447         arg_sigsize = tswapal(arg7[1]);
1448         unlock_user(arg7, arg6, 0);
1449 
1450         if (arg_sigset) {
1451             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1452             if (ret != 0) {
1453                 return ret;
1454             }
1455             sig_ptr = &sig;
1456             sig.size = SIGSET_T_SIZE;
1457         }
1458     }
1459 
1460     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1461                                   ts_ptr, sig_ptr));
1462 
1463     if (sig_ptr) {
1464         finish_sigsuspend_mask(ret);
1465     }
1466 
1467     if (!is_error(ret)) {
1468         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1469             return -TARGET_EFAULT;
1470         }
1471         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1472             return -TARGET_EFAULT;
1473         }
1474         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1475             return -TARGET_EFAULT;
1476         }
1477         if (time64) {
1478             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1479                 return -TARGET_EFAULT;
1480             }
1481         } else {
1482             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1483                 return -TARGET_EFAULT;
1484             }
1485         }
1486     }
1487     return ret;
1488 }
1489 #endif
1490 
1491 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1492     defined(TARGET_NR_ppoll_time64)
1493 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1494                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1495 {
1496     struct target_pollfd *target_pfd;
1497     unsigned int nfds = arg2;
1498     struct pollfd *pfd;
1499     unsigned int i;
1500     abi_long ret;
1501 
1502     pfd = NULL;
1503     target_pfd = NULL;
1504     if (nfds) {
1505         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1506             return -TARGET_EINVAL;
1507         }
1508         target_pfd = lock_user(VERIFY_WRITE, arg1,
1509                                sizeof(struct target_pollfd) * nfds, 1);
1510         if (!target_pfd) {
1511             return -TARGET_EFAULT;
1512         }
1513 
1514         pfd = alloca(sizeof(struct pollfd) * nfds);
1515         for (i = 0; i < nfds; i++) {
1516             pfd[i].fd = tswap32(target_pfd[i].fd);
1517             pfd[i].events = tswap16(target_pfd[i].events);
1518         }
1519     }
1520     if (ppoll) {
1521         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1522         sigset_t *set = NULL;
1523 
1524         if (arg3) {
1525             if (time64) {
1526                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1527                     unlock_user(target_pfd, arg1, 0);
1528                     return -TARGET_EFAULT;
1529                 }
1530             } else {
1531                 if (target_to_host_timespec(timeout_ts, arg3)) {
1532                     unlock_user(target_pfd, arg1, 0);
1533                     return -TARGET_EFAULT;
1534                 }
1535             }
1536         } else {
1537             timeout_ts = NULL;
1538         }
1539 
1540         if (arg4) {
1541             ret = process_sigsuspend_mask(&set, arg4, arg5);
1542             if (ret != 0) {
1543                 unlock_user(target_pfd, arg1, 0);
1544                 return ret;
1545             }
1546         }
1547 
1548         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1549                                    set, SIGSET_T_SIZE));
1550 
1551         if (set) {
1552             finish_sigsuspend_mask(ret);
1553         }
1554         if (!is_error(ret) && arg3) {
1555             if (time64) {
1556                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1557                     return -TARGET_EFAULT;
1558                 }
1559             } else {
1560                 if (host_to_target_timespec(arg3, timeout_ts)) {
1561                     return -TARGET_EFAULT;
1562                 }
1563             }
1564         }
1565     } else {
1566           struct timespec ts, *pts;
1567 
1568           if (arg3 >= 0) {
1569               /* Convert ms to secs, ns */
1570               ts.tv_sec = arg3 / 1000;
1571               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1572               pts = &ts;
1573           } else {
1574               /* -ve poll() timeout means "infinite" */
1575               pts = NULL;
1576           }
1577           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1578     }
1579 
1580     if (!is_error(ret)) {
1581         for (i = 0; i < nfds; i++) {
1582             target_pfd[i].revents = tswap16(pfd[i].revents);
1583         }
1584     }
1585     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1586     return ret;
1587 }
1588 #endif
1589 
1590 static abi_long do_pipe2(int host_pipe[], int flags)
1591 {
1592 #ifdef CONFIG_PIPE2
1593     return pipe2(host_pipe, flags);
1594 #else
1595     return -ENOSYS;
1596 #endif
1597 }
1598 
1599 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1600                         int flags, int is_pipe2)
1601 {
1602     int host_pipe[2];
1603     abi_long ret;
1604     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1605 
1606     if (is_error(ret))
1607         return get_errno(ret);
1608 
1609     /* Several targets have special calling conventions for the original
1610        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1611     if (!is_pipe2) {
1612 #if defined(TARGET_ALPHA)
1613         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1614         return host_pipe[0];
1615 #elif defined(TARGET_MIPS)
1616         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1617         return host_pipe[0];
1618 #elif defined(TARGET_SH4)
1619         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1620         return host_pipe[0];
1621 #elif defined(TARGET_SPARC)
1622         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1623         return host_pipe[0];
1624 #endif
1625     }
1626 
1627     if (put_user_s32(host_pipe[0], pipedes)
1628         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1629         return -TARGET_EFAULT;
1630     return get_errno(ret);
1631 }
1632 
1633 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1634                                               abi_ulong target_addr,
1635                                               socklen_t len)
1636 {
1637     struct target_ip_mreqn *target_smreqn;
1638 
1639     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1640     if (!target_smreqn)
1641         return -TARGET_EFAULT;
1642     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1643     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1644     if (len == sizeof(struct target_ip_mreqn))
1645         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1646     unlock_user(target_smreqn, target_addr, 0);
1647 
1648     return 0;
1649 }
1650 
1651 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1652                                                abi_ulong target_addr,
1653                                                socklen_t len)
1654 {
1655     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1656     sa_family_t sa_family;
1657     struct target_sockaddr *target_saddr;
1658 
1659     if (fd_trans_target_to_host_addr(fd)) {
1660         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1661     }
1662 
1663     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1664     if (!target_saddr)
1665         return -TARGET_EFAULT;
1666 
1667     sa_family = tswap16(target_saddr->sa_family);
1668 
1669     /* Oops. The caller might send a incomplete sun_path; sun_path
1670      * must be terminated by \0 (see the manual page), but
1671      * unfortunately it is quite common to specify sockaddr_un
1672      * length as "strlen(x->sun_path)" while it should be
1673      * "strlen(...) + 1". We'll fix that here if needed.
1674      * Linux kernel has a similar feature.
1675      */
1676 
1677     if (sa_family == AF_UNIX) {
1678         if (len < unix_maxlen && len > 0) {
1679             char *cp = (char*)target_saddr;
1680 
1681             if ( cp[len-1] && !cp[len] )
1682                 len++;
1683         }
1684         if (len > unix_maxlen)
1685             len = unix_maxlen;
1686     }
1687 
1688     memcpy(addr, target_saddr, len);
1689     addr->sa_family = sa_family;
1690     if (sa_family == AF_NETLINK) {
1691         struct sockaddr_nl *nladdr;
1692 
1693         nladdr = (struct sockaddr_nl *)addr;
1694         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1695         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1696     } else if (sa_family == AF_PACKET) {
1697 	struct target_sockaddr_ll *lladdr;
1698 
1699 	lladdr = (struct target_sockaddr_ll *)addr;
1700 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1701 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1702     }
1703     unlock_user(target_saddr, target_addr, 0);
1704 
1705     return 0;
1706 }
1707 
1708 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1709                                                struct sockaddr *addr,
1710                                                socklen_t len)
1711 {
1712     struct target_sockaddr *target_saddr;
1713 
1714     if (len == 0) {
1715         return 0;
1716     }
1717     assert(addr);
1718 
1719     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1720     if (!target_saddr)
1721         return -TARGET_EFAULT;
1722     memcpy(target_saddr, addr, len);
1723     if (len >= offsetof(struct target_sockaddr, sa_family) +
1724         sizeof(target_saddr->sa_family)) {
1725         target_saddr->sa_family = tswap16(addr->sa_family);
1726     }
1727     if (addr->sa_family == AF_NETLINK &&
1728         len >= sizeof(struct target_sockaddr_nl)) {
1729         struct target_sockaddr_nl *target_nl =
1730                (struct target_sockaddr_nl *)target_saddr;
1731         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1732         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1733     } else if (addr->sa_family == AF_PACKET) {
1734         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1735         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1736         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1737     } else if (addr->sa_family == AF_INET6 &&
1738                len >= sizeof(struct target_sockaddr_in6)) {
1739         struct target_sockaddr_in6 *target_in6 =
1740                (struct target_sockaddr_in6 *)target_saddr;
1741         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1742     }
1743     unlock_user(target_saddr, target_addr, len);
1744 
1745     return 0;
1746 }
1747 
1748 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1749                                            struct target_msghdr *target_msgh)
1750 {
1751     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1752     abi_long msg_controllen;
1753     abi_ulong target_cmsg_addr;
1754     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1755     socklen_t space = 0;
1756 
1757     msg_controllen = tswapal(target_msgh->msg_controllen);
1758     if (msg_controllen < sizeof (struct target_cmsghdr))
1759         goto the_end;
1760     target_cmsg_addr = tswapal(target_msgh->msg_control);
1761     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1762     target_cmsg_start = target_cmsg;
1763     if (!target_cmsg)
1764         return -TARGET_EFAULT;
1765 
1766     while (cmsg && target_cmsg) {
1767         void *data = CMSG_DATA(cmsg);
1768         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1769 
1770         int len = tswapal(target_cmsg->cmsg_len)
1771             - sizeof(struct target_cmsghdr);
1772 
1773         space += CMSG_SPACE(len);
1774         if (space > msgh->msg_controllen) {
1775             space -= CMSG_SPACE(len);
1776             /* This is a QEMU bug, since we allocated the payload
1777              * area ourselves (unlike overflow in host-to-target
1778              * conversion, which is just the guest giving us a buffer
1779              * that's too small). It can't happen for the payload types
1780              * we currently support; if it becomes an issue in future
1781              * we would need to improve our allocation strategy to
1782              * something more intelligent than "twice the size of the
1783              * target buffer we're reading from".
1784              */
1785             qemu_log_mask(LOG_UNIMP,
1786                           ("Unsupported ancillary data %d/%d: "
1787                            "unhandled msg size\n"),
1788                           tswap32(target_cmsg->cmsg_level),
1789                           tswap32(target_cmsg->cmsg_type));
1790             break;
1791         }
1792 
1793         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1794             cmsg->cmsg_level = SOL_SOCKET;
1795         } else {
1796             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1797         }
1798         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1799         cmsg->cmsg_len = CMSG_LEN(len);
1800 
1801         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1802             int *fd = (int *)data;
1803             int *target_fd = (int *)target_data;
1804             int i, numfds = len / sizeof(int);
1805 
1806             for (i = 0; i < numfds; i++) {
1807                 __get_user(fd[i], target_fd + i);
1808             }
1809         } else if (cmsg->cmsg_level == SOL_SOCKET
1810                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1811             struct ucred *cred = (struct ucred *)data;
1812             struct target_ucred *target_cred =
1813                 (struct target_ucred *)target_data;
1814 
1815             __get_user(cred->pid, &target_cred->pid);
1816             __get_user(cred->uid, &target_cred->uid);
1817             __get_user(cred->gid, &target_cred->gid);
1818         } else {
1819             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1820                           cmsg->cmsg_level, cmsg->cmsg_type);
1821             memcpy(data, target_data, len);
1822         }
1823 
1824         cmsg = CMSG_NXTHDR(msgh, cmsg);
1825         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1826                                          target_cmsg_start);
1827     }
1828     unlock_user(target_cmsg, target_cmsg_addr, 0);
1829  the_end:
1830     msgh->msg_controllen = space;
1831     return 0;
1832 }
1833 
1834 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1835                                            struct msghdr *msgh)
1836 {
1837     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1838     abi_long msg_controllen;
1839     abi_ulong target_cmsg_addr;
1840     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1841     socklen_t space = 0;
1842 
1843     msg_controllen = tswapal(target_msgh->msg_controllen);
1844     if (msg_controllen < sizeof (struct target_cmsghdr))
1845         goto the_end;
1846     target_cmsg_addr = tswapal(target_msgh->msg_control);
1847     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1848     target_cmsg_start = target_cmsg;
1849     if (!target_cmsg)
1850         return -TARGET_EFAULT;
1851 
1852     while (cmsg && target_cmsg) {
1853         void *data = CMSG_DATA(cmsg);
1854         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1855 
1856         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1857         int tgt_len, tgt_space;
1858 
1859         /* We never copy a half-header but may copy half-data;
1860          * this is Linux's behaviour in put_cmsg(). Note that
1861          * truncation here is a guest problem (which we report
1862          * to the guest via the CTRUNC bit), unlike truncation
1863          * in target_to_host_cmsg, which is a QEMU bug.
1864          */
1865         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1866             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1867             break;
1868         }
1869 
1870         if (cmsg->cmsg_level == SOL_SOCKET) {
1871             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1872         } else {
1873             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1874         }
1875         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1876 
1877         /* Payload types which need a different size of payload on
1878          * the target must adjust tgt_len here.
1879          */
1880         tgt_len = len;
1881         switch (cmsg->cmsg_level) {
1882         case SOL_SOCKET:
1883             switch (cmsg->cmsg_type) {
1884             case SO_TIMESTAMP:
1885                 tgt_len = sizeof(struct target_timeval);
1886                 break;
1887             default:
1888                 break;
1889             }
1890             break;
1891         default:
1892             break;
1893         }
1894 
1895         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1896             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1897             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1898         }
1899 
1900         /* We must now copy-and-convert len bytes of payload
1901          * into tgt_len bytes of destination space. Bear in mind
1902          * that in both source and destination we may be dealing
1903          * with a truncated value!
1904          */
1905         switch (cmsg->cmsg_level) {
1906         case SOL_SOCKET:
1907             switch (cmsg->cmsg_type) {
1908             case SCM_RIGHTS:
1909             {
1910                 int *fd = (int *)data;
1911                 int *target_fd = (int *)target_data;
1912                 int i, numfds = tgt_len / sizeof(int);
1913 
1914                 for (i = 0; i < numfds; i++) {
1915                     __put_user(fd[i], target_fd + i);
1916                 }
1917                 break;
1918             }
1919             case SO_TIMESTAMP:
1920             {
1921                 struct timeval *tv = (struct timeval *)data;
1922                 struct target_timeval *target_tv =
1923                     (struct target_timeval *)target_data;
1924 
1925                 if (len != sizeof(struct timeval) ||
1926                     tgt_len != sizeof(struct target_timeval)) {
1927                     goto unimplemented;
1928                 }
1929 
1930                 /* copy struct timeval to target */
1931                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1932                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1933                 break;
1934             }
1935             case SCM_CREDENTIALS:
1936             {
1937                 struct ucred *cred = (struct ucred *)data;
1938                 struct target_ucred *target_cred =
1939                     (struct target_ucred *)target_data;
1940 
1941                 __put_user(cred->pid, &target_cred->pid);
1942                 __put_user(cred->uid, &target_cred->uid);
1943                 __put_user(cred->gid, &target_cred->gid);
1944                 break;
1945             }
1946             default:
1947                 goto unimplemented;
1948             }
1949             break;
1950 
1951         case SOL_IP:
1952             switch (cmsg->cmsg_type) {
1953             case IP_TTL:
1954             {
1955                 uint32_t *v = (uint32_t *)data;
1956                 uint32_t *t_int = (uint32_t *)target_data;
1957 
1958                 if (len != sizeof(uint32_t) ||
1959                     tgt_len != sizeof(uint32_t)) {
1960                     goto unimplemented;
1961                 }
1962                 __put_user(*v, t_int);
1963                 break;
1964             }
1965             case IP_RECVERR:
1966             {
1967                 struct errhdr_t {
1968                    struct sock_extended_err ee;
1969                    struct sockaddr_in offender;
1970                 };
1971                 struct errhdr_t *errh = (struct errhdr_t *)data;
1972                 struct errhdr_t *target_errh =
1973                     (struct errhdr_t *)target_data;
1974 
1975                 if (len != sizeof(struct errhdr_t) ||
1976                     tgt_len != sizeof(struct errhdr_t)) {
1977                     goto unimplemented;
1978                 }
1979                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1980                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1981                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1982                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1983                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1984                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1985                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1986                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1987                     (void *) &errh->offender, sizeof(errh->offender));
1988                 break;
1989             }
1990             default:
1991                 goto unimplemented;
1992             }
1993             break;
1994 
1995         case SOL_IPV6:
1996             switch (cmsg->cmsg_type) {
1997             case IPV6_HOPLIMIT:
1998             {
1999                 uint32_t *v = (uint32_t *)data;
2000                 uint32_t *t_int = (uint32_t *)target_data;
2001 
2002                 if (len != sizeof(uint32_t) ||
2003                     tgt_len != sizeof(uint32_t)) {
2004                     goto unimplemented;
2005                 }
2006                 __put_user(*v, t_int);
2007                 break;
2008             }
2009             case IPV6_RECVERR:
2010             {
2011                 struct errhdr6_t {
2012                    struct sock_extended_err ee;
2013                    struct sockaddr_in6 offender;
2014                 };
2015                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2016                 struct errhdr6_t *target_errh =
2017                     (struct errhdr6_t *)target_data;
2018 
2019                 if (len != sizeof(struct errhdr6_t) ||
2020                     tgt_len != sizeof(struct errhdr6_t)) {
2021                     goto unimplemented;
2022                 }
2023                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2024                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2025                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2026                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2027                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2028                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2029                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2030                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2031                     (void *) &errh->offender, sizeof(errh->offender));
2032                 break;
2033             }
2034             default:
2035                 goto unimplemented;
2036             }
2037             break;
2038 
2039         default:
2040         unimplemented:
2041             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2042                           cmsg->cmsg_level, cmsg->cmsg_type);
2043             memcpy(target_data, data, MIN(len, tgt_len));
2044             if (tgt_len > len) {
2045                 memset(target_data + len, 0, tgt_len - len);
2046             }
2047         }
2048 
2049         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2050         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2051         if (msg_controllen < tgt_space) {
2052             tgt_space = msg_controllen;
2053         }
2054         msg_controllen -= tgt_space;
2055         space += tgt_space;
2056         cmsg = CMSG_NXTHDR(msgh, cmsg);
2057         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2058                                          target_cmsg_start);
2059     }
2060     unlock_user(target_cmsg, target_cmsg_addr, space);
2061  the_end:
2062     target_msgh->msg_controllen = tswapal(space);
2063     return 0;
2064 }
2065 
2066 /* do_setsockopt() Must return target values and target errnos. */
2067 static abi_long do_setsockopt(int sockfd, int level, int optname,
2068                               abi_ulong optval_addr, socklen_t optlen)
2069 {
2070     abi_long ret;
2071     int val;
2072     struct ip_mreqn *ip_mreq;
2073     struct ip_mreq_source *ip_mreq_source;
2074 
2075     switch(level) {
2076     case SOL_TCP:
2077     case SOL_UDP:
2078         /* TCP and UDP options all take an 'int' value.  */
2079         if (optlen < sizeof(uint32_t))
2080             return -TARGET_EINVAL;
2081 
2082         if (get_user_u32(val, optval_addr))
2083             return -TARGET_EFAULT;
2084         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2085         break;
2086     case SOL_IP:
2087         switch(optname) {
2088         case IP_TOS:
2089         case IP_TTL:
2090         case IP_HDRINCL:
2091         case IP_ROUTER_ALERT:
2092         case IP_RECVOPTS:
2093         case IP_RETOPTS:
2094         case IP_PKTINFO:
2095         case IP_MTU_DISCOVER:
2096         case IP_RECVERR:
2097         case IP_RECVTTL:
2098         case IP_RECVTOS:
2099 #ifdef IP_FREEBIND
2100         case IP_FREEBIND:
2101 #endif
2102         case IP_MULTICAST_TTL:
2103         case IP_MULTICAST_LOOP:
2104             val = 0;
2105             if (optlen >= sizeof(uint32_t)) {
2106                 if (get_user_u32(val, optval_addr))
2107                     return -TARGET_EFAULT;
2108             } else if (optlen >= 1) {
2109                 if (get_user_u8(val, optval_addr))
2110                     return -TARGET_EFAULT;
2111             }
2112             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2113             break;
2114         case IP_ADD_MEMBERSHIP:
2115         case IP_DROP_MEMBERSHIP:
2116             if (optlen < sizeof (struct target_ip_mreq) ||
2117                 optlen > sizeof (struct target_ip_mreqn))
2118                 return -TARGET_EINVAL;
2119 
2120             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2121             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2122             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2123             break;
2124 
2125         case IP_BLOCK_SOURCE:
2126         case IP_UNBLOCK_SOURCE:
2127         case IP_ADD_SOURCE_MEMBERSHIP:
2128         case IP_DROP_SOURCE_MEMBERSHIP:
2129             if (optlen != sizeof (struct target_ip_mreq_source))
2130                 return -TARGET_EINVAL;
2131 
2132             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2133             if (!ip_mreq_source) {
2134                 return -TARGET_EFAULT;
2135             }
2136             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2137             unlock_user (ip_mreq_source, optval_addr, 0);
2138             break;
2139 
2140         default:
2141             goto unimplemented;
2142         }
2143         break;
2144     case SOL_IPV6:
2145         switch (optname) {
2146         case IPV6_MTU_DISCOVER:
2147         case IPV6_MTU:
2148         case IPV6_V6ONLY:
2149         case IPV6_RECVPKTINFO:
2150         case IPV6_UNICAST_HOPS:
2151         case IPV6_MULTICAST_HOPS:
2152         case IPV6_MULTICAST_LOOP:
2153         case IPV6_RECVERR:
2154         case IPV6_RECVHOPLIMIT:
2155         case IPV6_2292HOPLIMIT:
2156         case IPV6_CHECKSUM:
2157         case IPV6_ADDRFORM:
2158         case IPV6_2292PKTINFO:
2159         case IPV6_RECVTCLASS:
2160         case IPV6_RECVRTHDR:
2161         case IPV6_2292RTHDR:
2162         case IPV6_RECVHOPOPTS:
2163         case IPV6_2292HOPOPTS:
2164         case IPV6_RECVDSTOPTS:
2165         case IPV6_2292DSTOPTS:
2166         case IPV6_TCLASS:
2167         case IPV6_ADDR_PREFERENCES:
2168 #ifdef IPV6_RECVPATHMTU
2169         case IPV6_RECVPATHMTU:
2170 #endif
2171 #ifdef IPV6_TRANSPARENT
2172         case IPV6_TRANSPARENT:
2173 #endif
2174 #ifdef IPV6_FREEBIND
2175         case IPV6_FREEBIND:
2176 #endif
2177 #ifdef IPV6_RECVORIGDSTADDR
2178         case IPV6_RECVORIGDSTADDR:
2179 #endif
2180             val = 0;
2181             if (optlen < sizeof(uint32_t)) {
2182                 return -TARGET_EINVAL;
2183             }
2184             if (get_user_u32(val, optval_addr)) {
2185                 return -TARGET_EFAULT;
2186             }
2187             ret = get_errno(setsockopt(sockfd, level, optname,
2188                                        &val, sizeof(val)));
2189             break;
2190         case IPV6_PKTINFO:
2191         {
2192             struct in6_pktinfo pki;
2193 
2194             if (optlen < sizeof(pki)) {
2195                 return -TARGET_EINVAL;
2196             }
2197 
2198             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2199                 return -TARGET_EFAULT;
2200             }
2201 
2202             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2203 
2204             ret = get_errno(setsockopt(sockfd, level, optname,
2205                                        &pki, sizeof(pki)));
2206             break;
2207         }
2208         case IPV6_ADD_MEMBERSHIP:
2209         case IPV6_DROP_MEMBERSHIP:
2210         {
2211             struct ipv6_mreq ipv6mreq;
2212 
2213             if (optlen < sizeof(ipv6mreq)) {
2214                 return -TARGET_EINVAL;
2215             }
2216 
2217             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2218                 return -TARGET_EFAULT;
2219             }
2220 
2221             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2222 
2223             ret = get_errno(setsockopt(sockfd, level, optname,
2224                                        &ipv6mreq, sizeof(ipv6mreq)));
2225             break;
2226         }
2227         default:
2228             goto unimplemented;
2229         }
2230         break;
2231     case SOL_ICMPV6:
2232         switch (optname) {
2233         case ICMPV6_FILTER:
2234         {
2235             struct icmp6_filter icmp6f;
2236 
2237             if (optlen > sizeof(icmp6f)) {
2238                 optlen = sizeof(icmp6f);
2239             }
2240 
2241             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2242                 return -TARGET_EFAULT;
2243             }
2244 
2245             for (val = 0; val < 8; val++) {
2246                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2247             }
2248 
2249             ret = get_errno(setsockopt(sockfd, level, optname,
2250                                        &icmp6f, optlen));
2251             break;
2252         }
2253         default:
2254             goto unimplemented;
2255         }
2256         break;
2257     case SOL_RAW:
2258         switch (optname) {
2259         case ICMP_FILTER:
2260         case IPV6_CHECKSUM:
2261             /* those take an u32 value */
2262             if (optlen < sizeof(uint32_t)) {
2263                 return -TARGET_EINVAL;
2264             }
2265 
2266             if (get_user_u32(val, optval_addr)) {
2267                 return -TARGET_EFAULT;
2268             }
2269             ret = get_errno(setsockopt(sockfd, level, optname,
2270                                        &val, sizeof(val)));
2271             break;
2272 
2273         default:
2274             goto unimplemented;
2275         }
2276         break;
2277 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2278     case SOL_ALG:
2279         switch (optname) {
2280         case ALG_SET_KEY:
2281         {
2282             char *alg_key = g_malloc(optlen);
2283 
2284             if (!alg_key) {
2285                 return -TARGET_ENOMEM;
2286             }
2287             if (copy_from_user(alg_key, optval_addr, optlen)) {
2288                 g_free(alg_key);
2289                 return -TARGET_EFAULT;
2290             }
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        alg_key, optlen));
2293             g_free(alg_key);
2294             break;
2295         }
2296         case ALG_SET_AEAD_AUTHSIZE:
2297         {
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        NULL, optlen));
2300             break;
2301         }
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306 #endif
2307     case TARGET_SOL_SOCKET:
2308         switch (optname) {
2309         case TARGET_SO_RCVTIMEO:
2310         {
2311                 struct timeval tv;
2312 
2313                 optname = SO_RCVTIMEO;
2314 
2315 set_timeout:
2316                 if (optlen != sizeof(struct target_timeval)) {
2317                     return -TARGET_EINVAL;
2318                 }
2319 
2320                 if (copy_from_user_timeval(&tv, optval_addr)) {
2321                     return -TARGET_EFAULT;
2322                 }
2323 
2324                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2325                                 &tv, sizeof(tv)));
2326                 return ret;
2327         }
2328         case TARGET_SO_SNDTIMEO:
2329                 optname = SO_SNDTIMEO;
2330                 goto set_timeout;
2331         case TARGET_SO_ATTACH_FILTER:
2332         {
2333                 struct target_sock_fprog *tfprog;
2334                 struct target_sock_filter *tfilter;
2335                 struct sock_fprog fprog;
2336                 struct sock_filter *filter;
2337                 int i;
2338 
2339                 if (optlen != sizeof(*tfprog)) {
2340                     return -TARGET_EINVAL;
2341                 }
2342                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2343                     return -TARGET_EFAULT;
2344                 }
2345                 if (!lock_user_struct(VERIFY_READ, tfilter,
2346                                       tswapal(tfprog->filter), 0)) {
2347                     unlock_user_struct(tfprog, optval_addr, 1);
2348                     return -TARGET_EFAULT;
2349                 }
2350 
2351                 fprog.len = tswap16(tfprog->len);
2352                 filter = g_try_new(struct sock_filter, fprog.len);
2353                 if (filter == NULL) {
2354                     unlock_user_struct(tfilter, tfprog->filter, 1);
2355                     unlock_user_struct(tfprog, optval_addr, 1);
2356                     return -TARGET_ENOMEM;
2357                 }
2358                 for (i = 0; i < fprog.len; i++) {
2359                     filter[i].code = tswap16(tfilter[i].code);
2360                     filter[i].jt = tfilter[i].jt;
2361                     filter[i].jf = tfilter[i].jf;
2362                     filter[i].k = tswap32(tfilter[i].k);
2363                 }
2364                 fprog.filter = filter;
2365 
2366                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2367                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2368                 g_free(filter);
2369 
2370                 unlock_user_struct(tfilter, tfprog->filter, 1);
2371                 unlock_user_struct(tfprog, optval_addr, 1);
2372                 return ret;
2373         }
2374 	case TARGET_SO_BINDTODEVICE:
2375 	{
2376 		char *dev_ifname, *addr_ifname;
2377 
2378 		if (optlen > IFNAMSIZ - 1) {
2379 		    optlen = IFNAMSIZ - 1;
2380 		}
2381 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2382 		if (!dev_ifname) {
2383 		    return -TARGET_EFAULT;
2384 		}
2385 		optname = SO_BINDTODEVICE;
2386 		addr_ifname = alloca(IFNAMSIZ);
2387 		memcpy(addr_ifname, dev_ifname, optlen);
2388 		addr_ifname[optlen] = 0;
2389 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2390                                            addr_ifname, optlen));
2391 		unlock_user (dev_ifname, optval_addr, 0);
2392 		return ret;
2393 	}
2394         case TARGET_SO_LINGER:
2395         {
2396                 struct linger lg;
2397                 struct target_linger *tlg;
2398 
2399                 if (optlen != sizeof(struct target_linger)) {
2400                     return -TARGET_EINVAL;
2401                 }
2402                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2403                     return -TARGET_EFAULT;
2404                 }
2405                 __get_user(lg.l_onoff, &tlg->l_onoff);
2406                 __get_user(lg.l_linger, &tlg->l_linger);
2407                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2408                                 &lg, sizeof(lg)));
2409                 unlock_user_struct(tlg, optval_addr, 0);
2410                 return ret;
2411         }
2412             /* Options with 'int' argument.  */
2413         case TARGET_SO_DEBUG:
2414 		optname = SO_DEBUG;
2415 		break;
2416         case TARGET_SO_REUSEADDR:
2417 		optname = SO_REUSEADDR;
2418 		break;
2419 #ifdef SO_REUSEPORT
2420         case TARGET_SO_REUSEPORT:
2421                 optname = SO_REUSEPORT;
2422                 break;
2423 #endif
2424         case TARGET_SO_TYPE:
2425 		optname = SO_TYPE;
2426 		break;
2427         case TARGET_SO_ERROR:
2428 		optname = SO_ERROR;
2429 		break;
2430         case TARGET_SO_DONTROUTE:
2431 		optname = SO_DONTROUTE;
2432 		break;
2433         case TARGET_SO_BROADCAST:
2434 		optname = SO_BROADCAST;
2435 		break;
2436         case TARGET_SO_SNDBUF:
2437 		optname = SO_SNDBUF;
2438 		break;
2439         case TARGET_SO_SNDBUFFORCE:
2440                 optname = SO_SNDBUFFORCE;
2441                 break;
2442         case TARGET_SO_RCVBUF:
2443 		optname = SO_RCVBUF;
2444 		break;
2445         case TARGET_SO_RCVBUFFORCE:
2446                 optname = SO_RCVBUFFORCE;
2447                 break;
2448         case TARGET_SO_KEEPALIVE:
2449 		optname = SO_KEEPALIVE;
2450 		break;
2451         case TARGET_SO_OOBINLINE:
2452 		optname = SO_OOBINLINE;
2453 		break;
2454         case TARGET_SO_NO_CHECK:
2455 		optname = SO_NO_CHECK;
2456 		break;
2457         case TARGET_SO_PRIORITY:
2458 		optname = SO_PRIORITY;
2459 		break;
2460 #ifdef SO_BSDCOMPAT
2461         case TARGET_SO_BSDCOMPAT:
2462 		optname = SO_BSDCOMPAT;
2463 		break;
2464 #endif
2465         case TARGET_SO_PASSCRED:
2466 		optname = SO_PASSCRED;
2467 		break;
2468         case TARGET_SO_PASSSEC:
2469                 optname = SO_PASSSEC;
2470                 break;
2471         case TARGET_SO_TIMESTAMP:
2472 		optname = SO_TIMESTAMP;
2473 		break;
2474         case TARGET_SO_RCVLOWAT:
2475 		optname = SO_RCVLOWAT;
2476 		break;
2477         default:
2478             goto unimplemented;
2479         }
2480 	if (optlen < sizeof(uint32_t))
2481             return -TARGET_EINVAL;
2482 
2483 	if (get_user_u32(val, optval_addr))
2484             return -TARGET_EFAULT;
2485 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2486         break;
2487 #ifdef SOL_NETLINK
2488     case SOL_NETLINK:
2489         switch (optname) {
2490         case NETLINK_PKTINFO:
2491         case NETLINK_ADD_MEMBERSHIP:
2492         case NETLINK_DROP_MEMBERSHIP:
2493         case NETLINK_BROADCAST_ERROR:
2494         case NETLINK_NO_ENOBUFS:
2495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2496         case NETLINK_LISTEN_ALL_NSID:
2497         case NETLINK_CAP_ACK:
2498 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2499 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2500         case NETLINK_EXT_ACK:
2501 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2502 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2503         case NETLINK_GET_STRICT_CHK:
2504 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2505             break;
2506         default:
2507             goto unimplemented;
2508         }
2509         val = 0;
2510         if (optlen < sizeof(uint32_t)) {
2511             return -TARGET_EINVAL;
2512         }
2513         if (get_user_u32(val, optval_addr)) {
2514             return -TARGET_EFAULT;
2515         }
2516         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2517                                    sizeof(val)));
2518         break;
2519 #endif /* SOL_NETLINK */
2520     default:
2521     unimplemented:
2522         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2523                       level, optname);
2524         ret = -TARGET_ENOPROTOOPT;
2525     }
2526     return ret;
2527 }
2528 
2529 /* do_getsockopt() Must return target values and target errnos. */
2530 static abi_long do_getsockopt(int sockfd, int level, int optname,
2531                               abi_ulong optval_addr, abi_ulong optlen)
2532 {
2533     abi_long ret;
2534     int len, val;
2535     socklen_t lv;
2536 
2537     switch(level) {
2538     case TARGET_SOL_SOCKET:
2539         level = SOL_SOCKET;
2540         switch (optname) {
2541         /* These don't just return a single integer */
2542         case TARGET_SO_PEERNAME:
2543             goto unimplemented;
2544         case TARGET_SO_RCVTIMEO: {
2545             struct timeval tv;
2546             socklen_t tvlen;
2547 
2548             optname = SO_RCVTIMEO;
2549 
2550 get_timeout:
2551             if (get_user_u32(len, optlen)) {
2552                 return -TARGET_EFAULT;
2553             }
2554             if (len < 0) {
2555                 return -TARGET_EINVAL;
2556             }
2557 
2558             tvlen = sizeof(tv);
2559             ret = get_errno(getsockopt(sockfd, level, optname,
2560                                        &tv, &tvlen));
2561             if (ret < 0) {
2562                 return ret;
2563             }
2564             if (len > sizeof(struct target_timeval)) {
2565                 len = sizeof(struct target_timeval);
2566             }
2567             if (copy_to_user_timeval(optval_addr, &tv)) {
2568                 return -TARGET_EFAULT;
2569             }
2570             if (put_user_u32(len, optlen)) {
2571                 return -TARGET_EFAULT;
2572             }
2573             break;
2574         }
2575         case TARGET_SO_SNDTIMEO:
2576             optname = SO_SNDTIMEO;
2577             goto get_timeout;
2578         case TARGET_SO_PEERCRED: {
2579             struct ucred cr;
2580             socklen_t crlen;
2581             struct target_ucred *tcr;
2582 
2583             if (get_user_u32(len, optlen)) {
2584                 return -TARGET_EFAULT;
2585             }
2586             if (len < 0) {
2587                 return -TARGET_EINVAL;
2588             }
2589 
2590             crlen = sizeof(cr);
2591             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2592                                        &cr, &crlen));
2593             if (ret < 0) {
2594                 return ret;
2595             }
2596             if (len > crlen) {
2597                 len = crlen;
2598             }
2599             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             __put_user(cr.pid, &tcr->pid);
2603             __put_user(cr.uid, &tcr->uid);
2604             __put_user(cr.gid, &tcr->gid);
2605             unlock_user_struct(tcr, optval_addr, 1);
2606             if (put_user_u32(len, optlen)) {
2607                 return -TARGET_EFAULT;
2608             }
2609             break;
2610         }
2611         case TARGET_SO_PEERSEC: {
2612             char *name;
2613 
2614             if (get_user_u32(len, optlen)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             if (len < 0) {
2618                 return -TARGET_EINVAL;
2619             }
2620             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2621             if (!name) {
2622                 return -TARGET_EFAULT;
2623             }
2624             lv = len;
2625             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2626                                        name, &lv));
2627             if (put_user_u32(lv, optlen)) {
2628                 ret = -TARGET_EFAULT;
2629             }
2630             unlock_user(name, optval_addr, lv);
2631             break;
2632         }
2633         case TARGET_SO_LINGER:
2634         {
2635             struct linger lg;
2636             socklen_t lglen;
2637             struct target_linger *tlg;
2638 
2639             if (get_user_u32(len, optlen)) {
2640                 return -TARGET_EFAULT;
2641             }
2642             if (len < 0) {
2643                 return -TARGET_EINVAL;
2644             }
2645 
2646             lglen = sizeof(lg);
2647             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2648                                        &lg, &lglen));
2649             if (ret < 0) {
2650                 return ret;
2651             }
2652             if (len > lglen) {
2653                 len = lglen;
2654             }
2655             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             __put_user(lg.l_onoff, &tlg->l_onoff);
2659             __put_user(lg.l_linger, &tlg->l_linger);
2660             unlock_user_struct(tlg, optval_addr, 1);
2661             if (put_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             break;
2665         }
2666         /* Options with 'int' argument.  */
2667         case TARGET_SO_DEBUG:
2668             optname = SO_DEBUG;
2669             goto int_case;
2670         case TARGET_SO_REUSEADDR:
2671             optname = SO_REUSEADDR;
2672             goto int_case;
2673 #ifdef SO_REUSEPORT
2674         case TARGET_SO_REUSEPORT:
2675             optname = SO_REUSEPORT;
2676             goto int_case;
2677 #endif
2678         case TARGET_SO_TYPE:
2679             optname = SO_TYPE;
2680             goto int_case;
2681         case TARGET_SO_ERROR:
2682             optname = SO_ERROR;
2683             goto int_case;
2684         case TARGET_SO_DONTROUTE:
2685             optname = SO_DONTROUTE;
2686             goto int_case;
2687         case TARGET_SO_BROADCAST:
2688             optname = SO_BROADCAST;
2689             goto int_case;
2690         case TARGET_SO_SNDBUF:
2691             optname = SO_SNDBUF;
2692             goto int_case;
2693         case TARGET_SO_RCVBUF:
2694             optname = SO_RCVBUF;
2695             goto int_case;
2696         case TARGET_SO_KEEPALIVE:
2697             optname = SO_KEEPALIVE;
2698             goto int_case;
2699         case TARGET_SO_OOBINLINE:
2700             optname = SO_OOBINLINE;
2701             goto int_case;
2702         case TARGET_SO_NO_CHECK:
2703             optname = SO_NO_CHECK;
2704             goto int_case;
2705         case TARGET_SO_PRIORITY:
2706             optname = SO_PRIORITY;
2707             goto int_case;
2708 #ifdef SO_BSDCOMPAT
2709         case TARGET_SO_BSDCOMPAT:
2710             optname = SO_BSDCOMPAT;
2711             goto int_case;
2712 #endif
2713         case TARGET_SO_PASSCRED:
2714             optname = SO_PASSCRED;
2715             goto int_case;
2716         case TARGET_SO_TIMESTAMP:
2717             optname = SO_TIMESTAMP;
2718             goto int_case;
2719         case TARGET_SO_RCVLOWAT:
2720             optname = SO_RCVLOWAT;
2721             goto int_case;
2722         case TARGET_SO_ACCEPTCONN:
2723             optname = SO_ACCEPTCONN;
2724             goto int_case;
2725         case TARGET_SO_PROTOCOL:
2726             optname = SO_PROTOCOL;
2727             goto int_case;
2728         case TARGET_SO_DOMAIN:
2729             optname = SO_DOMAIN;
2730             goto int_case;
2731         default:
2732             goto int_case;
2733         }
2734         break;
2735     case SOL_TCP:
2736     case SOL_UDP:
2737         /* TCP and UDP options all take an 'int' value.  */
2738     int_case:
2739         if (get_user_u32(len, optlen))
2740             return -TARGET_EFAULT;
2741         if (len < 0)
2742             return -TARGET_EINVAL;
2743         lv = sizeof(lv);
2744         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2745         if (ret < 0)
2746             return ret;
2747         if (optname == SO_TYPE) {
2748             val = host_to_target_sock_type(val);
2749         }
2750         if (len > lv)
2751             len = lv;
2752         if (len == 4) {
2753             if (put_user_u32(val, optval_addr))
2754                 return -TARGET_EFAULT;
2755         } else {
2756             if (put_user_u8(val, optval_addr))
2757                 return -TARGET_EFAULT;
2758         }
2759         if (put_user_u32(len, optlen))
2760             return -TARGET_EFAULT;
2761         break;
2762     case SOL_IP:
2763         switch(optname) {
2764         case IP_TOS:
2765         case IP_TTL:
2766         case IP_HDRINCL:
2767         case IP_ROUTER_ALERT:
2768         case IP_RECVOPTS:
2769         case IP_RETOPTS:
2770         case IP_PKTINFO:
2771         case IP_MTU_DISCOVER:
2772         case IP_RECVERR:
2773         case IP_RECVTOS:
2774 #ifdef IP_FREEBIND
2775         case IP_FREEBIND:
2776 #endif
2777         case IP_MULTICAST_TTL:
2778         case IP_MULTICAST_LOOP:
2779             if (get_user_u32(len, optlen))
2780                 return -TARGET_EFAULT;
2781             if (len < 0)
2782                 return -TARGET_EINVAL;
2783             lv = sizeof(lv);
2784             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2785             if (ret < 0)
2786                 return ret;
2787             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2788                 len = 1;
2789                 if (put_user_u32(len, optlen)
2790                     || put_user_u8(val, optval_addr))
2791                     return -TARGET_EFAULT;
2792             } else {
2793                 if (len > sizeof(int))
2794                     len = sizeof(int);
2795                 if (put_user_u32(len, optlen)
2796                     || put_user_u32(val, optval_addr))
2797                     return -TARGET_EFAULT;
2798             }
2799             break;
2800         default:
2801             ret = -TARGET_ENOPROTOOPT;
2802             break;
2803         }
2804         break;
2805     case SOL_IPV6:
2806         switch (optname) {
2807         case IPV6_MTU_DISCOVER:
2808         case IPV6_MTU:
2809         case IPV6_V6ONLY:
2810         case IPV6_RECVPKTINFO:
2811         case IPV6_UNICAST_HOPS:
2812         case IPV6_MULTICAST_HOPS:
2813         case IPV6_MULTICAST_LOOP:
2814         case IPV6_RECVERR:
2815         case IPV6_RECVHOPLIMIT:
2816         case IPV6_2292HOPLIMIT:
2817         case IPV6_CHECKSUM:
2818         case IPV6_ADDRFORM:
2819         case IPV6_2292PKTINFO:
2820         case IPV6_RECVTCLASS:
2821         case IPV6_RECVRTHDR:
2822         case IPV6_2292RTHDR:
2823         case IPV6_RECVHOPOPTS:
2824         case IPV6_2292HOPOPTS:
2825         case IPV6_RECVDSTOPTS:
2826         case IPV6_2292DSTOPTS:
2827         case IPV6_TCLASS:
2828         case IPV6_ADDR_PREFERENCES:
2829 #ifdef IPV6_RECVPATHMTU
2830         case IPV6_RECVPATHMTU:
2831 #endif
2832 #ifdef IPV6_TRANSPARENT
2833         case IPV6_TRANSPARENT:
2834 #endif
2835 #ifdef IPV6_FREEBIND
2836         case IPV6_FREEBIND:
2837 #endif
2838 #ifdef IPV6_RECVORIGDSTADDR
2839         case IPV6_RECVORIGDSTADDR:
2840 #endif
2841             if (get_user_u32(len, optlen))
2842                 return -TARGET_EFAULT;
2843             if (len < 0)
2844                 return -TARGET_EINVAL;
2845             lv = sizeof(lv);
2846             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2847             if (ret < 0)
2848                 return ret;
2849             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2850                 len = 1;
2851                 if (put_user_u32(len, optlen)
2852                     || put_user_u8(val, optval_addr))
2853                     return -TARGET_EFAULT;
2854             } else {
2855                 if (len > sizeof(int))
2856                     len = sizeof(int);
2857                 if (put_user_u32(len, optlen)
2858                     || put_user_u32(val, optval_addr))
2859                     return -TARGET_EFAULT;
2860             }
2861             break;
2862         default:
2863             ret = -TARGET_ENOPROTOOPT;
2864             break;
2865         }
2866         break;
2867 #ifdef SOL_NETLINK
2868     case SOL_NETLINK:
2869         switch (optname) {
2870         case NETLINK_PKTINFO:
2871         case NETLINK_BROADCAST_ERROR:
2872         case NETLINK_NO_ENOBUFS:
2873 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2874         case NETLINK_LISTEN_ALL_NSID:
2875         case NETLINK_CAP_ACK:
2876 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2878         case NETLINK_EXT_ACK:
2879 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2880 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2881         case NETLINK_GET_STRICT_CHK:
2882 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2883             if (get_user_u32(len, optlen)) {
2884                 return -TARGET_EFAULT;
2885             }
2886             if (len != sizeof(val)) {
2887                 return -TARGET_EINVAL;
2888             }
2889             lv = len;
2890             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2891             if (ret < 0) {
2892                 return ret;
2893             }
2894             if (put_user_u32(lv, optlen)
2895                 || put_user_u32(val, optval_addr)) {
2896                 return -TARGET_EFAULT;
2897             }
2898             break;
2899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2900         case NETLINK_LIST_MEMBERSHIPS:
2901         {
2902             uint32_t *results;
2903             int i;
2904             if (get_user_u32(len, optlen)) {
2905                 return -TARGET_EFAULT;
2906             }
2907             if (len < 0) {
2908                 return -TARGET_EINVAL;
2909             }
2910             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2911             if (!results && len > 0) {
2912                 return -TARGET_EFAULT;
2913             }
2914             lv = len;
2915             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2916             if (ret < 0) {
2917                 unlock_user(results, optval_addr, 0);
2918                 return ret;
2919             }
2920             /* swap host endianess to target endianess. */
2921             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2922                 results[i] = tswap32(results[i]);
2923             }
2924             if (put_user_u32(lv, optlen)) {
2925                 return -TARGET_EFAULT;
2926             }
2927             unlock_user(results, optval_addr, 0);
2928             break;
2929         }
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2931         default:
2932             goto unimplemented;
2933         }
2934         break;
2935 #endif /* SOL_NETLINK */
2936     default:
2937     unimplemented:
2938         qemu_log_mask(LOG_UNIMP,
2939                       "getsockopt level=%d optname=%d not yet supported\n",
2940                       level, optname);
2941         ret = -TARGET_EOPNOTSUPP;
2942         break;
2943     }
2944     return ret;
2945 }
2946 
2947 /* Convert target low/high pair representing file offset into the host
2948  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2949  * as the kernel doesn't handle them either.
2950  */
2951 static void target_to_host_low_high(abi_ulong tlow,
2952                                     abi_ulong thigh,
2953                                     unsigned long *hlow,
2954                                     unsigned long *hhigh)
2955 {
2956     uint64_t off = tlow |
2957         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2958         TARGET_LONG_BITS / 2;
2959 
2960     *hlow = off;
2961     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2962 }
2963 
2964 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2965                                 abi_ulong count, int copy)
2966 {
2967     struct target_iovec *target_vec;
2968     struct iovec *vec;
2969     abi_ulong total_len, max_len;
2970     int i;
2971     int err = 0;
2972     bool bad_address = false;
2973 
2974     if (count == 0) {
2975         errno = 0;
2976         return NULL;
2977     }
2978     if (count > IOV_MAX) {
2979         errno = EINVAL;
2980         return NULL;
2981     }
2982 
2983     vec = g_try_new0(struct iovec, count);
2984     if (vec == NULL) {
2985         errno = ENOMEM;
2986         return NULL;
2987     }
2988 
2989     target_vec = lock_user(VERIFY_READ, target_addr,
2990                            count * sizeof(struct target_iovec), 1);
2991     if (target_vec == NULL) {
2992         err = EFAULT;
2993         goto fail2;
2994     }
2995 
2996     /* ??? If host page size > target page size, this will result in a
2997        value larger than what we can actually support.  */
2998     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2999     total_len = 0;
3000 
3001     for (i = 0; i < count; i++) {
3002         abi_ulong base = tswapal(target_vec[i].iov_base);
3003         abi_long len = tswapal(target_vec[i].iov_len);
3004 
3005         if (len < 0) {
3006             err = EINVAL;
3007             goto fail;
3008         } else if (len == 0) {
3009             /* Zero length pointer is ignored.  */
3010             vec[i].iov_base = 0;
3011         } else {
3012             vec[i].iov_base = lock_user(type, base, len, copy);
3013             /* If the first buffer pointer is bad, this is a fault.  But
3014              * subsequent bad buffers will result in a partial write; this
3015              * is realized by filling the vector with null pointers and
3016              * zero lengths. */
3017             if (!vec[i].iov_base) {
3018                 if (i == 0) {
3019                     err = EFAULT;
3020                     goto fail;
3021                 } else {
3022                     bad_address = true;
3023                 }
3024             }
3025             if (bad_address) {
3026                 len = 0;
3027             }
3028             if (len > max_len - total_len) {
3029                 len = max_len - total_len;
3030             }
3031         }
3032         vec[i].iov_len = len;
3033         total_len += len;
3034     }
3035 
3036     unlock_user(target_vec, target_addr, 0);
3037     return vec;
3038 
3039  fail:
3040     while (--i >= 0) {
3041         if (tswapal(target_vec[i].iov_len) > 0) {
3042             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3043         }
3044     }
3045     unlock_user(target_vec, target_addr, 0);
3046  fail2:
3047     g_free(vec);
3048     errno = err;
3049     return NULL;
3050 }
3051 
3052 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3053                          abi_ulong count, int copy)
3054 {
3055     struct target_iovec *target_vec;
3056     int i;
3057 
3058     target_vec = lock_user(VERIFY_READ, target_addr,
3059                            count * sizeof(struct target_iovec), 1);
3060     if (target_vec) {
3061         for (i = 0; i < count; i++) {
3062             abi_ulong base = tswapal(target_vec[i].iov_base);
3063             abi_long len = tswapal(target_vec[i].iov_len);
3064             if (len < 0) {
3065                 break;
3066             }
3067             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3068         }
3069         unlock_user(target_vec, target_addr, 0);
3070     }
3071 
3072     g_free(vec);
3073 }
3074 
3075 static inline int target_to_host_sock_type(int *type)
3076 {
3077     int host_type = 0;
3078     int target_type = *type;
3079 
3080     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3081     case TARGET_SOCK_DGRAM:
3082         host_type = SOCK_DGRAM;
3083         break;
3084     case TARGET_SOCK_STREAM:
3085         host_type = SOCK_STREAM;
3086         break;
3087     default:
3088         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3089         break;
3090     }
3091     if (target_type & TARGET_SOCK_CLOEXEC) {
3092 #if defined(SOCK_CLOEXEC)
3093         host_type |= SOCK_CLOEXEC;
3094 #else
3095         return -TARGET_EINVAL;
3096 #endif
3097     }
3098     if (target_type & TARGET_SOCK_NONBLOCK) {
3099 #if defined(SOCK_NONBLOCK)
3100         host_type |= SOCK_NONBLOCK;
3101 #elif !defined(O_NONBLOCK)
3102         return -TARGET_EINVAL;
3103 #endif
3104     }
3105     *type = host_type;
3106     return 0;
3107 }
3108 
3109 /* Try to emulate socket type flags after socket creation.  */
3110 static int sock_flags_fixup(int fd, int target_type)
3111 {
3112 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3113     if (target_type & TARGET_SOCK_NONBLOCK) {
3114         int flags = fcntl(fd, F_GETFL);
3115         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3116             close(fd);
3117             return -TARGET_EINVAL;
3118         }
3119     }
3120 #endif
3121     return fd;
3122 }
3123 
3124 /* do_socket() Must return target values and target errnos. */
3125 static abi_long do_socket(int domain, int type, int protocol)
3126 {
3127     int target_type = type;
3128     int ret;
3129 
3130     ret = target_to_host_sock_type(&type);
3131     if (ret) {
3132         return ret;
3133     }
3134 
3135     if (domain == PF_NETLINK && !(
3136 #ifdef CONFIG_RTNETLINK
3137          protocol == NETLINK_ROUTE ||
3138 #endif
3139          protocol == NETLINK_KOBJECT_UEVENT ||
3140          protocol == NETLINK_AUDIT)) {
3141         return -TARGET_EPROTONOSUPPORT;
3142     }
3143 
3144     if (domain == AF_PACKET ||
3145         (domain == AF_INET && type == SOCK_PACKET)) {
3146         protocol = tswap16(protocol);
3147     }
3148 
3149     ret = get_errno(socket(domain, type, protocol));
3150     if (ret >= 0) {
3151         ret = sock_flags_fixup(ret, target_type);
3152         if (type == SOCK_PACKET) {
3153             /* Manage an obsolete case :
3154              * if socket type is SOCK_PACKET, bind by name
3155              */
3156             fd_trans_register(ret, &target_packet_trans);
3157         } else if (domain == PF_NETLINK) {
3158             switch (protocol) {
3159 #ifdef CONFIG_RTNETLINK
3160             case NETLINK_ROUTE:
3161                 fd_trans_register(ret, &target_netlink_route_trans);
3162                 break;
3163 #endif
3164             case NETLINK_KOBJECT_UEVENT:
3165                 /* nothing to do: messages are strings */
3166                 break;
3167             case NETLINK_AUDIT:
3168                 fd_trans_register(ret, &target_netlink_audit_trans);
3169                 break;
3170             default:
3171                 g_assert_not_reached();
3172             }
3173         }
3174     }
3175     return ret;
3176 }
3177 
3178 /* do_bind() Must return target values and target errnos. */
3179 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3180                         socklen_t addrlen)
3181 {
3182     void *addr;
3183     abi_long ret;
3184 
3185     if ((int)addrlen < 0) {
3186         return -TARGET_EINVAL;
3187     }
3188 
3189     addr = alloca(addrlen+1);
3190 
3191     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3192     if (ret)
3193         return ret;
3194 
3195     return get_errno(bind(sockfd, addr, addrlen));
3196 }
3197 
3198 /* do_connect() Must return target values and target errnos. */
3199 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3200                            socklen_t addrlen)
3201 {
3202     void *addr;
3203     abi_long ret;
3204 
3205     if ((int)addrlen < 0) {
3206         return -TARGET_EINVAL;
3207     }
3208 
3209     addr = alloca(addrlen+1);
3210 
3211     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3212     if (ret)
3213         return ret;
3214 
3215     return get_errno(safe_connect(sockfd, addr, addrlen));
3216 }
3217 
3218 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3219 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3220                                       int flags, int send)
3221 {
3222     abi_long ret, len;
3223     struct msghdr msg;
3224     abi_ulong count;
3225     struct iovec *vec;
3226     abi_ulong target_vec;
3227 
3228     if (msgp->msg_name) {
3229         msg.msg_namelen = tswap32(msgp->msg_namelen);
3230         msg.msg_name = alloca(msg.msg_namelen+1);
3231         ret = target_to_host_sockaddr(fd, msg.msg_name,
3232                                       tswapal(msgp->msg_name),
3233                                       msg.msg_namelen);
3234         if (ret == -TARGET_EFAULT) {
3235             /* For connected sockets msg_name and msg_namelen must
3236              * be ignored, so returning EFAULT immediately is wrong.
3237              * Instead, pass a bad msg_name to the host kernel, and
3238              * let it decide whether to return EFAULT or not.
3239              */
3240             msg.msg_name = (void *)-1;
3241         } else if (ret) {
3242             goto out2;
3243         }
3244     } else {
3245         msg.msg_name = NULL;
3246         msg.msg_namelen = 0;
3247     }
3248     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3249     msg.msg_control = alloca(msg.msg_controllen);
3250     memset(msg.msg_control, 0, msg.msg_controllen);
3251 
3252     msg.msg_flags = tswap32(msgp->msg_flags);
3253 
3254     count = tswapal(msgp->msg_iovlen);
3255     target_vec = tswapal(msgp->msg_iov);
3256 
3257     if (count > IOV_MAX) {
3258         /* sendrcvmsg returns a different errno for this condition than
3259          * readv/writev, so we must catch it here before lock_iovec() does.
3260          */
3261         ret = -TARGET_EMSGSIZE;
3262         goto out2;
3263     }
3264 
3265     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3266                      target_vec, count, send);
3267     if (vec == NULL) {
3268         ret = -host_to_target_errno(errno);
3269         goto out2;
3270     }
3271     msg.msg_iovlen = count;
3272     msg.msg_iov = vec;
3273 
3274     if (send) {
3275         if (fd_trans_target_to_host_data(fd)) {
3276             void *host_msg;
3277 
3278             host_msg = g_malloc(msg.msg_iov->iov_len);
3279             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3280             ret = fd_trans_target_to_host_data(fd)(host_msg,
3281                                                    msg.msg_iov->iov_len);
3282             if (ret >= 0) {
3283                 msg.msg_iov->iov_base = host_msg;
3284                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3285             }
3286             g_free(host_msg);
3287         } else {
3288             ret = target_to_host_cmsg(&msg, msgp);
3289             if (ret == 0) {
3290                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3291             }
3292         }
3293     } else {
3294         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3295         if (!is_error(ret)) {
3296             len = ret;
3297             if (fd_trans_host_to_target_data(fd)) {
3298                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3299                                                MIN(msg.msg_iov->iov_len, len));
3300             } else {
3301                 ret = host_to_target_cmsg(msgp, &msg);
3302             }
3303             if (!is_error(ret)) {
3304                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3305                 msgp->msg_flags = tswap32(msg.msg_flags);
3306                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3307                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3308                                     msg.msg_name, msg.msg_namelen);
3309                     if (ret) {
3310                         goto out;
3311                     }
3312                 }
3313 
3314                 ret = len;
3315             }
3316         }
3317     }
3318 
3319 out:
3320     unlock_iovec(vec, target_vec, count, !send);
3321 out2:
3322     return ret;
3323 }
3324 
3325 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3326                                int flags, int send)
3327 {
3328     abi_long ret;
3329     struct target_msghdr *msgp;
3330 
3331     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3332                           msgp,
3333                           target_msg,
3334                           send ? 1 : 0)) {
3335         return -TARGET_EFAULT;
3336     }
3337     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3338     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3339     return ret;
3340 }
3341 
3342 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3343  * so it might not have this *mmsg-specific flag either.
3344  */
3345 #ifndef MSG_WAITFORONE
3346 #define MSG_WAITFORONE 0x10000
3347 #endif
3348 
3349 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3350                                 unsigned int vlen, unsigned int flags,
3351                                 int send)
3352 {
3353     struct target_mmsghdr *mmsgp;
3354     abi_long ret = 0;
3355     int i;
3356 
3357     if (vlen > UIO_MAXIOV) {
3358         vlen = UIO_MAXIOV;
3359     }
3360 
3361     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3362     if (!mmsgp) {
3363         return -TARGET_EFAULT;
3364     }
3365 
3366     for (i = 0; i < vlen; i++) {
3367         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3368         if (is_error(ret)) {
3369             break;
3370         }
3371         mmsgp[i].msg_len = tswap32(ret);
3372         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3373         if (flags & MSG_WAITFORONE) {
3374             flags |= MSG_DONTWAIT;
3375         }
3376     }
3377 
3378     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3379 
3380     /* Return number of datagrams sent if we sent any at all;
3381      * otherwise return the error.
3382      */
3383     if (i) {
3384         return i;
3385     }
3386     return ret;
3387 }
3388 
3389 /* do_accept4() Must return target values and target errnos. */
3390 static abi_long do_accept4(int fd, abi_ulong target_addr,
3391                            abi_ulong target_addrlen_addr, int flags)
3392 {
3393     socklen_t addrlen, ret_addrlen;
3394     void *addr;
3395     abi_long ret;
3396     int host_flags;
3397 
3398     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3399 
3400     if (target_addr == 0) {
3401         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3402     }
3403 
3404     /* linux returns EFAULT if addrlen pointer is invalid */
3405     if (get_user_u32(addrlen, target_addrlen_addr))
3406         return -TARGET_EFAULT;
3407 
3408     if ((int)addrlen < 0) {
3409         return -TARGET_EINVAL;
3410     }
3411 
3412     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3413         return -TARGET_EFAULT;
3414     }
3415 
3416     addr = alloca(addrlen);
3417 
3418     ret_addrlen = addrlen;
3419     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3420     if (!is_error(ret)) {
3421         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3422         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3423             ret = -TARGET_EFAULT;
3424         }
3425     }
3426     return ret;
3427 }
3428 
3429 /* do_getpeername() Must return target values and target errnos. */
3430 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3431                                abi_ulong target_addrlen_addr)
3432 {
3433     socklen_t addrlen, ret_addrlen;
3434     void *addr;
3435     abi_long ret;
3436 
3437     if (get_user_u32(addrlen, target_addrlen_addr))
3438         return -TARGET_EFAULT;
3439 
3440     if ((int)addrlen < 0) {
3441         return -TARGET_EINVAL;
3442     }
3443 
3444     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3445         return -TARGET_EFAULT;
3446     }
3447 
3448     addr = alloca(addrlen);
3449 
3450     ret_addrlen = addrlen;
3451     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3452     if (!is_error(ret)) {
3453         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3454         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3455             ret = -TARGET_EFAULT;
3456         }
3457     }
3458     return ret;
3459 }
3460 
3461 /* do_getsockname() Must return target values and target errnos. */
3462 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3463                                abi_ulong target_addrlen_addr)
3464 {
3465     socklen_t addrlen, ret_addrlen;
3466     void *addr;
3467     abi_long ret;
3468 
3469     if (get_user_u32(addrlen, target_addrlen_addr))
3470         return -TARGET_EFAULT;
3471 
3472     if ((int)addrlen < 0) {
3473         return -TARGET_EINVAL;
3474     }
3475 
3476     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3477         return -TARGET_EFAULT;
3478     }
3479 
3480     addr = alloca(addrlen);
3481 
3482     ret_addrlen = addrlen;
3483     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3484     if (!is_error(ret)) {
3485         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3486         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3487             ret = -TARGET_EFAULT;
3488         }
3489     }
3490     return ret;
3491 }
3492 
3493 /* do_socketpair() Must return target values and target errnos. */
3494 static abi_long do_socketpair(int domain, int type, int protocol,
3495                               abi_ulong target_tab_addr)
3496 {
3497     int tab[2];
3498     abi_long ret;
3499 
3500     target_to_host_sock_type(&type);
3501 
3502     ret = get_errno(socketpair(domain, type, protocol, tab));
3503     if (!is_error(ret)) {
3504         if (put_user_s32(tab[0], target_tab_addr)
3505             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3506             ret = -TARGET_EFAULT;
3507     }
3508     return ret;
3509 }
3510 
3511 /* do_sendto() Must return target values and target errnos. */
3512 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3513                           abi_ulong target_addr, socklen_t addrlen)
3514 {
3515     void *addr;
3516     void *host_msg;
3517     void *copy_msg = NULL;
3518     abi_long ret;
3519 
3520     if ((int)addrlen < 0) {
3521         return -TARGET_EINVAL;
3522     }
3523 
3524     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3525     if (!host_msg)
3526         return -TARGET_EFAULT;
3527     if (fd_trans_target_to_host_data(fd)) {
3528         copy_msg = host_msg;
3529         host_msg = g_malloc(len);
3530         memcpy(host_msg, copy_msg, len);
3531         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3532         if (ret < 0) {
3533             goto fail;
3534         }
3535     }
3536     if (target_addr) {
3537         addr = alloca(addrlen+1);
3538         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3539         if (ret) {
3540             goto fail;
3541         }
3542         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3543     } else {
3544         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3545     }
3546 fail:
3547     if (copy_msg) {
3548         g_free(host_msg);
3549         host_msg = copy_msg;
3550     }
3551     unlock_user(host_msg, msg, 0);
3552     return ret;
3553 }
3554 
3555 /* do_recvfrom() Must return target values and target errnos. */
3556 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3557                             abi_ulong target_addr,
3558                             abi_ulong target_addrlen)
3559 {
3560     socklen_t addrlen, ret_addrlen;
3561     void *addr;
3562     void *host_msg;
3563     abi_long ret;
3564 
3565     if (!msg) {
3566         host_msg = NULL;
3567     } else {
3568         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3569         if (!host_msg) {
3570             return -TARGET_EFAULT;
3571         }
3572     }
3573     if (target_addr) {
3574         if (get_user_u32(addrlen, target_addrlen)) {
3575             ret = -TARGET_EFAULT;
3576             goto fail;
3577         }
3578         if ((int)addrlen < 0) {
3579             ret = -TARGET_EINVAL;
3580             goto fail;
3581         }
3582         addr = alloca(addrlen);
3583         ret_addrlen = addrlen;
3584         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3585                                       addr, &ret_addrlen));
3586     } else {
3587         addr = NULL; /* To keep compiler quiet.  */
3588         addrlen = 0; /* To keep compiler quiet.  */
3589         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3590     }
3591     if (!is_error(ret)) {
3592         if (fd_trans_host_to_target_data(fd)) {
3593             abi_long trans;
3594             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3595             if (is_error(trans)) {
3596                 ret = trans;
3597                 goto fail;
3598             }
3599         }
3600         if (target_addr) {
3601             host_to_target_sockaddr(target_addr, addr,
3602                                     MIN(addrlen, ret_addrlen));
3603             if (put_user_u32(ret_addrlen, target_addrlen)) {
3604                 ret = -TARGET_EFAULT;
3605                 goto fail;
3606             }
3607         }
3608         unlock_user(host_msg, msg, len);
3609     } else {
3610 fail:
3611         unlock_user(host_msg, msg, 0);
3612     }
3613     return ret;
3614 }
3615 
3616 #ifdef TARGET_NR_socketcall
3617 /* do_socketcall() must return target values and target errnos. */
3618 static abi_long do_socketcall(int num, abi_ulong vptr)
3619 {
3620     static const unsigned nargs[] = { /* number of arguments per operation */
3621         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3622         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3623         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3624         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3625         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3626         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3627         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3628         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3629         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3630         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3631         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3632         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3633         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3634         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3635         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3636         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3637         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3638         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3639         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3640         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3641     };
3642     abi_long a[6]; /* max 6 args */
3643     unsigned i;
3644 
3645     /* check the range of the first argument num */
3646     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3647     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3648         return -TARGET_EINVAL;
3649     }
3650     /* ensure we have space for args */
3651     if (nargs[num] > ARRAY_SIZE(a)) {
3652         return -TARGET_EINVAL;
3653     }
3654     /* collect the arguments in a[] according to nargs[] */
3655     for (i = 0; i < nargs[num]; ++i) {
3656         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3657             return -TARGET_EFAULT;
3658         }
3659     }
3660     /* now when we have the args, invoke the appropriate underlying function */
3661     switch (num) {
3662     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3663         return do_socket(a[0], a[1], a[2]);
3664     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3665         return do_bind(a[0], a[1], a[2]);
3666     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3667         return do_connect(a[0], a[1], a[2]);
3668     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3669         return get_errno(listen(a[0], a[1]));
3670     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3671         return do_accept4(a[0], a[1], a[2], 0);
3672     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3673         return do_getsockname(a[0], a[1], a[2]);
3674     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3675         return do_getpeername(a[0], a[1], a[2]);
3676     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3677         return do_socketpair(a[0], a[1], a[2], a[3]);
3678     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3679         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3680     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3681         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3682     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3683         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3684     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3685         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3686     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3687         return get_errno(shutdown(a[0], a[1]));
3688     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3689         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3690     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3691         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3692     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3693         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3694     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3695         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3696     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3697         return do_accept4(a[0], a[1], a[2], a[3]);
3698     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3699         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3700     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3701         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3702     default:
3703         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3704         return -TARGET_EINVAL;
3705     }
3706 }
3707 #endif
3708 
3709 #define N_SHM_REGIONS	32
3710 
3711 static struct shm_region {
3712     abi_ulong start;
3713     abi_ulong size;
3714     bool in_use;
3715 } shm_regions[N_SHM_REGIONS];
3716 
3717 #ifndef TARGET_SEMID64_DS
3718 /* asm-generic version of this struct */
3719 struct target_semid64_ds
3720 {
3721   struct target_ipc_perm sem_perm;
3722   abi_ulong sem_otime;
3723 #if TARGET_ABI_BITS == 32
3724   abi_ulong __unused1;
3725 #endif
3726   abi_ulong sem_ctime;
3727 #if TARGET_ABI_BITS == 32
3728   abi_ulong __unused2;
3729 #endif
3730   abi_ulong sem_nsems;
3731   abi_ulong __unused3;
3732   abi_ulong __unused4;
3733 };
3734 #endif
3735 
3736 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3737                                                abi_ulong target_addr)
3738 {
3739     struct target_ipc_perm *target_ip;
3740     struct target_semid64_ds *target_sd;
3741 
3742     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3743         return -TARGET_EFAULT;
3744     target_ip = &(target_sd->sem_perm);
3745     host_ip->__key = tswap32(target_ip->__key);
3746     host_ip->uid = tswap32(target_ip->uid);
3747     host_ip->gid = tswap32(target_ip->gid);
3748     host_ip->cuid = tswap32(target_ip->cuid);
3749     host_ip->cgid = tswap32(target_ip->cgid);
3750 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3751     host_ip->mode = tswap32(target_ip->mode);
3752 #else
3753     host_ip->mode = tswap16(target_ip->mode);
3754 #endif
3755 #if defined(TARGET_PPC)
3756     host_ip->__seq = tswap32(target_ip->__seq);
3757 #else
3758     host_ip->__seq = tswap16(target_ip->__seq);
3759 #endif
3760     unlock_user_struct(target_sd, target_addr, 0);
3761     return 0;
3762 }
3763 
3764 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3765                                                struct ipc_perm *host_ip)
3766 {
3767     struct target_ipc_perm *target_ip;
3768     struct target_semid64_ds *target_sd;
3769 
3770     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3771         return -TARGET_EFAULT;
3772     target_ip = &(target_sd->sem_perm);
3773     target_ip->__key = tswap32(host_ip->__key);
3774     target_ip->uid = tswap32(host_ip->uid);
3775     target_ip->gid = tswap32(host_ip->gid);
3776     target_ip->cuid = tswap32(host_ip->cuid);
3777     target_ip->cgid = tswap32(host_ip->cgid);
3778 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3779     target_ip->mode = tswap32(host_ip->mode);
3780 #else
3781     target_ip->mode = tswap16(host_ip->mode);
3782 #endif
3783 #if defined(TARGET_PPC)
3784     target_ip->__seq = tswap32(host_ip->__seq);
3785 #else
3786     target_ip->__seq = tswap16(host_ip->__seq);
3787 #endif
3788     unlock_user_struct(target_sd, target_addr, 1);
3789     return 0;
3790 }
3791 
3792 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3793                                                abi_ulong target_addr)
3794 {
3795     struct target_semid64_ds *target_sd;
3796 
3797     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3798         return -TARGET_EFAULT;
3799     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3800         return -TARGET_EFAULT;
3801     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3802     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3803     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3804     unlock_user_struct(target_sd, target_addr, 0);
3805     return 0;
3806 }
3807 
3808 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3809                                                struct semid_ds *host_sd)
3810 {
3811     struct target_semid64_ds *target_sd;
3812 
3813     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3814         return -TARGET_EFAULT;
3815     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3816         return -TARGET_EFAULT;
3817     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3818     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3819     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3820     unlock_user_struct(target_sd, target_addr, 1);
3821     return 0;
3822 }
3823 
3824 struct target_seminfo {
3825     int semmap;
3826     int semmni;
3827     int semmns;
3828     int semmnu;
3829     int semmsl;
3830     int semopm;
3831     int semume;
3832     int semusz;
3833     int semvmx;
3834     int semaem;
3835 };
3836 
3837 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3838                                               struct seminfo *host_seminfo)
3839 {
3840     struct target_seminfo *target_seminfo;
3841     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3842         return -TARGET_EFAULT;
3843     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3844     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3845     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3846     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3847     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3848     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3849     __put_user(host_seminfo->semume, &target_seminfo->semume);
3850     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3851     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3852     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3853     unlock_user_struct(target_seminfo, target_addr, 1);
3854     return 0;
3855 }
3856 
3857 union semun {
3858 	int val;
3859 	struct semid_ds *buf;
3860 	unsigned short *array;
3861 	struct seminfo *__buf;
3862 };
3863 
3864 union target_semun {
3865 	int val;
3866 	abi_ulong buf;
3867 	abi_ulong array;
3868 	abi_ulong __buf;
3869 };
3870 
3871 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3872                                                abi_ulong target_addr)
3873 {
3874     int nsems;
3875     unsigned short *array;
3876     union semun semun;
3877     struct semid_ds semid_ds;
3878     int i, ret;
3879 
3880     semun.buf = &semid_ds;
3881 
3882     ret = semctl(semid, 0, IPC_STAT, semun);
3883     if (ret == -1)
3884         return get_errno(ret);
3885 
3886     nsems = semid_ds.sem_nsems;
3887 
3888     *host_array = g_try_new(unsigned short, nsems);
3889     if (!*host_array) {
3890         return -TARGET_ENOMEM;
3891     }
3892     array = lock_user(VERIFY_READ, target_addr,
3893                       nsems*sizeof(unsigned short), 1);
3894     if (!array) {
3895         g_free(*host_array);
3896         return -TARGET_EFAULT;
3897     }
3898 
3899     for(i=0; i<nsems; i++) {
3900         __get_user((*host_array)[i], &array[i]);
3901     }
3902     unlock_user(array, target_addr, 0);
3903 
3904     return 0;
3905 }
3906 
3907 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3908                                                unsigned short **host_array)
3909 {
3910     int nsems;
3911     unsigned short *array;
3912     union semun semun;
3913     struct semid_ds semid_ds;
3914     int i, ret;
3915 
3916     semun.buf = &semid_ds;
3917 
3918     ret = semctl(semid, 0, IPC_STAT, semun);
3919     if (ret == -1)
3920         return get_errno(ret);
3921 
3922     nsems = semid_ds.sem_nsems;
3923 
3924     array = lock_user(VERIFY_WRITE, target_addr,
3925                       nsems*sizeof(unsigned short), 0);
3926     if (!array)
3927         return -TARGET_EFAULT;
3928 
3929     for(i=0; i<nsems; i++) {
3930         __put_user((*host_array)[i], &array[i]);
3931     }
3932     g_free(*host_array);
3933     unlock_user(array, target_addr, 1);
3934 
3935     return 0;
3936 }
3937 
3938 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3939                                  abi_ulong target_arg)
3940 {
3941     union target_semun target_su = { .buf = target_arg };
3942     union semun arg;
3943     struct semid_ds dsarg;
3944     unsigned short *array = NULL;
3945     struct seminfo seminfo;
3946     abi_long ret = -TARGET_EINVAL;
3947     abi_long err;
3948     cmd &= 0xff;
3949 
3950     switch( cmd ) {
3951 	case GETVAL:
3952 	case SETVAL:
3953             /* In 64 bit cross-endian situations, we will erroneously pick up
3954              * the wrong half of the union for the "val" element.  To rectify
3955              * this, the entire 8-byte structure is byteswapped, followed by
3956 	     * a swap of the 4 byte val field. In other cases, the data is
3957 	     * already in proper host byte order. */
3958 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3959 		target_su.buf = tswapal(target_su.buf);
3960 		arg.val = tswap32(target_su.val);
3961 	    } else {
3962 		arg.val = target_su.val;
3963 	    }
3964             ret = get_errno(semctl(semid, semnum, cmd, arg));
3965             break;
3966 	case GETALL:
3967 	case SETALL:
3968             err = target_to_host_semarray(semid, &array, target_su.array);
3969             if (err)
3970                 return err;
3971             arg.array = array;
3972             ret = get_errno(semctl(semid, semnum, cmd, arg));
3973             err = host_to_target_semarray(semid, target_su.array, &array);
3974             if (err)
3975                 return err;
3976             break;
3977 	case IPC_STAT:
3978 	case IPC_SET:
3979 	case SEM_STAT:
3980             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3981             if (err)
3982                 return err;
3983             arg.buf = &dsarg;
3984             ret = get_errno(semctl(semid, semnum, cmd, arg));
3985             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3986             if (err)
3987                 return err;
3988             break;
3989 	case IPC_INFO:
3990 	case SEM_INFO:
3991             arg.__buf = &seminfo;
3992             ret = get_errno(semctl(semid, semnum, cmd, arg));
3993             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3994             if (err)
3995                 return err;
3996             break;
3997 	case IPC_RMID:
3998 	case GETPID:
3999 	case GETNCNT:
4000 	case GETZCNT:
4001             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4002             break;
4003     }
4004 
4005     return ret;
4006 }
4007 
4008 struct target_sembuf {
4009     unsigned short sem_num;
4010     short sem_op;
4011     short sem_flg;
4012 };
4013 
4014 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4015                                              abi_ulong target_addr,
4016                                              unsigned nsops)
4017 {
4018     struct target_sembuf *target_sembuf;
4019     int i;
4020 
4021     target_sembuf = lock_user(VERIFY_READ, target_addr,
4022                               nsops*sizeof(struct target_sembuf), 1);
4023     if (!target_sembuf)
4024         return -TARGET_EFAULT;
4025 
4026     for(i=0; i<nsops; i++) {
4027         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4028         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4029         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4030     }
4031 
4032     unlock_user(target_sembuf, target_addr, 0);
4033 
4034     return 0;
4035 }
4036 
4037 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4038     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4039 
4040 /*
4041  * This macro is required to handle the s390 variants, which passes the
4042  * arguments in a different order than default.
4043  */
4044 #ifdef __s390x__
4045 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4046   (__nsops), (__timeout), (__sops)
4047 #else
4048 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4049   (__nsops), 0, (__sops), (__timeout)
4050 #endif
4051 
4052 static inline abi_long do_semtimedop(int semid,
4053                                      abi_long ptr,
4054                                      unsigned nsops,
4055                                      abi_long timeout, bool time64)
4056 {
4057     struct sembuf *sops;
4058     struct timespec ts, *pts = NULL;
4059     abi_long ret;
4060 
4061     if (timeout) {
4062         pts = &ts;
4063         if (time64) {
4064             if (target_to_host_timespec64(pts, timeout)) {
4065                 return -TARGET_EFAULT;
4066             }
4067         } else {
4068             if (target_to_host_timespec(pts, timeout)) {
4069                 return -TARGET_EFAULT;
4070             }
4071         }
4072     }
4073 
4074     if (nsops > TARGET_SEMOPM) {
4075         return -TARGET_E2BIG;
4076     }
4077 
4078     sops = g_new(struct sembuf, nsops);
4079 
4080     if (target_to_host_sembuf(sops, ptr, nsops)) {
4081         g_free(sops);
4082         return -TARGET_EFAULT;
4083     }
4084 
4085     ret = -TARGET_ENOSYS;
4086 #ifdef __NR_semtimedop
4087     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4088 #endif
4089 #ifdef __NR_ipc
4090     if (ret == -TARGET_ENOSYS) {
4091         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4092                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4093     }
4094 #endif
4095     g_free(sops);
4096     return ret;
4097 }
4098 #endif
4099 
4100 struct target_msqid_ds
4101 {
4102     struct target_ipc_perm msg_perm;
4103     abi_ulong msg_stime;
4104 #if TARGET_ABI_BITS == 32
4105     abi_ulong __unused1;
4106 #endif
4107     abi_ulong msg_rtime;
4108 #if TARGET_ABI_BITS == 32
4109     abi_ulong __unused2;
4110 #endif
4111     abi_ulong msg_ctime;
4112 #if TARGET_ABI_BITS == 32
4113     abi_ulong __unused3;
4114 #endif
4115     abi_ulong __msg_cbytes;
4116     abi_ulong msg_qnum;
4117     abi_ulong msg_qbytes;
4118     abi_ulong msg_lspid;
4119     abi_ulong msg_lrpid;
4120     abi_ulong __unused4;
4121     abi_ulong __unused5;
4122 };
4123 
4124 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4125                                                abi_ulong target_addr)
4126 {
4127     struct target_msqid_ds *target_md;
4128 
4129     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4130         return -TARGET_EFAULT;
4131     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4132         return -TARGET_EFAULT;
4133     host_md->msg_stime = tswapal(target_md->msg_stime);
4134     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4135     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4136     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4137     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4138     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4139     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4140     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4141     unlock_user_struct(target_md, target_addr, 0);
4142     return 0;
4143 }
4144 
4145 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4146                                                struct msqid_ds *host_md)
4147 {
4148     struct target_msqid_ds *target_md;
4149 
4150     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4151         return -TARGET_EFAULT;
4152     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4153         return -TARGET_EFAULT;
4154     target_md->msg_stime = tswapal(host_md->msg_stime);
4155     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4156     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4157     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4158     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4159     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4160     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4161     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4162     unlock_user_struct(target_md, target_addr, 1);
4163     return 0;
4164 }
4165 
4166 struct target_msginfo {
4167     int msgpool;
4168     int msgmap;
4169     int msgmax;
4170     int msgmnb;
4171     int msgmni;
4172     int msgssz;
4173     int msgtql;
4174     unsigned short int msgseg;
4175 };
4176 
4177 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4178                                               struct msginfo *host_msginfo)
4179 {
4180     struct target_msginfo *target_msginfo;
4181     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4182         return -TARGET_EFAULT;
4183     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4184     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4185     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4186     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4187     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4188     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4189     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4190     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4191     unlock_user_struct(target_msginfo, target_addr, 1);
4192     return 0;
4193 }
4194 
4195 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4196 {
4197     struct msqid_ds dsarg;
4198     struct msginfo msginfo;
4199     abi_long ret = -TARGET_EINVAL;
4200 
4201     cmd &= 0xff;
4202 
4203     switch (cmd) {
4204     case IPC_STAT:
4205     case IPC_SET:
4206     case MSG_STAT:
4207         if (target_to_host_msqid_ds(&dsarg,ptr))
4208             return -TARGET_EFAULT;
4209         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4210         if (host_to_target_msqid_ds(ptr,&dsarg))
4211             return -TARGET_EFAULT;
4212         break;
4213     case IPC_RMID:
4214         ret = get_errno(msgctl(msgid, cmd, NULL));
4215         break;
4216     case IPC_INFO:
4217     case MSG_INFO:
4218         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4219         if (host_to_target_msginfo(ptr, &msginfo))
4220             return -TARGET_EFAULT;
4221         break;
4222     }
4223 
4224     return ret;
4225 }
4226 
4227 struct target_msgbuf {
4228     abi_long mtype;
4229     char	mtext[1];
4230 };
4231 
4232 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4233                                  ssize_t msgsz, int msgflg)
4234 {
4235     struct target_msgbuf *target_mb;
4236     struct msgbuf *host_mb;
4237     abi_long ret = 0;
4238 
4239     if (msgsz < 0) {
4240         return -TARGET_EINVAL;
4241     }
4242 
4243     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4244         return -TARGET_EFAULT;
4245     host_mb = g_try_malloc(msgsz + sizeof(long));
4246     if (!host_mb) {
4247         unlock_user_struct(target_mb, msgp, 0);
4248         return -TARGET_ENOMEM;
4249     }
4250     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4251     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4252     ret = -TARGET_ENOSYS;
4253 #ifdef __NR_msgsnd
4254     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4255 #endif
4256 #ifdef __NR_ipc
4257     if (ret == -TARGET_ENOSYS) {
4258 #ifdef __s390x__
4259         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4260                                  host_mb));
4261 #else
4262         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4263                                  host_mb, 0));
4264 #endif
4265     }
4266 #endif
4267     g_free(host_mb);
4268     unlock_user_struct(target_mb, msgp, 0);
4269 
4270     return ret;
4271 }
4272 
4273 #ifdef __NR_ipc
4274 #if defined(__sparc__)
4275 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4276 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4277 #elif defined(__s390x__)
4278 /* The s390 sys_ipc variant has only five parameters.  */
4279 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4280     ((long int[]){(long int)__msgp, __msgtyp})
4281 #else
4282 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4283     ((long int[]){(long int)__msgp, __msgtyp}), 0
4284 #endif
4285 #endif
4286 
4287 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4288                                  ssize_t msgsz, abi_long msgtyp,
4289                                  int msgflg)
4290 {
4291     struct target_msgbuf *target_mb;
4292     char *target_mtext;
4293     struct msgbuf *host_mb;
4294     abi_long ret = 0;
4295 
4296     if (msgsz < 0) {
4297         return -TARGET_EINVAL;
4298     }
4299 
4300     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4301         return -TARGET_EFAULT;
4302 
4303     host_mb = g_try_malloc(msgsz + sizeof(long));
4304     if (!host_mb) {
4305         ret = -TARGET_ENOMEM;
4306         goto end;
4307     }
4308     ret = -TARGET_ENOSYS;
4309 #ifdef __NR_msgrcv
4310     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4311 #endif
4312 #ifdef __NR_ipc
4313     if (ret == -TARGET_ENOSYS) {
4314         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4315                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4316     }
4317 #endif
4318 
4319     if (ret > 0) {
4320         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4321         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4322         if (!target_mtext) {
4323             ret = -TARGET_EFAULT;
4324             goto end;
4325         }
4326         memcpy(target_mb->mtext, host_mb->mtext, ret);
4327         unlock_user(target_mtext, target_mtext_addr, ret);
4328     }
4329 
4330     target_mb->mtype = tswapal(host_mb->mtype);
4331 
4332 end:
4333     if (target_mb)
4334         unlock_user_struct(target_mb, msgp, 1);
4335     g_free(host_mb);
4336     return ret;
4337 }
4338 
4339 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4340                                                abi_ulong target_addr)
4341 {
4342     struct target_shmid_ds *target_sd;
4343 
4344     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4345         return -TARGET_EFAULT;
4346     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4347         return -TARGET_EFAULT;
4348     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4349     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4350     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4351     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4352     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4353     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4354     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4355     unlock_user_struct(target_sd, target_addr, 0);
4356     return 0;
4357 }
4358 
4359 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4360                                                struct shmid_ds *host_sd)
4361 {
4362     struct target_shmid_ds *target_sd;
4363 
4364     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4365         return -TARGET_EFAULT;
4366     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4367         return -TARGET_EFAULT;
4368     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4370     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375     unlock_user_struct(target_sd, target_addr, 1);
4376     return 0;
4377 }
4378 
4379 struct  target_shminfo {
4380     abi_ulong shmmax;
4381     abi_ulong shmmin;
4382     abi_ulong shmmni;
4383     abi_ulong shmseg;
4384     abi_ulong shmall;
4385 };
4386 
4387 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4388                                               struct shminfo *host_shminfo)
4389 {
4390     struct target_shminfo *target_shminfo;
4391     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4392         return -TARGET_EFAULT;
4393     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4394     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4395     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4396     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4397     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4398     unlock_user_struct(target_shminfo, target_addr, 1);
4399     return 0;
4400 }
4401 
4402 struct target_shm_info {
4403     int used_ids;
4404     abi_ulong shm_tot;
4405     abi_ulong shm_rss;
4406     abi_ulong shm_swp;
4407     abi_ulong swap_attempts;
4408     abi_ulong swap_successes;
4409 };
4410 
4411 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4412                                                struct shm_info *host_shm_info)
4413 {
4414     struct target_shm_info *target_shm_info;
4415     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4416         return -TARGET_EFAULT;
4417     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4418     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4419     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4420     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4421     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4422     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4423     unlock_user_struct(target_shm_info, target_addr, 1);
4424     return 0;
4425 }
4426 
4427 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4428 {
4429     struct shmid_ds dsarg;
4430     struct shminfo shminfo;
4431     struct shm_info shm_info;
4432     abi_long ret = -TARGET_EINVAL;
4433 
4434     cmd &= 0xff;
4435 
4436     switch(cmd) {
4437     case IPC_STAT:
4438     case IPC_SET:
4439     case SHM_STAT:
4440         if (target_to_host_shmid_ds(&dsarg, buf))
4441             return -TARGET_EFAULT;
4442         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4443         if (host_to_target_shmid_ds(buf, &dsarg))
4444             return -TARGET_EFAULT;
4445         break;
4446     case IPC_INFO:
4447         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4448         if (host_to_target_shminfo(buf, &shminfo))
4449             return -TARGET_EFAULT;
4450         break;
4451     case SHM_INFO:
4452         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4453         if (host_to_target_shm_info(buf, &shm_info))
4454             return -TARGET_EFAULT;
4455         break;
4456     case IPC_RMID:
4457     case SHM_LOCK:
4458     case SHM_UNLOCK:
4459         ret = get_errno(shmctl(shmid, cmd, NULL));
4460         break;
4461     }
4462 
4463     return ret;
4464 }
4465 
4466 #ifndef TARGET_FORCE_SHMLBA
4467 /* For most architectures, SHMLBA is the same as the page size;
4468  * some architectures have larger values, in which case they should
4469  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4470  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4471  * and defining its own value for SHMLBA.
4472  *
4473  * The kernel also permits SHMLBA to be set by the architecture to a
4474  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4475  * this means that addresses are rounded to the large size if
4476  * SHM_RND is set but addresses not aligned to that size are not rejected
4477  * as long as they are at least page-aligned. Since the only architecture
4478  * which uses this is ia64 this code doesn't provide for that oddity.
4479  */
4480 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4481 {
4482     return TARGET_PAGE_SIZE;
4483 }
4484 #endif
4485 
4486 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4487                                  int shmid, abi_ulong shmaddr, int shmflg)
4488 {
4489     CPUState *cpu = env_cpu(cpu_env);
4490     abi_long raddr;
4491     void *host_raddr;
4492     struct shmid_ds shm_info;
4493     int i,ret;
4494     abi_ulong shmlba;
4495 
4496     /* shmat pointers are always untagged */
4497 
4498     /* find out the length of the shared memory segment */
4499     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4500     if (is_error(ret)) {
4501         /* can't get length, bail out */
4502         return ret;
4503     }
4504 
4505     shmlba = target_shmlba(cpu_env);
4506 
4507     if (shmaddr & (shmlba - 1)) {
4508         if (shmflg & SHM_RND) {
4509             shmaddr &= ~(shmlba - 1);
4510         } else {
4511             return -TARGET_EINVAL;
4512         }
4513     }
4514     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4515         return -TARGET_EINVAL;
4516     }
4517 
4518     mmap_lock();
4519 
4520     /*
4521      * We're mapping shared memory, so ensure we generate code for parallel
4522      * execution and flush old translations.  This will work up to the level
4523      * supported by the host -- anything that requires EXCP_ATOMIC will not
4524      * be atomic with respect to an external process.
4525      */
4526     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4527         cpu->tcg_cflags |= CF_PARALLEL;
4528         tb_flush(cpu);
4529     }
4530 
4531     if (shmaddr)
4532         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4533     else {
4534         abi_ulong mmap_start;
4535 
4536         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4537         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4538 
4539         if (mmap_start == -1) {
4540             errno = ENOMEM;
4541             host_raddr = (void *)-1;
4542         } else
4543             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4544                                shmflg | SHM_REMAP);
4545     }
4546 
4547     if (host_raddr == (void *)-1) {
4548         mmap_unlock();
4549         return get_errno((long)host_raddr);
4550     }
4551     raddr=h2g((unsigned long)host_raddr);
4552 
4553     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4554                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4555                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4556 
4557     for (i = 0; i < N_SHM_REGIONS; i++) {
4558         if (!shm_regions[i].in_use) {
4559             shm_regions[i].in_use = true;
4560             shm_regions[i].start = raddr;
4561             shm_regions[i].size = shm_info.shm_segsz;
4562             break;
4563         }
4564     }
4565 
4566     mmap_unlock();
4567     return raddr;
4568 
4569 }
4570 
4571 static inline abi_long do_shmdt(abi_ulong shmaddr)
4572 {
4573     int i;
4574     abi_long rv;
4575 
4576     /* shmdt pointers are always untagged */
4577 
4578     mmap_lock();
4579 
4580     for (i = 0; i < N_SHM_REGIONS; ++i) {
4581         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4582             shm_regions[i].in_use = false;
4583             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4584             break;
4585         }
4586     }
4587     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4588 
4589     mmap_unlock();
4590 
4591     return rv;
4592 }
4593 
4594 #ifdef TARGET_NR_ipc
4595 /* ??? This only works with linear mappings.  */
4596 /* do_ipc() must return target values and target errnos. */
4597 static abi_long do_ipc(CPUArchState *cpu_env,
4598                        unsigned int call, abi_long first,
4599                        abi_long second, abi_long third,
4600                        abi_long ptr, abi_long fifth)
4601 {
4602     int version;
4603     abi_long ret = 0;
4604 
4605     version = call >> 16;
4606     call &= 0xffff;
4607 
4608     switch (call) {
4609     case IPCOP_semop:
4610         ret = do_semtimedop(first, ptr, second, 0, false);
4611         break;
4612     case IPCOP_semtimedop:
4613     /*
4614      * The s390 sys_ipc variant has only five parameters instead of six
4615      * (as for default variant) and the only difference is the handling of
4616      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4617      * to a struct timespec where the generic variant uses fifth parameter.
4618      */
4619 #if defined(TARGET_S390X)
4620         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4621 #else
4622         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4623 #endif
4624         break;
4625 
4626     case IPCOP_semget:
4627         ret = get_errno(semget(first, second, third));
4628         break;
4629 
4630     case IPCOP_semctl: {
4631         /* The semun argument to semctl is passed by value, so dereference the
4632          * ptr argument. */
4633         abi_ulong atptr;
4634         get_user_ual(atptr, ptr);
4635         ret = do_semctl(first, second, third, atptr);
4636         break;
4637     }
4638 
4639     case IPCOP_msgget:
4640         ret = get_errno(msgget(first, second));
4641         break;
4642 
4643     case IPCOP_msgsnd:
4644         ret = do_msgsnd(first, ptr, second, third);
4645         break;
4646 
4647     case IPCOP_msgctl:
4648         ret = do_msgctl(first, second, ptr);
4649         break;
4650 
4651     case IPCOP_msgrcv:
4652         switch (version) {
4653         case 0:
4654             {
4655                 struct target_ipc_kludge {
4656                     abi_long msgp;
4657                     abi_long msgtyp;
4658                 } *tmp;
4659 
4660                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4661                     ret = -TARGET_EFAULT;
4662                     break;
4663                 }
4664 
4665                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4666 
4667                 unlock_user_struct(tmp, ptr, 0);
4668                 break;
4669             }
4670         default:
4671             ret = do_msgrcv(first, ptr, second, fifth, third);
4672         }
4673         break;
4674 
4675     case IPCOP_shmat:
4676         switch (version) {
4677         default:
4678         {
4679             abi_ulong raddr;
4680             raddr = do_shmat(cpu_env, first, ptr, second);
4681             if (is_error(raddr))
4682                 return get_errno(raddr);
4683             if (put_user_ual(raddr, third))
4684                 return -TARGET_EFAULT;
4685             break;
4686         }
4687         case 1:
4688             ret = -TARGET_EINVAL;
4689             break;
4690         }
4691 	break;
4692     case IPCOP_shmdt:
4693         ret = do_shmdt(ptr);
4694 	break;
4695 
4696     case IPCOP_shmget:
4697 	/* IPC_* flag values are the same on all linux platforms */
4698 	ret = get_errno(shmget(first, second, third));
4699 	break;
4700 
4701 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4702     case IPCOP_shmctl:
4703         ret = do_shmctl(first, second, ptr);
4704         break;
4705     default:
4706         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4707                       call, version);
4708 	ret = -TARGET_ENOSYS;
4709 	break;
4710     }
4711     return ret;
4712 }
4713 #endif
4714 
4715 /* kernel structure types definitions */
4716 
4717 #define STRUCT(name, ...) STRUCT_ ## name,
4718 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4719 enum {
4720 #include "syscall_types.h"
4721 STRUCT_MAX
4722 };
4723 #undef STRUCT
4724 #undef STRUCT_SPECIAL
4725 
4726 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4727 #define STRUCT_SPECIAL(name)
4728 #include "syscall_types.h"
4729 #undef STRUCT
4730 #undef STRUCT_SPECIAL
4731 
4732 #define MAX_STRUCT_SIZE 4096
4733 
4734 #ifdef CONFIG_FIEMAP
4735 /* So fiemap access checks don't overflow on 32 bit systems.
4736  * This is very slightly smaller than the limit imposed by
4737  * the underlying kernel.
4738  */
4739 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4740                             / sizeof(struct fiemap_extent))
4741 
4742 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4743                                        int fd, int cmd, abi_long arg)
4744 {
4745     /* The parameter for this ioctl is a struct fiemap followed
4746      * by an array of struct fiemap_extent whose size is set
4747      * in fiemap->fm_extent_count. The array is filled in by the
4748      * ioctl.
4749      */
4750     int target_size_in, target_size_out;
4751     struct fiemap *fm;
4752     const argtype *arg_type = ie->arg_type;
4753     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4754     void *argptr, *p;
4755     abi_long ret;
4756     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4757     uint32_t outbufsz;
4758     int free_fm = 0;
4759 
4760     assert(arg_type[0] == TYPE_PTR);
4761     assert(ie->access == IOC_RW);
4762     arg_type++;
4763     target_size_in = thunk_type_size(arg_type, 0);
4764     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4765     if (!argptr) {
4766         return -TARGET_EFAULT;
4767     }
4768     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4769     unlock_user(argptr, arg, 0);
4770     fm = (struct fiemap *)buf_temp;
4771     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4772         return -TARGET_EINVAL;
4773     }
4774 
4775     outbufsz = sizeof (*fm) +
4776         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4777 
4778     if (outbufsz > MAX_STRUCT_SIZE) {
4779         /* We can't fit all the extents into the fixed size buffer.
4780          * Allocate one that is large enough and use it instead.
4781          */
4782         fm = g_try_malloc(outbufsz);
4783         if (!fm) {
4784             return -TARGET_ENOMEM;
4785         }
4786         memcpy(fm, buf_temp, sizeof(struct fiemap));
4787         free_fm = 1;
4788     }
4789     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4790     if (!is_error(ret)) {
4791         target_size_out = target_size_in;
4792         /* An extent_count of 0 means we were only counting the extents
4793          * so there are no structs to copy
4794          */
4795         if (fm->fm_extent_count != 0) {
4796             target_size_out += fm->fm_mapped_extents * extent_size;
4797         }
4798         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4799         if (!argptr) {
4800             ret = -TARGET_EFAULT;
4801         } else {
4802             /* Convert the struct fiemap */
4803             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4804             if (fm->fm_extent_count != 0) {
4805                 p = argptr + target_size_in;
4806                 /* ...and then all the struct fiemap_extents */
4807                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4808                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4809                                   THUNK_TARGET);
4810                     p += extent_size;
4811                 }
4812             }
4813             unlock_user(argptr, arg, target_size_out);
4814         }
4815     }
4816     if (free_fm) {
4817         g_free(fm);
4818     }
4819     return ret;
4820 }
4821 #endif
4822 
4823 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4824                                 int fd, int cmd, abi_long arg)
4825 {
4826     const argtype *arg_type = ie->arg_type;
4827     int target_size;
4828     void *argptr;
4829     int ret;
4830     struct ifconf *host_ifconf;
4831     uint32_t outbufsz;
4832     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4833     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4834     int target_ifreq_size;
4835     int nb_ifreq;
4836     int free_buf = 0;
4837     int i;
4838     int target_ifc_len;
4839     abi_long target_ifc_buf;
4840     int host_ifc_len;
4841     char *host_ifc_buf;
4842 
4843     assert(arg_type[0] == TYPE_PTR);
4844     assert(ie->access == IOC_RW);
4845 
4846     arg_type++;
4847     target_size = thunk_type_size(arg_type, 0);
4848 
4849     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4850     if (!argptr)
4851         return -TARGET_EFAULT;
4852     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4853     unlock_user(argptr, arg, 0);
4854 
4855     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4856     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4857     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4858 
4859     if (target_ifc_buf != 0) {
4860         target_ifc_len = host_ifconf->ifc_len;
4861         nb_ifreq = target_ifc_len / target_ifreq_size;
4862         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4863 
4864         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4865         if (outbufsz > MAX_STRUCT_SIZE) {
4866             /*
4867              * We can't fit all the extents into the fixed size buffer.
4868              * Allocate one that is large enough and use it instead.
4869              */
4870             host_ifconf = g_try_malloc(outbufsz);
4871             if (!host_ifconf) {
4872                 return -TARGET_ENOMEM;
4873             }
4874             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4875             free_buf = 1;
4876         }
4877         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4878 
4879         host_ifconf->ifc_len = host_ifc_len;
4880     } else {
4881       host_ifc_buf = NULL;
4882     }
4883     host_ifconf->ifc_buf = host_ifc_buf;
4884 
4885     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4886     if (!is_error(ret)) {
4887 	/* convert host ifc_len to target ifc_len */
4888 
4889         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4890         target_ifc_len = nb_ifreq * target_ifreq_size;
4891         host_ifconf->ifc_len = target_ifc_len;
4892 
4893 	/* restore target ifc_buf */
4894 
4895         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4896 
4897 	/* copy struct ifconf to target user */
4898 
4899         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4900         if (!argptr)
4901             return -TARGET_EFAULT;
4902         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4903         unlock_user(argptr, arg, target_size);
4904 
4905         if (target_ifc_buf != 0) {
4906             /* copy ifreq[] to target user */
4907             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4908             for (i = 0; i < nb_ifreq ; i++) {
4909                 thunk_convert(argptr + i * target_ifreq_size,
4910                               host_ifc_buf + i * sizeof(struct ifreq),
4911                               ifreq_arg_type, THUNK_TARGET);
4912             }
4913             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4914         }
4915     }
4916 
4917     if (free_buf) {
4918         g_free(host_ifconf);
4919     }
4920 
4921     return ret;
4922 }
4923 
4924 #if defined(CONFIG_USBFS)
4925 #if HOST_LONG_BITS > 64
4926 #error USBDEVFS thunks do not support >64 bit hosts yet.
4927 #endif
4928 struct live_urb {
4929     uint64_t target_urb_adr;
4930     uint64_t target_buf_adr;
4931     char *target_buf_ptr;
4932     struct usbdevfs_urb host_urb;
4933 };
4934 
4935 static GHashTable *usbdevfs_urb_hashtable(void)
4936 {
4937     static GHashTable *urb_hashtable;
4938 
4939     if (!urb_hashtable) {
4940         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4941     }
4942     return urb_hashtable;
4943 }
4944 
4945 static void urb_hashtable_insert(struct live_urb *urb)
4946 {
4947     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4948     g_hash_table_insert(urb_hashtable, urb, urb);
4949 }
4950 
4951 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4952 {
4953     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4954     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4955 }
4956 
4957 static void urb_hashtable_remove(struct live_urb *urb)
4958 {
4959     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4960     g_hash_table_remove(urb_hashtable, urb);
4961 }
4962 
4963 static abi_long
4964 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4965                           int fd, int cmd, abi_long arg)
4966 {
4967     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4968     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4969     struct live_urb *lurb;
4970     void *argptr;
4971     uint64_t hurb;
4972     int target_size;
4973     uintptr_t target_urb_adr;
4974     abi_long ret;
4975 
4976     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4977 
4978     memset(buf_temp, 0, sizeof(uint64_t));
4979     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4980     if (is_error(ret)) {
4981         return ret;
4982     }
4983 
4984     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4985     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4986     if (!lurb->target_urb_adr) {
4987         return -TARGET_EFAULT;
4988     }
4989     urb_hashtable_remove(lurb);
4990     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4991         lurb->host_urb.buffer_length);
4992     lurb->target_buf_ptr = NULL;
4993 
4994     /* restore the guest buffer pointer */
4995     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4996 
4997     /* update the guest urb struct */
4998     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4999     if (!argptr) {
5000         g_free(lurb);
5001         return -TARGET_EFAULT;
5002     }
5003     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5004     unlock_user(argptr, lurb->target_urb_adr, target_size);
5005 
5006     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5007     /* write back the urb handle */
5008     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5009     if (!argptr) {
5010         g_free(lurb);
5011         return -TARGET_EFAULT;
5012     }
5013 
5014     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5015     target_urb_adr = lurb->target_urb_adr;
5016     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5017     unlock_user(argptr, arg, target_size);
5018 
5019     g_free(lurb);
5020     return ret;
5021 }
5022 
5023 static abi_long
5024 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5025                              uint8_t *buf_temp __attribute__((unused)),
5026                              int fd, int cmd, abi_long arg)
5027 {
5028     struct live_urb *lurb;
5029 
5030     /* map target address back to host URB with metadata. */
5031     lurb = urb_hashtable_lookup(arg);
5032     if (!lurb) {
5033         return -TARGET_EFAULT;
5034     }
5035     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5036 }
5037 
5038 static abi_long
5039 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5040                             int fd, int cmd, abi_long arg)
5041 {
5042     const argtype *arg_type = ie->arg_type;
5043     int target_size;
5044     abi_long ret;
5045     void *argptr;
5046     int rw_dir;
5047     struct live_urb *lurb;
5048 
5049     /*
5050      * each submitted URB needs to map to a unique ID for the
5051      * kernel, and that unique ID needs to be a pointer to
5052      * host memory.  hence, we need to malloc for each URB.
5053      * isochronous transfers have a variable length struct.
5054      */
5055     arg_type++;
5056     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5057 
5058     /* construct host copy of urb and metadata */
5059     lurb = g_try_new0(struct live_urb, 1);
5060     if (!lurb) {
5061         return -TARGET_ENOMEM;
5062     }
5063 
5064     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5065     if (!argptr) {
5066         g_free(lurb);
5067         return -TARGET_EFAULT;
5068     }
5069     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5070     unlock_user(argptr, arg, 0);
5071 
5072     lurb->target_urb_adr = arg;
5073     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5074 
5075     /* buffer space used depends on endpoint type so lock the entire buffer */
5076     /* control type urbs should check the buffer contents for true direction */
5077     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5078     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5079         lurb->host_urb.buffer_length, 1);
5080     if (lurb->target_buf_ptr == NULL) {
5081         g_free(lurb);
5082         return -TARGET_EFAULT;
5083     }
5084 
5085     /* update buffer pointer in host copy */
5086     lurb->host_urb.buffer = lurb->target_buf_ptr;
5087 
5088     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5089     if (is_error(ret)) {
5090         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5091         g_free(lurb);
5092     } else {
5093         urb_hashtable_insert(lurb);
5094     }
5095 
5096     return ret;
5097 }
5098 #endif /* CONFIG_USBFS */
5099 
5100 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5101                             int cmd, abi_long arg)
5102 {
5103     void *argptr;
5104     struct dm_ioctl *host_dm;
5105     abi_long guest_data;
5106     uint32_t guest_data_size;
5107     int target_size;
5108     const argtype *arg_type = ie->arg_type;
5109     abi_long ret;
5110     void *big_buf = NULL;
5111     char *host_data;
5112 
5113     arg_type++;
5114     target_size = thunk_type_size(arg_type, 0);
5115     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5116     if (!argptr) {
5117         ret = -TARGET_EFAULT;
5118         goto out;
5119     }
5120     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5121     unlock_user(argptr, arg, 0);
5122 
5123     /* buf_temp is too small, so fetch things into a bigger buffer */
5124     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5125     memcpy(big_buf, buf_temp, target_size);
5126     buf_temp = big_buf;
5127     host_dm = big_buf;
5128 
5129     guest_data = arg + host_dm->data_start;
5130     if ((guest_data - arg) < 0) {
5131         ret = -TARGET_EINVAL;
5132         goto out;
5133     }
5134     guest_data_size = host_dm->data_size - host_dm->data_start;
5135     host_data = (char*)host_dm + host_dm->data_start;
5136 
5137     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5138     if (!argptr) {
5139         ret = -TARGET_EFAULT;
5140         goto out;
5141     }
5142 
5143     switch (ie->host_cmd) {
5144     case DM_REMOVE_ALL:
5145     case DM_LIST_DEVICES:
5146     case DM_DEV_CREATE:
5147     case DM_DEV_REMOVE:
5148     case DM_DEV_SUSPEND:
5149     case DM_DEV_STATUS:
5150     case DM_DEV_WAIT:
5151     case DM_TABLE_STATUS:
5152     case DM_TABLE_CLEAR:
5153     case DM_TABLE_DEPS:
5154     case DM_LIST_VERSIONS:
5155         /* no input data */
5156         break;
5157     case DM_DEV_RENAME:
5158     case DM_DEV_SET_GEOMETRY:
5159         /* data contains only strings */
5160         memcpy(host_data, argptr, guest_data_size);
5161         break;
5162     case DM_TARGET_MSG:
5163         memcpy(host_data, argptr, guest_data_size);
5164         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5165         break;
5166     case DM_TABLE_LOAD:
5167     {
5168         void *gspec = argptr;
5169         void *cur_data = host_data;
5170         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5171         int spec_size = thunk_type_size(arg_type, 0);
5172         int i;
5173 
5174         for (i = 0; i < host_dm->target_count; i++) {
5175             struct dm_target_spec *spec = cur_data;
5176             uint32_t next;
5177             int slen;
5178 
5179             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5180             slen = strlen((char*)gspec + spec_size) + 1;
5181             next = spec->next;
5182             spec->next = sizeof(*spec) + slen;
5183             strcpy((char*)&spec[1], gspec + spec_size);
5184             gspec += next;
5185             cur_data += spec->next;
5186         }
5187         break;
5188     }
5189     default:
5190         ret = -TARGET_EINVAL;
5191         unlock_user(argptr, guest_data, 0);
5192         goto out;
5193     }
5194     unlock_user(argptr, guest_data, 0);
5195 
5196     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5197     if (!is_error(ret)) {
5198         guest_data = arg + host_dm->data_start;
5199         guest_data_size = host_dm->data_size - host_dm->data_start;
5200         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5201         switch (ie->host_cmd) {
5202         case DM_REMOVE_ALL:
5203         case DM_DEV_CREATE:
5204         case DM_DEV_REMOVE:
5205         case DM_DEV_RENAME:
5206         case DM_DEV_SUSPEND:
5207         case DM_DEV_STATUS:
5208         case DM_TABLE_LOAD:
5209         case DM_TABLE_CLEAR:
5210         case DM_TARGET_MSG:
5211         case DM_DEV_SET_GEOMETRY:
5212             /* no return data */
5213             break;
5214         case DM_LIST_DEVICES:
5215         {
5216             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5217             uint32_t remaining_data = guest_data_size;
5218             void *cur_data = argptr;
5219             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5220             int nl_size = 12; /* can't use thunk_size due to alignment */
5221 
5222             while (1) {
5223                 uint32_t next = nl->next;
5224                 if (next) {
5225                     nl->next = nl_size + (strlen(nl->name) + 1);
5226                 }
5227                 if (remaining_data < nl->next) {
5228                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5229                     break;
5230                 }
5231                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5232                 strcpy(cur_data + nl_size, nl->name);
5233                 cur_data += nl->next;
5234                 remaining_data -= nl->next;
5235                 if (!next) {
5236                     break;
5237                 }
5238                 nl = (void*)nl + next;
5239             }
5240             break;
5241         }
5242         case DM_DEV_WAIT:
5243         case DM_TABLE_STATUS:
5244         {
5245             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5246             void *cur_data = argptr;
5247             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5248             int spec_size = thunk_type_size(arg_type, 0);
5249             int i;
5250 
5251             for (i = 0; i < host_dm->target_count; i++) {
5252                 uint32_t next = spec->next;
5253                 int slen = strlen((char*)&spec[1]) + 1;
5254                 spec->next = (cur_data - argptr) + spec_size + slen;
5255                 if (guest_data_size < spec->next) {
5256                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5257                     break;
5258                 }
5259                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5260                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5261                 cur_data = argptr + spec->next;
5262                 spec = (void*)host_dm + host_dm->data_start + next;
5263             }
5264             break;
5265         }
5266         case DM_TABLE_DEPS:
5267         {
5268             void *hdata = (void*)host_dm + host_dm->data_start;
5269             int count = *(uint32_t*)hdata;
5270             uint64_t *hdev = hdata + 8;
5271             uint64_t *gdev = argptr + 8;
5272             int i;
5273 
5274             *(uint32_t*)argptr = tswap32(count);
5275             for (i = 0; i < count; i++) {
5276                 *gdev = tswap64(*hdev);
5277                 gdev++;
5278                 hdev++;
5279             }
5280             break;
5281         }
5282         case DM_LIST_VERSIONS:
5283         {
5284             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5285             uint32_t remaining_data = guest_data_size;
5286             void *cur_data = argptr;
5287             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5288             int vers_size = thunk_type_size(arg_type, 0);
5289 
5290             while (1) {
5291                 uint32_t next = vers->next;
5292                 if (next) {
5293                     vers->next = vers_size + (strlen(vers->name) + 1);
5294                 }
5295                 if (remaining_data < vers->next) {
5296                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5297                     break;
5298                 }
5299                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5300                 strcpy(cur_data + vers_size, vers->name);
5301                 cur_data += vers->next;
5302                 remaining_data -= vers->next;
5303                 if (!next) {
5304                     break;
5305                 }
5306                 vers = (void*)vers + next;
5307             }
5308             break;
5309         }
5310         default:
5311             unlock_user(argptr, guest_data, 0);
5312             ret = -TARGET_EINVAL;
5313             goto out;
5314         }
5315         unlock_user(argptr, guest_data, guest_data_size);
5316 
5317         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5318         if (!argptr) {
5319             ret = -TARGET_EFAULT;
5320             goto out;
5321         }
5322         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5323         unlock_user(argptr, arg, target_size);
5324     }
5325 out:
5326     g_free(big_buf);
5327     return ret;
5328 }
5329 
5330 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5331                                int cmd, abi_long arg)
5332 {
5333     void *argptr;
5334     int target_size;
5335     const argtype *arg_type = ie->arg_type;
5336     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5337     abi_long ret;
5338 
5339     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5340     struct blkpg_partition host_part;
5341 
5342     /* Read and convert blkpg */
5343     arg_type++;
5344     target_size = thunk_type_size(arg_type, 0);
5345     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5346     if (!argptr) {
5347         ret = -TARGET_EFAULT;
5348         goto out;
5349     }
5350     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5351     unlock_user(argptr, arg, 0);
5352 
5353     switch (host_blkpg->op) {
5354     case BLKPG_ADD_PARTITION:
5355     case BLKPG_DEL_PARTITION:
5356         /* payload is struct blkpg_partition */
5357         break;
5358     default:
5359         /* Unknown opcode */
5360         ret = -TARGET_EINVAL;
5361         goto out;
5362     }
5363 
5364     /* Read and convert blkpg->data */
5365     arg = (abi_long)(uintptr_t)host_blkpg->data;
5366     target_size = thunk_type_size(part_arg_type, 0);
5367     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5368     if (!argptr) {
5369         ret = -TARGET_EFAULT;
5370         goto out;
5371     }
5372     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5373     unlock_user(argptr, arg, 0);
5374 
5375     /* Swizzle the data pointer to our local copy and call! */
5376     host_blkpg->data = &host_part;
5377     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5378 
5379 out:
5380     return ret;
5381 }
5382 
5383 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5384                                 int fd, int cmd, abi_long arg)
5385 {
5386     const argtype *arg_type = ie->arg_type;
5387     const StructEntry *se;
5388     const argtype *field_types;
5389     const int *dst_offsets, *src_offsets;
5390     int target_size;
5391     void *argptr;
5392     abi_ulong *target_rt_dev_ptr = NULL;
5393     unsigned long *host_rt_dev_ptr = NULL;
5394     abi_long ret;
5395     int i;
5396 
5397     assert(ie->access == IOC_W);
5398     assert(*arg_type == TYPE_PTR);
5399     arg_type++;
5400     assert(*arg_type == TYPE_STRUCT);
5401     target_size = thunk_type_size(arg_type, 0);
5402     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5403     if (!argptr) {
5404         return -TARGET_EFAULT;
5405     }
5406     arg_type++;
5407     assert(*arg_type == (int)STRUCT_rtentry);
5408     se = struct_entries + *arg_type++;
5409     assert(se->convert[0] == NULL);
5410     /* convert struct here to be able to catch rt_dev string */
5411     field_types = se->field_types;
5412     dst_offsets = se->field_offsets[THUNK_HOST];
5413     src_offsets = se->field_offsets[THUNK_TARGET];
5414     for (i = 0; i < se->nb_fields; i++) {
5415         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5416             assert(*field_types == TYPE_PTRVOID);
5417             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5418             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5419             if (*target_rt_dev_ptr != 0) {
5420                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5421                                                   tswapal(*target_rt_dev_ptr));
5422                 if (!*host_rt_dev_ptr) {
5423                     unlock_user(argptr, arg, 0);
5424                     return -TARGET_EFAULT;
5425                 }
5426             } else {
5427                 *host_rt_dev_ptr = 0;
5428             }
5429             field_types++;
5430             continue;
5431         }
5432         field_types = thunk_convert(buf_temp + dst_offsets[i],
5433                                     argptr + src_offsets[i],
5434                                     field_types, THUNK_HOST);
5435     }
5436     unlock_user(argptr, arg, 0);
5437 
5438     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5439 
5440     assert(host_rt_dev_ptr != NULL);
5441     assert(target_rt_dev_ptr != NULL);
5442     if (*host_rt_dev_ptr != 0) {
5443         unlock_user((void *)*host_rt_dev_ptr,
5444                     *target_rt_dev_ptr, 0);
5445     }
5446     return ret;
5447 }
5448 
5449 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5450                                      int fd, int cmd, abi_long arg)
5451 {
5452     int sig = target_to_host_signal(arg);
5453     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5454 }
5455 
5456 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5457                                     int fd, int cmd, abi_long arg)
5458 {
5459     struct timeval tv;
5460     abi_long ret;
5461 
5462     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5463     if (is_error(ret)) {
5464         return ret;
5465     }
5466 
5467     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5468         if (copy_to_user_timeval(arg, &tv)) {
5469             return -TARGET_EFAULT;
5470         }
5471     } else {
5472         if (copy_to_user_timeval64(arg, &tv)) {
5473             return -TARGET_EFAULT;
5474         }
5475     }
5476 
5477     return ret;
5478 }
5479 
5480 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5481                                       int fd, int cmd, abi_long arg)
5482 {
5483     struct timespec ts;
5484     abi_long ret;
5485 
5486     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5487     if (is_error(ret)) {
5488         return ret;
5489     }
5490 
5491     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5492         if (host_to_target_timespec(arg, &ts)) {
5493             return -TARGET_EFAULT;
5494         }
5495     } else{
5496         if (host_to_target_timespec64(arg, &ts)) {
5497             return -TARGET_EFAULT;
5498         }
5499     }
5500 
5501     return ret;
5502 }
5503 
5504 #ifdef TIOCGPTPEER
5505 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5506                                      int fd, int cmd, abi_long arg)
5507 {
5508     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5509     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5510 }
5511 #endif
5512 
5513 #ifdef HAVE_DRM_H
5514 
5515 static void unlock_drm_version(struct drm_version *host_ver,
5516                                struct target_drm_version *target_ver,
5517                                bool copy)
5518 {
5519     unlock_user(host_ver->name, target_ver->name,
5520                                 copy ? host_ver->name_len : 0);
5521     unlock_user(host_ver->date, target_ver->date,
5522                                 copy ? host_ver->date_len : 0);
5523     unlock_user(host_ver->desc, target_ver->desc,
5524                                 copy ? host_ver->desc_len : 0);
5525 }
5526 
5527 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5528                                           struct target_drm_version *target_ver)
5529 {
5530     memset(host_ver, 0, sizeof(*host_ver));
5531 
5532     __get_user(host_ver->name_len, &target_ver->name_len);
5533     if (host_ver->name_len) {
5534         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5535                                    target_ver->name_len, 0);
5536         if (!host_ver->name) {
5537             return -EFAULT;
5538         }
5539     }
5540 
5541     __get_user(host_ver->date_len, &target_ver->date_len);
5542     if (host_ver->date_len) {
5543         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5544                                    target_ver->date_len, 0);
5545         if (!host_ver->date) {
5546             goto err;
5547         }
5548     }
5549 
5550     __get_user(host_ver->desc_len, &target_ver->desc_len);
5551     if (host_ver->desc_len) {
5552         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5553                                    target_ver->desc_len, 0);
5554         if (!host_ver->desc) {
5555             goto err;
5556         }
5557     }
5558 
5559     return 0;
5560 err:
5561     unlock_drm_version(host_ver, target_ver, false);
5562     return -EFAULT;
5563 }
5564 
5565 static inline void host_to_target_drmversion(
5566                                           struct target_drm_version *target_ver,
5567                                           struct drm_version *host_ver)
5568 {
5569     __put_user(host_ver->version_major, &target_ver->version_major);
5570     __put_user(host_ver->version_minor, &target_ver->version_minor);
5571     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5572     __put_user(host_ver->name_len, &target_ver->name_len);
5573     __put_user(host_ver->date_len, &target_ver->date_len);
5574     __put_user(host_ver->desc_len, &target_ver->desc_len);
5575     unlock_drm_version(host_ver, target_ver, true);
5576 }
5577 
5578 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5579                              int fd, int cmd, abi_long arg)
5580 {
5581     struct drm_version *ver;
5582     struct target_drm_version *target_ver;
5583     abi_long ret;
5584 
5585     switch (ie->host_cmd) {
5586     case DRM_IOCTL_VERSION:
5587         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5588             return -TARGET_EFAULT;
5589         }
5590         ver = (struct drm_version *)buf_temp;
5591         ret = target_to_host_drmversion(ver, target_ver);
5592         if (!is_error(ret)) {
5593             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5594             if (is_error(ret)) {
5595                 unlock_drm_version(ver, target_ver, false);
5596             } else {
5597                 host_to_target_drmversion(target_ver, ver);
5598             }
5599         }
5600         unlock_user_struct(target_ver, arg, 0);
5601         return ret;
5602     }
5603     return -TARGET_ENOSYS;
5604 }
5605 
5606 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5607                                            struct drm_i915_getparam *gparam,
5608                                            int fd, abi_long arg)
5609 {
5610     abi_long ret;
5611     int value;
5612     struct target_drm_i915_getparam *target_gparam;
5613 
5614     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5615         return -TARGET_EFAULT;
5616     }
5617 
5618     __get_user(gparam->param, &target_gparam->param);
5619     gparam->value = &value;
5620     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5621     put_user_s32(value, target_gparam->value);
5622 
5623     unlock_user_struct(target_gparam, arg, 0);
5624     return ret;
5625 }
5626 
5627 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5628                                   int fd, int cmd, abi_long arg)
5629 {
5630     switch (ie->host_cmd) {
5631     case DRM_IOCTL_I915_GETPARAM:
5632         return do_ioctl_drm_i915_getparam(ie,
5633                                           (struct drm_i915_getparam *)buf_temp,
5634                                           fd, arg);
5635     default:
5636         return -TARGET_ENOSYS;
5637     }
5638 }
5639 
5640 #endif
5641 
5642 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5643                                         int fd, int cmd, abi_long arg)
5644 {
5645     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5646     struct tun_filter *target_filter;
5647     char *target_addr;
5648 
5649     assert(ie->access == IOC_W);
5650 
5651     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5652     if (!target_filter) {
5653         return -TARGET_EFAULT;
5654     }
5655     filter->flags = tswap16(target_filter->flags);
5656     filter->count = tswap16(target_filter->count);
5657     unlock_user(target_filter, arg, 0);
5658 
5659     if (filter->count) {
5660         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5661             MAX_STRUCT_SIZE) {
5662             return -TARGET_EFAULT;
5663         }
5664 
5665         target_addr = lock_user(VERIFY_READ,
5666                                 arg + offsetof(struct tun_filter, addr),
5667                                 filter->count * ETH_ALEN, 1);
5668         if (!target_addr) {
5669             return -TARGET_EFAULT;
5670         }
5671         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5672         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5673     }
5674 
5675     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5676 }
5677 
5678 IOCTLEntry ioctl_entries[] = {
5679 #define IOCTL(cmd, access, ...) \
5680     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5681 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5682     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5683 #define IOCTL_IGNORE(cmd) \
5684     { TARGET_ ## cmd, 0, #cmd },
5685 #include "ioctls.h"
5686     { 0, 0, },
5687 };
5688 
5689 /* ??? Implement proper locking for ioctls.  */
5690 /* do_ioctl() Must return target values and target errnos. */
5691 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5692 {
5693     const IOCTLEntry *ie;
5694     const argtype *arg_type;
5695     abi_long ret;
5696     uint8_t buf_temp[MAX_STRUCT_SIZE];
5697     int target_size;
5698     void *argptr;
5699 
5700     ie = ioctl_entries;
5701     for(;;) {
5702         if (ie->target_cmd == 0) {
5703             qemu_log_mask(
5704                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5705             return -TARGET_ENOSYS;
5706         }
5707         if (ie->target_cmd == cmd)
5708             break;
5709         ie++;
5710     }
5711     arg_type = ie->arg_type;
5712     if (ie->do_ioctl) {
5713         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5714     } else if (!ie->host_cmd) {
5715         /* Some architectures define BSD ioctls in their headers
5716            that are not implemented in Linux.  */
5717         return -TARGET_ENOSYS;
5718     }
5719 
5720     switch(arg_type[0]) {
5721     case TYPE_NULL:
5722         /* no argument */
5723         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5724         break;
5725     case TYPE_PTRVOID:
5726     case TYPE_INT:
5727     case TYPE_LONG:
5728     case TYPE_ULONG:
5729         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5730         break;
5731     case TYPE_PTR:
5732         arg_type++;
5733         target_size = thunk_type_size(arg_type, 0);
5734         switch(ie->access) {
5735         case IOC_R:
5736             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5737             if (!is_error(ret)) {
5738                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5739                 if (!argptr)
5740                     return -TARGET_EFAULT;
5741                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5742                 unlock_user(argptr, arg, target_size);
5743             }
5744             break;
5745         case IOC_W:
5746             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5747             if (!argptr)
5748                 return -TARGET_EFAULT;
5749             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5750             unlock_user(argptr, arg, 0);
5751             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5752             break;
5753         default:
5754         case IOC_RW:
5755             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5756             if (!argptr)
5757                 return -TARGET_EFAULT;
5758             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5759             unlock_user(argptr, arg, 0);
5760             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5761             if (!is_error(ret)) {
5762                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5763                 if (!argptr)
5764                     return -TARGET_EFAULT;
5765                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5766                 unlock_user(argptr, arg, target_size);
5767             }
5768             break;
5769         }
5770         break;
5771     default:
5772         qemu_log_mask(LOG_UNIMP,
5773                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5774                       (long)cmd, arg_type[0]);
5775         ret = -TARGET_ENOSYS;
5776         break;
5777     }
5778     return ret;
5779 }
5780 
5781 static const bitmask_transtbl iflag_tbl[] = {
5782         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5783         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5784         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5785         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5786         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5787         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5788         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5789         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5790         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5791         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5792         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5793         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5794         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5795         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5796         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5797         { 0, 0, 0, 0 }
5798 };
5799 
5800 static const bitmask_transtbl oflag_tbl[] = {
5801 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5802 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5803 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5804 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5805 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5806 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5807 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5808 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5809 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5810 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5811 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5812 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5813 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5814 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5815 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5816 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5817 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5818 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5819 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5820 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5821 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5822 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5823 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5824 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5825 	{ 0, 0, 0, 0 }
5826 };
5827 
5828 static const bitmask_transtbl cflag_tbl[] = {
5829 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5830 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5831 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5832 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5833 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5834 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5835 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5836 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5837 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5838 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5839 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5840 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5841 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5842 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5843 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5844 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5845 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5846 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5847 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5848 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5849 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5850 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5851 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5852 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5853 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5854 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5855 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5856 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5857 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5858 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5859 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5860 	{ 0, 0, 0, 0 }
5861 };
5862 
5863 static const bitmask_transtbl lflag_tbl[] = {
5864   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5865   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5866   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5867   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5868   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5869   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5870   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5871   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5872   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5873   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5874   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5875   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5876   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5877   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5878   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5879   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5880   { 0, 0, 0, 0 }
5881 };
5882 
5883 static void target_to_host_termios (void *dst, const void *src)
5884 {
5885     struct host_termios *host = dst;
5886     const struct target_termios *target = src;
5887 
5888     host->c_iflag =
5889         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5890     host->c_oflag =
5891         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5892     host->c_cflag =
5893         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5894     host->c_lflag =
5895         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5896     host->c_line = target->c_line;
5897 
5898     memset(host->c_cc, 0, sizeof(host->c_cc));
5899     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5900     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5901     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5902     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5903     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5904     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5905     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5906     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5907     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5908     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5909     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5910     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5911     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5912     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5913     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5914     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5915     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5916 }
5917 
5918 static void host_to_target_termios (void *dst, const void *src)
5919 {
5920     struct target_termios *target = dst;
5921     const struct host_termios *host = src;
5922 
5923     target->c_iflag =
5924         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5925     target->c_oflag =
5926         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5927     target->c_cflag =
5928         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5929     target->c_lflag =
5930         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5931     target->c_line = host->c_line;
5932 
5933     memset(target->c_cc, 0, sizeof(target->c_cc));
5934     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5935     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5936     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5937     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5938     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5939     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5940     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5941     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5942     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5943     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5944     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5945     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5946     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5947     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5948     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5949     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5950     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5951 }
5952 
5953 static const StructEntry struct_termios_def = {
5954     .convert = { host_to_target_termios, target_to_host_termios },
5955     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5956     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5957     .print = print_termios,
5958 };
5959 
5960 static const bitmask_transtbl mmap_flags_tbl[] = {
5961     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5962     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5963     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5964     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5965       MAP_ANONYMOUS, MAP_ANONYMOUS },
5966     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5967       MAP_GROWSDOWN, MAP_GROWSDOWN },
5968     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5969       MAP_DENYWRITE, MAP_DENYWRITE },
5970     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5971       MAP_EXECUTABLE, MAP_EXECUTABLE },
5972     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5973     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5974       MAP_NORESERVE, MAP_NORESERVE },
5975     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5976     /* MAP_STACK had been ignored by the kernel for quite some time.
5977        Recognize it for the target insofar as we do not want to pass
5978        it through to the host.  */
5979     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5980     { 0, 0, 0, 0 }
5981 };
5982 
5983 /*
5984  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5985  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5986  */
5987 #if defined(TARGET_I386)
5988 
5989 /* NOTE: there is really one LDT for all the threads */
5990 static uint8_t *ldt_table;
5991 
5992 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5993 {
5994     int size;
5995     void *p;
5996 
5997     if (!ldt_table)
5998         return 0;
5999     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6000     if (size > bytecount)
6001         size = bytecount;
6002     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6003     if (!p)
6004         return -TARGET_EFAULT;
6005     /* ??? Should this by byteswapped?  */
6006     memcpy(p, ldt_table, size);
6007     unlock_user(p, ptr, size);
6008     return size;
6009 }
6010 
6011 /* XXX: add locking support */
6012 static abi_long write_ldt(CPUX86State *env,
6013                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6014 {
6015     struct target_modify_ldt_ldt_s ldt_info;
6016     struct target_modify_ldt_ldt_s *target_ldt_info;
6017     int seg_32bit, contents, read_exec_only, limit_in_pages;
6018     int seg_not_present, useable, lm;
6019     uint32_t *lp, entry_1, entry_2;
6020 
6021     if (bytecount != sizeof(ldt_info))
6022         return -TARGET_EINVAL;
6023     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6024         return -TARGET_EFAULT;
6025     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6026     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6027     ldt_info.limit = tswap32(target_ldt_info->limit);
6028     ldt_info.flags = tswap32(target_ldt_info->flags);
6029     unlock_user_struct(target_ldt_info, ptr, 0);
6030 
6031     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6032         return -TARGET_EINVAL;
6033     seg_32bit = ldt_info.flags & 1;
6034     contents = (ldt_info.flags >> 1) & 3;
6035     read_exec_only = (ldt_info.flags >> 3) & 1;
6036     limit_in_pages = (ldt_info.flags >> 4) & 1;
6037     seg_not_present = (ldt_info.flags >> 5) & 1;
6038     useable = (ldt_info.flags >> 6) & 1;
6039 #ifdef TARGET_ABI32
6040     lm = 0;
6041 #else
6042     lm = (ldt_info.flags >> 7) & 1;
6043 #endif
6044     if (contents == 3) {
6045         if (oldmode)
6046             return -TARGET_EINVAL;
6047         if (seg_not_present == 0)
6048             return -TARGET_EINVAL;
6049     }
6050     /* allocate the LDT */
6051     if (!ldt_table) {
6052         env->ldt.base = target_mmap(0,
6053                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6054                                     PROT_READ|PROT_WRITE,
6055                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6056         if (env->ldt.base == -1)
6057             return -TARGET_ENOMEM;
6058         memset(g2h_untagged(env->ldt.base), 0,
6059                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6060         env->ldt.limit = 0xffff;
6061         ldt_table = g2h_untagged(env->ldt.base);
6062     }
6063 
6064     /* NOTE: same code as Linux kernel */
6065     /* Allow LDTs to be cleared by the user. */
6066     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6067         if (oldmode ||
6068             (contents == 0		&&
6069              read_exec_only == 1	&&
6070              seg_32bit == 0		&&
6071              limit_in_pages == 0	&&
6072              seg_not_present == 1	&&
6073              useable == 0 )) {
6074             entry_1 = 0;
6075             entry_2 = 0;
6076             goto install;
6077         }
6078     }
6079 
6080     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6081         (ldt_info.limit & 0x0ffff);
6082     entry_2 = (ldt_info.base_addr & 0xff000000) |
6083         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6084         (ldt_info.limit & 0xf0000) |
6085         ((read_exec_only ^ 1) << 9) |
6086         (contents << 10) |
6087         ((seg_not_present ^ 1) << 15) |
6088         (seg_32bit << 22) |
6089         (limit_in_pages << 23) |
6090         (lm << 21) |
6091         0x7000;
6092     if (!oldmode)
6093         entry_2 |= (useable << 20);
6094 
6095     /* Install the new entry ...  */
6096 install:
6097     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6098     lp[0] = tswap32(entry_1);
6099     lp[1] = tswap32(entry_2);
6100     return 0;
6101 }
6102 
6103 /* specific and weird i386 syscalls */
6104 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6105                               unsigned long bytecount)
6106 {
6107     abi_long ret;
6108 
6109     switch (func) {
6110     case 0:
6111         ret = read_ldt(ptr, bytecount);
6112         break;
6113     case 1:
6114         ret = write_ldt(env, ptr, bytecount, 1);
6115         break;
6116     case 0x11:
6117         ret = write_ldt(env, ptr, bytecount, 0);
6118         break;
6119     default:
6120         ret = -TARGET_ENOSYS;
6121         break;
6122     }
6123     return ret;
6124 }
6125 
6126 #if defined(TARGET_ABI32)
6127 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6128 {
6129     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6130     struct target_modify_ldt_ldt_s ldt_info;
6131     struct target_modify_ldt_ldt_s *target_ldt_info;
6132     int seg_32bit, contents, read_exec_only, limit_in_pages;
6133     int seg_not_present, useable, lm;
6134     uint32_t *lp, entry_1, entry_2;
6135     int i;
6136 
6137     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6138     if (!target_ldt_info)
6139         return -TARGET_EFAULT;
6140     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6141     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6142     ldt_info.limit = tswap32(target_ldt_info->limit);
6143     ldt_info.flags = tswap32(target_ldt_info->flags);
6144     if (ldt_info.entry_number == -1) {
6145         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6146             if (gdt_table[i] == 0) {
6147                 ldt_info.entry_number = i;
6148                 target_ldt_info->entry_number = tswap32(i);
6149                 break;
6150             }
6151         }
6152     }
6153     unlock_user_struct(target_ldt_info, ptr, 1);
6154 
6155     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6156         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6157            return -TARGET_EINVAL;
6158     seg_32bit = ldt_info.flags & 1;
6159     contents = (ldt_info.flags >> 1) & 3;
6160     read_exec_only = (ldt_info.flags >> 3) & 1;
6161     limit_in_pages = (ldt_info.flags >> 4) & 1;
6162     seg_not_present = (ldt_info.flags >> 5) & 1;
6163     useable = (ldt_info.flags >> 6) & 1;
6164 #ifdef TARGET_ABI32
6165     lm = 0;
6166 #else
6167     lm = (ldt_info.flags >> 7) & 1;
6168 #endif
6169 
6170     if (contents == 3) {
6171         if (seg_not_present == 0)
6172             return -TARGET_EINVAL;
6173     }
6174 
6175     /* NOTE: same code as Linux kernel */
6176     /* Allow LDTs to be cleared by the user. */
6177     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6178         if ((contents == 0             &&
6179              read_exec_only == 1       &&
6180              seg_32bit == 0            &&
6181              limit_in_pages == 0       &&
6182              seg_not_present == 1      &&
6183              useable == 0 )) {
6184             entry_1 = 0;
6185             entry_2 = 0;
6186             goto install;
6187         }
6188     }
6189 
6190     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6191         (ldt_info.limit & 0x0ffff);
6192     entry_2 = (ldt_info.base_addr & 0xff000000) |
6193         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6194         (ldt_info.limit & 0xf0000) |
6195         ((read_exec_only ^ 1) << 9) |
6196         (contents << 10) |
6197         ((seg_not_present ^ 1) << 15) |
6198         (seg_32bit << 22) |
6199         (limit_in_pages << 23) |
6200         (useable << 20) |
6201         (lm << 21) |
6202         0x7000;
6203 
6204     /* Install the new entry ...  */
6205 install:
6206     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6207     lp[0] = tswap32(entry_1);
6208     lp[1] = tswap32(entry_2);
6209     return 0;
6210 }
6211 
6212 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6213 {
6214     struct target_modify_ldt_ldt_s *target_ldt_info;
6215     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6216     uint32_t base_addr, limit, flags;
6217     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6218     int seg_not_present, useable, lm;
6219     uint32_t *lp, entry_1, entry_2;
6220 
6221     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6222     if (!target_ldt_info)
6223         return -TARGET_EFAULT;
6224     idx = tswap32(target_ldt_info->entry_number);
6225     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6226         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6227         unlock_user_struct(target_ldt_info, ptr, 1);
6228         return -TARGET_EINVAL;
6229     }
6230     lp = (uint32_t *)(gdt_table + idx);
6231     entry_1 = tswap32(lp[0]);
6232     entry_2 = tswap32(lp[1]);
6233 
6234     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6235     contents = (entry_2 >> 10) & 3;
6236     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6237     seg_32bit = (entry_2 >> 22) & 1;
6238     limit_in_pages = (entry_2 >> 23) & 1;
6239     useable = (entry_2 >> 20) & 1;
6240 #ifdef TARGET_ABI32
6241     lm = 0;
6242 #else
6243     lm = (entry_2 >> 21) & 1;
6244 #endif
6245     flags = (seg_32bit << 0) | (contents << 1) |
6246         (read_exec_only << 3) | (limit_in_pages << 4) |
6247         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6248     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6249     base_addr = (entry_1 >> 16) |
6250         (entry_2 & 0xff000000) |
6251         ((entry_2 & 0xff) << 16);
6252     target_ldt_info->base_addr = tswapal(base_addr);
6253     target_ldt_info->limit = tswap32(limit);
6254     target_ldt_info->flags = tswap32(flags);
6255     unlock_user_struct(target_ldt_info, ptr, 1);
6256     return 0;
6257 }
6258 
6259 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6260 {
6261     return -TARGET_ENOSYS;
6262 }
6263 #else
6264 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6265 {
6266     abi_long ret = 0;
6267     abi_ulong val;
6268     int idx;
6269 
6270     switch(code) {
6271     case TARGET_ARCH_SET_GS:
6272     case TARGET_ARCH_SET_FS:
6273         if (code == TARGET_ARCH_SET_GS)
6274             idx = R_GS;
6275         else
6276             idx = R_FS;
6277         cpu_x86_load_seg(env, idx, 0);
6278         env->segs[idx].base = addr;
6279         break;
6280     case TARGET_ARCH_GET_GS:
6281     case TARGET_ARCH_GET_FS:
6282         if (code == TARGET_ARCH_GET_GS)
6283             idx = R_GS;
6284         else
6285             idx = R_FS;
6286         val = env->segs[idx].base;
6287         if (put_user(val, addr, abi_ulong))
6288             ret = -TARGET_EFAULT;
6289         break;
6290     default:
6291         ret = -TARGET_EINVAL;
6292         break;
6293     }
6294     return ret;
6295 }
6296 #endif /* defined(TARGET_ABI32 */
6297 #endif /* defined(TARGET_I386) */
6298 
6299 /*
6300  * These constants are generic.  Supply any that are missing from the host.
6301  */
6302 #ifndef PR_SET_NAME
6303 # define PR_SET_NAME    15
6304 # define PR_GET_NAME    16
6305 #endif
6306 #ifndef PR_SET_FP_MODE
6307 # define PR_SET_FP_MODE 45
6308 # define PR_GET_FP_MODE 46
6309 # define PR_FP_MODE_FR   (1 << 0)
6310 # define PR_FP_MODE_FRE  (1 << 1)
6311 #endif
6312 #ifndef PR_SVE_SET_VL
6313 # define PR_SVE_SET_VL  50
6314 # define PR_SVE_GET_VL  51
6315 # define PR_SVE_VL_LEN_MASK  0xffff
6316 # define PR_SVE_VL_INHERIT   (1 << 17)
6317 #endif
6318 #ifndef PR_PAC_RESET_KEYS
6319 # define PR_PAC_RESET_KEYS  54
6320 # define PR_PAC_APIAKEY   (1 << 0)
6321 # define PR_PAC_APIBKEY   (1 << 1)
6322 # define PR_PAC_APDAKEY   (1 << 2)
6323 # define PR_PAC_APDBKEY   (1 << 3)
6324 # define PR_PAC_APGAKEY   (1 << 4)
6325 #endif
6326 #ifndef PR_SET_TAGGED_ADDR_CTRL
6327 # define PR_SET_TAGGED_ADDR_CTRL 55
6328 # define PR_GET_TAGGED_ADDR_CTRL 56
6329 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6330 #endif
6331 #ifndef PR_MTE_TCF_SHIFT
6332 # define PR_MTE_TCF_SHIFT       1
6333 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6334 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6335 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6336 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6337 # define PR_MTE_TAG_SHIFT       3
6338 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6339 #endif
6340 #ifndef PR_SET_IO_FLUSHER
6341 # define PR_SET_IO_FLUSHER 57
6342 # define PR_GET_IO_FLUSHER 58
6343 #endif
6344 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6345 # define PR_SET_SYSCALL_USER_DISPATCH 59
6346 #endif
6347 
6348 #include "target_prctl.h"
6349 
6350 static abi_long do_prctl_inval0(CPUArchState *env)
6351 {
6352     return -TARGET_EINVAL;
6353 }
6354 
6355 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6356 {
6357     return -TARGET_EINVAL;
6358 }
6359 
6360 #ifndef do_prctl_get_fp_mode
6361 #define do_prctl_get_fp_mode do_prctl_inval0
6362 #endif
6363 #ifndef do_prctl_set_fp_mode
6364 #define do_prctl_set_fp_mode do_prctl_inval1
6365 #endif
6366 #ifndef do_prctl_get_vl
6367 #define do_prctl_get_vl do_prctl_inval0
6368 #endif
6369 #ifndef do_prctl_set_vl
6370 #define do_prctl_set_vl do_prctl_inval1
6371 #endif
6372 #ifndef do_prctl_reset_keys
6373 #define do_prctl_reset_keys do_prctl_inval1
6374 #endif
6375 #ifndef do_prctl_set_tagged_addr_ctrl
6376 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6377 #endif
6378 #ifndef do_prctl_get_tagged_addr_ctrl
6379 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6380 #endif
6381 #ifndef do_prctl_get_unalign
6382 #define do_prctl_get_unalign do_prctl_inval1
6383 #endif
6384 #ifndef do_prctl_set_unalign
6385 #define do_prctl_set_unalign do_prctl_inval1
6386 #endif
6387 
6388 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6389                          abi_long arg3, abi_long arg4, abi_long arg5)
6390 {
6391     abi_long ret;
6392 
6393     switch (option) {
6394     case PR_GET_PDEATHSIG:
6395         {
6396             int deathsig;
6397             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6398                                   arg3, arg4, arg5));
6399             if (!is_error(ret) &&
6400                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6401                 return -TARGET_EFAULT;
6402             }
6403             return ret;
6404         }
6405     case PR_SET_PDEATHSIG:
6406         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6407                                arg3, arg4, arg5));
6408     case PR_GET_NAME:
6409         {
6410             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6411             if (!name) {
6412                 return -TARGET_EFAULT;
6413             }
6414             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6415                                   arg3, arg4, arg5));
6416             unlock_user(name, arg2, 16);
6417             return ret;
6418         }
6419     case PR_SET_NAME:
6420         {
6421             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6422             if (!name) {
6423                 return -TARGET_EFAULT;
6424             }
6425             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6426                                   arg3, arg4, arg5));
6427             unlock_user(name, arg2, 0);
6428             return ret;
6429         }
6430     case PR_GET_FP_MODE:
6431         return do_prctl_get_fp_mode(env);
6432     case PR_SET_FP_MODE:
6433         return do_prctl_set_fp_mode(env, arg2);
6434     case PR_SVE_GET_VL:
6435         return do_prctl_get_vl(env);
6436     case PR_SVE_SET_VL:
6437         return do_prctl_set_vl(env, arg2);
6438     case PR_PAC_RESET_KEYS:
6439         if (arg3 || arg4 || arg5) {
6440             return -TARGET_EINVAL;
6441         }
6442         return do_prctl_reset_keys(env, arg2);
6443     case PR_SET_TAGGED_ADDR_CTRL:
6444         if (arg3 || arg4 || arg5) {
6445             return -TARGET_EINVAL;
6446         }
6447         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6448     case PR_GET_TAGGED_ADDR_CTRL:
6449         if (arg2 || arg3 || arg4 || arg5) {
6450             return -TARGET_EINVAL;
6451         }
6452         return do_prctl_get_tagged_addr_ctrl(env);
6453 
6454     case PR_GET_UNALIGN:
6455         return do_prctl_get_unalign(env, arg2);
6456     case PR_SET_UNALIGN:
6457         return do_prctl_set_unalign(env, arg2);
6458 
6459     case PR_CAP_AMBIENT:
6460     case PR_CAPBSET_READ:
6461     case PR_CAPBSET_DROP:
6462     case PR_GET_DUMPABLE:
6463     case PR_SET_DUMPABLE:
6464     case PR_GET_KEEPCAPS:
6465     case PR_SET_KEEPCAPS:
6466     case PR_GET_SECUREBITS:
6467     case PR_SET_SECUREBITS:
6468     case PR_GET_TIMING:
6469     case PR_SET_TIMING:
6470     case PR_GET_TIMERSLACK:
6471     case PR_SET_TIMERSLACK:
6472     case PR_MCE_KILL:
6473     case PR_MCE_KILL_GET:
6474     case PR_GET_NO_NEW_PRIVS:
6475     case PR_SET_NO_NEW_PRIVS:
6476     case PR_GET_IO_FLUSHER:
6477     case PR_SET_IO_FLUSHER:
6478         /* Some prctl options have no pointer arguments and we can pass on. */
6479         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6480 
6481     case PR_GET_CHILD_SUBREAPER:
6482     case PR_SET_CHILD_SUBREAPER:
6483     case PR_GET_SPECULATION_CTRL:
6484     case PR_SET_SPECULATION_CTRL:
6485     case PR_GET_TID_ADDRESS:
6486         /* TODO */
6487         return -TARGET_EINVAL;
6488 
6489     case PR_GET_FPEXC:
6490     case PR_SET_FPEXC:
6491         /* Was used for SPE on PowerPC. */
6492         return -TARGET_EINVAL;
6493 
6494     case PR_GET_ENDIAN:
6495     case PR_SET_ENDIAN:
6496     case PR_GET_FPEMU:
6497     case PR_SET_FPEMU:
6498     case PR_SET_MM:
6499     case PR_GET_SECCOMP:
6500     case PR_SET_SECCOMP:
6501     case PR_SET_SYSCALL_USER_DISPATCH:
6502     case PR_GET_THP_DISABLE:
6503     case PR_SET_THP_DISABLE:
6504     case PR_GET_TSC:
6505     case PR_SET_TSC:
6506         /* Disable to prevent the target disabling stuff we need. */
6507         return -TARGET_EINVAL;
6508 
6509     default:
6510         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6511                       option);
6512         return -TARGET_EINVAL;
6513     }
6514 }
6515 
6516 #define NEW_STACK_SIZE 0x40000
6517 
6518 
6519 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6520 typedef struct {
6521     CPUArchState *env;
6522     pthread_mutex_t mutex;
6523     pthread_cond_t cond;
6524     pthread_t thread;
6525     uint32_t tid;
6526     abi_ulong child_tidptr;
6527     abi_ulong parent_tidptr;
6528     sigset_t sigmask;
6529 } new_thread_info;
6530 
6531 static void *clone_func(void *arg)
6532 {
6533     new_thread_info *info = arg;
6534     CPUArchState *env;
6535     CPUState *cpu;
6536     TaskState *ts;
6537 
6538     rcu_register_thread();
6539     tcg_register_thread();
6540     env = info->env;
6541     cpu = env_cpu(env);
6542     thread_cpu = cpu;
6543     ts = (TaskState *)cpu->opaque;
6544     info->tid = sys_gettid();
6545     task_settid(ts);
6546     if (info->child_tidptr)
6547         put_user_u32(info->tid, info->child_tidptr);
6548     if (info->parent_tidptr)
6549         put_user_u32(info->tid, info->parent_tidptr);
6550     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6551     /* Enable signals.  */
6552     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6553     /* Signal to the parent that we're ready.  */
6554     pthread_mutex_lock(&info->mutex);
6555     pthread_cond_broadcast(&info->cond);
6556     pthread_mutex_unlock(&info->mutex);
6557     /* Wait until the parent has finished initializing the tls state.  */
6558     pthread_mutex_lock(&clone_lock);
6559     pthread_mutex_unlock(&clone_lock);
6560     cpu_loop(env);
6561     /* never exits */
6562     return NULL;
6563 }
6564 
6565 /* do_fork() Must return host values and target errnos (unlike most
6566    do_*() functions). */
6567 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6568                    abi_ulong parent_tidptr, target_ulong newtls,
6569                    abi_ulong child_tidptr)
6570 {
6571     CPUState *cpu = env_cpu(env);
6572     int ret;
6573     TaskState *ts;
6574     CPUState *new_cpu;
6575     CPUArchState *new_env;
6576     sigset_t sigmask;
6577 
6578     flags &= ~CLONE_IGNORED_FLAGS;
6579 
6580     /* Emulate vfork() with fork() */
6581     if (flags & CLONE_VFORK)
6582         flags &= ~(CLONE_VFORK | CLONE_VM);
6583 
6584     if (flags & CLONE_VM) {
6585         TaskState *parent_ts = (TaskState *)cpu->opaque;
6586         new_thread_info info;
6587         pthread_attr_t attr;
6588 
6589         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6590             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6591             return -TARGET_EINVAL;
6592         }
6593 
6594         ts = g_new0(TaskState, 1);
6595         init_task_state(ts);
6596 
6597         /* Grab a mutex so that thread setup appears atomic.  */
6598         pthread_mutex_lock(&clone_lock);
6599 
6600         /*
6601          * If this is our first additional thread, we need to ensure we
6602          * generate code for parallel execution and flush old translations.
6603          * Do this now so that the copy gets CF_PARALLEL too.
6604          */
6605         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6606             cpu->tcg_cflags |= CF_PARALLEL;
6607             tb_flush(cpu);
6608         }
6609 
6610         /* we create a new CPU instance. */
6611         new_env = cpu_copy(env);
6612         /* Init regs that differ from the parent.  */
6613         cpu_clone_regs_child(new_env, newsp, flags);
6614         cpu_clone_regs_parent(env, flags);
6615         new_cpu = env_cpu(new_env);
6616         new_cpu->opaque = ts;
6617         ts->bprm = parent_ts->bprm;
6618         ts->info = parent_ts->info;
6619         ts->signal_mask = parent_ts->signal_mask;
6620 
6621         if (flags & CLONE_CHILD_CLEARTID) {
6622             ts->child_tidptr = child_tidptr;
6623         }
6624 
6625         if (flags & CLONE_SETTLS) {
6626             cpu_set_tls (new_env, newtls);
6627         }
6628 
6629         memset(&info, 0, sizeof(info));
6630         pthread_mutex_init(&info.mutex, NULL);
6631         pthread_mutex_lock(&info.mutex);
6632         pthread_cond_init(&info.cond, NULL);
6633         info.env = new_env;
6634         if (flags & CLONE_CHILD_SETTID) {
6635             info.child_tidptr = child_tidptr;
6636         }
6637         if (flags & CLONE_PARENT_SETTID) {
6638             info.parent_tidptr = parent_tidptr;
6639         }
6640 
6641         ret = pthread_attr_init(&attr);
6642         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6643         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6644         /* It is not safe to deliver signals until the child has finished
6645            initializing, so temporarily block all signals.  */
6646         sigfillset(&sigmask);
6647         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6648         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6649 
6650         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6651         /* TODO: Free new CPU state if thread creation failed.  */
6652 
6653         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6654         pthread_attr_destroy(&attr);
6655         if (ret == 0) {
6656             /* Wait for the child to initialize.  */
6657             pthread_cond_wait(&info.cond, &info.mutex);
6658             ret = info.tid;
6659         } else {
6660             ret = -1;
6661         }
6662         pthread_mutex_unlock(&info.mutex);
6663         pthread_cond_destroy(&info.cond);
6664         pthread_mutex_destroy(&info.mutex);
6665         pthread_mutex_unlock(&clone_lock);
6666     } else {
6667         /* if no CLONE_VM, we consider it is a fork */
6668         if (flags & CLONE_INVALID_FORK_FLAGS) {
6669             return -TARGET_EINVAL;
6670         }
6671 
6672         /* We can't support custom termination signals */
6673         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6674             return -TARGET_EINVAL;
6675         }
6676 
6677         if (block_signals()) {
6678             return -QEMU_ERESTARTSYS;
6679         }
6680 
6681         fork_start();
6682         ret = fork();
6683         if (ret == 0) {
6684             /* Child Process.  */
6685             cpu_clone_regs_child(env, newsp, flags);
6686             fork_end(1);
6687             /* There is a race condition here.  The parent process could
6688                theoretically read the TID in the child process before the child
6689                tid is set.  This would require using either ptrace
6690                (not implemented) or having *_tidptr to point at a shared memory
6691                mapping.  We can't repeat the spinlock hack used above because
6692                the child process gets its own copy of the lock.  */
6693             if (flags & CLONE_CHILD_SETTID)
6694                 put_user_u32(sys_gettid(), child_tidptr);
6695             if (flags & CLONE_PARENT_SETTID)
6696                 put_user_u32(sys_gettid(), parent_tidptr);
6697             ts = (TaskState *)cpu->opaque;
6698             if (flags & CLONE_SETTLS)
6699                 cpu_set_tls (env, newtls);
6700             if (flags & CLONE_CHILD_CLEARTID)
6701                 ts->child_tidptr = child_tidptr;
6702         } else {
6703             cpu_clone_regs_parent(env, flags);
6704             fork_end(0);
6705         }
6706     }
6707     return ret;
6708 }
6709 
6710 /* warning : doesn't handle linux specific flags... */
6711 static int target_to_host_fcntl_cmd(int cmd)
6712 {
6713     int ret;
6714 
6715     switch(cmd) {
6716     case TARGET_F_DUPFD:
6717     case TARGET_F_GETFD:
6718     case TARGET_F_SETFD:
6719     case TARGET_F_GETFL:
6720     case TARGET_F_SETFL:
6721     case TARGET_F_OFD_GETLK:
6722     case TARGET_F_OFD_SETLK:
6723     case TARGET_F_OFD_SETLKW:
6724         ret = cmd;
6725         break;
6726     case TARGET_F_GETLK:
6727         ret = F_GETLK64;
6728         break;
6729     case TARGET_F_SETLK:
6730         ret = F_SETLK64;
6731         break;
6732     case TARGET_F_SETLKW:
6733         ret = F_SETLKW64;
6734         break;
6735     case TARGET_F_GETOWN:
6736         ret = F_GETOWN;
6737         break;
6738     case TARGET_F_SETOWN:
6739         ret = F_SETOWN;
6740         break;
6741     case TARGET_F_GETSIG:
6742         ret = F_GETSIG;
6743         break;
6744     case TARGET_F_SETSIG:
6745         ret = F_SETSIG;
6746         break;
6747 #if TARGET_ABI_BITS == 32
6748     case TARGET_F_GETLK64:
6749         ret = F_GETLK64;
6750         break;
6751     case TARGET_F_SETLK64:
6752         ret = F_SETLK64;
6753         break;
6754     case TARGET_F_SETLKW64:
6755         ret = F_SETLKW64;
6756         break;
6757 #endif
6758     case TARGET_F_SETLEASE:
6759         ret = F_SETLEASE;
6760         break;
6761     case TARGET_F_GETLEASE:
6762         ret = F_GETLEASE;
6763         break;
6764 #ifdef F_DUPFD_CLOEXEC
6765     case TARGET_F_DUPFD_CLOEXEC:
6766         ret = F_DUPFD_CLOEXEC;
6767         break;
6768 #endif
6769     case TARGET_F_NOTIFY:
6770         ret = F_NOTIFY;
6771         break;
6772 #ifdef F_GETOWN_EX
6773     case TARGET_F_GETOWN_EX:
6774         ret = F_GETOWN_EX;
6775         break;
6776 #endif
6777 #ifdef F_SETOWN_EX
6778     case TARGET_F_SETOWN_EX:
6779         ret = F_SETOWN_EX;
6780         break;
6781 #endif
6782 #ifdef F_SETPIPE_SZ
6783     case TARGET_F_SETPIPE_SZ:
6784         ret = F_SETPIPE_SZ;
6785         break;
6786     case TARGET_F_GETPIPE_SZ:
6787         ret = F_GETPIPE_SZ;
6788         break;
6789 #endif
6790 #ifdef F_ADD_SEALS
6791     case TARGET_F_ADD_SEALS:
6792         ret = F_ADD_SEALS;
6793         break;
6794     case TARGET_F_GET_SEALS:
6795         ret = F_GET_SEALS;
6796         break;
6797 #endif
6798     default:
6799         ret = -TARGET_EINVAL;
6800         break;
6801     }
6802 
6803 #if defined(__powerpc64__)
6804     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6805      * is not supported by kernel. The glibc fcntl call actually adjusts
6806      * them to 5, 6 and 7 before making the syscall(). Since we make the
6807      * syscall directly, adjust to what is supported by the kernel.
6808      */
6809     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6810         ret -= F_GETLK64 - 5;
6811     }
6812 #endif
6813 
6814     return ret;
6815 }
6816 
6817 #define FLOCK_TRANSTBL \
6818     switch (type) { \
6819     TRANSTBL_CONVERT(F_RDLCK); \
6820     TRANSTBL_CONVERT(F_WRLCK); \
6821     TRANSTBL_CONVERT(F_UNLCK); \
6822     }
6823 
6824 static int target_to_host_flock(int type)
6825 {
6826 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6827     FLOCK_TRANSTBL
6828 #undef  TRANSTBL_CONVERT
6829     return -TARGET_EINVAL;
6830 }
6831 
6832 static int host_to_target_flock(int type)
6833 {
6834 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6835     FLOCK_TRANSTBL
6836 #undef  TRANSTBL_CONVERT
6837     /* if we don't know how to convert the value coming
6838      * from the host we copy to the target field as-is
6839      */
6840     return type;
6841 }
6842 
6843 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6844                                             abi_ulong target_flock_addr)
6845 {
6846     struct target_flock *target_fl;
6847     int l_type;
6848 
6849     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6850         return -TARGET_EFAULT;
6851     }
6852 
6853     __get_user(l_type, &target_fl->l_type);
6854     l_type = target_to_host_flock(l_type);
6855     if (l_type < 0) {
6856         return l_type;
6857     }
6858     fl->l_type = l_type;
6859     __get_user(fl->l_whence, &target_fl->l_whence);
6860     __get_user(fl->l_start, &target_fl->l_start);
6861     __get_user(fl->l_len, &target_fl->l_len);
6862     __get_user(fl->l_pid, &target_fl->l_pid);
6863     unlock_user_struct(target_fl, target_flock_addr, 0);
6864     return 0;
6865 }
6866 
6867 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6868                                           const struct flock64 *fl)
6869 {
6870     struct target_flock *target_fl;
6871     short l_type;
6872 
6873     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6874         return -TARGET_EFAULT;
6875     }
6876 
6877     l_type = host_to_target_flock(fl->l_type);
6878     __put_user(l_type, &target_fl->l_type);
6879     __put_user(fl->l_whence, &target_fl->l_whence);
6880     __put_user(fl->l_start, &target_fl->l_start);
6881     __put_user(fl->l_len, &target_fl->l_len);
6882     __put_user(fl->l_pid, &target_fl->l_pid);
6883     unlock_user_struct(target_fl, target_flock_addr, 1);
6884     return 0;
6885 }
6886 
6887 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6888 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6889 
6890 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6891 struct target_oabi_flock64 {
6892     abi_short l_type;
6893     abi_short l_whence;
6894     abi_llong l_start;
6895     abi_llong l_len;
6896     abi_int   l_pid;
6897 } QEMU_PACKED;
6898 
6899 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6900                                                    abi_ulong target_flock_addr)
6901 {
6902     struct target_oabi_flock64 *target_fl;
6903     int l_type;
6904 
6905     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6906         return -TARGET_EFAULT;
6907     }
6908 
6909     __get_user(l_type, &target_fl->l_type);
6910     l_type = target_to_host_flock(l_type);
6911     if (l_type < 0) {
6912         return l_type;
6913     }
6914     fl->l_type = l_type;
6915     __get_user(fl->l_whence, &target_fl->l_whence);
6916     __get_user(fl->l_start, &target_fl->l_start);
6917     __get_user(fl->l_len, &target_fl->l_len);
6918     __get_user(fl->l_pid, &target_fl->l_pid);
6919     unlock_user_struct(target_fl, target_flock_addr, 0);
6920     return 0;
6921 }
6922 
6923 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6924                                                  const struct flock64 *fl)
6925 {
6926     struct target_oabi_flock64 *target_fl;
6927     short l_type;
6928 
6929     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6930         return -TARGET_EFAULT;
6931     }
6932 
6933     l_type = host_to_target_flock(fl->l_type);
6934     __put_user(l_type, &target_fl->l_type);
6935     __put_user(fl->l_whence, &target_fl->l_whence);
6936     __put_user(fl->l_start, &target_fl->l_start);
6937     __put_user(fl->l_len, &target_fl->l_len);
6938     __put_user(fl->l_pid, &target_fl->l_pid);
6939     unlock_user_struct(target_fl, target_flock_addr, 1);
6940     return 0;
6941 }
6942 #endif
6943 
6944 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6945                                               abi_ulong target_flock_addr)
6946 {
6947     struct target_flock64 *target_fl;
6948     int l_type;
6949 
6950     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6951         return -TARGET_EFAULT;
6952     }
6953 
6954     __get_user(l_type, &target_fl->l_type);
6955     l_type = target_to_host_flock(l_type);
6956     if (l_type < 0) {
6957         return l_type;
6958     }
6959     fl->l_type = l_type;
6960     __get_user(fl->l_whence, &target_fl->l_whence);
6961     __get_user(fl->l_start, &target_fl->l_start);
6962     __get_user(fl->l_len, &target_fl->l_len);
6963     __get_user(fl->l_pid, &target_fl->l_pid);
6964     unlock_user_struct(target_fl, target_flock_addr, 0);
6965     return 0;
6966 }
6967 
6968 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6969                                             const struct flock64 *fl)
6970 {
6971     struct target_flock64 *target_fl;
6972     short l_type;
6973 
6974     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6975         return -TARGET_EFAULT;
6976     }
6977 
6978     l_type = host_to_target_flock(fl->l_type);
6979     __put_user(l_type, &target_fl->l_type);
6980     __put_user(fl->l_whence, &target_fl->l_whence);
6981     __put_user(fl->l_start, &target_fl->l_start);
6982     __put_user(fl->l_len, &target_fl->l_len);
6983     __put_user(fl->l_pid, &target_fl->l_pid);
6984     unlock_user_struct(target_fl, target_flock_addr, 1);
6985     return 0;
6986 }
6987 
6988 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6989 {
6990     struct flock64 fl64;
6991 #ifdef F_GETOWN_EX
6992     struct f_owner_ex fox;
6993     struct target_f_owner_ex *target_fox;
6994 #endif
6995     abi_long ret;
6996     int host_cmd = target_to_host_fcntl_cmd(cmd);
6997 
6998     if (host_cmd == -TARGET_EINVAL)
6999 	    return host_cmd;
7000 
7001     switch(cmd) {
7002     case TARGET_F_GETLK:
7003         ret = copy_from_user_flock(&fl64, arg);
7004         if (ret) {
7005             return ret;
7006         }
7007         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7008         if (ret == 0) {
7009             ret = copy_to_user_flock(arg, &fl64);
7010         }
7011         break;
7012 
7013     case TARGET_F_SETLK:
7014     case TARGET_F_SETLKW:
7015         ret = copy_from_user_flock(&fl64, arg);
7016         if (ret) {
7017             return ret;
7018         }
7019         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7020         break;
7021 
7022     case TARGET_F_GETLK64:
7023     case TARGET_F_OFD_GETLK:
7024         ret = copy_from_user_flock64(&fl64, arg);
7025         if (ret) {
7026             return ret;
7027         }
7028         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7029         if (ret == 0) {
7030             ret = copy_to_user_flock64(arg, &fl64);
7031         }
7032         break;
7033     case TARGET_F_SETLK64:
7034     case TARGET_F_SETLKW64:
7035     case TARGET_F_OFD_SETLK:
7036     case TARGET_F_OFD_SETLKW:
7037         ret = copy_from_user_flock64(&fl64, arg);
7038         if (ret) {
7039             return ret;
7040         }
7041         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7042         break;
7043 
7044     case TARGET_F_GETFL:
7045         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7046         if (ret >= 0) {
7047             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7048         }
7049         break;
7050 
7051     case TARGET_F_SETFL:
7052         ret = get_errno(safe_fcntl(fd, host_cmd,
7053                                    target_to_host_bitmask(arg,
7054                                                           fcntl_flags_tbl)));
7055         break;
7056 
7057 #ifdef F_GETOWN_EX
7058     case TARGET_F_GETOWN_EX:
7059         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7060         if (ret >= 0) {
7061             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7062                 return -TARGET_EFAULT;
7063             target_fox->type = tswap32(fox.type);
7064             target_fox->pid = tswap32(fox.pid);
7065             unlock_user_struct(target_fox, arg, 1);
7066         }
7067         break;
7068 #endif
7069 
7070 #ifdef F_SETOWN_EX
7071     case TARGET_F_SETOWN_EX:
7072         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7073             return -TARGET_EFAULT;
7074         fox.type = tswap32(target_fox->type);
7075         fox.pid = tswap32(target_fox->pid);
7076         unlock_user_struct(target_fox, arg, 0);
7077         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7078         break;
7079 #endif
7080 
7081     case TARGET_F_SETSIG:
7082         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7083         break;
7084 
7085     case TARGET_F_GETSIG:
7086         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7087         break;
7088 
7089     case TARGET_F_SETOWN:
7090     case TARGET_F_GETOWN:
7091     case TARGET_F_SETLEASE:
7092     case TARGET_F_GETLEASE:
7093     case TARGET_F_SETPIPE_SZ:
7094     case TARGET_F_GETPIPE_SZ:
7095     case TARGET_F_ADD_SEALS:
7096     case TARGET_F_GET_SEALS:
7097         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7098         break;
7099 
7100     default:
7101         ret = get_errno(safe_fcntl(fd, cmd, arg));
7102         break;
7103     }
7104     return ret;
7105 }
7106 
7107 #ifdef USE_UID16
7108 
7109 static inline int high2lowuid(int uid)
7110 {
7111     if (uid > 65535)
7112         return 65534;
7113     else
7114         return uid;
7115 }
7116 
7117 static inline int high2lowgid(int gid)
7118 {
7119     if (gid > 65535)
7120         return 65534;
7121     else
7122         return gid;
7123 }
7124 
7125 static inline int low2highuid(int uid)
7126 {
7127     if ((int16_t)uid == -1)
7128         return -1;
7129     else
7130         return uid;
7131 }
7132 
7133 static inline int low2highgid(int gid)
7134 {
7135     if ((int16_t)gid == -1)
7136         return -1;
7137     else
7138         return gid;
7139 }
7140 static inline int tswapid(int id)
7141 {
7142     return tswap16(id);
7143 }
7144 
7145 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7146 
7147 #else /* !USE_UID16 */
7148 static inline int high2lowuid(int uid)
7149 {
7150     return uid;
7151 }
7152 static inline int high2lowgid(int gid)
7153 {
7154     return gid;
7155 }
7156 static inline int low2highuid(int uid)
7157 {
7158     return uid;
7159 }
7160 static inline int low2highgid(int gid)
7161 {
7162     return gid;
7163 }
7164 static inline int tswapid(int id)
7165 {
7166     return tswap32(id);
7167 }
7168 
7169 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7170 
7171 #endif /* USE_UID16 */
7172 
7173 /* We must do direct syscalls for setting UID/GID, because we want to
7174  * implement the Linux system call semantics of "change only for this thread",
7175  * not the libc/POSIX semantics of "change for all threads in process".
7176  * (See http://ewontfix.com/17/ for more details.)
7177  * We use the 32-bit version of the syscalls if present; if it is not
7178  * then either the host architecture supports 32-bit UIDs natively with
7179  * the standard syscall, or the 16-bit UID is the best we can do.
7180  */
7181 #ifdef __NR_setuid32
7182 #define __NR_sys_setuid __NR_setuid32
7183 #else
7184 #define __NR_sys_setuid __NR_setuid
7185 #endif
7186 #ifdef __NR_setgid32
7187 #define __NR_sys_setgid __NR_setgid32
7188 #else
7189 #define __NR_sys_setgid __NR_setgid
7190 #endif
7191 #ifdef __NR_setresuid32
7192 #define __NR_sys_setresuid __NR_setresuid32
7193 #else
7194 #define __NR_sys_setresuid __NR_setresuid
7195 #endif
7196 #ifdef __NR_setresgid32
7197 #define __NR_sys_setresgid __NR_setresgid32
7198 #else
7199 #define __NR_sys_setresgid __NR_setresgid
7200 #endif
7201 
7202 _syscall1(int, sys_setuid, uid_t, uid)
7203 _syscall1(int, sys_setgid, gid_t, gid)
7204 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7205 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7206 
7207 void syscall_init(void)
7208 {
7209     IOCTLEntry *ie;
7210     const argtype *arg_type;
7211     int size;
7212 
7213     thunk_init(STRUCT_MAX);
7214 
7215 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7216 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7217 #include "syscall_types.h"
7218 #undef STRUCT
7219 #undef STRUCT_SPECIAL
7220 
7221     /* we patch the ioctl size if necessary. We rely on the fact that
7222        no ioctl has all the bits at '1' in the size field */
7223     ie = ioctl_entries;
7224     while (ie->target_cmd != 0) {
7225         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7226             TARGET_IOC_SIZEMASK) {
7227             arg_type = ie->arg_type;
7228             if (arg_type[0] != TYPE_PTR) {
7229                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7230                         ie->target_cmd);
7231                 exit(1);
7232             }
7233             arg_type++;
7234             size = thunk_type_size(arg_type, 0);
7235             ie->target_cmd = (ie->target_cmd &
7236                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7237                 (size << TARGET_IOC_SIZESHIFT);
7238         }
7239 
7240         /* automatic consistency check if same arch */
7241 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7242     (defined(__x86_64__) && defined(TARGET_X86_64))
7243         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7244             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7245                     ie->name, ie->target_cmd, ie->host_cmd);
7246         }
7247 #endif
7248         ie++;
7249     }
7250 }
7251 
7252 #ifdef TARGET_NR_truncate64
7253 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7254                                          abi_long arg2,
7255                                          abi_long arg3,
7256                                          abi_long arg4)
7257 {
7258     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7259         arg2 = arg3;
7260         arg3 = arg4;
7261     }
7262     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7263 }
7264 #endif
7265 
7266 #ifdef TARGET_NR_ftruncate64
7267 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7268                                           abi_long arg2,
7269                                           abi_long arg3,
7270                                           abi_long arg4)
7271 {
7272     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7273         arg2 = arg3;
7274         arg3 = arg4;
7275     }
7276     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7277 }
7278 #endif
7279 
7280 #if defined(TARGET_NR_timer_settime) || \
7281     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7282 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7283                                                  abi_ulong target_addr)
7284 {
7285     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7286                                 offsetof(struct target_itimerspec,
7287                                          it_interval)) ||
7288         target_to_host_timespec(&host_its->it_value, target_addr +
7289                                 offsetof(struct target_itimerspec,
7290                                          it_value))) {
7291         return -TARGET_EFAULT;
7292     }
7293 
7294     return 0;
7295 }
7296 #endif
7297 
7298 #if defined(TARGET_NR_timer_settime64) || \
7299     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7300 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7301                                                    abi_ulong target_addr)
7302 {
7303     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7304                                   offsetof(struct target__kernel_itimerspec,
7305                                            it_interval)) ||
7306         target_to_host_timespec64(&host_its->it_value, target_addr +
7307                                   offsetof(struct target__kernel_itimerspec,
7308                                            it_value))) {
7309         return -TARGET_EFAULT;
7310     }
7311 
7312     return 0;
7313 }
7314 #endif
7315 
7316 #if ((defined(TARGET_NR_timerfd_gettime) || \
7317       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7318       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7319 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7320                                                  struct itimerspec *host_its)
7321 {
7322     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7323                                                        it_interval),
7324                                 &host_its->it_interval) ||
7325         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7326                                                        it_value),
7327                                 &host_its->it_value)) {
7328         return -TARGET_EFAULT;
7329     }
7330     return 0;
7331 }
7332 #endif
7333 
7334 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7335       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7336       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7337 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7338                                                    struct itimerspec *host_its)
7339 {
7340     if (host_to_target_timespec64(target_addr +
7341                                   offsetof(struct target__kernel_itimerspec,
7342                                            it_interval),
7343                                   &host_its->it_interval) ||
7344         host_to_target_timespec64(target_addr +
7345                                   offsetof(struct target__kernel_itimerspec,
7346                                            it_value),
7347                                   &host_its->it_value)) {
7348         return -TARGET_EFAULT;
7349     }
7350     return 0;
7351 }
7352 #endif
7353 
7354 #if defined(TARGET_NR_adjtimex) || \
7355     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7356 static inline abi_long target_to_host_timex(struct timex *host_tx,
7357                                             abi_long target_addr)
7358 {
7359     struct target_timex *target_tx;
7360 
7361     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7362         return -TARGET_EFAULT;
7363     }
7364 
7365     __get_user(host_tx->modes, &target_tx->modes);
7366     __get_user(host_tx->offset, &target_tx->offset);
7367     __get_user(host_tx->freq, &target_tx->freq);
7368     __get_user(host_tx->maxerror, &target_tx->maxerror);
7369     __get_user(host_tx->esterror, &target_tx->esterror);
7370     __get_user(host_tx->status, &target_tx->status);
7371     __get_user(host_tx->constant, &target_tx->constant);
7372     __get_user(host_tx->precision, &target_tx->precision);
7373     __get_user(host_tx->tolerance, &target_tx->tolerance);
7374     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7375     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7376     __get_user(host_tx->tick, &target_tx->tick);
7377     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7378     __get_user(host_tx->jitter, &target_tx->jitter);
7379     __get_user(host_tx->shift, &target_tx->shift);
7380     __get_user(host_tx->stabil, &target_tx->stabil);
7381     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7382     __get_user(host_tx->calcnt, &target_tx->calcnt);
7383     __get_user(host_tx->errcnt, &target_tx->errcnt);
7384     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7385     __get_user(host_tx->tai, &target_tx->tai);
7386 
7387     unlock_user_struct(target_tx, target_addr, 0);
7388     return 0;
7389 }
7390 
7391 static inline abi_long host_to_target_timex(abi_long target_addr,
7392                                             struct timex *host_tx)
7393 {
7394     struct target_timex *target_tx;
7395 
7396     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7397         return -TARGET_EFAULT;
7398     }
7399 
7400     __put_user(host_tx->modes, &target_tx->modes);
7401     __put_user(host_tx->offset, &target_tx->offset);
7402     __put_user(host_tx->freq, &target_tx->freq);
7403     __put_user(host_tx->maxerror, &target_tx->maxerror);
7404     __put_user(host_tx->esterror, &target_tx->esterror);
7405     __put_user(host_tx->status, &target_tx->status);
7406     __put_user(host_tx->constant, &target_tx->constant);
7407     __put_user(host_tx->precision, &target_tx->precision);
7408     __put_user(host_tx->tolerance, &target_tx->tolerance);
7409     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7410     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7411     __put_user(host_tx->tick, &target_tx->tick);
7412     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7413     __put_user(host_tx->jitter, &target_tx->jitter);
7414     __put_user(host_tx->shift, &target_tx->shift);
7415     __put_user(host_tx->stabil, &target_tx->stabil);
7416     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7417     __put_user(host_tx->calcnt, &target_tx->calcnt);
7418     __put_user(host_tx->errcnt, &target_tx->errcnt);
7419     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7420     __put_user(host_tx->tai, &target_tx->tai);
7421 
7422     unlock_user_struct(target_tx, target_addr, 1);
7423     return 0;
7424 }
7425 #endif
7426 
7427 
7428 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7429 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7430                                               abi_long target_addr)
7431 {
7432     struct target__kernel_timex *target_tx;
7433 
7434     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7435                                  offsetof(struct target__kernel_timex,
7436                                           time))) {
7437         return -TARGET_EFAULT;
7438     }
7439 
7440     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7441         return -TARGET_EFAULT;
7442     }
7443 
7444     __get_user(host_tx->modes, &target_tx->modes);
7445     __get_user(host_tx->offset, &target_tx->offset);
7446     __get_user(host_tx->freq, &target_tx->freq);
7447     __get_user(host_tx->maxerror, &target_tx->maxerror);
7448     __get_user(host_tx->esterror, &target_tx->esterror);
7449     __get_user(host_tx->status, &target_tx->status);
7450     __get_user(host_tx->constant, &target_tx->constant);
7451     __get_user(host_tx->precision, &target_tx->precision);
7452     __get_user(host_tx->tolerance, &target_tx->tolerance);
7453     __get_user(host_tx->tick, &target_tx->tick);
7454     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7455     __get_user(host_tx->jitter, &target_tx->jitter);
7456     __get_user(host_tx->shift, &target_tx->shift);
7457     __get_user(host_tx->stabil, &target_tx->stabil);
7458     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7459     __get_user(host_tx->calcnt, &target_tx->calcnt);
7460     __get_user(host_tx->errcnt, &target_tx->errcnt);
7461     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7462     __get_user(host_tx->tai, &target_tx->tai);
7463 
7464     unlock_user_struct(target_tx, target_addr, 0);
7465     return 0;
7466 }
7467 
7468 static inline abi_long host_to_target_timex64(abi_long target_addr,
7469                                               struct timex *host_tx)
7470 {
7471     struct target__kernel_timex *target_tx;
7472 
7473    if (copy_to_user_timeval64(target_addr +
7474                               offsetof(struct target__kernel_timex, time),
7475                               &host_tx->time)) {
7476         return -TARGET_EFAULT;
7477     }
7478 
7479     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7480         return -TARGET_EFAULT;
7481     }
7482 
7483     __put_user(host_tx->modes, &target_tx->modes);
7484     __put_user(host_tx->offset, &target_tx->offset);
7485     __put_user(host_tx->freq, &target_tx->freq);
7486     __put_user(host_tx->maxerror, &target_tx->maxerror);
7487     __put_user(host_tx->esterror, &target_tx->esterror);
7488     __put_user(host_tx->status, &target_tx->status);
7489     __put_user(host_tx->constant, &target_tx->constant);
7490     __put_user(host_tx->precision, &target_tx->precision);
7491     __put_user(host_tx->tolerance, &target_tx->tolerance);
7492     __put_user(host_tx->tick, &target_tx->tick);
7493     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7494     __put_user(host_tx->jitter, &target_tx->jitter);
7495     __put_user(host_tx->shift, &target_tx->shift);
7496     __put_user(host_tx->stabil, &target_tx->stabil);
7497     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7498     __put_user(host_tx->calcnt, &target_tx->calcnt);
7499     __put_user(host_tx->errcnt, &target_tx->errcnt);
7500     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7501     __put_user(host_tx->tai, &target_tx->tai);
7502 
7503     unlock_user_struct(target_tx, target_addr, 1);
7504     return 0;
7505 }
7506 #endif
7507 
7508 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7509 #define sigev_notify_thread_id _sigev_un._tid
7510 #endif
7511 
7512 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7513                                                abi_ulong target_addr)
7514 {
7515     struct target_sigevent *target_sevp;
7516 
7517     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7518         return -TARGET_EFAULT;
7519     }
7520 
7521     /* This union is awkward on 64 bit systems because it has a 32 bit
7522      * integer and a pointer in it; we follow the conversion approach
7523      * used for handling sigval types in signal.c so the guest should get
7524      * the correct value back even if we did a 64 bit byteswap and it's
7525      * using the 32 bit integer.
7526      */
7527     host_sevp->sigev_value.sival_ptr =
7528         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7529     host_sevp->sigev_signo =
7530         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7531     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7532     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7533 
7534     unlock_user_struct(target_sevp, target_addr, 1);
7535     return 0;
7536 }
7537 
7538 #if defined(TARGET_NR_mlockall)
7539 static inline int target_to_host_mlockall_arg(int arg)
7540 {
7541     int result = 0;
7542 
7543     if (arg & TARGET_MCL_CURRENT) {
7544         result |= MCL_CURRENT;
7545     }
7546     if (arg & TARGET_MCL_FUTURE) {
7547         result |= MCL_FUTURE;
7548     }
7549 #ifdef MCL_ONFAULT
7550     if (arg & TARGET_MCL_ONFAULT) {
7551         result |= MCL_ONFAULT;
7552     }
7553 #endif
7554 
7555     return result;
7556 }
7557 #endif
7558 
7559 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7560      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7561      defined(TARGET_NR_newfstatat))
7562 static inline abi_long host_to_target_stat64(void *cpu_env,
7563                                              abi_ulong target_addr,
7564                                              struct stat *host_st)
7565 {
7566 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7567     if (((CPUARMState *)cpu_env)->eabi) {
7568         struct target_eabi_stat64 *target_st;
7569 
7570         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7571             return -TARGET_EFAULT;
7572         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7573         __put_user(host_st->st_dev, &target_st->st_dev);
7574         __put_user(host_st->st_ino, &target_st->st_ino);
7575 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7576         __put_user(host_st->st_ino, &target_st->__st_ino);
7577 #endif
7578         __put_user(host_st->st_mode, &target_st->st_mode);
7579         __put_user(host_st->st_nlink, &target_st->st_nlink);
7580         __put_user(host_st->st_uid, &target_st->st_uid);
7581         __put_user(host_st->st_gid, &target_st->st_gid);
7582         __put_user(host_st->st_rdev, &target_st->st_rdev);
7583         __put_user(host_st->st_size, &target_st->st_size);
7584         __put_user(host_st->st_blksize, &target_st->st_blksize);
7585         __put_user(host_st->st_blocks, &target_st->st_blocks);
7586         __put_user(host_st->st_atime, &target_st->target_st_atime);
7587         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7588         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7589 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7590         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7591         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7592         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7593 #endif
7594         unlock_user_struct(target_st, target_addr, 1);
7595     } else
7596 #endif
7597     {
7598 #if defined(TARGET_HAS_STRUCT_STAT64)
7599         struct target_stat64 *target_st;
7600 #else
7601         struct target_stat *target_st;
7602 #endif
7603 
7604         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7605             return -TARGET_EFAULT;
7606         memset(target_st, 0, sizeof(*target_st));
7607         __put_user(host_st->st_dev, &target_st->st_dev);
7608         __put_user(host_st->st_ino, &target_st->st_ino);
7609 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7610         __put_user(host_st->st_ino, &target_st->__st_ino);
7611 #endif
7612         __put_user(host_st->st_mode, &target_st->st_mode);
7613         __put_user(host_st->st_nlink, &target_st->st_nlink);
7614         __put_user(host_st->st_uid, &target_st->st_uid);
7615         __put_user(host_st->st_gid, &target_st->st_gid);
7616         __put_user(host_st->st_rdev, &target_st->st_rdev);
7617         /* XXX: better use of kernel struct */
7618         __put_user(host_st->st_size, &target_st->st_size);
7619         __put_user(host_st->st_blksize, &target_st->st_blksize);
7620         __put_user(host_st->st_blocks, &target_st->st_blocks);
7621         __put_user(host_st->st_atime, &target_st->target_st_atime);
7622         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7623         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7624 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7625         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7626         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7627         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7628 #endif
7629         unlock_user_struct(target_st, target_addr, 1);
7630     }
7631 
7632     return 0;
7633 }
7634 #endif
7635 
7636 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7637 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7638                                             abi_ulong target_addr)
7639 {
7640     struct target_statx *target_stx;
7641 
7642     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7643         return -TARGET_EFAULT;
7644     }
7645     memset(target_stx, 0, sizeof(*target_stx));
7646 
7647     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7648     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7649     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7650     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7651     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7652     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7653     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7654     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7655     __put_user(host_stx->stx_size, &target_stx->stx_size);
7656     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7657     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7658     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7659     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7660     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7661     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7662     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7663     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7664     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7665     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7666     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7667     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7668     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7669     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7670 
7671     unlock_user_struct(target_stx, target_addr, 1);
7672 
7673     return 0;
7674 }
7675 #endif
7676 
7677 static int do_sys_futex(int *uaddr, int op, int val,
7678                          const struct timespec *timeout, int *uaddr2,
7679                          int val3)
7680 {
7681 #if HOST_LONG_BITS == 64
7682 #if defined(__NR_futex)
7683     /* always a 64-bit time_t, it doesn't define _time64 version  */
7684     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7685 
7686 #endif
7687 #else /* HOST_LONG_BITS == 64 */
7688 #if defined(__NR_futex_time64)
7689     if (sizeof(timeout->tv_sec) == 8) {
7690         /* _time64 function on 32bit arch */
7691         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7692     }
7693 #endif
7694 #if defined(__NR_futex)
7695     /* old function on 32bit arch */
7696     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7697 #endif
7698 #endif /* HOST_LONG_BITS == 64 */
7699     g_assert_not_reached();
7700 }
7701 
7702 static int do_safe_futex(int *uaddr, int op, int val,
7703                          const struct timespec *timeout, int *uaddr2,
7704                          int val3)
7705 {
7706 #if HOST_LONG_BITS == 64
7707 #if defined(__NR_futex)
7708     /* always a 64-bit time_t, it doesn't define _time64 version  */
7709     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7710 #endif
7711 #else /* HOST_LONG_BITS == 64 */
7712 #if defined(__NR_futex_time64)
7713     if (sizeof(timeout->tv_sec) == 8) {
7714         /* _time64 function on 32bit arch */
7715         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7716                                            val3));
7717     }
7718 #endif
7719 #if defined(__NR_futex)
7720     /* old function on 32bit arch */
7721     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7722 #endif
7723 #endif /* HOST_LONG_BITS == 64 */
7724     return -TARGET_ENOSYS;
7725 }
7726 
7727 /* ??? Using host futex calls even when target atomic operations
7728    are not really atomic probably breaks things.  However implementing
7729    futexes locally would make futexes shared between multiple processes
7730    tricky.  However they're probably useless because guest atomic
7731    operations won't work either.  */
7732 #if defined(TARGET_NR_futex)
7733 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7734                     target_ulong timeout, target_ulong uaddr2, int val3)
7735 {
7736     struct timespec ts, *pts;
7737     int base_op;
7738 
7739     /* ??? We assume FUTEX_* constants are the same on both host
7740        and target.  */
7741 #ifdef FUTEX_CMD_MASK
7742     base_op = op & FUTEX_CMD_MASK;
7743 #else
7744     base_op = op;
7745 #endif
7746     switch (base_op) {
7747     case FUTEX_WAIT:
7748     case FUTEX_WAIT_BITSET:
7749         if (timeout) {
7750             pts = &ts;
7751             target_to_host_timespec(pts, timeout);
7752         } else {
7753             pts = NULL;
7754         }
7755         return do_safe_futex(g2h(cpu, uaddr),
7756                              op, tswap32(val), pts, NULL, val3);
7757     case FUTEX_WAKE:
7758         return do_safe_futex(g2h(cpu, uaddr),
7759                              op, val, NULL, NULL, 0);
7760     case FUTEX_FD:
7761         return do_safe_futex(g2h(cpu, uaddr),
7762                              op, val, NULL, NULL, 0);
7763     case FUTEX_REQUEUE:
7764     case FUTEX_CMP_REQUEUE:
7765     case FUTEX_WAKE_OP:
7766         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7767            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7768            But the prototype takes a `struct timespec *'; insert casts
7769            to satisfy the compiler.  We do not need to tswap TIMEOUT
7770            since it's not compared to guest memory.  */
7771         pts = (struct timespec *)(uintptr_t) timeout;
7772         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7773                              (base_op == FUTEX_CMP_REQUEUE
7774                               ? tswap32(val3) : val3));
7775     default:
7776         return -TARGET_ENOSYS;
7777     }
7778 }
7779 #endif
7780 
7781 #if defined(TARGET_NR_futex_time64)
7782 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7783                            int val, target_ulong timeout,
7784                            target_ulong uaddr2, int val3)
7785 {
7786     struct timespec ts, *pts;
7787     int base_op;
7788 
7789     /* ??? We assume FUTEX_* constants are the same on both host
7790        and target.  */
7791 #ifdef FUTEX_CMD_MASK
7792     base_op = op & FUTEX_CMD_MASK;
7793 #else
7794     base_op = op;
7795 #endif
7796     switch (base_op) {
7797     case FUTEX_WAIT:
7798     case FUTEX_WAIT_BITSET:
7799         if (timeout) {
7800             pts = &ts;
7801             if (target_to_host_timespec64(pts, timeout)) {
7802                 return -TARGET_EFAULT;
7803             }
7804         } else {
7805             pts = NULL;
7806         }
7807         return do_safe_futex(g2h(cpu, uaddr), op,
7808                              tswap32(val), pts, NULL, val3);
7809     case FUTEX_WAKE:
7810         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7811     case FUTEX_FD:
7812         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7813     case FUTEX_REQUEUE:
7814     case FUTEX_CMP_REQUEUE:
7815     case FUTEX_WAKE_OP:
7816         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7817            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7818            But the prototype takes a `struct timespec *'; insert casts
7819            to satisfy the compiler.  We do not need to tswap TIMEOUT
7820            since it's not compared to guest memory.  */
7821         pts = (struct timespec *)(uintptr_t) timeout;
7822         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7823                              (base_op == FUTEX_CMP_REQUEUE
7824                               ? tswap32(val3) : val3));
7825     default:
7826         return -TARGET_ENOSYS;
7827     }
7828 }
7829 #endif
7830 
7831 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7832 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7833                                      abi_long handle, abi_long mount_id,
7834                                      abi_long flags)
7835 {
7836     struct file_handle *target_fh;
7837     struct file_handle *fh;
7838     int mid = 0;
7839     abi_long ret;
7840     char *name;
7841     unsigned int size, total_size;
7842 
7843     if (get_user_s32(size, handle)) {
7844         return -TARGET_EFAULT;
7845     }
7846 
7847     name = lock_user_string(pathname);
7848     if (!name) {
7849         return -TARGET_EFAULT;
7850     }
7851 
7852     total_size = sizeof(struct file_handle) + size;
7853     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7854     if (!target_fh) {
7855         unlock_user(name, pathname, 0);
7856         return -TARGET_EFAULT;
7857     }
7858 
7859     fh = g_malloc0(total_size);
7860     fh->handle_bytes = size;
7861 
7862     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7863     unlock_user(name, pathname, 0);
7864 
7865     /* man name_to_handle_at(2):
7866      * Other than the use of the handle_bytes field, the caller should treat
7867      * the file_handle structure as an opaque data type
7868      */
7869 
7870     memcpy(target_fh, fh, total_size);
7871     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7872     target_fh->handle_type = tswap32(fh->handle_type);
7873     g_free(fh);
7874     unlock_user(target_fh, handle, total_size);
7875 
7876     if (put_user_s32(mid, mount_id)) {
7877         return -TARGET_EFAULT;
7878     }
7879 
7880     return ret;
7881 
7882 }
7883 #endif
7884 
7885 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7886 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7887                                      abi_long flags)
7888 {
7889     struct file_handle *target_fh;
7890     struct file_handle *fh;
7891     unsigned int size, total_size;
7892     abi_long ret;
7893 
7894     if (get_user_s32(size, handle)) {
7895         return -TARGET_EFAULT;
7896     }
7897 
7898     total_size = sizeof(struct file_handle) + size;
7899     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7900     if (!target_fh) {
7901         return -TARGET_EFAULT;
7902     }
7903 
7904     fh = g_memdup(target_fh, total_size);
7905     fh->handle_bytes = size;
7906     fh->handle_type = tswap32(target_fh->handle_type);
7907 
7908     ret = get_errno(open_by_handle_at(mount_fd, fh,
7909                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7910 
7911     g_free(fh);
7912 
7913     unlock_user(target_fh, handle, total_size);
7914 
7915     return ret;
7916 }
7917 #endif
7918 
7919 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7920 
7921 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7922 {
7923     int host_flags;
7924     target_sigset_t *target_mask;
7925     sigset_t host_mask;
7926     abi_long ret;
7927 
7928     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7929         return -TARGET_EINVAL;
7930     }
7931     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7932         return -TARGET_EFAULT;
7933     }
7934 
7935     target_to_host_sigset(&host_mask, target_mask);
7936 
7937     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7938 
7939     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7940     if (ret >= 0) {
7941         fd_trans_register(ret, &target_signalfd_trans);
7942     }
7943 
7944     unlock_user_struct(target_mask, mask, 0);
7945 
7946     return ret;
7947 }
7948 #endif
7949 
7950 /* Map host to target signal numbers for the wait family of syscalls.
7951    Assume all other status bits are the same.  */
7952 int host_to_target_waitstatus(int status)
7953 {
7954     if (WIFSIGNALED(status)) {
7955         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7956     }
7957     if (WIFSTOPPED(status)) {
7958         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7959                | (status & 0xff);
7960     }
7961     return status;
7962 }
7963 
7964 static int open_self_cmdline(void *cpu_env, int fd)
7965 {
7966     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7967     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7968     int i;
7969 
7970     for (i = 0; i < bprm->argc; i++) {
7971         size_t len = strlen(bprm->argv[i]) + 1;
7972 
7973         if (write(fd, bprm->argv[i], len) != len) {
7974             return -1;
7975         }
7976     }
7977 
7978     return 0;
7979 }
7980 
7981 static int open_self_maps(void *cpu_env, int fd)
7982 {
7983     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7984     TaskState *ts = cpu->opaque;
7985     GSList *map_info = read_self_maps();
7986     GSList *s;
7987     int count;
7988 
7989     for (s = map_info; s; s = g_slist_next(s)) {
7990         MapInfo *e = (MapInfo *) s->data;
7991 
7992         if (h2g_valid(e->start)) {
7993             unsigned long min = e->start;
7994             unsigned long max = e->end;
7995             int flags = page_get_flags(h2g(min));
7996             const char *path;
7997 
7998             max = h2g_valid(max - 1) ?
7999                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8000 
8001             if (page_check_range(h2g(min), max - min, flags) == -1) {
8002                 continue;
8003             }
8004 
8005             if (h2g(min) == ts->info->stack_limit) {
8006                 path = "[stack]";
8007             } else {
8008                 path = e->path;
8009             }
8010 
8011             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8012                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8013                             h2g(min), h2g(max - 1) + 1,
8014                             (flags & PAGE_READ) ? 'r' : '-',
8015                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8016                             (flags & PAGE_EXEC) ? 'x' : '-',
8017                             e->is_priv ? 'p' : 's',
8018                             (uint64_t) e->offset, e->dev, e->inode);
8019             if (path) {
8020                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8021             } else {
8022                 dprintf(fd, "\n");
8023             }
8024         }
8025     }
8026 
8027     free_self_maps(map_info);
8028 
8029 #ifdef TARGET_VSYSCALL_PAGE
8030     /*
8031      * We only support execution from the vsyscall page.
8032      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8033      */
8034     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8035                     " --xp 00000000 00:00 0",
8036                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8037     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8038 #endif
8039 
8040     return 0;
8041 }
8042 
8043 static int open_self_stat(void *cpu_env, int fd)
8044 {
8045     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8046     TaskState *ts = cpu->opaque;
8047     g_autoptr(GString) buf = g_string_new(NULL);
8048     int i;
8049 
8050     for (i = 0; i < 44; i++) {
8051         if (i == 0) {
8052             /* pid */
8053             g_string_printf(buf, FMT_pid " ", getpid());
8054         } else if (i == 1) {
8055             /* app name */
8056             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8057             bin = bin ? bin + 1 : ts->bprm->argv[0];
8058             g_string_printf(buf, "(%.15s) ", bin);
8059         } else if (i == 3) {
8060             /* ppid */
8061             g_string_printf(buf, FMT_pid " ", getppid());
8062         } else if (i == 21) {
8063             /* starttime */
8064             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8065         } else if (i == 27) {
8066             /* stack bottom */
8067             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8068         } else {
8069             /* for the rest, there is MasterCard */
8070             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8071         }
8072 
8073         if (write(fd, buf->str, buf->len) != buf->len) {
8074             return -1;
8075         }
8076     }
8077 
8078     return 0;
8079 }
8080 
8081 static int open_self_auxv(void *cpu_env, int fd)
8082 {
8083     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8084     TaskState *ts = cpu->opaque;
8085     abi_ulong auxv = ts->info->saved_auxv;
8086     abi_ulong len = ts->info->auxv_len;
8087     char *ptr;
8088 
8089     /*
8090      * Auxiliary vector is stored in target process stack.
8091      * read in whole auxv vector and copy it to file
8092      */
8093     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8094     if (ptr != NULL) {
8095         while (len > 0) {
8096             ssize_t r;
8097             r = write(fd, ptr, len);
8098             if (r <= 0) {
8099                 break;
8100             }
8101             len -= r;
8102             ptr += r;
8103         }
8104         lseek(fd, 0, SEEK_SET);
8105         unlock_user(ptr, auxv, len);
8106     }
8107 
8108     return 0;
8109 }
8110 
8111 static int is_proc_myself(const char *filename, const char *entry)
8112 {
8113     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8114         filename += strlen("/proc/");
8115         if (!strncmp(filename, "self/", strlen("self/"))) {
8116             filename += strlen("self/");
8117         } else if (*filename >= '1' && *filename <= '9') {
8118             char myself[80];
8119             snprintf(myself, sizeof(myself), "%d/", getpid());
8120             if (!strncmp(filename, myself, strlen(myself))) {
8121                 filename += strlen(myself);
8122             } else {
8123                 return 0;
8124             }
8125         } else {
8126             return 0;
8127         }
8128         if (!strcmp(filename, entry)) {
8129             return 1;
8130         }
8131     }
8132     return 0;
8133 }
8134 
8135 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8136     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8137 static int is_proc(const char *filename, const char *entry)
8138 {
8139     return strcmp(filename, entry) == 0;
8140 }
8141 #endif
8142 
8143 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8144 static int open_net_route(void *cpu_env, int fd)
8145 {
8146     FILE *fp;
8147     char *line = NULL;
8148     size_t len = 0;
8149     ssize_t read;
8150 
8151     fp = fopen("/proc/net/route", "r");
8152     if (fp == NULL) {
8153         return -1;
8154     }
8155 
8156     /* read header */
8157 
8158     read = getline(&line, &len, fp);
8159     dprintf(fd, "%s", line);
8160 
8161     /* read routes */
8162 
8163     while ((read = getline(&line, &len, fp)) != -1) {
8164         char iface[16];
8165         uint32_t dest, gw, mask;
8166         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8167         int fields;
8168 
8169         fields = sscanf(line,
8170                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8171                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8172                         &mask, &mtu, &window, &irtt);
8173         if (fields != 11) {
8174             continue;
8175         }
8176         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8177                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8178                 metric, tswap32(mask), mtu, window, irtt);
8179     }
8180 
8181     free(line);
8182     fclose(fp);
8183 
8184     return 0;
8185 }
8186 #endif
8187 
8188 #if defined(TARGET_SPARC)
8189 static int open_cpuinfo(void *cpu_env, int fd)
8190 {
8191     dprintf(fd, "type\t\t: sun4u\n");
8192     return 0;
8193 }
8194 #endif
8195 
8196 #if defined(TARGET_HPPA)
8197 static int open_cpuinfo(void *cpu_env, int fd)
8198 {
8199     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8200     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8201     dprintf(fd, "capabilities\t: os32\n");
8202     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8203     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8204     return 0;
8205 }
8206 #endif
8207 
8208 #if defined(TARGET_M68K)
8209 static int open_hardware(void *cpu_env, int fd)
8210 {
8211     dprintf(fd, "Model:\t\tqemu-m68k\n");
8212     return 0;
8213 }
8214 #endif
8215 
8216 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8217 {
8218     struct fake_open {
8219         const char *filename;
8220         int (*fill)(void *cpu_env, int fd);
8221         int (*cmp)(const char *s1, const char *s2);
8222     };
8223     const struct fake_open *fake_open;
8224     static const struct fake_open fakes[] = {
8225         { "maps", open_self_maps, is_proc_myself },
8226         { "stat", open_self_stat, is_proc_myself },
8227         { "auxv", open_self_auxv, is_proc_myself },
8228         { "cmdline", open_self_cmdline, is_proc_myself },
8229 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8230         { "/proc/net/route", open_net_route, is_proc },
8231 #endif
8232 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8233         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8234 #endif
8235 #if defined(TARGET_M68K)
8236         { "/proc/hardware", open_hardware, is_proc },
8237 #endif
8238         { NULL, NULL, NULL }
8239     };
8240 
8241     if (is_proc_myself(pathname, "exe")) {
8242         int execfd = qemu_getauxval(AT_EXECFD);
8243         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8244     }
8245 
8246     for (fake_open = fakes; fake_open->filename; fake_open++) {
8247         if (fake_open->cmp(pathname, fake_open->filename)) {
8248             break;
8249         }
8250     }
8251 
8252     if (fake_open->filename) {
8253         const char *tmpdir;
8254         char filename[PATH_MAX];
8255         int fd, r;
8256 
8257         /* create temporary file to map stat to */
8258         tmpdir = getenv("TMPDIR");
8259         if (!tmpdir)
8260             tmpdir = "/tmp";
8261         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8262         fd = mkstemp(filename);
8263         if (fd < 0) {
8264             return fd;
8265         }
8266         unlink(filename);
8267 
8268         if ((r = fake_open->fill(cpu_env, fd))) {
8269             int e = errno;
8270             close(fd);
8271             errno = e;
8272             return r;
8273         }
8274         lseek(fd, 0, SEEK_SET);
8275 
8276         return fd;
8277     }
8278 
8279     return safe_openat(dirfd, path(pathname), flags, mode);
8280 }
8281 
8282 #define TIMER_MAGIC 0x0caf0000
8283 #define TIMER_MAGIC_MASK 0xffff0000
8284 
8285 /* Convert QEMU provided timer ID back to internal 16bit index format */
8286 static target_timer_t get_timer_id(abi_long arg)
8287 {
8288     target_timer_t timerid = arg;
8289 
8290     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8291         return -TARGET_EINVAL;
8292     }
8293 
8294     timerid &= 0xffff;
8295 
8296     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8297         return -TARGET_EINVAL;
8298     }
8299 
8300     return timerid;
8301 }
8302 
8303 static int target_to_host_cpu_mask(unsigned long *host_mask,
8304                                    size_t host_size,
8305                                    abi_ulong target_addr,
8306                                    size_t target_size)
8307 {
8308     unsigned target_bits = sizeof(abi_ulong) * 8;
8309     unsigned host_bits = sizeof(*host_mask) * 8;
8310     abi_ulong *target_mask;
8311     unsigned i, j;
8312 
8313     assert(host_size >= target_size);
8314 
8315     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8316     if (!target_mask) {
8317         return -TARGET_EFAULT;
8318     }
8319     memset(host_mask, 0, host_size);
8320 
8321     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8322         unsigned bit = i * target_bits;
8323         abi_ulong val;
8324 
8325         __get_user(val, &target_mask[i]);
8326         for (j = 0; j < target_bits; j++, bit++) {
8327             if (val & (1UL << j)) {
8328                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8329             }
8330         }
8331     }
8332 
8333     unlock_user(target_mask, target_addr, 0);
8334     return 0;
8335 }
8336 
8337 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8338                                    size_t host_size,
8339                                    abi_ulong target_addr,
8340                                    size_t target_size)
8341 {
8342     unsigned target_bits = sizeof(abi_ulong) * 8;
8343     unsigned host_bits = sizeof(*host_mask) * 8;
8344     abi_ulong *target_mask;
8345     unsigned i, j;
8346 
8347     assert(host_size >= target_size);
8348 
8349     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8350     if (!target_mask) {
8351         return -TARGET_EFAULT;
8352     }
8353 
8354     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8355         unsigned bit = i * target_bits;
8356         abi_ulong val = 0;
8357 
8358         for (j = 0; j < target_bits; j++, bit++) {
8359             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8360                 val |= 1UL << j;
8361             }
8362         }
8363         __put_user(val, &target_mask[i]);
8364     }
8365 
8366     unlock_user(target_mask, target_addr, target_size);
8367     return 0;
8368 }
8369 
8370 #ifdef TARGET_NR_getdents
8371 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8372 {
8373     g_autofree void *hdirp = NULL;
8374     void *tdirp;
8375     int hlen, hoff, toff;
8376     int hreclen, treclen;
8377     off64_t prev_diroff = 0;
8378 
8379     hdirp = g_try_malloc(count);
8380     if (!hdirp) {
8381         return -TARGET_ENOMEM;
8382     }
8383 
8384 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8385     hlen = sys_getdents(dirfd, hdirp, count);
8386 #else
8387     hlen = sys_getdents64(dirfd, hdirp, count);
8388 #endif
8389 
8390     hlen = get_errno(hlen);
8391     if (is_error(hlen)) {
8392         return hlen;
8393     }
8394 
8395     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8396     if (!tdirp) {
8397         return -TARGET_EFAULT;
8398     }
8399 
8400     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8401 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8402         struct linux_dirent *hde = hdirp + hoff;
8403 #else
8404         struct linux_dirent64 *hde = hdirp + hoff;
8405 #endif
8406         struct target_dirent *tde = tdirp + toff;
8407         int namelen;
8408         uint8_t type;
8409 
8410         namelen = strlen(hde->d_name);
8411         hreclen = hde->d_reclen;
8412         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8413         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8414 
8415         if (toff + treclen > count) {
8416             /*
8417              * If the host struct is smaller than the target struct, or
8418              * requires less alignment and thus packs into less space,
8419              * then the host can return more entries than we can pass
8420              * on to the guest.
8421              */
8422             if (toff == 0) {
8423                 toff = -TARGET_EINVAL; /* result buffer is too small */
8424                 break;
8425             }
8426             /*
8427              * Return what we have, resetting the file pointer to the
8428              * location of the first record not returned.
8429              */
8430             lseek64(dirfd, prev_diroff, SEEK_SET);
8431             break;
8432         }
8433 
8434         prev_diroff = hde->d_off;
8435         tde->d_ino = tswapal(hde->d_ino);
8436         tde->d_off = tswapal(hde->d_off);
8437         tde->d_reclen = tswap16(treclen);
8438         memcpy(tde->d_name, hde->d_name, namelen + 1);
8439 
8440         /*
8441          * The getdents type is in what was formerly a padding byte at the
8442          * end of the structure.
8443          */
8444 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8445         type = *((uint8_t *)hde + hreclen - 1);
8446 #else
8447         type = hde->d_type;
8448 #endif
8449         *((uint8_t *)tde + treclen - 1) = type;
8450     }
8451 
8452     unlock_user(tdirp, arg2, toff);
8453     return toff;
8454 }
8455 #endif /* TARGET_NR_getdents */
8456 
8457 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8458 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8459 {
8460     g_autofree void *hdirp = NULL;
8461     void *tdirp;
8462     int hlen, hoff, toff;
8463     int hreclen, treclen;
8464     off64_t prev_diroff = 0;
8465 
8466     hdirp = g_try_malloc(count);
8467     if (!hdirp) {
8468         return -TARGET_ENOMEM;
8469     }
8470 
8471     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8472     if (is_error(hlen)) {
8473         return hlen;
8474     }
8475 
8476     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8477     if (!tdirp) {
8478         return -TARGET_EFAULT;
8479     }
8480 
8481     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8482         struct linux_dirent64 *hde = hdirp + hoff;
8483         struct target_dirent64 *tde = tdirp + toff;
8484         int namelen;
8485 
8486         namelen = strlen(hde->d_name) + 1;
8487         hreclen = hde->d_reclen;
8488         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8489         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8490 
8491         if (toff + treclen > count) {
8492             /*
8493              * If the host struct is smaller than the target struct, or
8494              * requires less alignment and thus packs into less space,
8495              * then the host can return more entries than we can pass
8496              * on to the guest.
8497              */
8498             if (toff == 0) {
8499                 toff = -TARGET_EINVAL; /* result buffer is too small */
8500                 break;
8501             }
8502             /*
8503              * Return what we have, resetting the file pointer to the
8504              * location of the first record not returned.
8505              */
8506             lseek64(dirfd, prev_diroff, SEEK_SET);
8507             break;
8508         }
8509 
8510         prev_diroff = hde->d_off;
8511         tde->d_ino = tswap64(hde->d_ino);
8512         tde->d_off = tswap64(hde->d_off);
8513         tde->d_reclen = tswap16(treclen);
8514         tde->d_type = hde->d_type;
8515         memcpy(tde->d_name, hde->d_name, namelen);
8516     }
8517 
8518     unlock_user(tdirp, arg2, toff);
8519     return toff;
8520 }
8521 #endif /* TARGET_NR_getdents64 */
8522 
8523 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8524 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8525 #endif
8526 
8527 /* This is an internal helper for do_syscall so that it is easier
8528  * to have a single return point, so that actions, such as logging
8529  * of syscall results, can be performed.
8530  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8531  */
8532 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8533                             abi_long arg2, abi_long arg3, abi_long arg4,
8534                             abi_long arg5, abi_long arg6, abi_long arg7,
8535                             abi_long arg8)
8536 {
8537     CPUState *cpu = env_cpu(cpu_env);
8538     abi_long ret;
8539 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8540     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8541     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8542     || defined(TARGET_NR_statx)
8543     struct stat st;
8544 #endif
8545 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8546     || defined(TARGET_NR_fstatfs)
8547     struct statfs stfs;
8548 #endif
8549     void *p;
8550 
8551     switch(num) {
8552     case TARGET_NR_exit:
8553         /* In old applications this may be used to implement _exit(2).
8554            However in threaded applications it is used for thread termination,
8555            and _exit_group is used for application termination.
8556            Do thread termination if we have more then one thread.  */
8557 
8558         if (block_signals()) {
8559             return -QEMU_ERESTARTSYS;
8560         }
8561 
8562         pthread_mutex_lock(&clone_lock);
8563 
8564         if (CPU_NEXT(first_cpu)) {
8565             TaskState *ts = cpu->opaque;
8566 
8567             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8568             object_unref(OBJECT(cpu));
8569             /*
8570              * At this point the CPU should be unrealized and removed
8571              * from cpu lists. We can clean-up the rest of the thread
8572              * data without the lock held.
8573              */
8574 
8575             pthread_mutex_unlock(&clone_lock);
8576 
8577             if (ts->child_tidptr) {
8578                 put_user_u32(0, ts->child_tidptr);
8579                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8580                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8581             }
8582             thread_cpu = NULL;
8583             g_free(ts);
8584             rcu_unregister_thread();
8585             pthread_exit(NULL);
8586         }
8587 
8588         pthread_mutex_unlock(&clone_lock);
8589         preexit_cleanup(cpu_env, arg1);
8590         _exit(arg1);
8591         return 0; /* avoid warning */
8592     case TARGET_NR_read:
8593         if (arg2 == 0 && arg3 == 0) {
8594             return get_errno(safe_read(arg1, 0, 0));
8595         } else {
8596             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8597                 return -TARGET_EFAULT;
8598             ret = get_errno(safe_read(arg1, p, arg3));
8599             if (ret >= 0 &&
8600                 fd_trans_host_to_target_data(arg1)) {
8601                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8602             }
8603             unlock_user(p, arg2, ret);
8604         }
8605         return ret;
8606     case TARGET_NR_write:
8607         if (arg2 == 0 && arg3 == 0) {
8608             return get_errno(safe_write(arg1, 0, 0));
8609         }
8610         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8611             return -TARGET_EFAULT;
8612         if (fd_trans_target_to_host_data(arg1)) {
8613             void *copy = g_malloc(arg3);
8614             memcpy(copy, p, arg3);
8615             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8616             if (ret >= 0) {
8617                 ret = get_errno(safe_write(arg1, copy, ret));
8618             }
8619             g_free(copy);
8620         } else {
8621             ret = get_errno(safe_write(arg1, p, arg3));
8622         }
8623         unlock_user(p, arg2, 0);
8624         return ret;
8625 
8626 #ifdef TARGET_NR_open
8627     case TARGET_NR_open:
8628         if (!(p = lock_user_string(arg1)))
8629             return -TARGET_EFAULT;
8630         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8631                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8632                                   arg3));
8633         fd_trans_unregister(ret);
8634         unlock_user(p, arg1, 0);
8635         return ret;
8636 #endif
8637     case TARGET_NR_openat:
8638         if (!(p = lock_user_string(arg2)))
8639             return -TARGET_EFAULT;
8640         ret = get_errno(do_openat(cpu_env, arg1, p,
8641                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8642                                   arg4));
8643         fd_trans_unregister(ret);
8644         unlock_user(p, arg2, 0);
8645         return ret;
8646 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8647     case TARGET_NR_name_to_handle_at:
8648         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8649         return ret;
8650 #endif
8651 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8652     case TARGET_NR_open_by_handle_at:
8653         ret = do_open_by_handle_at(arg1, arg2, arg3);
8654         fd_trans_unregister(ret);
8655         return ret;
8656 #endif
8657     case TARGET_NR_close:
8658         fd_trans_unregister(arg1);
8659         return get_errno(close(arg1));
8660 
8661     case TARGET_NR_brk:
8662         return do_brk(arg1);
8663 #ifdef TARGET_NR_fork
8664     case TARGET_NR_fork:
8665         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8666 #endif
8667 #ifdef TARGET_NR_waitpid
8668     case TARGET_NR_waitpid:
8669         {
8670             int status;
8671             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8672             if (!is_error(ret) && arg2 && ret
8673                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8674                 return -TARGET_EFAULT;
8675         }
8676         return ret;
8677 #endif
8678 #ifdef TARGET_NR_waitid
8679     case TARGET_NR_waitid:
8680         {
8681             siginfo_t info;
8682             info.si_pid = 0;
8683             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8684             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8685                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8686                     return -TARGET_EFAULT;
8687                 host_to_target_siginfo(p, &info);
8688                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8689             }
8690         }
8691         return ret;
8692 #endif
8693 #ifdef TARGET_NR_creat /* not on alpha */
8694     case TARGET_NR_creat:
8695         if (!(p = lock_user_string(arg1)))
8696             return -TARGET_EFAULT;
8697         ret = get_errno(creat(p, arg2));
8698         fd_trans_unregister(ret);
8699         unlock_user(p, arg1, 0);
8700         return ret;
8701 #endif
8702 #ifdef TARGET_NR_link
8703     case TARGET_NR_link:
8704         {
8705             void * p2;
8706             p = lock_user_string(arg1);
8707             p2 = lock_user_string(arg2);
8708             if (!p || !p2)
8709                 ret = -TARGET_EFAULT;
8710             else
8711                 ret = get_errno(link(p, p2));
8712             unlock_user(p2, arg2, 0);
8713             unlock_user(p, arg1, 0);
8714         }
8715         return ret;
8716 #endif
8717 #if defined(TARGET_NR_linkat)
8718     case TARGET_NR_linkat:
8719         {
8720             void * p2 = NULL;
8721             if (!arg2 || !arg4)
8722                 return -TARGET_EFAULT;
8723             p  = lock_user_string(arg2);
8724             p2 = lock_user_string(arg4);
8725             if (!p || !p2)
8726                 ret = -TARGET_EFAULT;
8727             else
8728                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8729             unlock_user(p, arg2, 0);
8730             unlock_user(p2, arg4, 0);
8731         }
8732         return ret;
8733 #endif
8734 #ifdef TARGET_NR_unlink
8735     case TARGET_NR_unlink:
8736         if (!(p = lock_user_string(arg1)))
8737             return -TARGET_EFAULT;
8738         ret = get_errno(unlink(p));
8739         unlock_user(p, arg1, 0);
8740         return ret;
8741 #endif
8742 #if defined(TARGET_NR_unlinkat)
8743     case TARGET_NR_unlinkat:
8744         if (!(p = lock_user_string(arg2)))
8745             return -TARGET_EFAULT;
8746         ret = get_errno(unlinkat(arg1, p, arg3));
8747         unlock_user(p, arg2, 0);
8748         return ret;
8749 #endif
8750     case TARGET_NR_execve:
8751         {
8752             char **argp, **envp;
8753             int argc, envc;
8754             abi_ulong gp;
8755             abi_ulong guest_argp;
8756             abi_ulong guest_envp;
8757             abi_ulong addr;
8758             char **q;
8759 
8760             argc = 0;
8761             guest_argp = arg2;
8762             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8763                 if (get_user_ual(addr, gp))
8764                     return -TARGET_EFAULT;
8765                 if (!addr)
8766                     break;
8767                 argc++;
8768             }
8769             envc = 0;
8770             guest_envp = arg3;
8771             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8772                 if (get_user_ual(addr, gp))
8773                     return -TARGET_EFAULT;
8774                 if (!addr)
8775                     break;
8776                 envc++;
8777             }
8778 
8779             argp = g_new0(char *, argc + 1);
8780             envp = g_new0(char *, envc + 1);
8781 
8782             for (gp = guest_argp, q = argp; gp;
8783                   gp += sizeof(abi_ulong), q++) {
8784                 if (get_user_ual(addr, gp))
8785                     goto execve_efault;
8786                 if (!addr)
8787                     break;
8788                 if (!(*q = lock_user_string(addr)))
8789                     goto execve_efault;
8790             }
8791             *q = NULL;
8792 
8793             for (gp = guest_envp, q = envp; gp;
8794                   gp += sizeof(abi_ulong), q++) {
8795                 if (get_user_ual(addr, gp))
8796                     goto execve_efault;
8797                 if (!addr)
8798                     break;
8799                 if (!(*q = lock_user_string(addr)))
8800                     goto execve_efault;
8801             }
8802             *q = NULL;
8803 
8804             if (!(p = lock_user_string(arg1)))
8805                 goto execve_efault;
8806             /* Although execve() is not an interruptible syscall it is
8807              * a special case where we must use the safe_syscall wrapper:
8808              * if we allow a signal to happen before we make the host
8809              * syscall then we will 'lose' it, because at the point of
8810              * execve the process leaves QEMU's control. So we use the
8811              * safe syscall wrapper to ensure that we either take the
8812              * signal as a guest signal, or else it does not happen
8813              * before the execve completes and makes it the other
8814              * program's problem.
8815              */
8816             ret = get_errno(safe_execve(p, argp, envp));
8817             unlock_user(p, arg1, 0);
8818 
8819             goto execve_end;
8820 
8821         execve_efault:
8822             ret = -TARGET_EFAULT;
8823 
8824         execve_end:
8825             for (gp = guest_argp, q = argp; *q;
8826                   gp += sizeof(abi_ulong), q++) {
8827                 if (get_user_ual(addr, gp)
8828                     || !addr)
8829                     break;
8830                 unlock_user(*q, addr, 0);
8831             }
8832             for (gp = guest_envp, q = envp; *q;
8833                   gp += sizeof(abi_ulong), q++) {
8834                 if (get_user_ual(addr, gp)
8835                     || !addr)
8836                     break;
8837                 unlock_user(*q, addr, 0);
8838             }
8839 
8840             g_free(argp);
8841             g_free(envp);
8842         }
8843         return ret;
8844     case TARGET_NR_chdir:
8845         if (!(p = lock_user_string(arg1)))
8846             return -TARGET_EFAULT;
8847         ret = get_errno(chdir(p));
8848         unlock_user(p, arg1, 0);
8849         return ret;
8850 #ifdef TARGET_NR_time
8851     case TARGET_NR_time:
8852         {
8853             time_t host_time;
8854             ret = get_errno(time(&host_time));
8855             if (!is_error(ret)
8856                 && arg1
8857                 && put_user_sal(host_time, arg1))
8858                 return -TARGET_EFAULT;
8859         }
8860         return ret;
8861 #endif
8862 #ifdef TARGET_NR_mknod
8863     case TARGET_NR_mknod:
8864         if (!(p = lock_user_string(arg1)))
8865             return -TARGET_EFAULT;
8866         ret = get_errno(mknod(p, arg2, arg3));
8867         unlock_user(p, arg1, 0);
8868         return ret;
8869 #endif
8870 #if defined(TARGET_NR_mknodat)
8871     case TARGET_NR_mknodat:
8872         if (!(p = lock_user_string(arg2)))
8873             return -TARGET_EFAULT;
8874         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8875         unlock_user(p, arg2, 0);
8876         return ret;
8877 #endif
8878 #ifdef TARGET_NR_chmod
8879     case TARGET_NR_chmod:
8880         if (!(p = lock_user_string(arg1)))
8881             return -TARGET_EFAULT;
8882         ret = get_errno(chmod(p, arg2));
8883         unlock_user(p, arg1, 0);
8884         return ret;
8885 #endif
8886 #ifdef TARGET_NR_lseek
8887     case TARGET_NR_lseek:
8888         return get_errno(lseek(arg1, arg2, arg3));
8889 #endif
8890 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8891     /* Alpha specific */
8892     case TARGET_NR_getxpid:
8893         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8894         return get_errno(getpid());
8895 #endif
8896 #ifdef TARGET_NR_getpid
8897     case TARGET_NR_getpid:
8898         return get_errno(getpid());
8899 #endif
8900     case TARGET_NR_mount:
8901         {
8902             /* need to look at the data field */
8903             void *p2, *p3;
8904 
8905             if (arg1) {
8906                 p = lock_user_string(arg1);
8907                 if (!p) {
8908                     return -TARGET_EFAULT;
8909                 }
8910             } else {
8911                 p = NULL;
8912             }
8913 
8914             p2 = lock_user_string(arg2);
8915             if (!p2) {
8916                 if (arg1) {
8917                     unlock_user(p, arg1, 0);
8918                 }
8919                 return -TARGET_EFAULT;
8920             }
8921 
8922             if (arg3) {
8923                 p3 = lock_user_string(arg3);
8924                 if (!p3) {
8925                     if (arg1) {
8926                         unlock_user(p, arg1, 0);
8927                     }
8928                     unlock_user(p2, arg2, 0);
8929                     return -TARGET_EFAULT;
8930                 }
8931             } else {
8932                 p3 = NULL;
8933             }
8934 
8935             /* FIXME - arg5 should be locked, but it isn't clear how to
8936              * do that since it's not guaranteed to be a NULL-terminated
8937              * string.
8938              */
8939             if (!arg5) {
8940                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8941             } else {
8942                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8943             }
8944             ret = get_errno(ret);
8945 
8946             if (arg1) {
8947                 unlock_user(p, arg1, 0);
8948             }
8949             unlock_user(p2, arg2, 0);
8950             if (arg3) {
8951                 unlock_user(p3, arg3, 0);
8952             }
8953         }
8954         return ret;
8955 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8956 #if defined(TARGET_NR_umount)
8957     case TARGET_NR_umount:
8958 #endif
8959 #if defined(TARGET_NR_oldumount)
8960     case TARGET_NR_oldumount:
8961 #endif
8962         if (!(p = lock_user_string(arg1)))
8963             return -TARGET_EFAULT;
8964         ret = get_errno(umount(p));
8965         unlock_user(p, arg1, 0);
8966         return ret;
8967 #endif
8968 #ifdef TARGET_NR_stime /* not on alpha */
8969     case TARGET_NR_stime:
8970         {
8971             struct timespec ts;
8972             ts.tv_nsec = 0;
8973             if (get_user_sal(ts.tv_sec, arg1)) {
8974                 return -TARGET_EFAULT;
8975             }
8976             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8977         }
8978 #endif
8979 #ifdef TARGET_NR_alarm /* not on alpha */
8980     case TARGET_NR_alarm:
8981         return alarm(arg1);
8982 #endif
8983 #ifdef TARGET_NR_pause /* not on alpha */
8984     case TARGET_NR_pause:
8985         if (!block_signals()) {
8986             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8987         }
8988         return -TARGET_EINTR;
8989 #endif
8990 #ifdef TARGET_NR_utime
8991     case TARGET_NR_utime:
8992         {
8993             struct utimbuf tbuf, *host_tbuf;
8994             struct target_utimbuf *target_tbuf;
8995             if (arg2) {
8996                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8997                     return -TARGET_EFAULT;
8998                 tbuf.actime = tswapal(target_tbuf->actime);
8999                 tbuf.modtime = tswapal(target_tbuf->modtime);
9000                 unlock_user_struct(target_tbuf, arg2, 0);
9001                 host_tbuf = &tbuf;
9002             } else {
9003                 host_tbuf = NULL;
9004             }
9005             if (!(p = lock_user_string(arg1)))
9006                 return -TARGET_EFAULT;
9007             ret = get_errno(utime(p, host_tbuf));
9008             unlock_user(p, arg1, 0);
9009         }
9010         return ret;
9011 #endif
9012 #ifdef TARGET_NR_utimes
9013     case TARGET_NR_utimes:
9014         {
9015             struct timeval *tvp, tv[2];
9016             if (arg2) {
9017                 if (copy_from_user_timeval(&tv[0], arg2)
9018                     || copy_from_user_timeval(&tv[1],
9019                                               arg2 + sizeof(struct target_timeval)))
9020                     return -TARGET_EFAULT;
9021                 tvp = tv;
9022             } else {
9023                 tvp = NULL;
9024             }
9025             if (!(p = lock_user_string(arg1)))
9026                 return -TARGET_EFAULT;
9027             ret = get_errno(utimes(p, tvp));
9028             unlock_user(p, arg1, 0);
9029         }
9030         return ret;
9031 #endif
9032 #if defined(TARGET_NR_futimesat)
9033     case TARGET_NR_futimesat:
9034         {
9035             struct timeval *tvp, tv[2];
9036             if (arg3) {
9037                 if (copy_from_user_timeval(&tv[0], arg3)
9038                     || copy_from_user_timeval(&tv[1],
9039                                               arg3 + sizeof(struct target_timeval)))
9040                     return -TARGET_EFAULT;
9041                 tvp = tv;
9042             } else {
9043                 tvp = NULL;
9044             }
9045             if (!(p = lock_user_string(arg2))) {
9046                 return -TARGET_EFAULT;
9047             }
9048             ret = get_errno(futimesat(arg1, path(p), tvp));
9049             unlock_user(p, arg2, 0);
9050         }
9051         return ret;
9052 #endif
9053 #ifdef TARGET_NR_access
9054     case TARGET_NR_access:
9055         if (!(p = lock_user_string(arg1))) {
9056             return -TARGET_EFAULT;
9057         }
9058         ret = get_errno(access(path(p), arg2));
9059         unlock_user(p, arg1, 0);
9060         return ret;
9061 #endif
9062 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9063     case TARGET_NR_faccessat:
9064         if (!(p = lock_user_string(arg2))) {
9065             return -TARGET_EFAULT;
9066         }
9067         ret = get_errno(faccessat(arg1, p, arg3, 0));
9068         unlock_user(p, arg2, 0);
9069         return ret;
9070 #endif
9071 #ifdef TARGET_NR_nice /* not on alpha */
9072     case TARGET_NR_nice:
9073         return get_errno(nice(arg1));
9074 #endif
9075     case TARGET_NR_sync:
9076         sync();
9077         return 0;
9078 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9079     case TARGET_NR_syncfs:
9080         return get_errno(syncfs(arg1));
9081 #endif
9082     case TARGET_NR_kill:
9083         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9084 #ifdef TARGET_NR_rename
9085     case TARGET_NR_rename:
9086         {
9087             void *p2;
9088             p = lock_user_string(arg1);
9089             p2 = lock_user_string(arg2);
9090             if (!p || !p2)
9091                 ret = -TARGET_EFAULT;
9092             else
9093                 ret = get_errno(rename(p, p2));
9094             unlock_user(p2, arg2, 0);
9095             unlock_user(p, arg1, 0);
9096         }
9097         return ret;
9098 #endif
9099 #if defined(TARGET_NR_renameat)
9100     case TARGET_NR_renameat:
9101         {
9102             void *p2;
9103             p  = lock_user_string(arg2);
9104             p2 = lock_user_string(arg4);
9105             if (!p || !p2)
9106                 ret = -TARGET_EFAULT;
9107             else
9108                 ret = get_errno(renameat(arg1, p, arg3, p2));
9109             unlock_user(p2, arg4, 0);
9110             unlock_user(p, arg2, 0);
9111         }
9112         return ret;
9113 #endif
9114 #if defined(TARGET_NR_renameat2)
9115     case TARGET_NR_renameat2:
9116         {
9117             void *p2;
9118             p  = lock_user_string(arg2);
9119             p2 = lock_user_string(arg4);
9120             if (!p || !p2) {
9121                 ret = -TARGET_EFAULT;
9122             } else {
9123                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9124             }
9125             unlock_user(p2, arg4, 0);
9126             unlock_user(p, arg2, 0);
9127         }
9128         return ret;
9129 #endif
9130 #ifdef TARGET_NR_mkdir
9131     case TARGET_NR_mkdir:
9132         if (!(p = lock_user_string(arg1)))
9133             return -TARGET_EFAULT;
9134         ret = get_errno(mkdir(p, arg2));
9135         unlock_user(p, arg1, 0);
9136         return ret;
9137 #endif
9138 #if defined(TARGET_NR_mkdirat)
9139     case TARGET_NR_mkdirat:
9140         if (!(p = lock_user_string(arg2)))
9141             return -TARGET_EFAULT;
9142         ret = get_errno(mkdirat(arg1, p, arg3));
9143         unlock_user(p, arg2, 0);
9144         return ret;
9145 #endif
9146 #ifdef TARGET_NR_rmdir
9147     case TARGET_NR_rmdir:
9148         if (!(p = lock_user_string(arg1)))
9149             return -TARGET_EFAULT;
9150         ret = get_errno(rmdir(p));
9151         unlock_user(p, arg1, 0);
9152         return ret;
9153 #endif
9154     case TARGET_NR_dup:
9155         ret = get_errno(dup(arg1));
9156         if (ret >= 0) {
9157             fd_trans_dup(arg1, ret);
9158         }
9159         return ret;
9160 #ifdef TARGET_NR_pipe
9161     case TARGET_NR_pipe:
9162         return do_pipe(cpu_env, arg1, 0, 0);
9163 #endif
9164 #ifdef TARGET_NR_pipe2
9165     case TARGET_NR_pipe2:
9166         return do_pipe(cpu_env, arg1,
9167                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9168 #endif
9169     case TARGET_NR_times:
9170         {
9171             struct target_tms *tmsp;
9172             struct tms tms;
9173             ret = get_errno(times(&tms));
9174             if (arg1) {
9175                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9176                 if (!tmsp)
9177                     return -TARGET_EFAULT;
9178                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9179                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9180                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9181                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9182             }
9183             if (!is_error(ret))
9184                 ret = host_to_target_clock_t(ret);
9185         }
9186         return ret;
9187     case TARGET_NR_acct:
9188         if (arg1 == 0) {
9189             ret = get_errno(acct(NULL));
9190         } else {
9191             if (!(p = lock_user_string(arg1))) {
9192                 return -TARGET_EFAULT;
9193             }
9194             ret = get_errno(acct(path(p)));
9195             unlock_user(p, arg1, 0);
9196         }
9197         return ret;
9198 #ifdef TARGET_NR_umount2
9199     case TARGET_NR_umount2:
9200         if (!(p = lock_user_string(arg1)))
9201             return -TARGET_EFAULT;
9202         ret = get_errno(umount2(p, arg2));
9203         unlock_user(p, arg1, 0);
9204         return ret;
9205 #endif
9206     case TARGET_NR_ioctl:
9207         return do_ioctl(arg1, arg2, arg3);
9208 #ifdef TARGET_NR_fcntl
9209     case TARGET_NR_fcntl:
9210         return do_fcntl(arg1, arg2, arg3);
9211 #endif
9212     case TARGET_NR_setpgid:
9213         return get_errno(setpgid(arg1, arg2));
9214     case TARGET_NR_umask:
9215         return get_errno(umask(arg1));
9216     case TARGET_NR_chroot:
9217         if (!(p = lock_user_string(arg1)))
9218             return -TARGET_EFAULT;
9219         ret = get_errno(chroot(p));
9220         unlock_user(p, arg1, 0);
9221         return ret;
9222 #ifdef TARGET_NR_dup2
9223     case TARGET_NR_dup2:
9224         ret = get_errno(dup2(arg1, arg2));
9225         if (ret >= 0) {
9226             fd_trans_dup(arg1, arg2);
9227         }
9228         return ret;
9229 #endif
9230 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9231     case TARGET_NR_dup3:
9232     {
9233         int host_flags;
9234 
9235         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9236             return -EINVAL;
9237         }
9238         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9239         ret = get_errno(dup3(arg1, arg2, host_flags));
9240         if (ret >= 0) {
9241             fd_trans_dup(arg1, arg2);
9242         }
9243         return ret;
9244     }
9245 #endif
9246 #ifdef TARGET_NR_getppid /* not on alpha */
9247     case TARGET_NR_getppid:
9248         return get_errno(getppid());
9249 #endif
9250 #ifdef TARGET_NR_getpgrp
9251     case TARGET_NR_getpgrp:
9252         return get_errno(getpgrp());
9253 #endif
9254     case TARGET_NR_setsid:
9255         return get_errno(setsid());
9256 #ifdef TARGET_NR_sigaction
9257     case TARGET_NR_sigaction:
9258         {
9259 #if defined(TARGET_MIPS)
9260 	    struct target_sigaction act, oact, *pact, *old_act;
9261 
9262 	    if (arg2) {
9263                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9264                     return -TARGET_EFAULT;
9265 		act._sa_handler = old_act->_sa_handler;
9266 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9267 		act.sa_flags = old_act->sa_flags;
9268 		unlock_user_struct(old_act, arg2, 0);
9269 		pact = &act;
9270 	    } else {
9271 		pact = NULL;
9272 	    }
9273 
9274         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9275 
9276 	    if (!is_error(ret) && arg3) {
9277                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9278                     return -TARGET_EFAULT;
9279 		old_act->_sa_handler = oact._sa_handler;
9280 		old_act->sa_flags = oact.sa_flags;
9281 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9282 		old_act->sa_mask.sig[1] = 0;
9283 		old_act->sa_mask.sig[2] = 0;
9284 		old_act->sa_mask.sig[3] = 0;
9285 		unlock_user_struct(old_act, arg3, 1);
9286 	    }
9287 #else
9288             struct target_old_sigaction *old_act;
9289             struct target_sigaction act, oact, *pact;
9290             if (arg2) {
9291                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9292                     return -TARGET_EFAULT;
9293                 act._sa_handler = old_act->_sa_handler;
9294                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9295                 act.sa_flags = old_act->sa_flags;
9296 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9297                 act.sa_restorer = old_act->sa_restorer;
9298 #endif
9299                 unlock_user_struct(old_act, arg2, 0);
9300                 pact = &act;
9301             } else {
9302                 pact = NULL;
9303             }
9304             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9305             if (!is_error(ret) && arg3) {
9306                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9307                     return -TARGET_EFAULT;
9308                 old_act->_sa_handler = oact._sa_handler;
9309                 old_act->sa_mask = oact.sa_mask.sig[0];
9310                 old_act->sa_flags = oact.sa_flags;
9311 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9312                 old_act->sa_restorer = oact.sa_restorer;
9313 #endif
9314                 unlock_user_struct(old_act, arg3, 1);
9315             }
9316 #endif
9317         }
9318         return ret;
9319 #endif
9320     case TARGET_NR_rt_sigaction:
9321         {
9322             /*
9323              * For Alpha and SPARC this is a 5 argument syscall, with
9324              * a 'restorer' parameter which must be copied into the
9325              * sa_restorer field of the sigaction struct.
9326              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9327              * and arg5 is the sigsetsize.
9328              */
9329 #if defined(TARGET_ALPHA)
9330             target_ulong sigsetsize = arg4;
9331             target_ulong restorer = arg5;
9332 #elif defined(TARGET_SPARC)
9333             target_ulong restorer = arg4;
9334             target_ulong sigsetsize = arg5;
9335 #else
9336             target_ulong sigsetsize = arg4;
9337             target_ulong restorer = 0;
9338 #endif
9339             struct target_sigaction *act = NULL;
9340             struct target_sigaction *oact = NULL;
9341 
9342             if (sigsetsize != sizeof(target_sigset_t)) {
9343                 return -TARGET_EINVAL;
9344             }
9345             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9346                 return -TARGET_EFAULT;
9347             }
9348             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9349                 ret = -TARGET_EFAULT;
9350             } else {
9351                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9352                 if (oact) {
9353                     unlock_user_struct(oact, arg3, 1);
9354                 }
9355             }
9356             if (act) {
9357                 unlock_user_struct(act, arg2, 0);
9358             }
9359         }
9360         return ret;
9361 #ifdef TARGET_NR_sgetmask /* not on alpha */
9362     case TARGET_NR_sgetmask:
9363         {
9364             sigset_t cur_set;
9365             abi_ulong target_set;
9366             ret = do_sigprocmask(0, NULL, &cur_set);
9367             if (!ret) {
9368                 host_to_target_old_sigset(&target_set, &cur_set);
9369                 ret = target_set;
9370             }
9371         }
9372         return ret;
9373 #endif
9374 #ifdef TARGET_NR_ssetmask /* not on alpha */
9375     case TARGET_NR_ssetmask:
9376         {
9377             sigset_t set, oset;
9378             abi_ulong target_set = arg1;
9379             target_to_host_old_sigset(&set, &target_set);
9380             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9381             if (!ret) {
9382                 host_to_target_old_sigset(&target_set, &oset);
9383                 ret = target_set;
9384             }
9385         }
9386         return ret;
9387 #endif
9388 #ifdef TARGET_NR_sigprocmask
9389     case TARGET_NR_sigprocmask:
9390         {
9391 #if defined(TARGET_ALPHA)
9392             sigset_t set, oldset;
9393             abi_ulong mask;
9394             int how;
9395 
9396             switch (arg1) {
9397             case TARGET_SIG_BLOCK:
9398                 how = SIG_BLOCK;
9399                 break;
9400             case TARGET_SIG_UNBLOCK:
9401                 how = SIG_UNBLOCK;
9402                 break;
9403             case TARGET_SIG_SETMASK:
9404                 how = SIG_SETMASK;
9405                 break;
9406             default:
9407                 return -TARGET_EINVAL;
9408             }
9409             mask = arg2;
9410             target_to_host_old_sigset(&set, &mask);
9411 
9412             ret = do_sigprocmask(how, &set, &oldset);
9413             if (!is_error(ret)) {
9414                 host_to_target_old_sigset(&mask, &oldset);
9415                 ret = mask;
9416                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9417             }
9418 #else
9419             sigset_t set, oldset, *set_ptr;
9420             int how;
9421 
9422             if (arg2) {
9423                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9424                 if (!p) {
9425                     return -TARGET_EFAULT;
9426                 }
9427                 target_to_host_old_sigset(&set, p);
9428                 unlock_user(p, arg2, 0);
9429                 set_ptr = &set;
9430                 switch (arg1) {
9431                 case TARGET_SIG_BLOCK:
9432                     how = SIG_BLOCK;
9433                     break;
9434                 case TARGET_SIG_UNBLOCK:
9435                     how = SIG_UNBLOCK;
9436                     break;
9437                 case TARGET_SIG_SETMASK:
9438                     how = SIG_SETMASK;
9439                     break;
9440                 default:
9441                     return -TARGET_EINVAL;
9442                 }
9443             } else {
9444                 how = 0;
9445                 set_ptr = NULL;
9446             }
9447             ret = do_sigprocmask(how, set_ptr, &oldset);
9448             if (!is_error(ret) && arg3) {
9449                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9450                     return -TARGET_EFAULT;
9451                 host_to_target_old_sigset(p, &oldset);
9452                 unlock_user(p, arg3, sizeof(target_sigset_t));
9453             }
9454 #endif
9455         }
9456         return ret;
9457 #endif
9458     case TARGET_NR_rt_sigprocmask:
9459         {
9460             int how = arg1;
9461             sigset_t set, oldset, *set_ptr;
9462 
9463             if (arg4 != sizeof(target_sigset_t)) {
9464                 return -TARGET_EINVAL;
9465             }
9466 
9467             if (arg2) {
9468                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9469                 if (!p) {
9470                     return -TARGET_EFAULT;
9471                 }
9472                 target_to_host_sigset(&set, p);
9473                 unlock_user(p, arg2, 0);
9474                 set_ptr = &set;
9475                 switch(how) {
9476                 case TARGET_SIG_BLOCK:
9477                     how = SIG_BLOCK;
9478                     break;
9479                 case TARGET_SIG_UNBLOCK:
9480                     how = SIG_UNBLOCK;
9481                     break;
9482                 case TARGET_SIG_SETMASK:
9483                     how = SIG_SETMASK;
9484                     break;
9485                 default:
9486                     return -TARGET_EINVAL;
9487                 }
9488             } else {
9489                 how = 0;
9490                 set_ptr = NULL;
9491             }
9492             ret = do_sigprocmask(how, set_ptr, &oldset);
9493             if (!is_error(ret) && arg3) {
9494                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9495                     return -TARGET_EFAULT;
9496                 host_to_target_sigset(p, &oldset);
9497                 unlock_user(p, arg3, sizeof(target_sigset_t));
9498             }
9499         }
9500         return ret;
9501 #ifdef TARGET_NR_sigpending
9502     case TARGET_NR_sigpending:
9503         {
9504             sigset_t set;
9505             ret = get_errno(sigpending(&set));
9506             if (!is_error(ret)) {
9507                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9508                     return -TARGET_EFAULT;
9509                 host_to_target_old_sigset(p, &set);
9510                 unlock_user(p, arg1, sizeof(target_sigset_t));
9511             }
9512         }
9513         return ret;
9514 #endif
9515     case TARGET_NR_rt_sigpending:
9516         {
9517             sigset_t set;
9518 
9519             /* Yes, this check is >, not != like most. We follow the kernel's
9520              * logic and it does it like this because it implements
9521              * NR_sigpending through the same code path, and in that case
9522              * the old_sigset_t is smaller in size.
9523              */
9524             if (arg2 > sizeof(target_sigset_t)) {
9525                 return -TARGET_EINVAL;
9526             }
9527 
9528             ret = get_errno(sigpending(&set));
9529             if (!is_error(ret)) {
9530                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9531                     return -TARGET_EFAULT;
9532                 host_to_target_sigset(p, &set);
9533                 unlock_user(p, arg1, sizeof(target_sigset_t));
9534             }
9535         }
9536         return ret;
9537 #ifdef TARGET_NR_sigsuspend
9538     case TARGET_NR_sigsuspend:
9539         {
9540             sigset_t *set;
9541 
9542 #if defined(TARGET_ALPHA)
9543             TaskState *ts = cpu->opaque;
9544             /* target_to_host_old_sigset will bswap back */
9545             abi_ulong mask = tswapal(arg1);
9546             set = &ts->sigsuspend_mask;
9547             target_to_host_old_sigset(set, &mask);
9548 #else
9549             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9550             if (ret != 0) {
9551                 return ret;
9552             }
9553 #endif
9554             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9555             finish_sigsuspend_mask(ret);
9556         }
9557         return ret;
9558 #endif
9559     case TARGET_NR_rt_sigsuspend:
9560         {
9561             sigset_t *set;
9562 
9563             ret = process_sigsuspend_mask(&set, arg1, arg2);
9564             if (ret != 0) {
9565                 return ret;
9566             }
9567             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9568             finish_sigsuspend_mask(ret);
9569         }
9570         return ret;
9571 #ifdef TARGET_NR_rt_sigtimedwait
9572     case TARGET_NR_rt_sigtimedwait:
9573         {
9574             sigset_t set;
9575             struct timespec uts, *puts;
9576             siginfo_t uinfo;
9577 
9578             if (arg4 != sizeof(target_sigset_t)) {
9579                 return -TARGET_EINVAL;
9580             }
9581 
9582             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9583                 return -TARGET_EFAULT;
9584             target_to_host_sigset(&set, p);
9585             unlock_user(p, arg1, 0);
9586             if (arg3) {
9587                 puts = &uts;
9588                 if (target_to_host_timespec(puts, arg3)) {
9589                     return -TARGET_EFAULT;
9590                 }
9591             } else {
9592                 puts = NULL;
9593             }
9594             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9595                                                  SIGSET_T_SIZE));
9596             if (!is_error(ret)) {
9597                 if (arg2) {
9598                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9599                                   0);
9600                     if (!p) {
9601                         return -TARGET_EFAULT;
9602                     }
9603                     host_to_target_siginfo(p, &uinfo);
9604                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9605                 }
9606                 ret = host_to_target_signal(ret);
9607             }
9608         }
9609         return ret;
9610 #endif
9611 #ifdef TARGET_NR_rt_sigtimedwait_time64
9612     case TARGET_NR_rt_sigtimedwait_time64:
9613         {
9614             sigset_t set;
9615             struct timespec uts, *puts;
9616             siginfo_t uinfo;
9617 
9618             if (arg4 != sizeof(target_sigset_t)) {
9619                 return -TARGET_EINVAL;
9620             }
9621 
9622             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9623             if (!p) {
9624                 return -TARGET_EFAULT;
9625             }
9626             target_to_host_sigset(&set, p);
9627             unlock_user(p, arg1, 0);
9628             if (arg3) {
9629                 puts = &uts;
9630                 if (target_to_host_timespec64(puts, arg3)) {
9631                     return -TARGET_EFAULT;
9632                 }
9633             } else {
9634                 puts = NULL;
9635             }
9636             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9637                                                  SIGSET_T_SIZE));
9638             if (!is_error(ret)) {
9639                 if (arg2) {
9640                     p = lock_user(VERIFY_WRITE, arg2,
9641                                   sizeof(target_siginfo_t), 0);
9642                     if (!p) {
9643                         return -TARGET_EFAULT;
9644                     }
9645                     host_to_target_siginfo(p, &uinfo);
9646                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9647                 }
9648                 ret = host_to_target_signal(ret);
9649             }
9650         }
9651         return ret;
9652 #endif
9653     case TARGET_NR_rt_sigqueueinfo:
9654         {
9655             siginfo_t uinfo;
9656 
9657             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9658             if (!p) {
9659                 return -TARGET_EFAULT;
9660             }
9661             target_to_host_siginfo(&uinfo, p);
9662             unlock_user(p, arg3, 0);
9663             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9664         }
9665         return ret;
9666     case TARGET_NR_rt_tgsigqueueinfo:
9667         {
9668             siginfo_t uinfo;
9669 
9670             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9671             if (!p) {
9672                 return -TARGET_EFAULT;
9673             }
9674             target_to_host_siginfo(&uinfo, p);
9675             unlock_user(p, arg4, 0);
9676             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9677         }
9678         return ret;
9679 #ifdef TARGET_NR_sigreturn
9680     case TARGET_NR_sigreturn:
9681         if (block_signals()) {
9682             return -QEMU_ERESTARTSYS;
9683         }
9684         return do_sigreturn(cpu_env);
9685 #endif
9686     case TARGET_NR_rt_sigreturn:
9687         if (block_signals()) {
9688             return -QEMU_ERESTARTSYS;
9689         }
9690         return do_rt_sigreturn(cpu_env);
9691     case TARGET_NR_sethostname:
9692         if (!(p = lock_user_string(arg1)))
9693             return -TARGET_EFAULT;
9694         ret = get_errno(sethostname(p, arg2));
9695         unlock_user(p, arg1, 0);
9696         return ret;
9697 #ifdef TARGET_NR_setrlimit
9698     case TARGET_NR_setrlimit:
9699         {
9700             int resource = target_to_host_resource(arg1);
9701             struct target_rlimit *target_rlim;
9702             struct rlimit rlim;
9703             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9704                 return -TARGET_EFAULT;
9705             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9706             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9707             unlock_user_struct(target_rlim, arg2, 0);
9708             /*
9709              * If we just passed through resource limit settings for memory then
9710              * they would also apply to QEMU's own allocations, and QEMU will
9711              * crash or hang or die if its allocations fail. Ideally we would
9712              * track the guest allocations in QEMU and apply the limits ourselves.
9713              * For now, just tell the guest the call succeeded but don't actually
9714              * limit anything.
9715              */
9716             if (resource != RLIMIT_AS &&
9717                 resource != RLIMIT_DATA &&
9718                 resource != RLIMIT_STACK) {
9719                 return get_errno(setrlimit(resource, &rlim));
9720             } else {
9721                 return 0;
9722             }
9723         }
9724 #endif
9725 #ifdef TARGET_NR_getrlimit
9726     case TARGET_NR_getrlimit:
9727         {
9728             int resource = target_to_host_resource(arg1);
9729             struct target_rlimit *target_rlim;
9730             struct rlimit rlim;
9731 
9732             ret = get_errno(getrlimit(resource, &rlim));
9733             if (!is_error(ret)) {
9734                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9735                     return -TARGET_EFAULT;
9736                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9737                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9738                 unlock_user_struct(target_rlim, arg2, 1);
9739             }
9740         }
9741         return ret;
9742 #endif
9743     case TARGET_NR_getrusage:
9744         {
9745             struct rusage rusage;
9746             ret = get_errno(getrusage(arg1, &rusage));
9747             if (!is_error(ret)) {
9748                 ret = host_to_target_rusage(arg2, &rusage);
9749             }
9750         }
9751         return ret;
9752 #if defined(TARGET_NR_gettimeofday)
9753     case TARGET_NR_gettimeofday:
9754         {
9755             struct timeval tv;
9756             struct timezone tz;
9757 
9758             ret = get_errno(gettimeofday(&tv, &tz));
9759             if (!is_error(ret)) {
9760                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9761                     return -TARGET_EFAULT;
9762                 }
9763                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9764                     return -TARGET_EFAULT;
9765                 }
9766             }
9767         }
9768         return ret;
9769 #endif
9770 #if defined(TARGET_NR_settimeofday)
9771     case TARGET_NR_settimeofday:
9772         {
9773             struct timeval tv, *ptv = NULL;
9774             struct timezone tz, *ptz = NULL;
9775 
9776             if (arg1) {
9777                 if (copy_from_user_timeval(&tv, arg1)) {
9778                     return -TARGET_EFAULT;
9779                 }
9780                 ptv = &tv;
9781             }
9782 
9783             if (arg2) {
9784                 if (copy_from_user_timezone(&tz, arg2)) {
9785                     return -TARGET_EFAULT;
9786                 }
9787                 ptz = &tz;
9788             }
9789 
9790             return get_errno(settimeofday(ptv, ptz));
9791         }
9792 #endif
9793 #if defined(TARGET_NR_select)
9794     case TARGET_NR_select:
9795 #if defined(TARGET_WANT_NI_OLD_SELECT)
9796         /* some architectures used to have old_select here
9797          * but now ENOSYS it.
9798          */
9799         ret = -TARGET_ENOSYS;
9800 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9801         ret = do_old_select(arg1);
9802 #else
9803         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9804 #endif
9805         return ret;
9806 #endif
9807 #ifdef TARGET_NR_pselect6
9808     case TARGET_NR_pselect6:
9809         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9810 #endif
9811 #ifdef TARGET_NR_pselect6_time64
9812     case TARGET_NR_pselect6_time64:
9813         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9814 #endif
9815 #ifdef TARGET_NR_symlink
9816     case TARGET_NR_symlink:
9817         {
9818             void *p2;
9819             p = lock_user_string(arg1);
9820             p2 = lock_user_string(arg2);
9821             if (!p || !p2)
9822                 ret = -TARGET_EFAULT;
9823             else
9824                 ret = get_errno(symlink(p, p2));
9825             unlock_user(p2, arg2, 0);
9826             unlock_user(p, arg1, 0);
9827         }
9828         return ret;
9829 #endif
9830 #if defined(TARGET_NR_symlinkat)
9831     case TARGET_NR_symlinkat:
9832         {
9833             void *p2;
9834             p  = lock_user_string(arg1);
9835             p2 = lock_user_string(arg3);
9836             if (!p || !p2)
9837                 ret = -TARGET_EFAULT;
9838             else
9839                 ret = get_errno(symlinkat(p, arg2, p2));
9840             unlock_user(p2, arg3, 0);
9841             unlock_user(p, arg1, 0);
9842         }
9843         return ret;
9844 #endif
9845 #ifdef TARGET_NR_readlink
9846     case TARGET_NR_readlink:
9847         {
9848             void *p2;
9849             p = lock_user_string(arg1);
9850             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9851             if (!p || !p2) {
9852                 ret = -TARGET_EFAULT;
9853             } else if (!arg3) {
9854                 /* Short circuit this for the magic exe check. */
9855                 ret = -TARGET_EINVAL;
9856             } else if (is_proc_myself((const char *)p, "exe")) {
9857                 char real[PATH_MAX], *temp;
9858                 temp = realpath(exec_path, real);
9859                 /* Return value is # of bytes that we wrote to the buffer. */
9860                 if (temp == NULL) {
9861                     ret = get_errno(-1);
9862                 } else {
9863                     /* Don't worry about sign mismatch as earlier mapping
9864                      * logic would have thrown a bad address error. */
9865                     ret = MIN(strlen(real), arg3);
9866                     /* We cannot NUL terminate the string. */
9867                     memcpy(p2, real, ret);
9868                 }
9869             } else {
9870                 ret = get_errno(readlink(path(p), p2, arg3));
9871             }
9872             unlock_user(p2, arg2, ret);
9873             unlock_user(p, arg1, 0);
9874         }
9875         return ret;
9876 #endif
9877 #if defined(TARGET_NR_readlinkat)
9878     case TARGET_NR_readlinkat:
9879         {
9880             void *p2;
9881             p  = lock_user_string(arg2);
9882             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9883             if (!p || !p2) {
9884                 ret = -TARGET_EFAULT;
9885             } else if (is_proc_myself((const char *)p, "exe")) {
9886                 char real[PATH_MAX], *temp;
9887                 temp = realpath(exec_path, real);
9888                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9889                 snprintf((char *)p2, arg4, "%s", real);
9890             } else {
9891                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9892             }
9893             unlock_user(p2, arg3, ret);
9894             unlock_user(p, arg2, 0);
9895         }
9896         return ret;
9897 #endif
9898 #ifdef TARGET_NR_swapon
9899     case TARGET_NR_swapon:
9900         if (!(p = lock_user_string(arg1)))
9901             return -TARGET_EFAULT;
9902         ret = get_errno(swapon(p, arg2));
9903         unlock_user(p, arg1, 0);
9904         return ret;
9905 #endif
9906     case TARGET_NR_reboot:
9907         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9908            /* arg4 must be ignored in all other cases */
9909            p = lock_user_string(arg4);
9910            if (!p) {
9911                return -TARGET_EFAULT;
9912            }
9913            ret = get_errno(reboot(arg1, arg2, arg3, p));
9914            unlock_user(p, arg4, 0);
9915         } else {
9916            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9917         }
9918         return ret;
9919 #ifdef TARGET_NR_mmap
9920     case TARGET_NR_mmap:
9921 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9922     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9923     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9924     || defined(TARGET_S390X)
9925         {
9926             abi_ulong *v;
9927             abi_ulong v1, v2, v3, v4, v5, v6;
9928             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9929                 return -TARGET_EFAULT;
9930             v1 = tswapal(v[0]);
9931             v2 = tswapal(v[1]);
9932             v3 = tswapal(v[2]);
9933             v4 = tswapal(v[3]);
9934             v5 = tswapal(v[4]);
9935             v6 = tswapal(v[5]);
9936             unlock_user(v, arg1, 0);
9937             ret = get_errno(target_mmap(v1, v2, v3,
9938                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9939                                         v5, v6));
9940         }
9941 #else
9942         /* mmap pointers are always untagged */
9943         ret = get_errno(target_mmap(arg1, arg2, arg3,
9944                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9945                                     arg5,
9946                                     arg6));
9947 #endif
9948         return ret;
9949 #endif
9950 #ifdef TARGET_NR_mmap2
9951     case TARGET_NR_mmap2:
9952 #ifndef MMAP_SHIFT
9953 #define MMAP_SHIFT 12
9954 #endif
9955         ret = target_mmap(arg1, arg2, arg3,
9956                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9957                           arg5, arg6 << MMAP_SHIFT);
9958         return get_errno(ret);
9959 #endif
9960     case TARGET_NR_munmap:
9961         arg1 = cpu_untagged_addr(cpu, arg1);
9962         return get_errno(target_munmap(arg1, arg2));
9963     case TARGET_NR_mprotect:
9964         arg1 = cpu_untagged_addr(cpu, arg1);
9965         {
9966             TaskState *ts = cpu->opaque;
9967             /* Special hack to detect libc making the stack executable.  */
9968             if ((arg3 & PROT_GROWSDOWN)
9969                 && arg1 >= ts->info->stack_limit
9970                 && arg1 <= ts->info->start_stack) {
9971                 arg3 &= ~PROT_GROWSDOWN;
9972                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9973                 arg1 = ts->info->stack_limit;
9974             }
9975         }
9976         return get_errno(target_mprotect(arg1, arg2, arg3));
9977 #ifdef TARGET_NR_mremap
9978     case TARGET_NR_mremap:
9979         arg1 = cpu_untagged_addr(cpu, arg1);
9980         /* mremap new_addr (arg5) is always untagged */
9981         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9982 #endif
9983         /* ??? msync/mlock/munlock are broken for softmmu.  */
9984 #ifdef TARGET_NR_msync
9985     case TARGET_NR_msync:
9986         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9987 #endif
9988 #ifdef TARGET_NR_mlock
9989     case TARGET_NR_mlock:
9990         return get_errno(mlock(g2h(cpu, arg1), arg2));
9991 #endif
9992 #ifdef TARGET_NR_munlock
9993     case TARGET_NR_munlock:
9994         return get_errno(munlock(g2h(cpu, arg1), arg2));
9995 #endif
9996 #ifdef TARGET_NR_mlockall
9997     case TARGET_NR_mlockall:
9998         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9999 #endif
10000 #ifdef TARGET_NR_munlockall
10001     case TARGET_NR_munlockall:
10002         return get_errno(munlockall());
10003 #endif
10004 #ifdef TARGET_NR_truncate
10005     case TARGET_NR_truncate:
10006         if (!(p = lock_user_string(arg1)))
10007             return -TARGET_EFAULT;
10008         ret = get_errno(truncate(p, arg2));
10009         unlock_user(p, arg1, 0);
10010         return ret;
10011 #endif
10012 #ifdef TARGET_NR_ftruncate
10013     case TARGET_NR_ftruncate:
10014         return get_errno(ftruncate(arg1, arg2));
10015 #endif
10016     case TARGET_NR_fchmod:
10017         return get_errno(fchmod(arg1, arg2));
10018 #if defined(TARGET_NR_fchmodat)
10019     case TARGET_NR_fchmodat:
10020         if (!(p = lock_user_string(arg2)))
10021             return -TARGET_EFAULT;
10022         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10023         unlock_user(p, arg2, 0);
10024         return ret;
10025 #endif
10026     case TARGET_NR_getpriority:
10027         /* Note that negative values are valid for getpriority, so we must
10028            differentiate based on errno settings.  */
10029         errno = 0;
10030         ret = getpriority(arg1, arg2);
10031         if (ret == -1 && errno != 0) {
10032             return -host_to_target_errno(errno);
10033         }
10034 #ifdef TARGET_ALPHA
10035         /* Return value is the unbiased priority.  Signal no error.  */
10036         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
10037 #else
10038         /* Return value is a biased priority to avoid negative numbers.  */
10039         ret = 20 - ret;
10040 #endif
10041         return ret;
10042     case TARGET_NR_setpriority:
10043         return get_errno(setpriority(arg1, arg2, arg3));
10044 #ifdef TARGET_NR_statfs
10045     case TARGET_NR_statfs:
10046         if (!(p = lock_user_string(arg1))) {
10047             return -TARGET_EFAULT;
10048         }
10049         ret = get_errno(statfs(path(p), &stfs));
10050         unlock_user(p, arg1, 0);
10051     convert_statfs:
10052         if (!is_error(ret)) {
10053             struct target_statfs *target_stfs;
10054 
10055             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10056                 return -TARGET_EFAULT;
10057             __put_user(stfs.f_type, &target_stfs->f_type);
10058             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10059             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10060             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10061             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10062             __put_user(stfs.f_files, &target_stfs->f_files);
10063             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10064             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10065             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10066             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10067             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10068 #ifdef _STATFS_F_FLAGS
10069             __put_user(stfs.f_flags, &target_stfs->f_flags);
10070 #else
10071             __put_user(0, &target_stfs->f_flags);
10072 #endif
10073             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10074             unlock_user_struct(target_stfs, arg2, 1);
10075         }
10076         return ret;
10077 #endif
10078 #ifdef TARGET_NR_fstatfs
10079     case TARGET_NR_fstatfs:
10080         ret = get_errno(fstatfs(arg1, &stfs));
10081         goto convert_statfs;
10082 #endif
10083 #ifdef TARGET_NR_statfs64
10084     case TARGET_NR_statfs64:
10085         if (!(p = lock_user_string(arg1))) {
10086             return -TARGET_EFAULT;
10087         }
10088         ret = get_errno(statfs(path(p), &stfs));
10089         unlock_user(p, arg1, 0);
10090     convert_statfs64:
10091         if (!is_error(ret)) {
10092             struct target_statfs64 *target_stfs;
10093 
10094             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10095                 return -TARGET_EFAULT;
10096             __put_user(stfs.f_type, &target_stfs->f_type);
10097             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10098             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10099             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10100             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10101             __put_user(stfs.f_files, &target_stfs->f_files);
10102             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10103             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10104             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10105             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10106             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10107 #ifdef _STATFS_F_FLAGS
10108             __put_user(stfs.f_flags, &target_stfs->f_flags);
10109 #else
10110             __put_user(0, &target_stfs->f_flags);
10111 #endif
10112             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10113             unlock_user_struct(target_stfs, arg3, 1);
10114         }
10115         return ret;
10116     case TARGET_NR_fstatfs64:
10117         ret = get_errno(fstatfs(arg1, &stfs));
10118         goto convert_statfs64;
10119 #endif
10120 #ifdef TARGET_NR_socketcall
10121     case TARGET_NR_socketcall:
10122         return do_socketcall(arg1, arg2);
10123 #endif
10124 #ifdef TARGET_NR_accept
10125     case TARGET_NR_accept:
10126         return do_accept4(arg1, arg2, arg3, 0);
10127 #endif
10128 #ifdef TARGET_NR_accept4
10129     case TARGET_NR_accept4:
10130         return do_accept4(arg1, arg2, arg3, arg4);
10131 #endif
10132 #ifdef TARGET_NR_bind
10133     case TARGET_NR_bind:
10134         return do_bind(arg1, arg2, arg3);
10135 #endif
10136 #ifdef TARGET_NR_connect
10137     case TARGET_NR_connect:
10138         return do_connect(arg1, arg2, arg3);
10139 #endif
10140 #ifdef TARGET_NR_getpeername
10141     case TARGET_NR_getpeername:
10142         return do_getpeername(arg1, arg2, arg3);
10143 #endif
10144 #ifdef TARGET_NR_getsockname
10145     case TARGET_NR_getsockname:
10146         return do_getsockname(arg1, arg2, arg3);
10147 #endif
10148 #ifdef TARGET_NR_getsockopt
10149     case TARGET_NR_getsockopt:
10150         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10151 #endif
10152 #ifdef TARGET_NR_listen
10153     case TARGET_NR_listen:
10154         return get_errno(listen(arg1, arg2));
10155 #endif
10156 #ifdef TARGET_NR_recv
10157     case TARGET_NR_recv:
10158         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10159 #endif
10160 #ifdef TARGET_NR_recvfrom
10161     case TARGET_NR_recvfrom:
10162         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10163 #endif
10164 #ifdef TARGET_NR_recvmsg
10165     case TARGET_NR_recvmsg:
10166         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10167 #endif
10168 #ifdef TARGET_NR_send
10169     case TARGET_NR_send:
10170         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10171 #endif
10172 #ifdef TARGET_NR_sendmsg
10173     case TARGET_NR_sendmsg:
10174         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10175 #endif
10176 #ifdef TARGET_NR_sendmmsg
10177     case TARGET_NR_sendmmsg:
10178         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10179 #endif
10180 #ifdef TARGET_NR_recvmmsg
10181     case TARGET_NR_recvmmsg:
10182         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10183 #endif
10184 #ifdef TARGET_NR_sendto
10185     case TARGET_NR_sendto:
10186         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10187 #endif
10188 #ifdef TARGET_NR_shutdown
10189     case TARGET_NR_shutdown:
10190         return get_errno(shutdown(arg1, arg2));
10191 #endif
10192 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10193     case TARGET_NR_getrandom:
10194         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10195         if (!p) {
10196             return -TARGET_EFAULT;
10197         }
10198         ret = get_errno(getrandom(p, arg2, arg3));
10199         unlock_user(p, arg1, ret);
10200         return ret;
10201 #endif
10202 #ifdef TARGET_NR_socket
10203     case TARGET_NR_socket:
10204         return do_socket(arg1, arg2, arg3);
10205 #endif
10206 #ifdef TARGET_NR_socketpair
10207     case TARGET_NR_socketpair:
10208         return do_socketpair(arg1, arg2, arg3, arg4);
10209 #endif
10210 #ifdef TARGET_NR_setsockopt
10211     case TARGET_NR_setsockopt:
10212         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10213 #endif
10214 #if defined(TARGET_NR_syslog)
10215     case TARGET_NR_syslog:
10216         {
10217             int len = arg2;
10218 
10219             switch (arg1) {
10220             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10221             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10222             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10223             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10224             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10225             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10226             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10227             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10228                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10229             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10230             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10231             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10232                 {
10233                     if (len < 0) {
10234                         return -TARGET_EINVAL;
10235                     }
10236                     if (len == 0) {
10237                         return 0;
10238                     }
10239                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10240                     if (!p) {
10241                         return -TARGET_EFAULT;
10242                     }
10243                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10244                     unlock_user(p, arg2, arg3);
10245                 }
10246                 return ret;
10247             default:
10248                 return -TARGET_EINVAL;
10249             }
10250         }
10251         break;
10252 #endif
10253     case TARGET_NR_setitimer:
10254         {
10255             struct itimerval value, ovalue, *pvalue;
10256 
10257             if (arg2) {
10258                 pvalue = &value;
10259                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10260                     || copy_from_user_timeval(&pvalue->it_value,
10261                                               arg2 + sizeof(struct target_timeval)))
10262                     return -TARGET_EFAULT;
10263             } else {
10264                 pvalue = NULL;
10265             }
10266             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10267             if (!is_error(ret) && arg3) {
10268                 if (copy_to_user_timeval(arg3,
10269                                          &ovalue.it_interval)
10270                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10271                                             &ovalue.it_value))
10272                     return -TARGET_EFAULT;
10273             }
10274         }
10275         return ret;
10276     case TARGET_NR_getitimer:
10277         {
10278             struct itimerval value;
10279 
10280             ret = get_errno(getitimer(arg1, &value));
10281             if (!is_error(ret) && arg2) {
10282                 if (copy_to_user_timeval(arg2,
10283                                          &value.it_interval)
10284                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10285                                             &value.it_value))
10286                     return -TARGET_EFAULT;
10287             }
10288         }
10289         return ret;
10290 #ifdef TARGET_NR_stat
10291     case TARGET_NR_stat:
10292         if (!(p = lock_user_string(arg1))) {
10293             return -TARGET_EFAULT;
10294         }
10295         ret = get_errno(stat(path(p), &st));
10296         unlock_user(p, arg1, 0);
10297         goto do_stat;
10298 #endif
10299 #ifdef TARGET_NR_lstat
10300     case TARGET_NR_lstat:
10301         if (!(p = lock_user_string(arg1))) {
10302             return -TARGET_EFAULT;
10303         }
10304         ret = get_errno(lstat(path(p), &st));
10305         unlock_user(p, arg1, 0);
10306         goto do_stat;
10307 #endif
10308 #ifdef TARGET_NR_fstat
10309     case TARGET_NR_fstat:
10310         {
10311             ret = get_errno(fstat(arg1, &st));
10312 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10313         do_stat:
10314 #endif
10315             if (!is_error(ret)) {
10316                 struct target_stat *target_st;
10317 
10318                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10319                     return -TARGET_EFAULT;
10320                 memset(target_st, 0, sizeof(*target_st));
10321                 __put_user(st.st_dev, &target_st->st_dev);
10322                 __put_user(st.st_ino, &target_st->st_ino);
10323                 __put_user(st.st_mode, &target_st->st_mode);
10324                 __put_user(st.st_uid, &target_st->st_uid);
10325                 __put_user(st.st_gid, &target_st->st_gid);
10326                 __put_user(st.st_nlink, &target_st->st_nlink);
10327                 __put_user(st.st_rdev, &target_st->st_rdev);
10328                 __put_user(st.st_size, &target_st->st_size);
10329                 __put_user(st.st_blksize, &target_st->st_blksize);
10330                 __put_user(st.st_blocks, &target_st->st_blocks);
10331                 __put_user(st.st_atime, &target_st->target_st_atime);
10332                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10333                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10334 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10335                 __put_user(st.st_atim.tv_nsec,
10336                            &target_st->target_st_atime_nsec);
10337                 __put_user(st.st_mtim.tv_nsec,
10338                            &target_st->target_st_mtime_nsec);
10339                 __put_user(st.st_ctim.tv_nsec,
10340                            &target_st->target_st_ctime_nsec);
10341 #endif
10342                 unlock_user_struct(target_st, arg2, 1);
10343             }
10344         }
10345         return ret;
10346 #endif
10347     case TARGET_NR_vhangup:
10348         return get_errno(vhangup());
10349 #ifdef TARGET_NR_syscall
10350     case TARGET_NR_syscall:
10351         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10352                           arg6, arg7, arg8, 0);
10353 #endif
10354 #if defined(TARGET_NR_wait4)
10355     case TARGET_NR_wait4:
10356         {
10357             int status;
10358             abi_long status_ptr = arg2;
10359             struct rusage rusage, *rusage_ptr;
10360             abi_ulong target_rusage = arg4;
10361             abi_long rusage_err;
10362             if (target_rusage)
10363                 rusage_ptr = &rusage;
10364             else
10365                 rusage_ptr = NULL;
10366             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10367             if (!is_error(ret)) {
10368                 if (status_ptr && ret) {
10369                     status = host_to_target_waitstatus(status);
10370                     if (put_user_s32(status, status_ptr))
10371                         return -TARGET_EFAULT;
10372                 }
10373                 if (target_rusage) {
10374                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10375                     if (rusage_err) {
10376                         ret = rusage_err;
10377                     }
10378                 }
10379             }
10380         }
10381         return ret;
10382 #endif
10383 #ifdef TARGET_NR_swapoff
10384     case TARGET_NR_swapoff:
10385         if (!(p = lock_user_string(arg1)))
10386             return -TARGET_EFAULT;
10387         ret = get_errno(swapoff(p));
10388         unlock_user(p, arg1, 0);
10389         return ret;
10390 #endif
10391     case TARGET_NR_sysinfo:
10392         {
10393             struct target_sysinfo *target_value;
10394             struct sysinfo value;
10395             ret = get_errno(sysinfo(&value));
10396             if (!is_error(ret) && arg1)
10397             {
10398                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10399                     return -TARGET_EFAULT;
10400                 __put_user(value.uptime, &target_value->uptime);
10401                 __put_user(value.loads[0], &target_value->loads[0]);
10402                 __put_user(value.loads[1], &target_value->loads[1]);
10403                 __put_user(value.loads[2], &target_value->loads[2]);
10404                 __put_user(value.totalram, &target_value->totalram);
10405                 __put_user(value.freeram, &target_value->freeram);
10406                 __put_user(value.sharedram, &target_value->sharedram);
10407                 __put_user(value.bufferram, &target_value->bufferram);
10408                 __put_user(value.totalswap, &target_value->totalswap);
10409                 __put_user(value.freeswap, &target_value->freeswap);
10410                 __put_user(value.procs, &target_value->procs);
10411                 __put_user(value.totalhigh, &target_value->totalhigh);
10412                 __put_user(value.freehigh, &target_value->freehigh);
10413                 __put_user(value.mem_unit, &target_value->mem_unit);
10414                 unlock_user_struct(target_value, arg1, 1);
10415             }
10416         }
10417         return ret;
10418 #ifdef TARGET_NR_ipc
10419     case TARGET_NR_ipc:
10420         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10421 #endif
10422 #ifdef TARGET_NR_semget
10423     case TARGET_NR_semget:
10424         return get_errno(semget(arg1, arg2, arg3));
10425 #endif
10426 #ifdef TARGET_NR_semop
10427     case TARGET_NR_semop:
10428         return do_semtimedop(arg1, arg2, arg3, 0, false);
10429 #endif
10430 #ifdef TARGET_NR_semtimedop
10431     case TARGET_NR_semtimedop:
10432         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10433 #endif
10434 #ifdef TARGET_NR_semtimedop_time64
10435     case TARGET_NR_semtimedop_time64:
10436         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10437 #endif
10438 #ifdef TARGET_NR_semctl
10439     case TARGET_NR_semctl:
10440         return do_semctl(arg1, arg2, arg3, arg4);
10441 #endif
10442 #ifdef TARGET_NR_msgctl
10443     case TARGET_NR_msgctl:
10444         return do_msgctl(arg1, arg2, arg3);
10445 #endif
10446 #ifdef TARGET_NR_msgget
10447     case TARGET_NR_msgget:
10448         return get_errno(msgget(arg1, arg2));
10449 #endif
10450 #ifdef TARGET_NR_msgrcv
10451     case TARGET_NR_msgrcv:
10452         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10453 #endif
10454 #ifdef TARGET_NR_msgsnd
10455     case TARGET_NR_msgsnd:
10456         return do_msgsnd(arg1, arg2, arg3, arg4);
10457 #endif
10458 #ifdef TARGET_NR_shmget
10459     case TARGET_NR_shmget:
10460         return get_errno(shmget(arg1, arg2, arg3));
10461 #endif
10462 #ifdef TARGET_NR_shmctl
10463     case TARGET_NR_shmctl:
10464         return do_shmctl(arg1, arg2, arg3);
10465 #endif
10466 #ifdef TARGET_NR_shmat
10467     case TARGET_NR_shmat:
10468         return do_shmat(cpu_env, arg1, arg2, arg3);
10469 #endif
10470 #ifdef TARGET_NR_shmdt
10471     case TARGET_NR_shmdt:
10472         return do_shmdt(arg1);
10473 #endif
10474     case TARGET_NR_fsync:
10475         return get_errno(fsync(arg1));
10476     case TARGET_NR_clone:
10477         /* Linux manages to have three different orderings for its
10478          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10479          * match the kernel's CONFIG_CLONE_* settings.
10480          * Microblaze is further special in that it uses a sixth
10481          * implicit argument to clone for the TLS pointer.
10482          */
10483 #if defined(TARGET_MICROBLAZE)
10484         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10485 #elif defined(TARGET_CLONE_BACKWARDS)
10486         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10487 #elif defined(TARGET_CLONE_BACKWARDS2)
10488         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10489 #else
10490         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10491 #endif
10492         return ret;
10493 #ifdef __NR_exit_group
10494         /* new thread calls */
10495     case TARGET_NR_exit_group:
10496         preexit_cleanup(cpu_env, arg1);
10497         return get_errno(exit_group(arg1));
10498 #endif
10499     case TARGET_NR_setdomainname:
10500         if (!(p = lock_user_string(arg1)))
10501             return -TARGET_EFAULT;
10502         ret = get_errno(setdomainname(p, arg2));
10503         unlock_user(p, arg1, 0);
10504         return ret;
10505     case TARGET_NR_uname:
10506         /* no need to transcode because we use the linux syscall */
10507         {
10508             struct new_utsname * buf;
10509 
10510             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10511                 return -TARGET_EFAULT;
10512             ret = get_errno(sys_uname(buf));
10513             if (!is_error(ret)) {
10514                 /* Overwrite the native machine name with whatever is being
10515                    emulated. */
10516                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10517                           sizeof(buf->machine));
10518                 /* Allow the user to override the reported release.  */
10519                 if (qemu_uname_release && *qemu_uname_release) {
10520                     g_strlcpy(buf->release, qemu_uname_release,
10521                               sizeof(buf->release));
10522                 }
10523             }
10524             unlock_user_struct(buf, arg1, 1);
10525         }
10526         return ret;
10527 #ifdef TARGET_I386
10528     case TARGET_NR_modify_ldt:
10529         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10530 #if !defined(TARGET_X86_64)
10531     case TARGET_NR_vm86:
10532         return do_vm86(cpu_env, arg1, arg2);
10533 #endif
10534 #endif
10535 #if defined(TARGET_NR_adjtimex)
10536     case TARGET_NR_adjtimex:
10537         {
10538             struct timex host_buf;
10539 
10540             if (target_to_host_timex(&host_buf, arg1) != 0) {
10541                 return -TARGET_EFAULT;
10542             }
10543             ret = get_errno(adjtimex(&host_buf));
10544             if (!is_error(ret)) {
10545                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10546                     return -TARGET_EFAULT;
10547                 }
10548             }
10549         }
10550         return ret;
10551 #endif
10552 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10553     case TARGET_NR_clock_adjtime:
10554         {
10555             struct timex htx, *phtx = &htx;
10556 
10557             if (target_to_host_timex(phtx, arg2) != 0) {
10558                 return -TARGET_EFAULT;
10559             }
10560             ret = get_errno(clock_adjtime(arg1, phtx));
10561             if (!is_error(ret) && phtx) {
10562                 if (host_to_target_timex(arg2, phtx) != 0) {
10563                     return -TARGET_EFAULT;
10564                 }
10565             }
10566         }
10567         return ret;
10568 #endif
10569 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10570     case TARGET_NR_clock_adjtime64:
10571         {
10572             struct timex htx;
10573 
10574             if (target_to_host_timex64(&htx, arg2) != 0) {
10575                 return -TARGET_EFAULT;
10576             }
10577             ret = get_errno(clock_adjtime(arg1, &htx));
10578             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10579                     return -TARGET_EFAULT;
10580             }
10581         }
10582         return ret;
10583 #endif
10584     case TARGET_NR_getpgid:
10585         return get_errno(getpgid(arg1));
10586     case TARGET_NR_fchdir:
10587         return get_errno(fchdir(arg1));
10588     case TARGET_NR_personality:
10589         return get_errno(personality(arg1));
10590 #ifdef TARGET_NR__llseek /* Not on alpha */
10591     case TARGET_NR__llseek:
10592         {
10593             int64_t res;
10594 #if !defined(__NR_llseek)
10595             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10596             if (res == -1) {
10597                 ret = get_errno(res);
10598             } else {
10599                 ret = 0;
10600             }
10601 #else
10602             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10603 #endif
10604             if ((ret == 0) && put_user_s64(res, arg4)) {
10605                 return -TARGET_EFAULT;
10606             }
10607         }
10608         return ret;
10609 #endif
10610 #ifdef TARGET_NR_getdents
10611     case TARGET_NR_getdents:
10612         return do_getdents(arg1, arg2, arg3);
10613 #endif /* TARGET_NR_getdents */
10614 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10615     case TARGET_NR_getdents64:
10616         return do_getdents64(arg1, arg2, arg3);
10617 #endif /* TARGET_NR_getdents64 */
10618 #if defined(TARGET_NR__newselect)
10619     case TARGET_NR__newselect:
10620         return do_select(arg1, arg2, arg3, arg4, arg5);
10621 #endif
10622 #ifdef TARGET_NR_poll
10623     case TARGET_NR_poll:
10624         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10625 #endif
10626 #ifdef TARGET_NR_ppoll
10627     case TARGET_NR_ppoll:
10628         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10629 #endif
10630 #ifdef TARGET_NR_ppoll_time64
10631     case TARGET_NR_ppoll_time64:
10632         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10633 #endif
10634     case TARGET_NR_flock:
10635         /* NOTE: the flock constant seems to be the same for every
10636            Linux platform */
10637         return get_errno(safe_flock(arg1, arg2));
10638     case TARGET_NR_readv:
10639         {
10640             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10641             if (vec != NULL) {
10642                 ret = get_errno(safe_readv(arg1, vec, arg3));
10643                 unlock_iovec(vec, arg2, arg3, 1);
10644             } else {
10645                 ret = -host_to_target_errno(errno);
10646             }
10647         }
10648         return ret;
10649     case TARGET_NR_writev:
10650         {
10651             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10652             if (vec != NULL) {
10653                 ret = get_errno(safe_writev(arg1, vec, arg3));
10654                 unlock_iovec(vec, arg2, arg3, 0);
10655             } else {
10656                 ret = -host_to_target_errno(errno);
10657             }
10658         }
10659         return ret;
10660 #if defined(TARGET_NR_preadv)
10661     case TARGET_NR_preadv:
10662         {
10663             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10664             if (vec != NULL) {
10665                 unsigned long low, high;
10666 
10667                 target_to_host_low_high(arg4, arg5, &low, &high);
10668                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10669                 unlock_iovec(vec, arg2, arg3, 1);
10670             } else {
10671                 ret = -host_to_target_errno(errno);
10672            }
10673         }
10674         return ret;
10675 #endif
10676 #if defined(TARGET_NR_pwritev)
10677     case TARGET_NR_pwritev:
10678         {
10679             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10680             if (vec != NULL) {
10681                 unsigned long low, high;
10682 
10683                 target_to_host_low_high(arg4, arg5, &low, &high);
10684                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10685                 unlock_iovec(vec, arg2, arg3, 0);
10686             } else {
10687                 ret = -host_to_target_errno(errno);
10688            }
10689         }
10690         return ret;
10691 #endif
10692     case TARGET_NR_getsid:
10693         return get_errno(getsid(arg1));
10694 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10695     case TARGET_NR_fdatasync:
10696         return get_errno(fdatasync(arg1));
10697 #endif
10698     case TARGET_NR_sched_getaffinity:
10699         {
10700             unsigned int mask_size;
10701             unsigned long *mask;
10702 
10703             /*
10704              * sched_getaffinity needs multiples of ulong, so need to take
10705              * care of mismatches between target ulong and host ulong sizes.
10706              */
10707             if (arg2 & (sizeof(abi_ulong) - 1)) {
10708                 return -TARGET_EINVAL;
10709             }
10710             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10711 
10712             mask = alloca(mask_size);
10713             memset(mask, 0, mask_size);
10714             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10715 
10716             if (!is_error(ret)) {
10717                 if (ret > arg2) {
10718                     /* More data returned than the caller's buffer will fit.
10719                      * This only happens if sizeof(abi_long) < sizeof(long)
10720                      * and the caller passed us a buffer holding an odd number
10721                      * of abi_longs. If the host kernel is actually using the
10722                      * extra 4 bytes then fail EINVAL; otherwise we can just
10723                      * ignore them and only copy the interesting part.
10724                      */
10725                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10726                     if (numcpus > arg2 * 8) {
10727                         return -TARGET_EINVAL;
10728                     }
10729                     ret = arg2;
10730                 }
10731 
10732                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10733                     return -TARGET_EFAULT;
10734                 }
10735             }
10736         }
10737         return ret;
10738     case TARGET_NR_sched_setaffinity:
10739         {
10740             unsigned int mask_size;
10741             unsigned long *mask;
10742 
10743             /*
10744              * sched_setaffinity needs multiples of ulong, so need to take
10745              * care of mismatches between target ulong and host ulong sizes.
10746              */
10747             if (arg2 & (sizeof(abi_ulong) - 1)) {
10748                 return -TARGET_EINVAL;
10749             }
10750             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10751             mask = alloca(mask_size);
10752 
10753             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10754             if (ret) {
10755                 return ret;
10756             }
10757 
10758             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10759         }
10760     case TARGET_NR_getcpu:
10761         {
10762             unsigned cpu, node;
10763             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10764                                        arg2 ? &node : NULL,
10765                                        NULL));
10766             if (is_error(ret)) {
10767                 return ret;
10768             }
10769             if (arg1 && put_user_u32(cpu, arg1)) {
10770                 return -TARGET_EFAULT;
10771             }
10772             if (arg2 && put_user_u32(node, arg2)) {
10773                 return -TARGET_EFAULT;
10774             }
10775         }
10776         return ret;
10777     case TARGET_NR_sched_setparam:
10778         {
10779             struct target_sched_param *target_schp;
10780             struct sched_param schp;
10781 
10782             if (arg2 == 0) {
10783                 return -TARGET_EINVAL;
10784             }
10785             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10786                 return -TARGET_EFAULT;
10787             }
10788             schp.sched_priority = tswap32(target_schp->sched_priority);
10789             unlock_user_struct(target_schp, arg2, 0);
10790             return get_errno(sys_sched_setparam(arg1, &schp));
10791         }
10792     case TARGET_NR_sched_getparam:
10793         {
10794             struct target_sched_param *target_schp;
10795             struct sched_param schp;
10796 
10797             if (arg2 == 0) {
10798                 return -TARGET_EINVAL;
10799             }
10800             ret = get_errno(sys_sched_getparam(arg1, &schp));
10801             if (!is_error(ret)) {
10802                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10803                     return -TARGET_EFAULT;
10804                 }
10805                 target_schp->sched_priority = tswap32(schp.sched_priority);
10806                 unlock_user_struct(target_schp, arg2, 1);
10807             }
10808         }
10809         return ret;
10810     case TARGET_NR_sched_setscheduler:
10811         {
10812             struct target_sched_param *target_schp;
10813             struct sched_param schp;
10814             if (arg3 == 0) {
10815                 return -TARGET_EINVAL;
10816             }
10817             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10818                 return -TARGET_EFAULT;
10819             }
10820             schp.sched_priority = tswap32(target_schp->sched_priority);
10821             unlock_user_struct(target_schp, arg3, 0);
10822             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10823         }
10824     case TARGET_NR_sched_getscheduler:
10825         return get_errno(sys_sched_getscheduler(arg1));
10826     case TARGET_NR_sched_getattr:
10827         {
10828             struct target_sched_attr *target_scha;
10829             struct sched_attr scha;
10830             if (arg2 == 0) {
10831                 return -TARGET_EINVAL;
10832             }
10833             if (arg3 > sizeof(scha)) {
10834                 arg3 = sizeof(scha);
10835             }
10836             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10837             if (!is_error(ret)) {
10838                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10839                 if (!target_scha) {
10840                     return -TARGET_EFAULT;
10841                 }
10842                 target_scha->size = tswap32(scha.size);
10843                 target_scha->sched_policy = tswap32(scha.sched_policy);
10844                 target_scha->sched_flags = tswap64(scha.sched_flags);
10845                 target_scha->sched_nice = tswap32(scha.sched_nice);
10846                 target_scha->sched_priority = tswap32(scha.sched_priority);
10847                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10848                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10849                 target_scha->sched_period = tswap64(scha.sched_period);
10850                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10851                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10852                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10853                 }
10854                 unlock_user(target_scha, arg2, arg3);
10855             }
10856             return ret;
10857         }
10858     case TARGET_NR_sched_setattr:
10859         {
10860             struct target_sched_attr *target_scha;
10861             struct sched_attr scha;
10862             uint32_t size;
10863             int zeroed;
10864             if (arg2 == 0) {
10865                 return -TARGET_EINVAL;
10866             }
10867             if (get_user_u32(size, arg2)) {
10868                 return -TARGET_EFAULT;
10869             }
10870             if (!size) {
10871                 size = offsetof(struct target_sched_attr, sched_util_min);
10872             }
10873             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10874                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10875                     return -TARGET_EFAULT;
10876                 }
10877                 return -TARGET_E2BIG;
10878             }
10879 
10880             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10881             if (zeroed < 0) {
10882                 return zeroed;
10883             } else if (zeroed == 0) {
10884                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10885                     return -TARGET_EFAULT;
10886                 }
10887                 return -TARGET_E2BIG;
10888             }
10889             if (size > sizeof(struct target_sched_attr)) {
10890                 size = sizeof(struct target_sched_attr);
10891             }
10892 
10893             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10894             if (!target_scha) {
10895                 return -TARGET_EFAULT;
10896             }
10897             scha.size = size;
10898             scha.sched_policy = tswap32(target_scha->sched_policy);
10899             scha.sched_flags = tswap64(target_scha->sched_flags);
10900             scha.sched_nice = tswap32(target_scha->sched_nice);
10901             scha.sched_priority = tswap32(target_scha->sched_priority);
10902             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10903             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10904             scha.sched_period = tswap64(target_scha->sched_period);
10905             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10906                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10907                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10908             }
10909             unlock_user(target_scha, arg2, 0);
10910             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10911         }
10912     case TARGET_NR_sched_yield:
10913         return get_errno(sched_yield());
10914     case TARGET_NR_sched_get_priority_max:
10915         return get_errno(sched_get_priority_max(arg1));
10916     case TARGET_NR_sched_get_priority_min:
10917         return get_errno(sched_get_priority_min(arg1));
10918 #ifdef TARGET_NR_sched_rr_get_interval
10919     case TARGET_NR_sched_rr_get_interval:
10920         {
10921             struct timespec ts;
10922             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10923             if (!is_error(ret)) {
10924                 ret = host_to_target_timespec(arg2, &ts);
10925             }
10926         }
10927         return ret;
10928 #endif
10929 #ifdef TARGET_NR_sched_rr_get_interval_time64
10930     case TARGET_NR_sched_rr_get_interval_time64:
10931         {
10932             struct timespec ts;
10933             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10934             if (!is_error(ret)) {
10935                 ret = host_to_target_timespec64(arg2, &ts);
10936             }
10937         }
10938         return ret;
10939 #endif
10940 #if defined(TARGET_NR_nanosleep)
10941     case TARGET_NR_nanosleep:
10942         {
10943             struct timespec req, rem;
10944             target_to_host_timespec(&req, arg1);
10945             ret = get_errno(safe_nanosleep(&req, &rem));
10946             if (is_error(ret) && arg2) {
10947                 host_to_target_timespec(arg2, &rem);
10948             }
10949         }
10950         return ret;
10951 #endif
10952     case TARGET_NR_prctl:
10953         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10954         break;
10955 #ifdef TARGET_NR_arch_prctl
10956     case TARGET_NR_arch_prctl:
10957         return do_arch_prctl(cpu_env, arg1, arg2);
10958 #endif
10959 #ifdef TARGET_NR_pread64
10960     case TARGET_NR_pread64:
10961         if (regpairs_aligned(cpu_env, num)) {
10962             arg4 = arg5;
10963             arg5 = arg6;
10964         }
10965         if (arg2 == 0 && arg3 == 0) {
10966             /* Special-case NULL buffer and zero length, which should succeed */
10967             p = 0;
10968         } else {
10969             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10970             if (!p) {
10971                 return -TARGET_EFAULT;
10972             }
10973         }
10974         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10975         unlock_user(p, arg2, ret);
10976         return ret;
10977     case TARGET_NR_pwrite64:
10978         if (regpairs_aligned(cpu_env, num)) {
10979             arg4 = arg5;
10980             arg5 = arg6;
10981         }
10982         if (arg2 == 0 && arg3 == 0) {
10983             /* Special-case NULL buffer and zero length, which should succeed */
10984             p = 0;
10985         } else {
10986             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10987             if (!p) {
10988                 return -TARGET_EFAULT;
10989             }
10990         }
10991         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10992         unlock_user(p, arg2, 0);
10993         return ret;
10994 #endif
10995     case TARGET_NR_getcwd:
10996         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10997             return -TARGET_EFAULT;
10998         ret = get_errno(sys_getcwd1(p, arg2));
10999         unlock_user(p, arg1, ret);
11000         return ret;
11001     case TARGET_NR_capget:
11002     case TARGET_NR_capset:
11003     {
11004         struct target_user_cap_header *target_header;
11005         struct target_user_cap_data *target_data = NULL;
11006         struct __user_cap_header_struct header;
11007         struct __user_cap_data_struct data[2];
11008         struct __user_cap_data_struct *dataptr = NULL;
11009         int i, target_datalen;
11010         int data_items = 1;
11011 
11012         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11013             return -TARGET_EFAULT;
11014         }
11015         header.version = tswap32(target_header->version);
11016         header.pid = tswap32(target_header->pid);
11017 
11018         if (header.version != _LINUX_CAPABILITY_VERSION) {
11019             /* Version 2 and up takes pointer to two user_data structs */
11020             data_items = 2;
11021         }
11022 
11023         target_datalen = sizeof(*target_data) * data_items;
11024 
11025         if (arg2) {
11026             if (num == TARGET_NR_capget) {
11027                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11028             } else {
11029                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11030             }
11031             if (!target_data) {
11032                 unlock_user_struct(target_header, arg1, 0);
11033                 return -TARGET_EFAULT;
11034             }
11035 
11036             if (num == TARGET_NR_capset) {
11037                 for (i = 0; i < data_items; i++) {
11038                     data[i].effective = tswap32(target_data[i].effective);
11039                     data[i].permitted = tswap32(target_data[i].permitted);
11040                     data[i].inheritable = tswap32(target_data[i].inheritable);
11041                 }
11042             }
11043 
11044             dataptr = data;
11045         }
11046 
11047         if (num == TARGET_NR_capget) {
11048             ret = get_errno(capget(&header, dataptr));
11049         } else {
11050             ret = get_errno(capset(&header, dataptr));
11051         }
11052 
11053         /* The kernel always updates version for both capget and capset */
11054         target_header->version = tswap32(header.version);
11055         unlock_user_struct(target_header, arg1, 1);
11056 
11057         if (arg2) {
11058             if (num == TARGET_NR_capget) {
11059                 for (i = 0; i < data_items; i++) {
11060                     target_data[i].effective = tswap32(data[i].effective);
11061                     target_data[i].permitted = tswap32(data[i].permitted);
11062                     target_data[i].inheritable = tswap32(data[i].inheritable);
11063                 }
11064                 unlock_user(target_data, arg2, target_datalen);
11065             } else {
11066                 unlock_user(target_data, arg2, 0);
11067             }
11068         }
11069         return ret;
11070     }
11071     case TARGET_NR_sigaltstack:
11072         return do_sigaltstack(arg1, arg2, cpu_env);
11073 
11074 #ifdef CONFIG_SENDFILE
11075 #ifdef TARGET_NR_sendfile
11076     case TARGET_NR_sendfile:
11077     {
11078         off_t *offp = NULL;
11079         off_t off;
11080         if (arg3) {
11081             ret = get_user_sal(off, arg3);
11082             if (is_error(ret)) {
11083                 return ret;
11084             }
11085             offp = &off;
11086         }
11087         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11088         if (!is_error(ret) && arg3) {
11089             abi_long ret2 = put_user_sal(off, arg3);
11090             if (is_error(ret2)) {
11091                 ret = ret2;
11092             }
11093         }
11094         return ret;
11095     }
11096 #endif
11097 #ifdef TARGET_NR_sendfile64
11098     case TARGET_NR_sendfile64:
11099     {
11100         off_t *offp = NULL;
11101         off_t off;
11102         if (arg3) {
11103             ret = get_user_s64(off, arg3);
11104             if (is_error(ret)) {
11105                 return ret;
11106             }
11107             offp = &off;
11108         }
11109         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11110         if (!is_error(ret) && arg3) {
11111             abi_long ret2 = put_user_s64(off, arg3);
11112             if (is_error(ret2)) {
11113                 ret = ret2;
11114             }
11115         }
11116         return ret;
11117     }
11118 #endif
11119 #endif
11120 #ifdef TARGET_NR_vfork
11121     case TARGET_NR_vfork:
11122         return get_errno(do_fork(cpu_env,
11123                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11124                          0, 0, 0, 0));
11125 #endif
11126 #ifdef TARGET_NR_ugetrlimit
11127     case TARGET_NR_ugetrlimit:
11128     {
11129 	struct rlimit rlim;
11130 	int resource = target_to_host_resource(arg1);
11131 	ret = get_errno(getrlimit(resource, &rlim));
11132 	if (!is_error(ret)) {
11133 	    struct target_rlimit *target_rlim;
11134             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11135                 return -TARGET_EFAULT;
11136 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11137 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11138             unlock_user_struct(target_rlim, arg2, 1);
11139 	}
11140         return ret;
11141     }
11142 #endif
11143 #ifdef TARGET_NR_truncate64
11144     case TARGET_NR_truncate64:
11145         if (!(p = lock_user_string(arg1)))
11146             return -TARGET_EFAULT;
11147 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11148         unlock_user(p, arg1, 0);
11149         return ret;
11150 #endif
11151 #ifdef TARGET_NR_ftruncate64
11152     case TARGET_NR_ftruncate64:
11153         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11154 #endif
11155 #ifdef TARGET_NR_stat64
11156     case TARGET_NR_stat64:
11157         if (!(p = lock_user_string(arg1))) {
11158             return -TARGET_EFAULT;
11159         }
11160         ret = get_errno(stat(path(p), &st));
11161         unlock_user(p, arg1, 0);
11162         if (!is_error(ret))
11163             ret = host_to_target_stat64(cpu_env, arg2, &st);
11164         return ret;
11165 #endif
11166 #ifdef TARGET_NR_lstat64
11167     case TARGET_NR_lstat64:
11168         if (!(p = lock_user_string(arg1))) {
11169             return -TARGET_EFAULT;
11170         }
11171         ret = get_errno(lstat(path(p), &st));
11172         unlock_user(p, arg1, 0);
11173         if (!is_error(ret))
11174             ret = host_to_target_stat64(cpu_env, arg2, &st);
11175         return ret;
11176 #endif
11177 #ifdef TARGET_NR_fstat64
11178     case TARGET_NR_fstat64:
11179         ret = get_errno(fstat(arg1, &st));
11180         if (!is_error(ret))
11181             ret = host_to_target_stat64(cpu_env, arg2, &st);
11182         return ret;
11183 #endif
11184 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11185 #ifdef TARGET_NR_fstatat64
11186     case TARGET_NR_fstatat64:
11187 #endif
11188 #ifdef TARGET_NR_newfstatat
11189     case TARGET_NR_newfstatat:
11190 #endif
11191         if (!(p = lock_user_string(arg2))) {
11192             return -TARGET_EFAULT;
11193         }
11194         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11195         unlock_user(p, arg2, 0);
11196         if (!is_error(ret))
11197             ret = host_to_target_stat64(cpu_env, arg3, &st);
11198         return ret;
11199 #endif
11200 #if defined(TARGET_NR_statx)
11201     case TARGET_NR_statx:
11202         {
11203             struct target_statx *target_stx;
11204             int dirfd = arg1;
11205             int flags = arg3;
11206 
11207             p = lock_user_string(arg2);
11208             if (p == NULL) {
11209                 return -TARGET_EFAULT;
11210             }
11211 #if defined(__NR_statx)
11212             {
11213                 /*
11214                  * It is assumed that struct statx is architecture independent.
11215                  */
11216                 struct target_statx host_stx;
11217                 int mask = arg4;
11218 
11219                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11220                 if (!is_error(ret)) {
11221                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11222                         unlock_user(p, arg2, 0);
11223                         return -TARGET_EFAULT;
11224                     }
11225                 }
11226 
11227                 if (ret != -TARGET_ENOSYS) {
11228                     unlock_user(p, arg2, 0);
11229                     return ret;
11230                 }
11231             }
11232 #endif
11233             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11234             unlock_user(p, arg2, 0);
11235 
11236             if (!is_error(ret)) {
11237                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11238                     return -TARGET_EFAULT;
11239                 }
11240                 memset(target_stx, 0, sizeof(*target_stx));
11241                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11242                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11243                 __put_user(st.st_ino, &target_stx->stx_ino);
11244                 __put_user(st.st_mode, &target_stx->stx_mode);
11245                 __put_user(st.st_uid, &target_stx->stx_uid);
11246                 __put_user(st.st_gid, &target_stx->stx_gid);
11247                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11248                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11249                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11250                 __put_user(st.st_size, &target_stx->stx_size);
11251                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11252                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11253                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11254                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11255                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11256                 unlock_user_struct(target_stx, arg5, 1);
11257             }
11258         }
11259         return ret;
11260 #endif
11261 #ifdef TARGET_NR_lchown
11262     case TARGET_NR_lchown:
11263         if (!(p = lock_user_string(arg1)))
11264             return -TARGET_EFAULT;
11265         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11266         unlock_user(p, arg1, 0);
11267         return ret;
11268 #endif
11269 #ifdef TARGET_NR_getuid
11270     case TARGET_NR_getuid:
11271         return get_errno(high2lowuid(getuid()));
11272 #endif
11273 #ifdef TARGET_NR_getgid
11274     case TARGET_NR_getgid:
11275         return get_errno(high2lowgid(getgid()));
11276 #endif
11277 #ifdef TARGET_NR_geteuid
11278     case TARGET_NR_geteuid:
11279         return get_errno(high2lowuid(geteuid()));
11280 #endif
11281 #ifdef TARGET_NR_getegid
11282     case TARGET_NR_getegid:
11283         return get_errno(high2lowgid(getegid()));
11284 #endif
11285     case TARGET_NR_setreuid:
11286         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11287     case TARGET_NR_setregid:
11288         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11289     case TARGET_NR_getgroups:
11290         {
11291             int gidsetsize = arg1;
11292             target_id *target_grouplist;
11293             gid_t *grouplist;
11294             int i;
11295 
11296             grouplist = alloca(gidsetsize * sizeof(gid_t));
11297             ret = get_errno(getgroups(gidsetsize, grouplist));
11298             if (gidsetsize == 0)
11299                 return ret;
11300             if (!is_error(ret)) {
11301                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11302                 if (!target_grouplist)
11303                     return -TARGET_EFAULT;
11304                 for(i = 0;i < ret; i++)
11305                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11306                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11307             }
11308         }
11309         return ret;
11310     case TARGET_NR_setgroups:
11311         {
11312             int gidsetsize = arg1;
11313             target_id *target_grouplist;
11314             gid_t *grouplist = NULL;
11315             int i;
11316             if (gidsetsize) {
11317                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11318                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11319                 if (!target_grouplist) {
11320                     return -TARGET_EFAULT;
11321                 }
11322                 for (i = 0; i < gidsetsize; i++) {
11323                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11324                 }
11325                 unlock_user(target_grouplist, arg2, 0);
11326             }
11327             return get_errno(setgroups(gidsetsize, grouplist));
11328         }
11329     case TARGET_NR_fchown:
11330         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11331 #if defined(TARGET_NR_fchownat)
11332     case TARGET_NR_fchownat:
11333         if (!(p = lock_user_string(arg2)))
11334             return -TARGET_EFAULT;
11335         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11336                                  low2highgid(arg4), arg5));
11337         unlock_user(p, arg2, 0);
11338         return ret;
11339 #endif
11340 #ifdef TARGET_NR_setresuid
11341     case TARGET_NR_setresuid:
11342         return get_errno(sys_setresuid(low2highuid(arg1),
11343                                        low2highuid(arg2),
11344                                        low2highuid(arg3)));
11345 #endif
11346 #ifdef TARGET_NR_getresuid
11347     case TARGET_NR_getresuid:
11348         {
11349             uid_t ruid, euid, suid;
11350             ret = get_errno(getresuid(&ruid, &euid, &suid));
11351             if (!is_error(ret)) {
11352                 if (put_user_id(high2lowuid(ruid), arg1)
11353                     || put_user_id(high2lowuid(euid), arg2)
11354                     || put_user_id(high2lowuid(suid), arg3))
11355                     return -TARGET_EFAULT;
11356             }
11357         }
11358         return ret;
11359 #endif
11360 #ifdef TARGET_NR_getresgid
11361     case TARGET_NR_setresgid:
11362         return get_errno(sys_setresgid(low2highgid(arg1),
11363                                        low2highgid(arg2),
11364                                        low2highgid(arg3)));
11365 #endif
11366 #ifdef TARGET_NR_getresgid
11367     case TARGET_NR_getresgid:
11368         {
11369             gid_t rgid, egid, sgid;
11370             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11371             if (!is_error(ret)) {
11372                 if (put_user_id(high2lowgid(rgid), arg1)
11373                     || put_user_id(high2lowgid(egid), arg2)
11374                     || put_user_id(high2lowgid(sgid), arg3))
11375                     return -TARGET_EFAULT;
11376             }
11377         }
11378         return ret;
11379 #endif
11380 #ifdef TARGET_NR_chown
11381     case TARGET_NR_chown:
11382         if (!(p = lock_user_string(arg1)))
11383             return -TARGET_EFAULT;
11384         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11385         unlock_user(p, arg1, 0);
11386         return ret;
11387 #endif
11388     case TARGET_NR_setuid:
11389         return get_errno(sys_setuid(low2highuid(arg1)));
11390     case TARGET_NR_setgid:
11391         return get_errno(sys_setgid(low2highgid(arg1)));
11392     case TARGET_NR_setfsuid:
11393         return get_errno(setfsuid(arg1));
11394     case TARGET_NR_setfsgid:
11395         return get_errno(setfsgid(arg1));
11396 
11397 #ifdef TARGET_NR_lchown32
11398     case TARGET_NR_lchown32:
11399         if (!(p = lock_user_string(arg1)))
11400             return -TARGET_EFAULT;
11401         ret = get_errno(lchown(p, arg2, arg3));
11402         unlock_user(p, arg1, 0);
11403         return ret;
11404 #endif
11405 #ifdef TARGET_NR_getuid32
11406     case TARGET_NR_getuid32:
11407         return get_errno(getuid());
11408 #endif
11409 
11410 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11411    /* Alpha specific */
11412     case TARGET_NR_getxuid:
11413          {
11414             uid_t euid;
11415             euid=geteuid();
11416             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11417          }
11418         return get_errno(getuid());
11419 #endif
11420 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11421    /* Alpha specific */
11422     case TARGET_NR_getxgid:
11423          {
11424             uid_t egid;
11425             egid=getegid();
11426             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11427          }
11428         return get_errno(getgid());
11429 #endif
11430 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11431     /* Alpha specific */
11432     case TARGET_NR_osf_getsysinfo:
11433         ret = -TARGET_EOPNOTSUPP;
11434         switch (arg1) {
11435           case TARGET_GSI_IEEE_FP_CONTROL:
11436             {
11437                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11438                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11439 
11440                 swcr &= ~SWCR_STATUS_MASK;
11441                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11442 
11443                 if (put_user_u64 (swcr, arg2))
11444                         return -TARGET_EFAULT;
11445                 ret = 0;
11446             }
11447             break;
11448 
11449           /* case GSI_IEEE_STATE_AT_SIGNAL:
11450              -- Not implemented in linux kernel.
11451              case GSI_UACPROC:
11452              -- Retrieves current unaligned access state; not much used.
11453              case GSI_PROC_TYPE:
11454              -- Retrieves implver information; surely not used.
11455              case GSI_GET_HWRPB:
11456              -- Grabs a copy of the HWRPB; surely not used.
11457           */
11458         }
11459         return ret;
11460 #endif
11461 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11462     /* Alpha specific */
11463     case TARGET_NR_osf_setsysinfo:
11464         ret = -TARGET_EOPNOTSUPP;
11465         switch (arg1) {
11466           case TARGET_SSI_IEEE_FP_CONTROL:
11467             {
11468                 uint64_t swcr, fpcr;
11469 
11470                 if (get_user_u64 (swcr, arg2)) {
11471                     return -TARGET_EFAULT;
11472                 }
11473 
11474                 /*
11475                  * The kernel calls swcr_update_status to update the
11476                  * status bits from the fpcr at every point that it
11477                  * could be queried.  Therefore, we store the status
11478                  * bits only in FPCR.
11479                  */
11480                 ((CPUAlphaState *)cpu_env)->swcr
11481                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11482 
11483                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11484                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11485                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11486                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11487                 ret = 0;
11488             }
11489             break;
11490 
11491           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11492             {
11493                 uint64_t exc, fpcr, fex;
11494 
11495                 if (get_user_u64(exc, arg2)) {
11496                     return -TARGET_EFAULT;
11497                 }
11498                 exc &= SWCR_STATUS_MASK;
11499                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11500 
11501                 /* Old exceptions are not signaled.  */
11502                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11503                 fex = exc & ~fex;
11504                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11505                 fex &= ((CPUArchState *)cpu_env)->swcr;
11506 
11507                 /* Update the hardware fpcr.  */
11508                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11509                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11510 
11511                 if (fex) {
11512                     int si_code = TARGET_FPE_FLTUNK;
11513                     target_siginfo_t info;
11514 
11515                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11516                         si_code = TARGET_FPE_FLTUND;
11517                     }
11518                     if (fex & SWCR_TRAP_ENABLE_INE) {
11519                         si_code = TARGET_FPE_FLTRES;
11520                     }
11521                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11522                         si_code = TARGET_FPE_FLTUND;
11523                     }
11524                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11525                         si_code = TARGET_FPE_FLTOVF;
11526                     }
11527                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11528                         si_code = TARGET_FPE_FLTDIV;
11529                     }
11530                     if (fex & SWCR_TRAP_ENABLE_INV) {
11531                         si_code = TARGET_FPE_FLTINV;
11532                     }
11533 
11534                     info.si_signo = SIGFPE;
11535                     info.si_errno = 0;
11536                     info.si_code = si_code;
11537                     info._sifields._sigfault._addr
11538                         = ((CPUArchState *)cpu_env)->pc;
11539                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11540                                  QEMU_SI_FAULT, &info);
11541                 }
11542                 ret = 0;
11543             }
11544             break;
11545 
11546           /* case SSI_NVPAIRS:
11547              -- Used with SSIN_UACPROC to enable unaligned accesses.
11548              case SSI_IEEE_STATE_AT_SIGNAL:
11549              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11550              -- Not implemented in linux kernel
11551           */
11552         }
11553         return ret;
11554 #endif
11555 #ifdef TARGET_NR_osf_sigprocmask
11556     /* Alpha specific.  */
11557     case TARGET_NR_osf_sigprocmask:
11558         {
11559             abi_ulong mask;
11560             int how;
11561             sigset_t set, oldset;
11562 
11563             switch(arg1) {
11564             case TARGET_SIG_BLOCK:
11565                 how = SIG_BLOCK;
11566                 break;
11567             case TARGET_SIG_UNBLOCK:
11568                 how = SIG_UNBLOCK;
11569                 break;
11570             case TARGET_SIG_SETMASK:
11571                 how = SIG_SETMASK;
11572                 break;
11573             default:
11574                 return -TARGET_EINVAL;
11575             }
11576             mask = arg2;
11577             target_to_host_old_sigset(&set, &mask);
11578             ret = do_sigprocmask(how, &set, &oldset);
11579             if (!ret) {
11580                 host_to_target_old_sigset(&mask, &oldset);
11581                 ret = mask;
11582             }
11583         }
11584         return ret;
11585 #endif
11586 
11587 #ifdef TARGET_NR_getgid32
11588     case TARGET_NR_getgid32:
11589         return get_errno(getgid());
11590 #endif
11591 #ifdef TARGET_NR_geteuid32
11592     case TARGET_NR_geteuid32:
11593         return get_errno(geteuid());
11594 #endif
11595 #ifdef TARGET_NR_getegid32
11596     case TARGET_NR_getegid32:
11597         return get_errno(getegid());
11598 #endif
11599 #ifdef TARGET_NR_setreuid32
11600     case TARGET_NR_setreuid32:
11601         return get_errno(setreuid(arg1, arg2));
11602 #endif
11603 #ifdef TARGET_NR_setregid32
11604     case TARGET_NR_setregid32:
11605         return get_errno(setregid(arg1, arg2));
11606 #endif
11607 #ifdef TARGET_NR_getgroups32
11608     case TARGET_NR_getgroups32:
11609         {
11610             int gidsetsize = arg1;
11611             uint32_t *target_grouplist;
11612             gid_t *grouplist;
11613             int i;
11614 
11615             grouplist = alloca(gidsetsize * sizeof(gid_t));
11616             ret = get_errno(getgroups(gidsetsize, grouplist));
11617             if (gidsetsize == 0)
11618                 return ret;
11619             if (!is_error(ret)) {
11620                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11621                 if (!target_grouplist) {
11622                     return -TARGET_EFAULT;
11623                 }
11624                 for(i = 0;i < ret; i++)
11625                     target_grouplist[i] = tswap32(grouplist[i]);
11626                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11627             }
11628         }
11629         return ret;
11630 #endif
11631 #ifdef TARGET_NR_setgroups32
11632     case TARGET_NR_setgroups32:
11633         {
11634             int gidsetsize = arg1;
11635             uint32_t *target_grouplist;
11636             gid_t *grouplist;
11637             int i;
11638 
11639             grouplist = alloca(gidsetsize * sizeof(gid_t));
11640             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11641             if (!target_grouplist) {
11642                 return -TARGET_EFAULT;
11643             }
11644             for(i = 0;i < gidsetsize; i++)
11645                 grouplist[i] = tswap32(target_grouplist[i]);
11646             unlock_user(target_grouplist, arg2, 0);
11647             return get_errno(setgroups(gidsetsize, grouplist));
11648         }
11649 #endif
11650 #ifdef TARGET_NR_fchown32
11651     case TARGET_NR_fchown32:
11652         return get_errno(fchown(arg1, arg2, arg3));
11653 #endif
11654 #ifdef TARGET_NR_setresuid32
11655     case TARGET_NR_setresuid32:
11656         return get_errno(sys_setresuid(arg1, arg2, arg3));
11657 #endif
11658 #ifdef TARGET_NR_getresuid32
11659     case TARGET_NR_getresuid32:
11660         {
11661             uid_t ruid, euid, suid;
11662             ret = get_errno(getresuid(&ruid, &euid, &suid));
11663             if (!is_error(ret)) {
11664                 if (put_user_u32(ruid, arg1)
11665                     || put_user_u32(euid, arg2)
11666                     || put_user_u32(suid, arg3))
11667                     return -TARGET_EFAULT;
11668             }
11669         }
11670         return ret;
11671 #endif
11672 #ifdef TARGET_NR_setresgid32
11673     case TARGET_NR_setresgid32:
11674         return get_errno(sys_setresgid(arg1, arg2, arg3));
11675 #endif
11676 #ifdef TARGET_NR_getresgid32
11677     case TARGET_NR_getresgid32:
11678         {
11679             gid_t rgid, egid, sgid;
11680             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11681             if (!is_error(ret)) {
11682                 if (put_user_u32(rgid, arg1)
11683                     || put_user_u32(egid, arg2)
11684                     || put_user_u32(sgid, arg3))
11685                     return -TARGET_EFAULT;
11686             }
11687         }
11688         return ret;
11689 #endif
11690 #ifdef TARGET_NR_chown32
11691     case TARGET_NR_chown32:
11692         if (!(p = lock_user_string(arg1)))
11693             return -TARGET_EFAULT;
11694         ret = get_errno(chown(p, arg2, arg3));
11695         unlock_user(p, arg1, 0);
11696         return ret;
11697 #endif
11698 #ifdef TARGET_NR_setuid32
11699     case TARGET_NR_setuid32:
11700         return get_errno(sys_setuid(arg1));
11701 #endif
11702 #ifdef TARGET_NR_setgid32
11703     case TARGET_NR_setgid32:
11704         return get_errno(sys_setgid(arg1));
11705 #endif
11706 #ifdef TARGET_NR_setfsuid32
11707     case TARGET_NR_setfsuid32:
11708         return get_errno(setfsuid(arg1));
11709 #endif
11710 #ifdef TARGET_NR_setfsgid32
11711     case TARGET_NR_setfsgid32:
11712         return get_errno(setfsgid(arg1));
11713 #endif
11714 #ifdef TARGET_NR_mincore
11715     case TARGET_NR_mincore:
11716         {
11717             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11718             if (!a) {
11719                 return -TARGET_ENOMEM;
11720             }
11721             p = lock_user_string(arg3);
11722             if (!p) {
11723                 ret = -TARGET_EFAULT;
11724             } else {
11725                 ret = get_errno(mincore(a, arg2, p));
11726                 unlock_user(p, arg3, ret);
11727             }
11728             unlock_user(a, arg1, 0);
11729         }
11730         return ret;
11731 #endif
11732 #ifdef TARGET_NR_arm_fadvise64_64
11733     case TARGET_NR_arm_fadvise64_64:
11734         /* arm_fadvise64_64 looks like fadvise64_64 but
11735          * with different argument order: fd, advice, offset, len
11736          * rather than the usual fd, offset, len, advice.
11737          * Note that offset and len are both 64-bit so appear as
11738          * pairs of 32-bit registers.
11739          */
11740         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11741                             target_offset64(arg5, arg6), arg2);
11742         return -host_to_target_errno(ret);
11743 #endif
11744 
11745 #if TARGET_ABI_BITS == 32
11746 
11747 #ifdef TARGET_NR_fadvise64_64
11748     case TARGET_NR_fadvise64_64:
11749 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11750         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11751         ret = arg2;
11752         arg2 = arg3;
11753         arg3 = arg4;
11754         arg4 = arg5;
11755         arg5 = arg6;
11756         arg6 = ret;
11757 #else
11758         /* 6 args: fd, offset (high, low), len (high, low), advice */
11759         if (regpairs_aligned(cpu_env, num)) {
11760             /* offset is in (3,4), len in (5,6) and advice in 7 */
11761             arg2 = arg3;
11762             arg3 = arg4;
11763             arg4 = arg5;
11764             arg5 = arg6;
11765             arg6 = arg7;
11766         }
11767 #endif
11768         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11769                             target_offset64(arg4, arg5), arg6);
11770         return -host_to_target_errno(ret);
11771 #endif
11772 
11773 #ifdef TARGET_NR_fadvise64
11774     case TARGET_NR_fadvise64:
11775         /* 5 args: fd, offset (high, low), len, advice */
11776         if (regpairs_aligned(cpu_env, num)) {
11777             /* offset is in (3,4), len in 5 and advice in 6 */
11778             arg2 = arg3;
11779             arg3 = arg4;
11780             arg4 = arg5;
11781             arg5 = arg6;
11782         }
11783         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11784         return -host_to_target_errno(ret);
11785 #endif
11786 
11787 #else /* not a 32-bit ABI */
11788 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11789 #ifdef TARGET_NR_fadvise64_64
11790     case TARGET_NR_fadvise64_64:
11791 #endif
11792 #ifdef TARGET_NR_fadvise64
11793     case TARGET_NR_fadvise64:
11794 #endif
11795 #ifdef TARGET_S390X
11796         switch (arg4) {
11797         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11798         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11799         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11800         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11801         default: break;
11802         }
11803 #endif
11804         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11805 #endif
11806 #endif /* end of 64-bit ABI fadvise handling */
11807 
11808 #ifdef TARGET_NR_madvise
11809     case TARGET_NR_madvise:
11810         /* A straight passthrough may not be safe because qemu sometimes
11811            turns private file-backed mappings into anonymous mappings.
11812            This will break MADV_DONTNEED.
11813            This is a hint, so ignoring and returning success is ok.  */
11814         return 0;
11815 #endif
11816 #ifdef TARGET_NR_fcntl64
11817     case TARGET_NR_fcntl64:
11818     {
11819         int cmd;
11820         struct flock64 fl;
11821         from_flock64_fn *copyfrom = copy_from_user_flock64;
11822         to_flock64_fn *copyto = copy_to_user_flock64;
11823 
11824 #ifdef TARGET_ARM
11825         if (!((CPUARMState *)cpu_env)->eabi) {
11826             copyfrom = copy_from_user_oabi_flock64;
11827             copyto = copy_to_user_oabi_flock64;
11828         }
11829 #endif
11830 
11831         cmd = target_to_host_fcntl_cmd(arg2);
11832         if (cmd == -TARGET_EINVAL) {
11833             return cmd;
11834         }
11835 
11836         switch(arg2) {
11837         case TARGET_F_GETLK64:
11838             ret = copyfrom(&fl, arg3);
11839             if (ret) {
11840                 break;
11841             }
11842             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11843             if (ret == 0) {
11844                 ret = copyto(arg3, &fl);
11845             }
11846 	    break;
11847 
11848         case TARGET_F_SETLK64:
11849         case TARGET_F_SETLKW64:
11850             ret = copyfrom(&fl, arg3);
11851             if (ret) {
11852                 break;
11853             }
11854             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11855 	    break;
11856         default:
11857             ret = do_fcntl(arg1, arg2, arg3);
11858             break;
11859         }
11860         return ret;
11861     }
11862 #endif
11863 #ifdef TARGET_NR_cacheflush
11864     case TARGET_NR_cacheflush:
11865         /* self-modifying code is handled automatically, so nothing needed */
11866         return 0;
11867 #endif
11868 #ifdef TARGET_NR_getpagesize
11869     case TARGET_NR_getpagesize:
11870         return TARGET_PAGE_SIZE;
11871 #endif
11872     case TARGET_NR_gettid:
11873         return get_errno(sys_gettid());
11874 #ifdef TARGET_NR_readahead
11875     case TARGET_NR_readahead:
11876 #if TARGET_ABI_BITS == 32
11877         if (regpairs_aligned(cpu_env, num)) {
11878             arg2 = arg3;
11879             arg3 = arg4;
11880             arg4 = arg5;
11881         }
11882         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11883 #else
11884         ret = get_errno(readahead(arg1, arg2, arg3));
11885 #endif
11886         return ret;
11887 #endif
11888 #ifdef CONFIG_ATTR
11889 #ifdef TARGET_NR_setxattr
11890     case TARGET_NR_listxattr:
11891     case TARGET_NR_llistxattr:
11892     {
11893         void *p, *b = 0;
11894         if (arg2) {
11895             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11896             if (!b) {
11897                 return -TARGET_EFAULT;
11898             }
11899         }
11900         p = lock_user_string(arg1);
11901         if (p) {
11902             if (num == TARGET_NR_listxattr) {
11903                 ret = get_errno(listxattr(p, b, arg3));
11904             } else {
11905                 ret = get_errno(llistxattr(p, b, arg3));
11906             }
11907         } else {
11908             ret = -TARGET_EFAULT;
11909         }
11910         unlock_user(p, arg1, 0);
11911         unlock_user(b, arg2, arg3);
11912         return ret;
11913     }
11914     case TARGET_NR_flistxattr:
11915     {
11916         void *b = 0;
11917         if (arg2) {
11918             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11919             if (!b) {
11920                 return -TARGET_EFAULT;
11921             }
11922         }
11923         ret = get_errno(flistxattr(arg1, b, arg3));
11924         unlock_user(b, arg2, arg3);
11925         return ret;
11926     }
11927     case TARGET_NR_setxattr:
11928     case TARGET_NR_lsetxattr:
11929         {
11930             void *p, *n, *v = 0;
11931             if (arg3) {
11932                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11933                 if (!v) {
11934                     return -TARGET_EFAULT;
11935                 }
11936             }
11937             p = lock_user_string(arg1);
11938             n = lock_user_string(arg2);
11939             if (p && n) {
11940                 if (num == TARGET_NR_setxattr) {
11941                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11942                 } else {
11943                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11944                 }
11945             } else {
11946                 ret = -TARGET_EFAULT;
11947             }
11948             unlock_user(p, arg1, 0);
11949             unlock_user(n, arg2, 0);
11950             unlock_user(v, arg3, 0);
11951         }
11952         return ret;
11953     case TARGET_NR_fsetxattr:
11954         {
11955             void *n, *v = 0;
11956             if (arg3) {
11957                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11958                 if (!v) {
11959                     return -TARGET_EFAULT;
11960                 }
11961             }
11962             n = lock_user_string(arg2);
11963             if (n) {
11964                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11965             } else {
11966                 ret = -TARGET_EFAULT;
11967             }
11968             unlock_user(n, arg2, 0);
11969             unlock_user(v, arg3, 0);
11970         }
11971         return ret;
11972     case TARGET_NR_getxattr:
11973     case TARGET_NR_lgetxattr:
11974         {
11975             void *p, *n, *v = 0;
11976             if (arg3) {
11977                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11978                 if (!v) {
11979                     return -TARGET_EFAULT;
11980                 }
11981             }
11982             p = lock_user_string(arg1);
11983             n = lock_user_string(arg2);
11984             if (p && n) {
11985                 if (num == TARGET_NR_getxattr) {
11986                     ret = get_errno(getxattr(p, n, v, arg4));
11987                 } else {
11988                     ret = get_errno(lgetxattr(p, n, v, arg4));
11989                 }
11990             } else {
11991                 ret = -TARGET_EFAULT;
11992             }
11993             unlock_user(p, arg1, 0);
11994             unlock_user(n, arg2, 0);
11995             unlock_user(v, arg3, arg4);
11996         }
11997         return ret;
11998     case TARGET_NR_fgetxattr:
11999         {
12000             void *n, *v = 0;
12001             if (arg3) {
12002                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12003                 if (!v) {
12004                     return -TARGET_EFAULT;
12005                 }
12006             }
12007             n = lock_user_string(arg2);
12008             if (n) {
12009                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12010             } else {
12011                 ret = -TARGET_EFAULT;
12012             }
12013             unlock_user(n, arg2, 0);
12014             unlock_user(v, arg3, arg4);
12015         }
12016         return ret;
12017     case TARGET_NR_removexattr:
12018     case TARGET_NR_lremovexattr:
12019         {
12020             void *p, *n;
12021             p = lock_user_string(arg1);
12022             n = lock_user_string(arg2);
12023             if (p && n) {
12024                 if (num == TARGET_NR_removexattr) {
12025                     ret = get_errno(removexattr(p, n));
12026                 } else {
12027                     ret = get_errno(lremovexattr(p, n));
12028                 }
12029             } else {
12030                 ret = -TARGET_EFAULT;
12031             }
12032             unlock_user(p, arg1, 0);
12033             unlock_user(n, arg2, 0);
12034         }
12035         return ret;
12036     case TARGET_NR_fremovexattr:
12037         {
12038             void *n;
12039             n = lock_user_string(arg2);
12040             if (n) {
12041                 ret = get_errno(fremovexattr(arg1, n));
12042             } else {
12043                 ret = -TARGET_EFAULT;
12044             }
12045             unlock_user(n, arg2, 0);
12046         }
12047         return ret;
12048 #endif
12049 #endif /* CONFIG_ATTR */
12050 #ifdef TARGET_NR_set_thread_area
12051     case TARGET_NR_set_thread_area:
12052 #if defined(TARGET_MIPS)
12053       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12054       return 0;
12055 #elif defined(TARGET_CRIS)
12056       if (arg1 & 0xff)
12057           ret = -TARGET_EINVAL;
12058       else {
12059           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12060           ret = 0;
12061       }
12062       return ret;
12063 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12064       return do_set_thread_area(cpu_env, arg1);
12065 #elif defined(TARGET_M68K)
12066       {
12067           TaskState *ts = cpu->opaque;
12068           ts->tp_value = arg1;
12069           return 0;
12070       }
12071 #else
12072       return -TARGET_ENOSYS;
12073 #endif
12074 #endif
12075 #ifdef TARGET_NR_get_thread_area
12076     case TARGET_NR_get_thread_area:
12077 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12078         return do_get_thread_area(cpu_env, arg1);
12079 #elif defined(TARGET_M68K)
12080         {
12081             TaskState *ts = cpu->opaque;
12082             return ts->tp_value;
12083         }
12084 #else
12085         return -TARGET_ENOSYS;
12086 #endif
12087 #endif
12088 #ifdef TARGET_NR_getdomainname
12089     case TARGET_NR_getdomainname:
12090         return -TARGET_ENOSYS;
12091 #endif
12092 
12093 #ifdef TARGET_NR_clock_settime
12094     case TARGET_NR_clock_settime:
12095     {
12096         struct timespec ts;
12097 
12098         ret = target_to_host_timespec(&ts, arg2);
12099         if (!is_error(ret)) {
12100             ret = get_errno(clock_settime(arg1, &ts));
12101         }
12102         return ret;
12103     }
12104 #endif
12105 #ifdef TARGET_NR_clock_settime64
12106     case TARGET_NR_clock_settime64:
12107     {
12108         struct timespec ts;
12109 
12110         ret = target_to_host_timespec64(&ts, arg2);
12111         if (!is_error(ret)) {
12112             ret = get_errno(clock_settime(arg1, &ts));
12113         }
12114         return ret;
12115     }
12116 #endif
12117 #ifdef TARGET_NR_clock_gettime
12118     case TARGET_NR_clock_gettime:
12119     {
12120         struct timespec ts;
12121         ret = get_errno(clock_gettime(arg1, &ts));
12122         if (!is_error(ret)) {
12123             ret = host_to_target_timespec(arg2, &ts);
12124         }
12125         return ret;
12126     }
12127 #endif
12128 #ifdef TARGET_NR_clock_gettime64
12129     case TARGET_NR_clock_gettime64:
12130     {
12131         struct timespec ts;
12132         ret = get_errno(clock_gettime(arg1, &ts));
12133         if (!is_error(ret)) {
12134             ret = host_to_target_timespec64(arg2, &ts);
12135         }
12136         return ret;
12137     }
12138 #endif
12139 #ifdef TARGET_NR_clock_getres
12140     case TARGET_NR_clock_getres:
12141     {
12142         struct timespec ts;
12143         ret = get_errno(clock_getres(arg1, &ts));
12144         if (!is_error(ret)) {
12145             host_to_target_timespec(arg2, &ts);
12146         }
12147         return ret;
12148     }
12149 #endif
12150 #ifdef TARGET_NR_clock_getres_time64
12151     case TARGET_NR_clock_getres_time64:
12152     {
12153         struct timespec ts;
12154         ret = get_errno(clock_getres(arg1, &ts));
12155         if (!is_error(ret)) {
12156             host_to_target_timespec64(arg2, &ts);
12157         }
12158         return ret;
12159     }
12160 #endif
12161 #ifdef TARGET_NR_clock_nanosleep
12162     case TARGET_NR_clock_nanosleep:
12163     {
12164         struct timespec ts;
12165         if (target_to_host_timespec(&ts, arg3)) {
12166             return -TARGET_EFAULT;
12167         }
12168         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12169                                              &ts, arg4 ? &ts : NULL));
12170         /*
12171          * if the call is interrupted by a signal handler, it fails
12172          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12173          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12174          */
12175         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12176             host_to_target_timespec(arg4, &ts)) {
12177               return -TARGET_EFAULT;
12178         }
12179 
12180         return ret;
12181     }
12182 #endif
12183 #ifdef TARGET_NR_clock_nanosleep_time64
12184     case TARGET_NR_clock_nanosleep_time64:
12185     {
12186         struct timespec ts;
12187 
12188         if (target_to_host_timespec64(&ts, arg3)) {
12189             return -TARGET_EFAULT;
12190         }
12191 
12192         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12193                                              &ts, arg4 ? &ts : NULL));
12194 
12195         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12196             host_to_target_timespec64(arg4, &ts)) {
12197             return -TARGET_EFAULT;
12198         }
12199         return ret;
12200     }
12201 #endif
12202 
12203 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12204     case TARGET_NR_set_tid_address:
12205         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12206 #endif
12207 
12208     case TARGET_NR_tkill:
12209         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12210 
12211     case TARGET_NR_tgkill:
12212         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12213                          target_to_host_signal(arg3)));
12214 
12215 #ifdef TARGET_NR_set_robust_list
12216     case TARGET_NR_set_robust_list:
12217     case TARGET_NR_get_robust_list:
12218         /* The ABI for supporting robust futexes has userspace pass
12219          * the kernel a pointer to a linked list which is updated by
12220          * userspace after the syscall; the list is walked by the kernel
12221          * when the thread exits. Since the linked list in QEMU guest
12222          * memory isn't a valid linked list for the host and we have
12223          * no way to reliably intercept the thread-death event, we can't
12224          * support these. Silently return ENOSYS so that guest userspace
12225          * falls back to a non-robust futex implementation (which should
12226          * be OK except in the corner case of the guest crashing while
12227          * holding a mutex that is shared with another process via
12228          * shared memory).
12229          */
12230         return -TARGET_ENOSYS;
12231 #endif
12232 
12233 #if defined(TARGET_NR_utimensat)
12234     case TARGET_NR_utimensat:
12235         {
12236             struct timespec *tsp, ts[2];
12237             if (!arg3) {
12238                 tsp = NULL;
12239             } else {
12240                 if (target_to_host_timespec(ts, arg3)) {
12241                     return -TARGET_EFAULT;
12242                 }
12243                 if (target_to_host_timespec(ts + 1, arg3 +
12244                                             sizeof(struct target_timespec))) {
12245                     return -TARGET_EFAULT;
12246                 }
12247                 tsp = ts;
12248             }
12249             if (!arg2)
12250                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12251             else {
12252                 if (!(p = lock_user_string(arg2))) {
12253                     return -TARGET_EFAULT;
12254                 }
12255                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12256                 unlock_user(p, arg2, 0);
12257             }
12258         }
12259         return ret;
12260 #endif
12261 #ifdef TARGET_NR_utimensat_time64
12262     case TARGET_NR_utimensat_time64:
12263         {
12264             struct timespec *tsp, ts[2];
12265             if (!arg3) {
12266                 tsp = NULL;
12267             } else {
12268                 if (target_to_host_timespec64(ts, arg3)) {
12269                     return -TARGET_EFAULT;
12270                 }
12271                 if (target_to_host_timespec64(ts + 1, arg3 +
12272                                      sizeof(struct target__kernel_timespec))) {
12273                     return -TARGET_EFAULT;
12274                 }
12275                 tsp = ts;
12276             }
12277             if (!arg2)
12278                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12279             else {
12280                 p = lock_user_string(arg2);
12281                 if (!p) {
12282                     return -TARGET_EFAULT;
12283                 }
12284                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12285                 unlock_user(p, arg2, 0);
12286             }
12287         }
12288         return ret;
12289 #endif
12290 #ifdef TARGET_NR_futex
12291     case TARGET_NR_futex:
12292         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12293 #endif
12294 #ifdef TARGET_NR_futex_time64
12295     case TARGET_NR_futex_time64:
12296         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12297 #endif
12298 #ifdef CONFIG_INOTIFY
12299 #if defined(TARGET_NR_inotify_init)
12300     case TARGET_NR_inotify_init:
12301         ret = get_errno(inotify_init());
12302         if (ret >= 0) {
12303             fd_trans_register(ret, &target_inotify_trans);
12304         }
12305         return ret;
12306 #endif
12307 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12308     case TARGET_NR_inotify_init1:
12309         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12310                                           fcntl_flags_tbl)));
12311         if (ret >= 0) {
12312             fd_trans_register(ret, &target_inotify_trans);
12313         }
12314         return ret;
12315 #endif
12316 #if defined(TARGET_NR_inotify_add_watch)
12317     case TARGET_NR_inotify_add_watch:
12318         p = lock_user_string(arg2);
12319         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12320         unlock_user(p, arg2, 0);
12321         return ret;
12322 #endif
12323 #if defined(TARGET_NR_inotify_rm_watch)
12324     case TARGET_NR_inotify_rm_watch:
12325         return get_errno(inotify_rm_watch(arg1, arg2));
12326 #endif
12327 #endif
12328 
12329 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12330     case TARGET_NR_mq_open:
12331         {
12332             struct mq_attr posix_mq_attr;
12333             struct mq_attr *pposix_mq_attr;
12334             int host_flags;
12335 
12336             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12337             pposix_mq_attr = NULL;
12338             if (arg4) {
12339                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12340                     return -TARGET_EFAULT;
12341                 }
12342                 pposix_mq_attr = &posix_mq_attr;
12343             }
12344             p = lock_user_string(arg1 - 1);
12345             if (!p) {
12346                 return -TARGET_EFAULT;
12347             }
12348             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12349             unlock_user (p, arg1, 0);
12350         }
12351         return ret;
12352 
12353     case TARGET_NR_mq_unlink:
12354         p = lock_user_string(arg1 - 1);
12355         if (!p) {
12356             return -TARGET_EFAULT;
12357         }
12358         ret = get_errno(mq_unlink(p));
12359         unlock_user (p, arg1, 0);
12360         return ret;
12361 
12362 #ifdef TARGET_NR_mq_timedsend
12363     case TARGET_NR_mq_timedsend:
12364         {
12365             struct timespec ts;
12366 
12367             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12368             if (arg5 != 0) {
12369                 if (target_to_host_timespec(&ts, arg5)) {
12370                     return -TARGET_EFAULT;
12371                 }
12372                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12373                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12374                     return -TARGET_EFAULT;
12375                 }
12376             } else {
12377                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12378             }
12379             unlock_user (p, arg2, arg3);
12380         }
12381         return ret;
12382 #endif
12383 #ifdef TARGET_NR_mq_timedsend_time64
12384     case TARGET_NR_mq_timedsend_time64:
12385         {
12386             struct timespec ts;
12387 
12388             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12389             if (arg5 != 0) {
12390                 if (target_to_host_timespec64(&ts, arg5)) {
12391                     return -TARGET_EFAULT;
12392                 }
12393                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12394                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12395                     return -TARGET_EFAULT;
12396                 }
12397             } else {
12398                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12399             }
12400             unlock_user(p, arg2, arg3);
12401         }
12402         return ret;
12403 #endif
12404 
12405 #ifdef TARGET_NR_mq_timedreceive
12406     case TARGET_NR_mq_timedreceive:
12407         {
12408             struct timespec ts;
12409             unsigned int prio;
12410 
12411             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12412             if (arg5 != 0) {
12413                 if (target_to_host_timespec(&ts, arg5)) {
12414                     return -TARGET_EFAULT;
12415                 }
12416                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12417                                                      &prio, &ts));
12418                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12419                     return -TARGET_EFAULT;
12420                 }
12421             } else {
12422                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12423                                                      &prio, NULL));
12424             }
12425             unlock_user (p, arg2, arg3);
12426             if (arg4 != 0)
12427                 put_user_u32(prio, arg4);
12428         }
12429         return ret;
12430 #endif
12431 #ifdef TARGET_NR_mq_timedreceive_time64
12432     case TARGET_NR_mq_timedreceive_time64:
12433         {
12434             struct timespec ts;
12435             unsigned int prio;
12436 
12437             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12438             if (arg5 != 0) {
12439                 if (target_to_host_timespec64(&ts, arg5)) {
12440                     return -TARGET_EFAULT;
12441                 }
12442                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12443                                                      &prio, &ts));
12444                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12445                     return -TARGET_EFAULT;
12446                 }
12447             } else {
12448                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12449                                                      &prio, NULL));
12450             }
12451             unlock_user(p, arg2, arg3);
12452             if (arg4 != 0) {
12453                 put_user_u32(prio, arg4);
12454             }
12455         }
12456         return ret;
12457 #endif
12458 
12459     /* Not implemented for now... */
12460 /*     case TARGET_NR_mq_notify: */
12461 /*         break; */
12462 
12463     case TARGET_NR_mq_getsetattr:
12464         {
12465             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12466             ret = 0;
12467             if (arg2 != 0) {
12468                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12469                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12470                                            &posix_mq_attr_out));
12471             } else if (arg3 != 0) {
12472                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12473             }
12474             if (ret == 0 && arg3 != 0) {
12475                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12476             }
12477         }
12478         return ret;
12479 #endif
12480 
12481 #ifdef CONFIG_SPLICE
12482 #ifdef TARGET_NR_tee
12483     case TARGET_NR_tee:
12484         {
12485             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12486         }
12487         return ret;
12488 #endif
12489 #ifdef TARGET_NR_splice
12490     case TARGET_NR_splice:
12491         {
12492             loff_t loff_in, loff_out;
12493             loff_t *ploff_in = NULL, *ploff_out = NULL;
12494             if (arg2) {
12495                 if (get_user_u64(loff_in, arg2)) {
12496                     return -TARGET_EFAULT;
12497                 }
12498                 ploff_in = &loff_in;
12499             }
12500             if (arg4) {
12501                 if (get_user_u64(loff_out, arg4)) {
12502                     return -TARGET_EFAULT;
12503                 }
12504                 ploff_out = &loff_out;
12505             }
12506             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12507             if (arg2) {
12508                 if (put_user_u64(loff_in, arg2)) {
12509                     return -TARGET_EFAULT;
12510                 }
12511             }
12512             if (arg4) {
12513                 if (put_user_u64(loff_out, arg4)) {
12514                     return -TARGET_EFAULT;
12515                 }
12516             }
12517         }
12518         return ret;
12519 #endif
12520 #ifdef TARGET_NR_vmsplice
12521 	case TARGET_NR_vmsplice:
12522         {
12523             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12524             if (vec != NULL) {
12525                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12526                 unlock_iovec(vec, arg2, arg3, 0);
12527             } else {
12528                 ret = -host_to_target_errno(errno);
12529             }
12530         }
12531         return ret;
12532 #endif
12533 #endif /* CONFIG_SPLICE */
12534 #ifdef CONFIG_EVENTFD
12535 #if defined(TARGET_NR_eventfd)
12536     case TARGET_NR_eventfd:
12537         ret = get_errno(eventfd(arg1, 0));
12538         if (ret >= 0) {
12539             fd_trans_register(ret, &target_eventfd_trans);
12540         }
12541         return ret;
12542 #endif
12543 #if defined(TARGET_NR_eventfd2)
12544     case TARGET_NR_eventfd2:
12545     {
12546         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12547         if (arg2 & TARGET_O_NONBLOCK) {
12548             host_flags |= O_NONBLOCK;
12549         }
12550         if (arg2 & TARGET_O_CLOEXEC) {
12551             host_flags |= O_CLOEXEC;
12552         }
12553         ret = get_errno(eventfd(arg1, host_flags));
12554         if (ret >= 0) {
12555             fd_trans_register(ret, &target_eventfd_trans);
12556         }
12557         return ret;
12558     }
12559 #endif
12560 #endif /* CONFIG_EVENTFD  */
12561 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12562     case TARGET_NR_fallocate:
12563 #if TARGET_ABI_BITS == 32
12564         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12565                                   target_offset64(arg5, arg6)));
12566 #else
12567         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12568 #endif
12569         return ret;
12570 #endif
12571 #if defined(CONFIG_SYNC_FILE_RANGE)
12572 #if defined(TARGET_NR_sync_file_range)
12573     case TARGET_NR_sync_file_range:
12574 #if TARGET_ABI_BITS == 32
12575 #if defined(TARGET_MIPS)
12576         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12577                                         target_offset64(arg5, arg6), arg7));
12578 #else
12579         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12580                                         target_offset64(arg4, arg5), arg6));
12581 #endif /* !TARGET_MIPS */
12582 #else
12583         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12584 #endif
12585         return ret;
12586 #endif
12587 #if defined(TARGET_NR_sync_file_range2) || \
12588     defined(TARGET_NR_arm_sync_file_range)
12589 #if defined(TARGET_NR_sync_file_range2)
12590     case TARGET_NR_sync_file_range2:
12591 #endif
12592 #if defined(TARGET_NR_arm_sync_file_range)
12593     case TARGET_NR_arm_sync_file_range:
12594 #endif
12595         /* This is like sync_file_range but the arguments are reordered */
12596 #if TARGET_ABI_BITS == 32
12597         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12598                                         target_offset64(arg5, arg6), arg2));
12599 #else
12600         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12601 #endif
12602         return ret;
12603 #endif
12604 #endif
12605 #if defined(TARGET_NR_signalfd4)
12606     case TARGET_NR_signalfd4:
12607         return do_signalfd4(arg1, arg2, arg4);
12608 #endif
12609 #if defined(TARGET_NR_signalfd)
12610     case TARGET_NR_signalfd:
12611         return do_signalfd4(arg1, arg2, 0);
12612 #endif
12613 #if defined(CONFIG_EPOLL)
12614 #if defined(TARGET_NR_epoll_create)
12615     case TARGET_NR_epoll_create:
12616         return get_errno(epoll_create(arg1));
12617 #endif
12618 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12619     case TARGET_NR_epoll_create1:
12620         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12621 #endif
12622 #if defined(TARGET_NR_epoll_ctl)
12623     case TARGET_NR_epoll_ctl:
12624     {
12625         struct epoll_event ep;
12626         struct epoll_event *epp = 0;
12627         if (arg4) {
12628             if (arg2 != EPOLL_CTL_DEL) {
12629                 struct target_epoll_event *target_ep;
12630                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12631                     return -TARGET_EFAULT;
12632                 }
12633                 ep.events = tswap32(target_ep->events);
12634                 /*
12635                  * The epoll_data_t union is just opaque data to the kernel,
12636                  * so we transfer all 64 bits across and need not worry what
12637                  * actual data type it is.
12638                  */
12639                 ep.data.u64 = tswap64(target_ep->data.u64);
12640                 unlock_user_struct(target_ep, arg4, 0);
12641             }
12642             /*
12643              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12644              * non-null pointer, even though this argument is ignored.
12645              *
12646              */
12647             epp = &ep;
12648         }
12649         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12650     }
12651 #endif
12652 
12653 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12654 #if defined(TARGET_NR_epoll_wait)
12655     case TARGET_NR_epoll_wait:
12656 #endif
12657 #if defined(TARGET_NR_epoll_pwait)
12658     case TARGET_NR_epoll_pwait:
12659 #endif
12660     {
12661         struct target_epoll_event *target_ep;
12662         struct epoll_event *ep;
12663         int epfd = arg1;
12664         int maxevents = arg3;
12665         int timeout = arg4;
12666 
12667         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12668             return -TARGET_EINVAL;
12669         }
12670 
12671         target_ep = lock_user(VERIFY_WRITE, arg2,
12672                               maxevents * sizeof(struct target_epoll_event), 1);
12673         if (!target_ep) {
12674             return -TARGET_EFAULT;
12675         }
12676 
12677         ep = g_try_new(struct epoll_event, maxevents);
12678         if (!ep) {
12679             unlock_user(target_ep, arg2, 0);
12680             return -TARGET_ENOMEM;
12681         }
12682 
12683         switch (num) {
12684 #if defined(TARGET_NR_epoll_pwait)
12685         case TARGET_NR_epoll_pwait:
12686         {
12687             sigset_t *set = NULL;
12688 
12689             if (arg5) {
12690                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12691                 if (ret != 0) {
12692                     break;
12693                 }
12694             }
12695 
12696             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12697                                              set, SIGSET_T_SIZE));
12698 
12699             if (set) {
12700                 finish_sigsuspend_mask(ret);
12701             }
12702             break;
12703         }
12704 #endif
12705 #if defined(TARGET_NR_epoll_wait)
12706         case TARGET_NR_epoll_wait:
12707             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12708                                              NULL, 0));
12709             break;
12710 #endif
12711         default:
12712             ret = -TARGET_ENOSYS;
12713         }
12714         if (!is_error(ret)) {
12715             int i;
12716             for (i = 0; i < ret; i++) {
12717                 target_ep[i].events = tswap32(ep[i].events);
12718                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12719             }
12720             unlock_user(target_ep, arg2,
12721                         ret * sizeof(struct target_epoll_event));
12722         } else {
12723             unlock_user(target_ep, arg2, 0);
12724         }
12725         g_free(ep);
12726         return ret;
12727     }
12728 #endif
12729 #endif
12730 #ifdef TARGET_NR_prlimit64
12731     case TARGET_NR_prlimit64:
12732     {
12733         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12734         struct target_rlimit64 *target_rnew, *target_rold;
12735         struct host_rlimit64 rnew, rold, *rnewp = 0;
12736         int resource = target_to_host_resource(arg2);
12737 
12738         if (arg3 && (resource != RLIMIT_AS &&
12739                      resource != RLIMIT_DATA &&
12740                      resource != RLIMIT_STACK)) {
12741             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12742                 return -TARGET_EFAULT;
12743             }
12744             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12745             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12746             unlock_user_struct(target_rnew, arg3, 0);
12747             rnewp = &rnew;
12748         }
12749 
12750         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12751         if (!is_error(ret) && arg4) {
12752             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12753                 return -TARGET_EFAULT;
12754             }
12755             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12756             target_rold->rlim_max = tswap64(rold.rlim_max);
12757             unlock_user_struct(target_rold, arg4, 1);
12758         }
12759         return ret;
12760     }
12761 #endif
12762 #ifdef TARGET_NR_gethostname
12763     case TARGET_NR_gethostname:
12764     {
12765         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12766         if (name) {
12767             ret = get_errno(gethostname(name, arg2));
12768             unlock_user(name, arg1, arg2);
12769         } else {
12770             ret = -TARGET_EFAULT;
12771         }
12772         return ret;
12773     }
12774 #endif
12775 #ifdef TARGET_NR_atomic_cmpxchg_32
12776     case TARGET_NR_atomic_cmpxchg_32:
12777     {
12778         /* should use start_exclusive from main.c */
12779         abi_ulong mem_value;
12780         if (get_user_u32(mem_value, arg6)) {
12781             target_siginfo_t info;
12782             info.si_signo = SIGSEGV;
12783             info.si_errno = 0;
12784             info.si_code = TARGET_SEGV_MAPERR;
12785             info._sifields._sigfault._addr = arg6;
12786             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12787                          QEMU_SI_FAULT, &info);
12788             ret = 0xdeadbeef;
12789 
12790         }
12791         if (mem_value == arg2)
12792             put_user_u32(arg1, arg6);
12793         return mem_value;
12794     }
12795 #endif
12796 #ifdef TARGET_NR_atomic_barrier
12797     case TARGET_NR_atomic_barrier:
12798         /* Like the kernel implementation and the
12799            qemu arm barrier, no-op this? */
12800         return 0;
12801 #endif
12802 
12803 #ifdef TARGET_NR_timer_create
12804     case TARGET_NR_timer_create:
12805     {
12806         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12807 
12808         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12809 
12810         int clkid = arg1;
12811         int timer_index = next_free_host_timer();
12812 
12813         if (timer_index < 0) {
12814             ret = -TARGET_EAGAIN;
12815         } else {
12816             timer_t *phtimer = g_posix_timers  + timer_index;
12817 
12818             if (arg2) {
12819                 phost_sevp = &host_sevp;
12820                 ret = target_to_host_sigevent(phost_sevp, arg2);
12821                 if (ret != 0) {
12822                     return ret;
12823                 }
12824             }
12825 
12826             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12827             if (ret) {
12828                 phtimer = NULL;
12829             } else {
12830                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12831                     return -TARGET_EFAULT;
12832                 }
12833             }
12834         }
12835         return ret;
12836     }
12837 #endif
12838 
12839 #ifdef TARGET_NR_timer_settime
12840     case TARGET_NR_timer_settime:
12841     {
12842         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12843          * struct itimerspec * old_value */
12844         target_timer_t timerid = get_timer_id(arg1);
12845 
12846         if (timerid < 0) {
12847             ret = timerid;
12848         } else if (arg3 == 0) {
12849             ret = -TARGET_EINVAL;
12850         } else {
12851             timer_t htimer = g_posix_timers[timerid];
12852             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12853 
12854             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12855                 return -TARGET_EFAULT;
12856             }
12857             ret = get_errno(
12858                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12859             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12860                 return -TARGET_EFAULT;
12861             }
12862         }
12863         return ret;
12864     }
12865 #endif
12866 
12867 #ifdef TARGET_NR_timer_settime64
12868     case TARGET_NR_timer_settime64:
12869     {
12870         target_timer_t timerid = get_timer_id(arg1);
12871 
12872         if (timerid < 0) {
12873             ret = timerid;
12874         } else if (arg3 == 0) {
12875             ret = -TARGET_EINVAL;
12876         } else {
12877             timer_t htimer = g_posix_timers[timerid];
12878             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12879 
12880             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12881                 return -TARGET_EFAULT;
12882             }
12883             ret = get_errno(
12884                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12885             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12886                 return -TARGET_EFAULT;
12887             }
12888         }
12889         return ret;
12890     }
12891 #endif
12892 
12893 #ifdef TARGET_NR_timer_gettime
12894     case TARGET_NR_timer_gettime:
12895     {
12896         /* args: timer_t timerid, struct itimerspec *curr_value */
12897         target_timer_t timerid = get_timer_id(arg1);
12898 
12899         if (timerid < 0) {
12900             ret = timerid;
12901         } else if (!arg2) {
12902             ret = -TARGET_EFAULT;
12903         } else {
12904             timer_t htimer = g_posix_timers[timerid];
12905             struct itimerspec hspec;
12906             ret = get_errno(timer_gettime(htimer, &hspec));
12907 
12908             if (host_to_target_itimerspec(arg2, &hspec)) {
12909                 ret = -TARGET_EFAULT;
12910             }
12911         }
12912         return ret;
12913     }
12914 #endif
12915 
12916 #ifdef TARGET_NR_timer_gettime64
12917     case TARGET_NR_timer_gettime64:
12918     {
12919         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12920         target_timer_t timerid = get_timer_id(arg1);
12921 
12922         if (timerid < 0) {
12923             ret = timerid;
12924         } else if (!arg2) {
12925             ret = -TARGET_EFAULT;
12926         } else {
12927             timer_t htimer = g_posix_timers[timerid];
12928             struct itimerspec hspec;
12929             ret = get_errno(timer_gettime(htimer, &hspec));
12930 
12931             if (host_to_target_itimerspec64(arg2, &hspec)) {
12932                 ret = -TARGET_EFAULT;
12933             }
12934         }
12935         return ret;
12936     }
12937 #endif
12938 
12939 #ifdef TARGET_NR_timer_getoverrun
12940     case TARGET_NR_timer_getoverrun:
12941     {
12942         /* args: timer_t timerid */
12943         target_timer_t timerid = get_timer_id(arg1);
12944 
12945         if (timerid < 0) {
12946             ret = timerid;
12947         } else {
12948             timer_t htimer = g_posix_timers[timerid];
12949             ret = get_errno(timer_getoverrun(htimer));
12950         }
12951         return ret;
12952     }
12953 #endif
12954 
12955 #ifdef TARGET_NR_timer_delete
12956     case TARGET_NR_timer_delete:
12957     {
12958         /* args: timer_t timerid */
12959         target_timer_t timerid = get_timer_id(arg1);
12960 
12961         if (timerid < 0) {
12962             ret = timerid;
12963         } else {
12964             timer_t htimer = g_posix_timers[timerid];
12965             ret = get_errno(timer_delete(htimer));
12966             g_posix_timers[timerid] = 0;
12967         }
12968         return ret;
12969     }
12970 #endif
12971 
12972 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12973     case TARGET_NR_timerfd_create:
12974         return get_errno(timerfd_create(arg1,
12975                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12976 #endif
12977 
12978 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12979     case TARGET_NR_timerfd_gettime:
12980         {
12981             struct itimerspec its_curr;
12982 
12983             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12984 
12985             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12986                 return -TARGET_EFAULT;
12987             }
12988         }
12989         return ret;
12990 #endif
12991 
12992 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12993     case TARGET_NR_timerfd_gettime64:
12994         {
12995             struct itimerspec its_curr;
12996 
12997             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12998 
12999             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13000                 return -TARGET_EFAULT;
13001             }
13002         }
13003         return ret;
13004 #endif
13005 
13006 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13007     case TARGET_NR_timerfd_settime:
13008         {
13009             struct itimerspec its_new, its_old, *p_new;
13010 
13011             if (arg3) {
13012                 if (target_to_host_itimerspec(&its_new, arg3)) {
13013                     return -TARGET_EFAULT;
13014                 }
13015                 p_new = &its_new;
13016             } else {
13017                 p_new = NULL;
13018             }
13019 
13020             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13021 
13022             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13023                 return -TARGET_EFAULT;
13024             }
13025         }
13026         return ret;
13027 #endif
13028 
13029 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13030     case TARGET_NR_timerfd_settime64:
13031         {
13032             struct itimerspec its_new, its_old, *p_new;
13033 
13034             if (arg3) {
13035                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13036                     return -TARGET_EFAULT;
13037                 }
13038                 p_new = &its_new;
13039             } else {
13040                 p_new = NULL;
13041             }
13042 
13043             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13044 
13045             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13046                 return -TARGET_EFAULT;
13047             }
13048         }
13049         return ret;
13050 #endif
13051 
13052 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13053     case TARGET_NR_ioprio_get:
13054         return get_errno(ioprio_get(arg1, arg2));
13055 #endif
13056 
13057 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13058     case TARGET_NR_ioprio_set:
13059         return get_errno(ioprio_set(arg1, arg2, arg3));
13060 #endif
13061 
13062 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13063     case TARGET_NR_setns:
13064         return get_errno(setns(arg1, arg2));
13065 #endif
13066 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13067     case TARGET_NR_unshare:
13068         return get_errno(unshare(arg1));
13069 #endif
13070 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13071     case TARGET_NR_kcmp:
13072         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13073 #endif
13074 #ifdef TARGET_NR_swapcontext
13075     case TARGET_NR_swapcontext:
13076         /* PowerPC specific.  */
13077         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13078 #endif
13079 #ifdef TARGET_NR_memfd_create
13080     case TARGET_NR_memfd_create:
13081         p = lock_user_string(arg1);
13082         if (!p) {
13083             return -TARGET_EFAULT;
13084         }
13085         ret = get_errno(memfd_create(p, arg2));
13086         fd_trans_unregister(ret);
13087         unlock_user(p, arg1, 0);
13088         return ret;
13089 #endif
13090 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13091     case TARGET_NR_membarrier:
13092         return get_errno(membarrier(arg1, arg2));
13093 #endif
13094 
13095 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13096     case TARGET_NR_copy_file_range:
13097         {
13098             loff_t inoff, outoff;
13099             loff_t *pinoff = NULL, *poutoff = NULL;
13100 
13101             if (arg2) {
13102                 if (get_user_u64(inoff, arg2)) {
13103                     return -TARGET_EFAULT;
13104                 }
13105                 pinoff = &inoff;
13106             }
13107             if (arg4) {
13108                 if (get_user_u64(outoff, arg4)) {
13109                     return -TARGET_EFAULT;
13110                 }
13111                 poutoff = &outoff;
13112             }
13113             /* Do not sign-extend the count parameter. */
13114             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13115                                                  (abi_ulong)arg5, arg6));
13116             if (!is_error(ret) && ret > 0) {
13117                 if (arg2) {
13118                     if (put_user_u64(inoff, arg2)) {
13119                         return -TARGET_EFAULT;
13120                     }
13121                 }
13122                 if (arg4) {
13123                     if (put_user_u64(outoff, arg4)) {
13124                         return -TARGET_EFAULT;
13125                     }
13126                 }
13127             }
13128         }
13129         return ret;
13130 #endif
13131 
13132 #if defined(TARGET_NR_pivot_root)
13133     case TARGET_NR_pivot_root:
13134         {
13135             void *p2;
13136             p = lock_user_string(arg1); /* new_root */
13137             p2 = lock_user_string(arg2); /* put_old */
13138             if (!p || !p2) {
13139                 ret = -TARGET_EFAULT;
13140             } else {
13141                 ret = get_errno(pivot_root(p, p2));
13142             }
13143             unlock_user(p2, arg2, 0);
13144             unlock_user(p, arg1, 0);
13145         }
13146         return ret;
13147 #endif
13148 
13149     default:
13150         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13151         return -TARGET_ENOSYS;
13152     }
13153     return ret;
13154 }
13155 
13156 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13157                     abi_long arg2, abi_long arg3, abi_long arg4,
13158                     abi_long arg5, abi_long arg6, abi_long arg7,
13159                     abi_long arg8)
13160 {
13161     CPUState *cpu = env_cpu(cpu_env);
13162     abi_long ret;
13163 
13164 #ifdef DEBUG_ERESTARTSYS
13165     /* Debug-only code for exercising the syscall-restart code paths
13166      * in the per-architecture cpu main loops: restart every syscall
13167      * the guest makes once before letting it through.
13168      */
13169     {
13170         static bool flag;
13171         flag = !flag;
13172         if (flag) {
13173             return -QEMU_ERESTARTSYS;
13174         }
13175     }
13176 #endif
13177 
13178     record_syscall_start(cpu, num, arg1,
13179                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13180 
13181     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13182         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13183     }
13184 
13185     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13186                       arg5, arg6, arg7, arg8);
13187 
13188     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13189         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13190                           arg3, arg4, arg5, arg6);
13191     }
13192 
13193     record_syscall_return(cpu, num, ret);
13194     return ret;
13195 }
13196