xref: /qemu/linux-user/syscall.c (revision 7a21bee2)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
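
/*
 * Illustrative example (not part of the code below): a typical glibc
 * pthread_create() issues a clone with roughly
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * i.e. all of CLONE_THREAD_FLAGS plus only "optional" bits, so it passes the
 * CLONE_INVALID_THREAD_FLAGS check, while a fork()-style clone carries none
 * of CLONE_THREAD_FLAGS, only the exit signal (CSIGNAL bits) and possibly
 * bits such as CLONE_CHILD_SETTID/CLONE_CHILD_CLEARTID.
 */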
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
259 
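/*
 * For illustration: an invocation such as
 *   _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 * (used further below) expands to
 *   static int sys_getcwd1(char *buf, size_t size)
 *   {
 *       return syscall(__NR_sys_getcwd1, buf, size);
 *   }
 * i.e. a thin static wrapper that forwards to the host syscall number
 * aliased by the __NR_sys_* defines that follow.
 */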
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_statx __NR_statx
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we emulate getdents with getdents if the host has it.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
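
/*
 * A concrete illustration of the problem described above: the host's
 * struct linux_dirent uses long-sized d_ino/d_off fields (32 bits on a
 * 32-bit host), while a 64-bit guest expects 64-bit fields, so every record
 * grows during conversion and a host buffer's worth of records may no longer
 * fit in the guest buffer.  struct linux_dirent64 uses fixed 64-bit fields,
 * which is why getdents64 is used for that host/guest combination.
 */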
303 
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(__NR_futex)
324 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
325           const struct timespec *,timeout,int *,uaddr2,int,val3)
326 #endif
327 #if defined(__NR_futex_time64)
328 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
329           const struct timespec *,timeout,int *,uaddr2,int,val3)
330 #endif
331 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
332 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
335 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 /* sched_attr is not defined in glibc */
338 struct sched_attr {
339     uint32_t size;
340     uint32_t sched_policy;
341     uint64_t sched_flags;
342     int32_t sched_nice;
343     uint32_t sched_priority;
344     uint64_t sched_runtime;
345     uint64_t sched_deadline;
346     uint64_t sched_period;
347     uint32_t sched_util_min;
348     uint32_t sched_util_max;
349 };
350 #define __NR_sys_sched_getattr __NR_sched_getattr
351 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
352           unsigned int, size, unsigned int, flags);
353 #define __NR_sys_sched_setattr __NR_sched_setattr
354 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
355           unsigned int, flags);
356 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
357 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
358 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
359 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
360           const struct sched_param *, param);
361 #define __NR_sys_sched_getparam __NR_sched_getparam
362 _syscall2(int, sys_sched_getparam, pid_t, pid,
363           struct sched_param *, param);
364 #define __NR_sys_sched_setparam __NR_sched_setparam
365 _syscall2(int, sys_sched_setparam, pid_t, pid,
366           const struct sched_param *, param);
367 #define __NR_sys_getcpu __NR_getcpu
368 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
369 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
370           void *, arg);
371 _syscall2(int, capget, struct __user_cap_header_struct *, header,
372           struct __user_cap_data_struct *, data);
373 _syscall2(int, capset, struct __user_cap_header_struct *, header,
374           struct __user_cap_data_struct *, data);
375 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
376 _syscall2(int, ioprio_get, int, which, int, who)
377 #endif
378 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
379 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
380 #endif
381 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
382 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
383 #endif
384 
385 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
386 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
387           unsigned long, idx1, unsigned long, idx2)
388 #endif
389 
390 /*
391  * It is assumed that struct statx is architecture independent.
392  */
393 #if defined(TARGET_NR_statx) && defined(__NR_statx)
394 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
395           unsigned int, mask, struct target_statx *, statxbuf)
396 #endif
397 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
398 _syscall2(int, membarrier, int, cmd, int, flags)
399 #endif
400 
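/*
 * Each entry below is { target_mask, target_bits, host_mask, host_bits }:
 * when (flags & target_mask) == target_bits, host_bits is OR-ed into the
 * translated value (and symmetrically for the host-to-target direction).
 * This is the layout walked by the target_to_host_bitmask() and
 * host_to_target_bitmask() helpers.
 */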
401 static const bitmask_transtbl fcntl_flags_tbl[] = {
402   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
403   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
404   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
405   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
406   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
407   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
408   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
409   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
410   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
411   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
412   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
413   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
414   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
415 #if defined(O_DIRECT)
416   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
417 #endif
418 #if defined(O_NOATIME)
419   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
420 #endif
421 #if defined(O_CLOEXEC)
422   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
423 #endif
424 #if defined(O_PATH)
425   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
426 #endif
427 #if defined(O_TMPFILE)
428   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
429 #endif
430   /* Don't terminate the list prematurely on 64-bit host+guest.  */
431 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
432   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
433 #endif
434   { 0, 0, 0, 0 }
435 };
436 
437 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
438 
439 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
440 #if defined(__NR_utimensat)
441 #define __NR_sys_utimensat __NR_utimensat
442 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
443           const struct timespec *,tsp,int,flags)
444 #else
445 static int sys_utimensat(int dirfd, const char *pathname,
446                          const struct timespec times[2], int flags)
447 {
448     errno = ENOSYS;
449     return -1;
450 }
451 #endif
452 #endif /* TARGET_NR_utimensat */
453 
454 #ifdef TARGET_NR_renameat2
455 #if defined(__NR_renameat2)
456 #define __NR_sys_renameat2 __NR_renameat2
457 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
458           const char *, new, unsigned int, flags)
459 #else
460 static int sys_renameat2(int oldfd, const char *old,
461                          int newfd, const char *new, int flags)
462 {
463     if (flags == 0) {
464         return renameat(oldfd, old, newfd, new);
465     }
466     errno = ENOSYS;
467     return -1;
468 }
469 #endif
470 #endif /* TARGET_NR_renameat2 */
471 
472 #ifdef CONFIG_INOTIFY
473 #include <sys/inotify.h>
474 #else
475 /* Userspace can usually survive at runtime without inotify */
476 #undef TARGET_NR_inotify_init
477 #undef TARGET_NR_inotify_init1
478 #undef TARGET_NR_inotify_add_watch
479 #undef TARGET_NR_inotify_rm_watch
480 #endif /* CONFIG_INOTIFY  */
481 
482 #if defined(TARGET_NR_prlimit64)
483 #ifndef __NR_prlimit64
484 # define __NR_prlimit64 -1
485 #endif
486 #define __NR_sys_prlimit64 __NR_prlimit64
487 /* The glibc rlimit structure may not match the one used by the underlying syscall */
488 struct host_rlimit64 {
489     uint64_t rlim_cur;
490     uint64_t rlim_max;
491 };
492 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
493           const struct host_rlimit64 *, new_limit,
494           struct host_rlimit64 *, old_limit)
495 #endif
496 
497 
498 #if defined(TARGET_NR_timer_create)
499 /* Maximum of 32 active POSIX timers allowed at any one time. */
500 static timer_t g_posix_timers[32] = { 0, } ;
501 
502 static inline int next_free_host_timer(void)
503 {
504     int k;
505     /* FIXME: Does finding the next free slot require a lock? */
506     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
507         if (g_posix_timers[k] == 0) {
508             g_posix_timers[k] = (timer_t) 1;
509             return k;
510         }
511     }
512     return -1;
513 }
514 #endif
515 
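/*
 * The errno translation table lives in errnos.c.inc as a list of E(...)
 * lines.  With the temporary E() definitions below, each E(X) line expands
 * to "case X: return TARGET_X;" in one direction and
 * "case TARGET_X: return X;" in the other; values without an entry are
 * passed through unchanged by the default case.
 */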
516 static inline int host_to_target_errno(int host_errno)
517 {
518     switch (host_errno) {
519 #define E(X)  case X: return TARGET_##X;
520 #include "errnos.c.inc"
521 #undef E
522     default:
523         return host_errno;
524     }
525 }
526 
527 static inline int target_to_host_errno(int target_errno)
528 {
529     switch (target_errno) {
530 #define E(X)  case TARGET_##X: return X;
531 #include "errnos.c.inc"
532 #undef E
533     default:
534         return target_errno;
535     }
536 }
537 
538 abi_long get_errno(abi_long ret)
539 {
540     if (ret == -1)
541         return -host_to_target_errno(errno);
542     else
543         return ret;
544 }
545 
546 const char *target_strerror(int err)
547 {
548     if (err == QEMU_ERESTARTSYS) {
549         return "To be restarted";
550     }
551     if (err == QEMU_ESIGRETURN) {
552         return "Successful exit from sigreturn";
553     }
554 
555     return strerror(target_to_host_errno(err));
556 }
557 
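/*
 * Check that the guest has zero-filled the tail of an extensible struct:
 * returns 1 if usize <= ksize or if every guest byte in [ksize, usize) is
 * zero, 0 if a non-zero byte is found, and -TARGET_EFAULT if the guest
 * memory cannot be read.
 */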
558 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
559 {
560     int i;
561     uint8_t b;
562     if (usize <= ksize) {
563         return 1;
564     }
565     for (i = ksize; i < usize; i++) {
566         if (get_user_u8(b, addr + i)) {
567             return -TARGET_EFAULT;
568         }
569         if (b != 0) {
570             return 0;
571         }
572     }
573     return 1;
574 }
575 
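/*
 * The safe_syscallN() wrappers below funnel blocking host syscalls through
 * safe_syscall() (see "user/safe-syscall.h", included above).  Roughly
 * speaking, if a guest signal is pending the call fails with errno set to
 * the special QEMU_ERESTARTSYS value instead of blocking, so the main loop
 * can deliver the signal and then restart the syscall.
 */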
576 #define safe_syscall0(type, name) \
577 static type safe_##name(void) \
578 { \
579     return safe_syscall(__NR_##name); \
580 }
581 
582 #define safe_syscall1(type, name, type1, arg1) \
583 static type safe_##name(type1 arg1) \
584 { \
585     return safe_syscall(__NR_##name, arg1); \
586 }
587 
588 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
589 static type safe_##name(type1 arg1, type2 arg2) \
590 { \
591     return safe_syscall(__NR_##name, arg1, arg2); \
592 }
593 
594 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
595 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
596 { \
597     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
598 }
599 
600 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
601     type4, arg4) \
602 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
603 { \
604     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
605 }
606 
607 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
608     type4, arg4, type5, arg5) \
609 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
610     type5 arg5) \
611 { \
612     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
613 }
614 
615 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
616     type4, arg4, type5, arg5, type6, arg6) \
617 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
618     type5 arg5, type6 arg6) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
621 }
622 
623 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
624 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
625 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
626               int, flags, mode_t, mode)
627 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
628 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
629               struct rusage *, rusage)
630 #endif
631 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
632               int, options, struct rusage *, rusage)
633 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
634 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
635     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
636 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
637               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
638 #endif
639 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
640 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
641               struct timespec *, tsp, const sigset_t *, sigmask,
642               size_t, sigsetsize)
643 #endif
644 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
645               int, maxevents, int, timeout, const sigset_t *, sigmask,
646               size_t, sigsetsize)
647 #if defined(__NR_futex)
648 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
649               const struct timespec *,timeout,int *,uaddr2,int,val3)
650 #endif
651 #if defined(__NR_futex_time64)
652 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
653               const struct timespec *,timeout,int *,uaddr2,int,val3)
654 #endif
655 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
656 safe_syscall2(int, kill, pid_t, pid, int, sig)
657 safe_syscall2(int, tkill, int, tid, int, sig)
658 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
659 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
660 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
661 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
662               unsigned long, pos_l, unsigned long, pos_h)
663 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
664               unsigned long, pos_l, unsigned long, pos_h)
665 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
666               socklen_t, addrlen)
667 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
668               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
669 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
670               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
671 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
672 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
673 safe_syscall2(int, flock, int, fd, int, operation)
674 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
675 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
676               const struct timespec *, uts, size_t, sigsetsize)
677 #endif
678 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
679               int, flags)
680 #if defined(TARGET_NR_nanosleep)
681 safe_syscall2(int, nanosleep, const struct timespec *, req,
682               struct timespec *, rem)
683 #endif
684 #if defined(TARGET_NR_clock_nanosleep) || \
685     defined(TARGET_NR_clock_nanosleep_time64)
686 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
687               const struct timespec *, req, struct timespec *, rem)
688 #endif
689 #ifdef __NR_ipc
690 #ifdef __s390x__
691 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
692               void *, ptr)
693 #else
694 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
695               void *, ptr, long, fifth)
696 #endif
697 #endif
698 #ifdef __NR_msgsnd
699 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
700               int, flags)
701 #endif
702 #ifdef __NR_msgrcv
703 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
704               long, msgtype, int, flags)
705 #endif
706 #ifdef __NR_semtimedop
707 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
708               unsigned, nsops, const struct timespec *, timeout)
709 #endif
710 #if defined(TARGET_NR_mq_timedsend) || \
711     defined(TARGET_NR_mq_timedsend_time64)
712 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
713               size_t, len, unsigned, prio, const struct timespec *, timeout)
714 #endif
715 #if defined(TARGET_NR_mq_timedreceive) || \
716     defined(TARGET_NR_mq_timedreceive_time64)
717 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
718               size_t, len, unsigned *, prio, const struct timespec *, timeout)
719 #endif
720 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
721 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
722               int, outfd, loff_t *, poutoff, size_t, length,
723               unsigned int, flags)
724 #endif
725 
726 /* We do ioctl like this rather than via safe_syscall3 to preserve the
727  * "third argument might be integer or pointer or not present" behaviour of
728  * the libc function.
729  */
730 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
731 /* Similarly for fcntl. Note that callers must always:
732  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
733  *  use the flock64 struct rather than unsuffixed flock
734  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
735  */
736 #ifdef __NR_fcntl64
737 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
738 #else
739 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
740 #endif
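
/*
 * Illustrative (hypothetical) call site following the rule above:
 *   struct flock64 fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl));
 * i.e. always the 64-bit-suffixed command and struct flock64, so the same
 * code works whether the underlying host syscall is fcntl64 or fcntl.
 */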
741 
742 static inline int host_to_target_sock_type(int host_type)
743 {
744     int target_type;
745 
746     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
747     case SOCK_DGRAM:
748         target_type = TARGET_SOCK_DGRAM;
749         break;
750     case SOCK_STREAM:
751         target_type = TARGET_SOCK_STREAM;
752         break;
753     default:
754         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
755         break;
756     }
757 
758 #if defined(SOCK_CLOEXEC)
759     if (host_type & SOCK_CLOEXEC) {
760         target_type |= TARGET_SOCK_CLOEXEC;
761     }
762 #endif
763 
764 #if defined(SOCK_NONBLOCK)
765     if (host_type & SOCK_NONBLOCK) {
766         target_type |= TARGET_SOCK_NONBLOCK;
767     }
768 #endif
769 
770     return target_type;
771 }
772 
773 static abi_ulong target_brk;
774 static abi_ulong target_original_brk;
775 static abi_ulong brk_page;
776 
777 void target_set_brk(abi_ulong new_brk)
778 {
779     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
780     brk_page = HOST_PAGE_ALIGN(target_brk);
781 }
782 
783 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
784 #define DEBUGF_BRK(message, args...)
785 
786 /* do_brk() must return target values and target errnos. */
787 abi_long do_brk(abi_ulong new_brk)
788 {
789     abi_long mapped_addr;
790     abi_ulong new_alloc_size;
791 
792     /* brk pointers are always untagged */
793 
794     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
795 
796     if (!new_brk) {
797         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
798         return target_brk;
799     }
800     if (new_brk < target_original_brk) {
801         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
802                    target_brk);
803         return target_brk;
804     }
805 
806     /* If the new brk is less than the highest page reserved to the
807      * target heap allocation, set it and we're almost done...  */
808     if (new_brk <= brk_page) {
809         /* Heap contents are initialized to zero, as for anonymous
810          * mapped pages.  */
811         if (new_brk > target_brk) {
812             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
813         }
814         target_brk = new_brk;
815         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
816         return target_brk;
817     }
818 
819     /* We need to allocate more memory after the brk... Note that
820      * we don't use MAP_FIXED because that will map over the top of
821      * any existing mapping (like the one with the host libc or qemu
822      * itself); instead we treat "mapped but at wrong address" as
823      * a failure and unmap again.
824      */
825     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
826     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
827                                         PROT_READ|PROT_WRITE,
828                                         MAP_ANON|MAP_PRIVATE, 0, 0));
829 
830     if (mapped_addr == brk_page) {
831         /* Heap contents are initialized to zero, as for anonymous
832          * mapped pages.  Technically the new pages are already
833          * initialized to zero since they *are* anonymous mapped
834          * pages, however we have to take care with the contents that
835          * come from the remaining part of the previous page: it may
836          * contain garbage data due to a previous heap usage (grown
837          * then shrunk).  */
838         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
839 
840         target_brk = new_brk;
841         brk_page = HOST_PAGE_ALIGN(target_brk);
842         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
843             target_brk);
844         return target_brk;
845     } else if (mapped_addr != -1) {
846         /* Mapped but at wrong address, meaning there wasn't actually
847          * enough space for this brk.
848          */
849         target_munmap(mapped_addr, new_alloc_size);
850         mapped_addr = -1;
851         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
852     }
853     else {
854         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
855     }
856 
857 #if defined(TARGET_ALPHA)
858     /* We (partially) emulate OSF/1 on Alpha, which requires we
859        return a proper errno, not an unchanged brk value.  */
860     return -TARGET_ENOMEM;
861 #endif
862     /* For everything else, return the previous break. */
863     return target_brk;
864 }
865 
866 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
867     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
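/*
 * Guest fd_sets are stored as an array of abi_ulong words, with fd number k
 * held in bit (k % TARGET_ABI_BITS) of word (k / TARGET_ABI_BITS).  The
 * helpers below convert between that layout and the host fd_set macros,
 * which is why they walk the set word by word and bit by bit.
 */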
868 static inline abi_long copy_from_user_fdset(fd_set *fds,
869                                             abi_ulong target_fds_addr,
870                                             int n)
871 {
872     int i, nw, j, k;
873     abi_ulong b, *target_fds;
874 
875     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
876     if (!(target_fds = lock_user(VERIFY_READ,
877                                  target_fds_addr,
878                                  sizeof(abi_ulong) * nw,
879                                  1)))
880         return -TARGET_EFAULT;
881 
882     FD_ZERO(fds);
883     k = 0;
884     for (i = 0; i < nw; i++) {
885         /* grab the abi_ulong */
886         __get_user(b, &target_fds[i]);
887         for (j = 0; j < TARGET_ABI_BITS; j++) {
888             /* check the bit inside the abi_ulong */
889             if ((b >> j) & 1)
890                 FD_SET(k, fds);
891             k++;
892         }
893     }
894 
895     unlock_user(target_fds, target_fds_addr, 0);
896 
897     return 0;
898 }
899 
900 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
901                                                  abi_ulong target_fds_addr,
902                                                  int n)
903 {
904     if (target_fds_addr) {
905         if (copy_from_user_fdset(fds, target_fds_addr, n))
906             return -TARGET_EFAULT;
907         *fds_ptr = fds;
908     } else {
909         *fds_ptr = NULL;
910     }
911     return 0;
912 }
913 
914 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
915                                           const fd_set *fds,
916                                           int n)
917 {
918     int i, nw, j, k;
919     abi_long v;
920     abi_ulong *target_fds;
921 
922     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
923     if (!(target_fds = lock_user(VERIFY_WRITE,
924                                  target_fds_addr,
925                                  sizeof(abi_ulong) * nw,
926                                  0)))
927         return -TARGET_EFAULT;
928 
929     k = 0;
930     for (i = 0; i < nw; i++) {
931         v = 0;
932         for (j = 0; j < TARGET_ABI_BITS; j++) {
933             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
934             k++;
935         }
936         __put_user(v, &target_fds[i]);
937     }
938 
939     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
940 
941     return 0;
942 }
943 #endif
944 
945 #if defined(__alpha__)
946 #define HOST_HZ 1024
947 #else
948 #define HOST_HZ 100
949 #endif
950 
951 static inline abi_long host_to_target_clock_t(long ticks)
952 {
953 #if HOST_HZ == TARGET_HZ
954     return ticks;
955 #else
956     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
957 #endif
958 }
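
/*
 * Worked example (illustrative only): on an Alpha host (HOST_HZ 1024) with a
 * target whose TARGET_HZ is 100, 2048 host ticks convert to
 * (2048 * 100) / 1024 = 200 target ticks.
 */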
959 
960 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
961                                              const struct rusage *rusage)
962 {
963     struct target_rusage *target_rusage;
964 
965     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
966         return -TARGET_EFAULT;
967     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
968     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
969     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
970     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
971     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
972     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
973     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
974     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
975     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
976     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
977     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
978     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
979     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
980     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
981     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
982     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
983     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
984     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
985     unlock_user_struct(target_rusage, target_addr, 1);
986 
987     return 0;
988 }
989 
990 #ifdef TARGET_NR_setrlimit
991 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
992 {
993     abi_ulong target_rlim_swap;
994     rlim_t result;
995 
996     target_rlim_swap = tswapal(target_rlim);
997     if (target_rlim_swap == TARGET_RLIM_INFINITY)
998         return RLIM_INFINITY;
999 
1000     result = target_rlim_swap;
1001     if (target_rlim_swap != (rlim_t)result)
1002         return RLIM_INFINITY;
1003 
1004     return result;
1005 }
1006 #endif
1007 
1008 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1009 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1010 {
1011     abi_ulong target_rlim_swap;
1012     abi_ulong result;
1013 
1014     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1015         target_rlim_swap = TARGET_RLIM_INFINITY;
1016     else
1017         target_rlim_swap = rlim;
1018     result = tswapal(target_rlim_swap);
1019 
1020     return result;
1021 }
1022 #endif
1023 
1024 static inline int target_to_host_resource(int code)
1025 {
1026     switch (code) {
1027     case TARGET_RLIMIT_AS:
1028         return RLIMIT_AS;
1029     case TARGET_RLIMIT_CORE:
1030         return RLIMIT_CORE;
1031     case TARGET_RLIMIT_CPU:
1032         return RLIMIT_CPU;
1033     case TARGET_RLIMIT_DATA:
1034         return RLIMIT_DATA;
1035     case TARGET_RLIMIT_FSIZE:
1036         return RLIMIT_FSIZE;
1037     case TARGET_RLIMIT_LOCKS:
1038         return RLIMIT_LOCKS;
1039     case TARGET_RLIMIT_MEMLOCK:
1040         return RLIMIT_MEMLOCK;
1041     case TARGET_RLIMIT_MSGQUEUE:
1042         return RLIMIT_MSGQUEUE;
1043     case TARGET_RLIMIT_NICE:
1044         return RLIMIT_NICE;
1045     case TARGET_RLIMIT_NOFILE:
1046         return RLIMIT_NOFILE;
1047     case TARGET_RLIMIT_NPROC:
1048         return RLIMIT_NPROC;
1049     case TARGET_RLIMIT_RSS:
1050         return RLIMIT_RSS;
1051     case TARGET_RLIMIT_RTPRIO:
1052         return RLIMIT_RTPRIO;
1053 #ifdef RLIMIT_RTTIME
1054     case TARGET_RLIMIT_RTTIME:
1055         return RLIMIT_RTTIME;
1056 #endif
1057     case TARGET_RLIMIT_SIGPENDING:
1058         return RLIMIT_SIGPENDING;
1059     case TARGET_RLIMIT_STACK:
1060         return RLIMIT_STACK;
1061     default:
1062         return code;
1063     }
1064 }
1065 
1066 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1067                                               abi_ulong target_tv_addr)
1068 {
1069     struct target_timeval *target_tv;
1070 
1071     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1072         return -TARGET_EFAULT;
1073     }
1074 
1075     __get_user(tv->tv_sec, &target_tv->tv_sec);
1076     __get_user(tv->tv_usec, &target_tv->tv_usec);
1077 
1078     unlock_user_struct(target_tv, target_tv_addr, 0);
1079 
1080     return 0;
1081 }
1082 
1083 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1084                                             const struct timeval *tv)
1085 {
1086     struct target_timeval *target_tv;
1087 
1088     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1089         return -TARGET_EFAULT;
1090     }
1091 
1092     __put_user(tv->tv_sec, &target_tv->tv_sec);
1093     __put_user(tv->tv_usec, &target_tv->tv_usec);
1094 
1095     unlock_user_struct(target_tv, target_tv_addr, 1);
1096 
1097     return 0;
1098 }
1099 
1100 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1101 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1102                                                 abi_ulong target_tv_addr)
1103 {
1104     struct target__kernel_sock_timeval *target_tv;
1105 
1106     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1107         return -TARGET_EFAULT;
1108     }
1109 
1110     __get_user(tv->tv_sec, &target_tv->tv_sec);
1111     __get_user(tv->tv_usec, &target_tv->tv_usec);
1112 
1113     unlock_user_struct(target_tv, target_tv_addr, 0);
1114 
1115     return 0;
1116 }
1117 #endif
1118 
1119 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1120                                               const struct timeval *tv)
1121 {
1122     struct target__kernel_sock_timeval *target_tv;
1123 
1124     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1125         return -TARGET_EFAULT;
1126     }
1127 
1128     __put_user(tv->tv_sec, &target_tv->tv_sec);
1129     __put_user(tv->tv_usec, &target_tv->tv_usec);
1130 
1131     unlock_user_struct(target_tv, target_tv_addr, 1);
1132 
1133     return 0;
1134 }
1135 
1136 #if defined(TARGET_NR_futex) || \
1137     defined(TARGET_NR_rt_sigtimedwait) || \
1138     defined(TARGET_NR_pselect6) || \
1139     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1140     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1141     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1142     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1143     defined(TARGET_NR_timer_settime) || \
1144     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1145 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1146                                                abi_ulong target_addr)
1147 {
1148     struct target_timespec *target_ts;
1149 
1150     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1151         return -TARGET_EFAULT;
1152     }
1153     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1154     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1155     unlock_user_struct(target_ts, target_addr, 0);
1156     return 0;
1157 }
1158 #endif
1159 
1160 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1161     defined(TARGET_NR_timer_settime64) || \
1162     defined(TARGET_NR_mq_timedsend_time64) || \
1163     defined(TARGET_NR_mq_timedreceive_time64) || \
1164     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1165     defined(TARGET_NR_clock_nanosleep_time64) || \
1166     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1167     defined(TARGET_NR_utimensat) || \
1168     defined(TARGET_NR_utimensat_time64) || \
1169     defined(TARGET_NR_semtimedop_time64) || \
1170     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1171 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1172                                                  abi_ulong target_addr)
1173 {
1174     struct target__kernel_timespec *target_ts;
1175 
1176     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1177         return -TARGET_EFAULT;
1178     }
1179     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1180     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1181     /* in 32bit mode, this drops the padding */
1182     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1183     unlock_user_struct(target_ts, target_addr, 0);
1184     return 0;
1185 }
1186 #endif
1187 
1188 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1189                                                struct timespec *host_ts)
1190 {
1191     struct target_timespec *target_ts;
1192 
1193     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1194         return -TARGET_EFAULT;
1195     }
1196     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1197     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1198     unlock_user_struct(target_ts, target_addr, 1);
1199     return 0;
1200 }
1201 
1202 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1203                                                  struct timespec *host_ts)
1204 {
1205     struct target__kernel_timespec *target_ts;
1206 
1207     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1208         return -TARGET_EFAULT;
1209     }
1210     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1211     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1212     unlock_user_struct(target_ts, target_addr, 1);
1213     return 0;
1214 }
1215 
1216 #if defined(TARGET_NR_gettimeofday)
1217 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1218                                              struct timezone *tz)
1219 {
1220     struct target_timezone *target_tz;
1221 
1222     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1223         return -TARGET_EFAULT;
1224     }
1225 
1226     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1227     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1228 
1229     unlock_user_struct(target_tz, target_tz_addr, 1);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_settimeofday)
1236 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1237                                                abi_ulong target_tz_addr)
1238 {
1239     struct target_timezone *target_tz;
1240 
1241     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1242         return -TARGET_EFAULT;
1243     }
1244 
1245     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1246     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1247 
1248     unlock_user_struct(target_tz, target_tz_addr, 0);
1249 
1250     return 0;
1251 }
1252 #endif
1253 
1254 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1255 #include <mqueue.h>
1256 
1257 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1258                                               abi_ulong target_mq_attr_addr)
1259 {
1260     struct target_mq_attr *target_mq_attr;
1261 
1262     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1263                           target_mq_attr_addr, 1))
1264         return -TARGET_EFAULT;
1265 
1266     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1267     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1268     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1269     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1270 
1271     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1272 
1273     return 0;
1274 }
1275 
1276 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1277                                             const struct mq_attr *attr)
1278 {
1279     struct target_mq_attr *target_mq_attr;
1280 
1281     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1282                           target_mq_attr_addr, 0))
1283         return -TARGET_EFAULT;
1284 
1285     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1286     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1287     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1288     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1289 
1290     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1291 
1292     return 0;
1293 }
1294 #endif
1295 
1296 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1297 /* do_select() must return target values and target errnos. */
1298 static abi_long do_select(int n,
1299                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1300                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1301 {
1302     fd_set rfds, wfds, efds;
1303     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1304     struct timeval tv;
1305     struct timespec ts, *ts_ptr;
1306     abi_long ret;
1307 
1308     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1309     if (ret) {
1310         return ret;
1311     }
1312     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1313     if (ret) {
1314         return ret;
1315     }
1316     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1317     if (ret) {
1318         return ret;
1319     }
1320 
1321     if (target_tv_addr) {
1322         if (copy_from_user_timeval(&tv, target_tv_addr))
1323             return -TARGET_EFAULT;
1324         ts.tv_sec = tv.tv_sec;
1325         ts.tv_nsec = tv.tv_usec * 1000;
1326         ts_ptr = &ts;
1327     } else {
1328         ts_ptr = NULL;
1329     }
1330 
1331     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1332                                   ts_ptr, NULL));
1333 
1334     if (!is_error(ret)) {
1335         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1336             return -TARGET_EFAULT;
1337         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1338             return -TARGET_EFAULT;
1339         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1340             return -TARGET_EFAULT;
1341 
1342         if (target_tv_addr) {
1343             tv.tv_sec = ts.tv_sec;
1344             tv.tv_usec = ts.tv_nsec / 1000;
1345             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1346                 return -TARGET_EFAULT;
1347             }
1348         }
1349     }
1350 
1351     return ret;
1352 }
1353 
1354 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1355 static abi_long do_old_select(abi_ulong arg1)
1356 {
1357     struct target_sel_arg_struct *sel;
1358     abi_ulong inp, outp, exp, tvp;
1359     long nsel;
1360 
1361     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1362         return -TARGET_EFAULT;
1363     }
1364 
1365     nsel = tswapal(sel->n);
1366     inp = tswapal(sel->inp);
1367     outp = tswapal(sel->outp);
1368     exp = tswapal(sel->exp);
1369     tvp = tswapal(sel->tvp);
1370 
1371     unlock_user_struct(sel, arg1, 0);
1372 
1373     return do_select(nsel, inp, outp, exp, tvp);
1374 }
1375 #endif
1376 #endif
1377 
1378 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1379 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1380                             abi_long arg4, abi_long arg5, abi_long arg6,
1381                             bool time64)
1382 {
1383     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1384     fd_set rfds, wfds, efds;
1385     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1386     struct timespec ts, *ts_ptr;
1387     abi_long ret;
1388 
1389     /*
1390      * The 6th arg is actually two args smashed together,
1391      * so we cannot use the C library.
1392      */
1393     struct {
1394         sigset_t *set;
1395         size_t size;
1396     } sig, *sig_ptr;
1397 
1398     abi_ulong arg_sigset, arg_sigsize, *arg7;
1399 
1400     n = arg1;
1401     rfd_addr = arg2;
1402     wfd_addr = arg3;
1403     efd_addr = arg4;
1404     ts_addr = arg5;
1405 
1406     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1407     if (ret) {
1408         return ret;
1409     }
1410     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1411     if (ret) {
1412         return ret;
1413     }
1414     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1415     if (ret) {
1416         return ret;
1417     }
1418 
1419     /*
1420      * This takes a timespec, and not a timeval, so we cannot
1421      * use the do_select() helper ...
1422      */
1423     if (ts_addr) {
1424         if (time64) {
1425             if (target_to_host_timespec64(&ts, ts_addr)) {
1426                 return -TARGET_EFAULT;
1427             }
1428         } else {
1429             if (target_to_host_timespec(&ts, ts_addr)) {
1430                 return -TARGET_EFAULT;
1431             }
1432         }
1433         ts_ptr = &ts;
1434     } else {
1435         ts_ptr = NULL;
1436     }
1437 
1438     /* Extract the two packed args for the sigset */
1439     sig_ptr = NULL;
1440     if (arg6) {
1441         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1442         if (!arg7) {
1443             return -TARGET_EFAULT;
1444         }
1445         arg_sigset = tswapal(arg7[0]);
1446         arg_sigsize = tswapal(arg7[1]);
1447         unlock_user(arg7, arg6, 0);
1448 
1449         if (arg_sigset) {
1450             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1451             if (ret != 0) {
1452                 return ret;
1453             }
1454             sig_ptr = &sig;
1455             sig.size = SIGSET_T_SIZE;
1456         }
1457     }
1458 
1459     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1460                                   ts_ptr, sig_ptr));
1461 
1462     if (sig_ptr) {
1463         finish_sigsuspend_mask(ret);
1464     }
1465 
1466     if (!is_error(ret)) {
1467         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1468             return -TARGET_EFAULT;
1469         }
1470         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1471             return -TARGET_EFAULT;
1472         }
1473         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1474             return -TARGET_EFAULT;
1475         }
1476         if (time64) {
1477             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1478                 return -TARGET_EFAULT;
1479             }
1480         } else {
1481             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1482                 return -TARGET_EFAULT;
1483             }
1484         }
1485     }
1486     return ret;
1487 }
1488 #endif
1489 
1490 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1491     defined(TARGET_NR_ppoll_time64)
1492 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1493                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1494 {
1495     struct target_pollfd *target_pfd;
1496     unsigned int nfds = arg2;
1497     struct pollfd *pfd;
1498     unsigned int i;
1499     abi_long ret;
1500 
1501     pfd = NULL;
1502     target_pfd = NULL;
1503     if (nfds) {
1504         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1505             return -TARGET_EINVAL;
1506         }
1507         target_pfd = lock_user(VERIFY_WRITE, arg1,
1508                                sizeof(struct target_pollfd) * nfds, 1);
1509         if (!target_pfd) {
1510             return -TARGET_EFAULT;
1511         }
1512 
1513         pfd = alloca(sizeof(struct pollfd) * nfds);
1514         for (i = 0; i < nfds; i++) {
1515             pfd[i].fd = tswap32(target_pfd[i].fd);
1516             pfd[i].events = tswap16(target_pfd[i].events);
1517         }
1518     }
1519     if (ppoll) {
1520         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1521         sigset_t *set = NULL;
1522 
1523         if (arg3) {
1524             if (time64) {
1525                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1526                     unlock_user(target_pfd, arg1, 0);
1527                     return -TARGET_EFAULT;
1528                 }
1529             } else {
1530                 if (target_to_host_timespec(timeout_ts, arg3)) {
1531                     unlock_user(target_pfd, arg1, 0);
1532                     return -TARGET_EFAULT;
1533                 }
1534             }
1535         } else {
1536             timeout_ts = NULL;
1537         }
1538 
1539         if (arg4) {
1540             ret = process_sigsuspend_mask(&set, arg4, arg5);
1541             if (ret != 0) {
1542                 unlock_user(target_pfd, arg1, 0);
1543                 return ret;
1544             }
1545         }
1546 
1547         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1548                                    set, SIGSET_T_SIZE));
1549 
1550         if (set) {
1551             finish_sigsuspend_mask(ret);
1552         }
1553         if (!is_error(ret) && arg3) {
1554             if (time64) {
1555                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1556                     return -TARGET_EFAULT;
1557                 }
1558             } else {
1559                 if (host_to_target_timespec(arg3, timeout_ts)) {
1560                     return -TARGET_EFAULT;
1561                 }
1562             }
1563         }
1564     } else {
1565         struct timespec ts, *pts;
1566 
1567         if (arg3 >= 0) {
1568             /* Convert ms to secs, ns */
1569             ts.tv_sec = arg3 / 1000;
1570             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1571             pts = &ts;
1572         } else {
1573             /* A negative poll() timeout means "infinite" */
1574             pts = NULL;
1575         }
1576         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1577     }
1578 
1579     if (!is_error(ret)) {
1580         for (i = 0; i < nfds; i++) {
1581             target_pfd[i].revents = tswap16(pfd[i].revents);
1582         }
1583     }
1584     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1585     return ret;
1586 }
1587 #endif
1588 
1589 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1590                         int flags, int is_pipe2)
1591 {
1592     int host_pipe[2];
1593     abi_long ret;
1594     ret = pipe2(host_pipe, flags);
1595 
1596     if (is_error(ret))
1597         return get_errno(ret);
1598 
1599     /* Several targets have special calling conventions for the original
1600        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1601     if (!is_pipe2) {
1602 #if defined(TARGET_ALPHA)
1603         cpu_env->ir[IR_A4] = host_pipe[1];
1604         return host_pipe[0];
1605 #elif defined(TARGET_MIPS)
1606         cpu_env->active_tc.gpr[3] = host_pipe[1];
1607         return host_pipe[0];
1608 #elif defined(TARGET_SH4)
1609         cpu_env->gregs[1] = host_pipe[1];
1610         return host_pipe[0];
1611 #elif defined(TARGET_SPARC)
1612         cpu_env->regwptr[1] = host_pipe[1];
1613         return host_pipe[0];
1614 #endif
1615     }
1616 
1617     if (put_user_s32(host_pipe[0], pipedes)
1618         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1619         return -TARGET_EFAULT;
1620     return get_errno(ret);
1621 }
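/*
 * Illustrative sketch (not part of the original file): the register-return
 * convention handled above is invisible to guest code, whose libc stores the
 * two values back into the array.  On MIPS o32, for example, pipe() returns
 * fd[0] in $v0 and fd[1] in $v1 (gpr[3] above).  The function name below is
 * only an example.
 */
#if 0   /* example only */
static int guest_pipe_example(void)
{
    int fds[2];

    if (pipe(fds) < 0) {        /* libc hides the register convention */
        return -1;
    }
    return fds[0];              /* read end; fds[1] is the write end */
}
#endif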
1622 
1623 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1624                                               abi_ulong target_addr,
1625                                               socklen_t len)
1626 {
1627     struct target_ip_mreqn *target_smreqn;
1628 
1629     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1630     if (!target_smreqn)
1631         return -TARGET_EFAULT;
1632     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1633     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1634     if (len == sizeof(struct target_ip_mreqn))
1635         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1636     unlock_user(target_smreqn, target_addr, 0);
1637 
1638     return 0;
1639 }
1640 
1641 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1642                                                abi_ulong target_addr,
1643                                                socklen_t len)
1644 {
1645     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1646     sa_family_t sa_family;
1647     struct target_sockaddr *target_saddr;
1648 
1649     if (fd_trans_target_to_host_addr(fd)) {
1650         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1651     }
1652 
1653     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1654     if (!target_saddr)
1655         return -TARGET_EFAULT;
1656 
1657     sa_family = tswap16(target_saddr->sa_family);
1658 
1659     /* Oops. The caller might send an incomplete sun_path; sun_path
1660      * must be terminated by \0 (see the manual page), but
1661      * unfortunately it is quite common to specify the sockaddr_un
1662      * length as "strlen(x->sun_path)" when it should be
1663      * "strlen(...) + 1". We'll fix that here if needed.
1664      * The Linux kernel has a similar feature.
1665      */
1666 
1667     if (sa_family == AF_UNIX) {
1668         if (len < unix_maxlen && len > 0) {
1669             char *cp = (char *)target_saddr;
1670 
1671             if (cp[len - 1] && !cp[len])
1672                 len++;
1673         }
1674         if (len > unix_maxlen)
1675             len = unix_maxlen;
1676     }
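/*
 * Illustrative sketch (not part of the original file): the length fixup above
 * covers guests that omit the trailing NUL from the address length.  The
 * socket "s" and the buffer contents are hypothetical; the usual socket and
 * stddef headers are assumed.
 */
#if 0   /* example only */
        struct sockaddr_un sun = { .sun_family = AF_UNIX };
        socklen_t l;

        strcpy(sun.sun_path, "/tmp/sock");
        /* Common but one byte short of including the terminator: */
        l = offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path);
        connect(s, (struct sockaddr *)&sun, l);
        /* The check above sees cp[l - 1] != 0 while cp[l] == 0 and extends
         * the length by one, matching what the native kernel tolerates. */
#endif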
1677 
1678     memcpy(addr, target_saddr, len);
1679     addr->sa_family = sa_family;
1680     if (sa_family == AF_NETLINK) {
1681         struct sockaddr_nl *nladdr;
1682 
1683         nladdr = (struct sockaddr_nl *)addr;
1684         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1685         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1686     } else if (sa_family == AF_PACKET) {
1687         struct target_sockaddr_ll *lladdr;
1688 
1689         lladdr = (struct target_sockaddr_ll *)addr;
1690         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1691         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1692     }
1693     unlock_user(target_saddr, target_addr, 0);
1694 
1695     return 0;
1696 }
1697 
1698 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1699                                                struct sockaddr *addr,
1700                                                socklen_t len)
1701 {
1702     struct target_sockaddr *target_saddr;
1703 
1704     if (len == 0) {
1705         return 0;
1706     }
1707     assert(addr);
1708 
1709     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1710     if (!target_saddr)
1711         return -TARGET_EFAULT;
1712     memcpy(target_saddr, addr, len);
1713     if (len >= offsetof(struct target_sockaddr, sa_family) +
1714         sizeof(target_saddr->sa_family)) {
1715         target_saddr->sa_family = tswap16(addr->sa_family);
1716     }
1717     if (addr->sa_family == AF_NETLINK &&
1718         len >= sizeof(struct target_sockaddr_nl)) {
1719         struct target_sockaddr_nl *target_nl =
1720                (struct target_sockaddr_nl *)target_saddr;
1721         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1722         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1723     } else if (addr->sa_family == AF_PACKET) {
1724         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1725         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1726         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1727     } else if (addr->sa_family == AF_INET6 &&
1728                len >= sizeof(struct target_sockaddr_in6)) {
1729         struct target_sockaddr_in6 *target_in6 =
1730                (struct target_sockaddr_in6 *)target_saddr;
1731         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1732     }
1733     unlock_user(target_saddr, target_addr, len);
1734 
1735     return 0;
1736 }
1737 
1738 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1739                                            struct target_msghdr *target_msgh)
1740 {
1741     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1742     abi_long msg_controllen;
1743     abi_ulong target_cmsg_addr;
1744     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1745     socklen_t space = 0;
1746 
1747     msg_controllen = tswapal(target_msgh->msg_controllen);
1748     if (msg_controllen < sizeof (struct target_cmsghdr))
1749         goto the_end;
1750     target_cmsg_addr = tswapal(target_msgh->msg_control);
1751     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1752     target_cmsg_start = target_cmsg;
1753     if (!target_cmsg)
1754         return -TARGET_EFAULT;
1755 
1756     while (cmsg && target_cmsg) {
1757         void *data = CMSG_DATA(cmsg);
1758         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1759 
1760         int len = tswapal(target_cmsg->cmsg_len)
1761             - sizeof(struct target_cmsghdr);
1762 
1763         space += CMSG_SPACE(len);
1764         if (space > msgh->msg_controllen) {
1765             space -= CMSG_SPACE(len);
1766             /* This is a QEMU bug, since we allocated the payload
1767              * area ourselves (unlike overflow in host-to-target
1768              * conversion, which is just the guest giving us a buffer
1769              * that's too small). It can't happen for the payload types
1770              * we currently support; if it becomes an issue in future
1771              * we would need to improve our allocation strategy to
1772              * something more intelligent than "twice the size of the
1773              * target buffer we're reading from".
1774              */
1775             qemu_log_mask(LOG_UNIMP,
1776                           ("Unsupported ancillary data %d/%d: "
1777                            "unhandled msg size\n"),
1778                           tswap32(target_cmsg->cmsg_level),
1779                           tswap32(target_cmsg->cmsg_type));
1780             break;
1781         }
1782 
1783         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1784             cmsg->cmsg_level = SOL_SOCKET;
1785         } else {
1786             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1787         }
1788         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1789         cmsg->cmsg_len = CMSG_LEN(len);
1790 
1791         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1792             int *fd = (int *)data;
1793             int *target_fd = (int *)target_data;
1794             int i, numfds = len / sizeof(int);
1795 
1796             for (i = 0; i < numfds; i++) {
1797                 __get_user(fd[i], target_fd + i);
1798             }
1799         } else if (cmsg->cmsg_level == SOL_SOCKET
1800                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1801             struct ucred *cred = (struct ucred *)data;
1802             struct target_ucred *target_cred =
1803                 (struct target_ucred *)target_data;
1804 
1805             __get_user(cred->pid, &target_cred->pid);
1806             __get_user(cred->uid, &target_cred->uid);
1807             __get_user(cred->gid, &target_cred->gid);
1808         } else {
1809             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1810                           cmsg->cmsg_level, cmsg->cmsg_type);
1811             memcpy(data, target_data, len);
1812         }
1813 
1814         cmsg = CMSG_NXTHDR(msgh, cmsg);
1815         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1816                                          target_cmsg_start);
1817     }
1818     unlock_user(target_cmsg, target_cmsg_addr, 0);
1819  the_end:
1820     msgh->msg_controllen = space;
1821     return 0;
1822 }
1823 
1824 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1825                                            struct msghdr *msgh)
1826 {
1827     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1828     abi_long msg_controllen;
1829     abi_ulong target_cmsg_addr;
1830     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1831     socklen_t space = 0;
1832 
1833     msg_controllen = tswapal(target_msgh->msg_controllen);
1834     if (msg_controllen < sizeof (struct target_cmsghdr))
1835         goto the_end;
1836     target_cmsg_addr = tswapal(target_msgh->msg_control);
1837     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1838     target_cmsg_start = target_cmsg;
1839     if (!target_cmsg)
1840         return -TARGET_EFAULT;
1841 
1842     while (cmsg && target_cmsg) {
1843         void *data = CMSG_DATA(cmsg);
1844         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1845 
1846         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1847         int tgt_len, tgt_space;
1848 
1849         /* We never copy a half-header but may copy half-data;
1850          * this is Linux's behaviour in put_cmsg(). Note that
1851          * truncation here is a guest problem (which we report
1852          * to the guest via the CTRUNC bit), unlike truncation
1853          * in target_to_host_cmsg, which is a QEMU bug.
1854          */
1855         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1856             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1857             break;
1858         }
1859 
1860         if (cmsg->cmsg_level == SOL_SOCKET) {
1861             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1862         } else {
1863             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1864         }
1865         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1866 
1867         /* Payload types which need a different size of payload on
1868          * the target must adjust tgt_len here.
1869          */
1870         tgt_len = len;
1871         switch (cmsg->cmsg_level) {
1872         case SOL_SOCKET:
1873             switch (cmsg->cmsg_type) {
1874             case SO_TIMESTAMP:
1875                 tgt_len = sizeof(struct target_timeval);
1876                 break;
1877             default:
1878                 break;
1879             }
1880             break;
1881         default:
1882             break;
1883         }
1884 
1885         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1886             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1887             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1888         }
1889 
1890         /* We must now copy-and-convert len bytes of payload
1891          * into tgt_len bytes of destination space. Bear in mind
1892          * that in both source and destination we may be dealing
1893          * with a truncated value!
1894          */
1895         switch (cmsg->cmsg_level) {
1896         case SOL_SOCKET:
1897             switch (cmsg->cmsg_type) {
1898             case SCM_RIGHTS:
1899             {
1900                 int *fd = (int *)data;
1901                 int *target_fd = (int *)target_data;
1902                 int i, numfds = tgt_len / sizeof(int);
1903 
1904                 for (i = 0; i < numfds; i++) {
1905                     __put_user(fd[i], target_fd + i);
1906                 }
1907                 break;
1908             }
1909             case SO_TIMESTAMP:
1910             {
1911                 struct timeval *tv = (struct timeval *)data;
1912                 struct target_timeval *target_tv =
1913                     (struct target_timeval *)target_data;
1914 
1915                 if (len != sizeof(struct timeval) ||
1916                     tgt_len != sizeof(struct target_timeval)) {
1917                     goto unimplemented;
1918                 }
1919 
1920                 /* copy struct timeval to target */
1921                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1922                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1923                 break;
1924             }
1925             case SCM_CREDENTIALS:
1926             {
1927                 struct ucred *cred = (struct ucred *)data;
1928                 struct target_ucred *target_cred =
1929                     (struct target_ucred *)target_data;
1930 
1931                 __put_user(cred->pid, &target_cred->pid);
1932                 __put_user(cred->uid, &target_cred->uid);
1933                 __put_user(cred->gid, &target_cred->gid);
1934                 break;
1935             }
1936             default:
1937                 goto unimplemented;
1938             }
1939             break;
1940 
1941         case SOL_IP:
1942             switch (cmsg->cmsg_type) {
1943             case IP_TTL:
1944             {
1945                 uint32_t *v = (uint32_t *)data;
1946                 uint32_t *t_int = (uint32_t *)target_data;
1947 
1948                 if (len != sizeof(uint32_t) ||
1949                     tgt_len != sizeof(uint32_t)) {
1950                     goto unimplemented;
1951                 }
1952                 __put_user(*v, t_int);
1953                 break;
1954             }
1955             case IP_RECVERR:
1956             {
1957                 struct errhdr_t {
1958                    struct sock_extended_err ee;
1959                    struct sockaddr_in offender;
1960                 };
1961                 struct errhdr_t *errh = (struct errhdr_t *)data;
1962                 struct errhdr_t *target_errh =
1963                     (struct errhdr_t *)target_data;
1964 
1965                 if (len != sizeof(struct errhdr_t) ||
1966                     tgt_len != sizeof(struct errhdr_t)) {
1967                     goto unimplemented;
1968                 }
1969                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1970                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1971                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1972                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1973                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1974                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1975                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1976                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1977                     (void *) &errh->offender, sizeof(errh->offender));
1978                 break;
1979             }
1980             default:
1981                 goto unimplemented;
1982             }
1983             break;
1984 
1985         case SOL_IPV6:
1986             switch (cmsg->cmsg_type) {
1987             case IPV6_HOPLIMIT:
1988             {
1989                 uint32_t *v = (uint32_t *)data;
1990                 uint32_t *t_int = (uint32_t *)target_data;
1991 
1992                 if (len != sizeof(uint32_t) ||
1993                     tgt_len != sizeof(uint32_t)) {
1994                     goto unimplemented;
1995                 }
1996                 __put_user(*v, t_int);
1997                 break;
1998             }
1999             case IPV6_RECVERR:
2000             {
2001                 struct errhdr6_t {
2002                    struct sock_extended_err ee;
2003                    struct sockaddr_in6 offender;
2004                 };
2005                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2006                 struct errhdr6_t *target_errh =
2007                     (struct errhdr6_t *)target_data;
2008 
2009                 if (len != sizeof(struct errhdr6_t) ||
2010                     tgt_len != sizeof(struct errhdr6_t)) {
2011                     goto unimplemented;
2012                 }
2013                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2014                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2015                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2016                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2017                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2018                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2019                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2020                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2021                     (void *) &errh->offender, sizeof(errh->offender));
2022                 break;
2023             }
2024             default:
2025                 goto unimplemented;
2026             }
2027             break;
2028 
2029         default:
2030         unimplemented:
2031             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2032                           cmsg->cmsg_level, cmsg->cmsg_type);
2033             memcpy(target_data, data, MIN(len, tgt_len));
2034             if (tgt_len > len) {
2035                 memset(target_data + len, 0, tgt_len - len);
2036             }
2037         }
2038 
2039         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2040         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2041         if (msg_controllen < tgt_space) {
2042             tgt_space = msg_controllen;
2043         }
2044         msg_controllen -= tgt_space;
2045         space += tgt_space;
2046         cmsg = CMSG_NXTHDR(msgh, cmsg);
2047         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2048                                          target_cmsg_start);
2049     }
2050     unlock_user(target_cmsg, target_cmsg_addr, space);
2051  the_end:
2052     target_msgh->msg_controllen = tswapal(space);
2053     return 0;
2054 }
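/*
 * Illustrative sketch (not part of the original file): the truncation
 * handling above mirrors what a native recvmsg() caller sees when the control
 * buffer is too small, e.g. leaving room for only one SCM_RIGHTS descriptor.
 * The function name below is only an example.
 */
#if 0   /* example only */
static void recv_one_fd_example(int sock)
{
    struct msghdr msg = { 0 };
    union {
        struct cmsghdr align;
        char buf[CMSG_SPACE(sizeof(int))];      /* room for a single fd */
    } control;

    msg.msg_control = control.buf;
    msg.msg_controllen = sizeof(control.buf);
    if (recvmsg(sock, &msg, 0) >= 0 && (msg.msg_flags & MSG_CTRUNC)) {
        /* The sender passed more fds than fit: the excess is dropped and
         * MSG_CTRUNC reports the truncation, much as the conversion above
         * reports it back to the guest. */
    }
}
#endif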
2055 
2056 /* do_setsockopt() must return target values and target errnos. */
2057 static abi_long do_setsockopt(int sockfd, int level, int optname,
2058                               abi_ulong optval_addr, socklen_t optlen)
2059 {
2060     abi_long ret;
2061     int val;
2062     struct ip_mreqn *ip_mreq;
2063     struct ip_mreq_source *ip_mreq_source;
2064 
2065     switch(level) {
2066     case SOL_TCP:
2067     case SOL_UDP:
2068         /* TCP and UDP options all take an 'int' value.  */
2069         if (optlen < sizeof(uint32_t))
2070             return -TARGET_EINVAL;
2071 
2072         if (get_user_u32(val, optval_addr))
2073             return -TARGET_EFAULT;
2074         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2075         break;
2076     case SOL_IP:
2077         switch(optname) {
2078         case IP_TOS:
2079         case IP_TTL:
2080         case IP_HDRINCL:
2081         case IP_ROUTER_ALERT:
2082         case IP_RECVOPTS:
2083         case IP_RETOPTS:
2084         case IP_PKTINFO:
2085         case IP_MTU_DISCOVER:
2086         case IP_RECVERR:
2087         case IP_RECVTTL:
2088         case IP_RECVTOS:
2089 #ifdef IP_FREEBIND
2090         case IP_FREEBIND:
2091 #endif
2092         case IP_MULTICAST_TTL:
2093         case IP_MULTICAST_LOOP:
2094             val = 0;
2095             if (optlen >= sizeof(uint32_t)) {
2096                 if (get_user_u32(val, optval_addr))
2097                     return -TARGET_EFAULT;
2098             } else if (optlen >= 1) {
2099                 if (get_user_u8(val, optval_addr))
2100                     return -TARGET_EFAULT;
2101             }
2102             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2103             break;
2104         case IP_ADD_MEMBERSHIP:
2105         case IP_DROP_MEMBERSHIP:
2106             if (optlen < sizeof (struct target_ip_mreq) ||
2107                 optlen > sizeof (struct target_ip_mreqn))
2108                 return -TARGET_EINVAL;
2109 
2110             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2111             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2112             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2113             break;
2114 
2115         case IP_BLOCK_SOURCE:
2116         case IP_UNBLOCK_SOURCE:
2117         case IP_ADD_SOURCE_MEMBERSHIP:
2118         case IP_DROP_SOURCE_MEMBERSHIP:
2119             if (optlen != sizeof (struct target_ip_mreq_source))
2120                 return -TARGET_EINVAL;
2121 
2122             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2123             if (!ip_mreq_source) {
2124                 return -TARGET_EFAULT;
2125             }
2126             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2127             unlock_user(ip_mreq_source, optval_addr, 0);
2128             break;
2129 
2130         default:
2131             goto unimplemented;
2132         }
2133         break;
2134     case SOL_IPV6:
2135         switch (optname) {
2136         case IPV6_MTU_DISCOVER:
2137         case IPV6_MTU:
2138         case IPV6_V6ONLY:
2139         case IPV6_RECVPKTINFO:
2140         case IPV6_UNICAST_HOPS:
2141         case IPV6_MULTICAST_HOPS:
2142         case IPV6_MULTICAST_LOOP:
2143         case IPV6_RECVERR:
2144         case IPV6_RECVHOPLIMIT:
2145         case IPV6_2292HOPLIMIT:
2146         case IPV6_CHECKSUM:
2147         case IPV6_ADDRFORM:
2148         case IPV6_2292PKTINFO:
2149         case IPV6_RECVTCLASS:
2150         case IPV6_RECVRTHDR:
2151         case IPV6_2292RTHDR:
2152         case IPV6_RECVHOPOPTS:
2153         case IPV6_2292HOPOPTS:
2154         case IPV6_RECVDSTOPTS:
2155         case IPV6_2292DSTOPTS:
2156         case IPV6_TCLASS:
2157         case IPV6_ADDR_PREFERENCES:
2158 #ifdef IPV6_RECVPATHMTU
2159         case IPV6_RECVPATHMTU:
2160 #endif
2161 #ifdef IPV6_TRANSPARENT
2162         case IPV6_TRANSPARENT:
2163 #endif
2164 #ifdef IPV6_FREEBIND
2165         case IPV6_FREEBIND:
2166 #endif
2167 #ifdef IPV6_RECVORIGDSTADDR
2168         case IPV6_RECVORIGDSTADDR:
2169 #endif
2170             val = 0;
2171             if (optlen < sizeof(uint32_t)) {
2172                 return -TARGET_EINVAL;
2173             }
2174             if (get_user_u32(val, optval_addr)) {
2175                 return -TARGET_EFAULT;
2176             }
2177             ret = get_errno(setsockopt(sockfd, level, optname,
2178                                        &val, sizeof(val)));
2179             break;
2180         case IPV6_PKTINFO:
2181         {
2182             struct in6_pktinfo pki;
2183 
2184             if (optlen < sizeof(pki)) {
2185                 return -TARGET_EINVAL;
2186             }
2187 
2188             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2189                 return -TARGET_EFAULT;
2190             }
2191 
2192             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2193 
2194             ret = get_errno(setsockopt(sockfd, level, optname,
2195                                        &pki, sizeof(pki)));
2196             break;
2197         }
2198         case IPV6_ADD_MEMBERSHIP:
2199         case IPV6_DROP_MEMBERSHIP:
2200         {
2201             struct ipv6_mreq ipv6mreq;
2202 
2203             if (optlen < sizeof(ipv6mreq)) {
2204                 return -TARGET_EINVAL;
2205             }
2206 
2207             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2208                 return -TARGET_EFAULT;
2209             }
2210 
2211             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2212 
2213             ret = get_errno(setsockopt(sockfd, level, optname,
2214                                        &ipv6mreq, sizeof(ipv6mreq)));
2215             break;
2216         }
2217         default:
2218             goto unimplemented;
2219         }
2220         break;
2221     case SOL_ICMPV6:
2222         switch (optname) {
2223         case ICMPV6_FILTER:
2224         {
2225             struct icmp6_filter icmp6f;
2226 
2227             if (optlen > sizeof(icmp6f)) {
2228                 optlen = sizeof(icmp6f);
2229             }
2230 
2231             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2232                 return -TARGET_EFAULT;
2233             }
2234 
2235             for (val = 0; val < 8; val++) {
2236                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2237             }
2238 
2239             ret = get_errno(setsockopt(sockfd, level, optname,
2240                                        &icmp6f, optlen));
2241             break;
2242         }
2243         default:
2244             goto unimplemented;
2245         }
2246         break;
2247     case SOL_RAW:
2248         switch (optname) {
2249         case ICMP_FILTER:
2250         case IPV6_CHECKSUM:
2251             /* these take a u32 value */
2252             if (optlen < sizeof(uint32_t)) {
2253                 return -TARGET_EINVAL;
2254             }
2255 
2256             if (get_user_u32(val, optval_addr)) {
2257                 return -TARGET_EFAULT;
2258             }
2259             ret = get_errno(setsockopt(sockfd, level, optname,
2260                                        &val, sizeof(val)));
2261             break;
2262 
2263         default:
2264             goto unimplemented;
2265         }
2266         break;
2267 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2268     case SOL_ALG:
2269         switch (optname) {
2270         case ALG_SET_KEY:
2271         {
2272             char *alg_key = g_malloc(optlen);
2273 
2274             if (!alg_key) {
2275                 return -TARGET_ENOMEM;
2276             }
2277             if (copy_from_user(alg_key, optval_addr, optlen)) {
2278                 g_free(alg_key);
2279                 return -TARGET_EFAULT;
2280             }
2281             ret = get_errno(setsockopt(sockfd, level, optname,
2282                                        alg_key, optlen));
2283             g_free(alg_key);
2284             break;
2285         }
2286         case ALG_SET_AEAD_AUTHSIZE:
2287         {
2288             ret = get_errno(setsockopt(sockfd, level, optname,
2289                                        NULL, optlen));
2290             break;
2291         }
2292         default:
2293             goto unimplemented;
2294         }
2295         break;
2296 #endif
2297     case TARGET_SOL_SOCKET:
2298         switch (optname) {
2299         case TARGET_SO_RCVTIMEO:
2300         {
2301                 struct timeval tv;
2302 
2303                 optname = SO_RCVTIMEO;
2304 
2305 set_timeout:
2306                 if (optlen != sizeof(struct target_timeval)) {
2307                     return -TARGET_EINVAL;
2308                 }
2309 
2310                 if (copy_from_user_timeval(&tv, optval_addr)) {
2311                     return -TARGET_EFAULT;
2312                 }
2313 
2314                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2315                                 &tv, sizeof(tv)));
2316                 return ret;
2317         }
2318         case TARGET_SO_SNDTIMEO:
2319                 optname = SO_SNDTIMEO;
2320                 goto set_timeout;
2321         case TARGET_SO_ATTACH_FILTER:
2322         {
2323                 struct target_sock_fprog *tfprog;
2324                 struct target_sock_filter *tfilter;
2325                 struct sock_fprog fprog;
2326                 struct sock_filter *filter;
2327                 int i;
2328 
2329                 if (optlen != sizeof(*tfprog)) {
2330                     return -TARGET_EINVAL;
2331                 }
2332                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2333                     return -TARGET_EFAULT;
2334                 }
2335                 if (!lock_user_struct(VERIFY_READ, tfilter,
2336                                       tswapal(tfprog->filter), 0)) {
2337                     unlock_user_struct(tfprog, optval_addr, 1);
2338                     return -TARGET_EFAULT;
2339                 }
2340 
2341                 fprog.len = tswap16(tfprog->len);
2342                 filter = g_try_new(struct sock_filter, fprog.len);
2343                 if (filter == NULL) {
2344                     unlock_user_struct(tfilter, tfprog->filter, 1);
2345                     unlock_user_struct(tfprog, optval_addr, 1);
2346                     return -TARGET_ENOMEM;
2347                 }
2348                 for (i = 0; i < fprog.len; i++) {
2349                     filter[i].code = tswap16(tfilter[i].code);
2350                     filter[i].jt = tfilter[i].jt;
2351                     filter[i].jf = tfilter[i].jf;
2352                     filter[i].k = tswap32(tfilter[i].k);
2353                 }
2354                 fprog.filter = filter;
2355 
2356                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2357                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2358                 g_free(filter);
2359 
2360                 unlock_user_struct(tfilter, tfprog->filter, 1);
2361                 unlock_user_struct(tfprog, optval_addr, 1);
2362                 return ret;
2363         }
2364         case TARGET_SO_BINDTODEVICE:
2365         {
2366                 char *dev_ifname, *addr_ifname;
2367 
2368                 if (optlen > IFNAMSIZ - 1) {
2369                     optlen = IFNAMSIZ - 1;
2370                 }
2371                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2372                 if (!dev_ifname) {
2373                     return -TARGET_EFAULT;
2374                 }
2375                 optname = SO_BINDTODEVICE;
2376                 addr_ifname = alloca(IFNAMSIZ);
2377                 memcpy(addr_ifname, dev_ifname, optlen);
2378                 addr_ifname[optlen] = 0;
2379                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2380                                            addr_ifname, optlen));
2381                 unlock_user(dev_ifname, optval_addr, 0);
2382                 return ret;
2383         }
2384         case TARGET_SO_LINGER:
2385         {
2386                 struct linger lg;
2387                 struct target_linger *tlg;
2388 
2389                 if (optlen != sizeof(struct target_linger)) {
2390                     return -TARGET_EINVAL;
2391                 }
2392                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2393                     return -TARGET_EFAULT;
2394                 }
2395                 __get_user(lg.l_onoff, &tlg->l_onoff);
2396                 __get_user(lg.l_linger, &tlg->l_linger);
2397                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2398                                 &lg, sizeof(lg)));
2399                 unlock_user_struct(tlg, optval_addr, 0);
2400                 return ret;
2401         }
2402         /* Options with 'int' argument.  */
2403         case TARGET_SO_DEBUG:
2404                 optname = SO_DEBUG;
2405                 break;
2406         case TARGET_SO_REUSEADDR:
2407                 optname = SO_REUSEADDR;
2408                 break;
2409 #ifdef SO_REUSEPORT
2410         case TARGET_SO_REUSEPORT:
2411                 optname = SO_REUSEPORT;
2412                 break;
2413 #endif
2414         case TARGET_SO_TYPE:
2415                 optname = SO_TYPE;
2416                 break;
2417         case TARGET_SO_ERROR:
2418                 optname = SO_ERROR;
2419                 break;
2420         case TARGET_SO_DONTROUTE:
2421                 optname = SO_DONTROUTE;
2422                 break;
2423         case TARGET_SO_BROADCAST:
2424                 optname = SO_BROADCAST;
2425                 break;
2426         case TARGET_SO_SNDBUF:
2427                 optname = SO_SNDBUF;
2428                 break;
2429         case TARGET_SO_SNDBUFFORCE:
2430                 optname = SO_SNDBUFFORCE;
2431                 break;
2432         case TARGET_SO_RCVBUF:
2433                 optname = SO_RCVBUF;
2434                 break;
2435         case TARGET_SO_RCVBUFFORCE:
2436                 optname = SO_RCVBUFFORCE;
2437                 break;
2438         case TARGET_SO_KEEPALIVE:
2439                 optname = SO_KEEPALIVE;
2440                 break;
2441         case TARGET_SO_OOBINLINE:
2442                 optname = SO_OOBINLINE;
2443                 break;
2444         case TARGET_SO_NO_CHECK:
2445                 optname = SO_NO_CHECK;
2446                 break;
2447         case TARGET_SO_PRIORITY:
2448                 optname = SO_PRIORITY;
2449                 break;
2450 #ifdef SO_BSDCOMPAT
2451         case TARGET_SO_BSDCOMPAT:
2452                 optname = SO_BSDCOMPAT;
2453                 break;
2454 #endif
2455         case TARGET_SO_PASSCRED:
2456                 optname = SO_PASSCRED;
2457                 break;
2458         case TARGET_SO_PASSSEC:
2459                 optname = SO_PASSSEC;
2460                 break;
2461         case TARGET_SO_TIMESTAMP:
2462                 optname = SO_TIMESTAMP;
2463                 break;
2464         case TARGET_SO_RCVLOWAT:
2465                 optname = SO_RCVLOWAT;
2466                 break;
2467         default:
2468             goto unimplemented;
2469         }
2470         if (optlen < sizeof(uint32_t))
2471             return -TARGET_EINVAL;
2472 
2473         if (get_user_u32(val, optval_addr))
2474             return -TARGET_EFAULT;
2475         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2476         break;
2477 #ifdef SOL_NETLINK
2478     case SOL_NETLINK:
2479         switch (optname) {
2480         case NETLINK_PKTINFO:
2481         case NETLINK_ADD_MEMBERSHIP:
2482         case NETLINK_DROP_MEMBERSHIP:
2483         case NETLINK_BROADCAST_ERROR:
2484         case NETLINK_NO_ENOBUFS:
2485 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2486         case NETLINK_LISTEN_ALL_NSID:
2487         case NETLINK_CAP_ACK:
2488 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2489 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2490         case NETLINK_EXT_ACK:
2491 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2493         case NETLINK_GET_STRICT_CHK:
2494 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2495             break;
2496         default:
2497             goto unimplemented;
2498         }
2499         val = 0;
2500         if (optlen < sizeof(uint32_t)) {
2501             return -TARGET_EINVAL;
2502         }
2503         if (get_user_u32(val, optval_addr)) {
2504             return -TARGET_EFAULT;
2505         }
2506         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2507                                    sizeof(val)));
2508         break;
2509 #endif /* SOL_NETLINK */
2510     default:
2511     unimplemented:
2512         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2513                       level, optname);
2514         ret = -TARGET_ENOPROTOOPT;
2515     }
2516     return ret;
2517 }
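/*
 * Illustrative sketch (not part of the original file): what a guest program
 * typically does to reach the TARGET_SO_RCVTIMEO path above.  The guest
 * passes its own SOL_SOCKET/SO_RCVTIMEO numbering and a target_timeval
 * layout; do_setsockopt() translates both before calling the host
 * setsockopt().  The function name below is only an example.
 */
#if 0   /* example only */
static int guest_set_rcv_timeout_example(int s)
{
    struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };

    return setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}
#endif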
2518 
2519 /* do_getsockopt() must return target values and target errnos. */
2520 static abi_long do_getsockopt(int sockfd, int level, int optname,
2521                               abi_ulong optval_addr, abi_ulong optlen)
2522 {
2523     abi_long ret;
2524     int len, val;
2525     socklen_t lv;
2526 
2527     switch(level) {
2528     case TARGET_SOL_SOCKET:
2529         level = SOL_SOCKET;
2530         switch (optname) {
2531         /* These don't just return a single integer */
2532         case TARGET_SO_PEERNAME:
2533             goto unimplemented;
2534         case TARGET_SO_RCVTIMEO: {
2535             struct timeval tv;
2536             socklen_t tvlen;
2537 
2538             optname = SO_RCVTIMEO;
2539 
2540 get_timeout:
2541             if (get_user_u32(len, optlen)) {
2542                 return -TARGET_EFAULT;
2543             }
2544             if (len < 0) {
2545                 return -TARGET_EINVAL;
2546             }
2547 
2548             tvlen = sizeof(tv);
2549             ret = get_errno(getsockopt(sockfd, level, optname,
2550                                        &tv, &tvlen));
2551             if (ret < 0) {
2552                 return ret;
2553             }
2554             if (len > sizeof(struct target_timeval)) {
2555                 len = sizeof(struct target_timeval);
2556             }
2557             if (copy_to_user_timeval(optval_addr, &tv)) {
2558                 return -TARGET_EFAULT;
2559             }
2560             if (put_user_u32(len, optlen)) {
2561                 return -TARGET_EFAULT;
2562             }
2563             break;
2564         }
2565         case TARGET_SO_SNDTIMEO:
2566             optname = SO_SNDTIMEO;
2567             goto get_timeout;
2568         case TARGET_SO_PEERCRED: {
2569             struct ucred cr;
2570             socklen_t crlen;
2571             struct target_ucred *tcr;
2572 
2573             if (get_user_u32(len, optlen)) {
2574                 return -TARGET_EFAULT;
2575             }
2576             if (len < 0) {
2577                 return -TARGET_EINVAL;
2578             }
2579 
2580             crlen = sizeof(cr);
2581             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2582                                        &cr, &crlen));
2583             if (ret < 0) {
2584                 return ret;
2585             }
2586             if (len > crlen) {
2587                 len = crlen;
2588             }
2589             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2590                 return -TARGET_EFAULT;
2591             }
2592             __put_user(cr.pid, &tcr->pid);
2593             __put_user(cr.uid, &tcr->uid);
2594             __put_user(cr.gid, &tcr->gid);
2595             unlock_user_struct(tcr, optval_addr, 1);
2596             if (put_user_u32(len, optlen)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             break;
2600         }
2601         case TARGET_SO_PEERSEC: {
2602             char *name;
2603 
2604             if (get_user_u32(len, optlen)) {
2605                 return -TARGET_EFAULT;
2606             }
2607             if (len < 0) {
2608                 return -TARGET_EINVAL;
2609             }
2610             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2611             if (!name) {
2612                 return -TARGET_EFAULT;
2613             }
2614             lv = len;
2615             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2616                                        name, &lv));
2617             if (put_user_u32(lv, optlen)) {
2618                 ret = -TARGET_EFAULT;
2619             }
2620             unlock_user(name, optval_addr, lv);
2621             break;
2622         }
2623         case TARGET_SO_LINGER:
2624         {
2625             struct linger lg;
2626             socklen_t lglen;
2627             struct target_linger *tlg;
2628 
2629             if (get_user_u32(len, optlen)) {
2630                 return -TARGET_EFAULT;
2631             }
2632             if (len < 0) {
2633                 return -TARGET_EINVAL;
2634             }
2635 
2636             lglen = sizeof(lg);
2637             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2638                                        &lg, &lglen));
2639             if (ret < 0) {
2640                 return ret;
2641             }
2642             if (len > lglen) {
2643                 len = lglen;
2644             }
2645             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2646                 return -TARGET_EFAULT;
2647             }
2648             __put_user(lg.l_onoff, &tlg->l_onoff);
2649             __put_user(lg.l_linger, &tlg->l_linger);
2650             unlock_user_struct(tlg, optval_addr, 1);
2651             if (put_user_u32(len, optlen)) {
2652                 return -TARGET_EFAULT;
2653             }
2654             break;
2655         }
2656         /* Options with 'int' argument.  */
2657         case TARGET_SO_DEBUG:
2658             optname = SO_DEBUG;
2659             goto int_case;
2660         case TARGET_SO_REUSEADDR:
2661             optname = SO_REUSEADDR;
2662             goto int_case;
2663 #ifdef SO_REUSEPORT
2664         case TARGET_SO_REUSEPORT:
2665             optname = SO_REUSEPORT;
2666             goto int_case;
2667 #endif
2668         case TARGET_SO_TYPE:
2669             optname = SO_TYPE;
2670             goto int_case;
2671         case TARGET_SO_ERROR:
2672             optname = SO_ERROR;
2673             goto int_case;
2674         case TARGET_SO_DONTROUTE:
2675             optname = SO_DONTROUTE;
2676             goto int_case;
2677         case TARGET_SO_BROADCAST:
2678             optname = SO_BROADCAST;
2679             goto int_case;
2680         case TARGET_SO_SNDBUF:
2681             optname = SO_SNDBUF;
2682             goto int_case;
2683         case TARGET_SO_RCVBUF:
2684             optname = SO_RCVBUF;
2685             goto int_case;
2686         case TARGET_SO_KEEPALIVE:
2687             optname = SO_KEEPALIVE;
2688             goto int_case;
2689         case TARGET_SO_OOBINLINE:
2690             optname = SO_OOBINLINE;
2691             goto int_case;
2692         case TARGET_SO_NO_CHECK:
2693             optname = SO_NO_CHECK;
2694             goto int_case;
2695         case TARGET_SO_PRIORITY:
2696             optname = SO_PRIORITY;
2697             goto int_case;
2698 #ifdef SO_BSDCOMPAT
2699         case TARGET_SO_BSDCOMPAT:
2700             optname = SO_BSDCOMPAT;
2701             goto int_case;
2702 #endif
2703         case TARGET_SO_PASSCRED:
2704             optname = SO_PASSCRED;
2705             goto int_case;
2706         case TARGET_SO_TIMESTAMP:
2707             optname = SO_TIMESTAMP;
2708             goto int_case;
2709         case TARGET_SO_RCVLOWAT:
2710             optname = SO_RCVLOWAT;
2711             goto int_case;
2712         case TARGET_SO_ACCEPTCONN:
2713             optname = SO_ACCEPTCONN;
2714             goto int_case;
2715         case TARGET_SO_PROTOCOL:
2716             optname = SO_PROTOCOL;
2717             goto int_case;
2718         case TARGET_SO_DOMAIN:
2719             optname = SO_DOMAIN;
2720             goto int_case;
2721         default:
2722             goto int_case;
2723         }
2724         break;
2725     case SOL_TCP:
2726     case SOL_UDP:
2727         /* TCP and UDP options all take an 'int' value.  */
2728     int_case:
2729         if (get_user_u32(len, optlen))
2730             return -TARGET_EFAULT;
2731         if (len < 0)
2732             return -TARGET_EINVAL;
2733         lv = sizeof(lv);
2734         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2735         if (ret < 0)
2736             return ret;
2737         if (optname == SO_TYPE) {
2738             val = host_to_target_sock_type(val);
2739         }
2740         if (len > lv)
2741             len = lv;
2742         if (len == 4) {
2743             if (put_user_u32(val, optval_addr))
2744                 return -TARGET_EFAULT;
2745         } else {
2746             if (put_user_u8(val, optval_addr))
2747                 return -TARGET_EFAULT;
2748         }
2749         if (put_user_u32(len, optlen))
2750             return -TARGET_EFAULT;
2751         break;
2752     case SOL_IP:
2753         switch(optname) {
2754         case IP_TOS:
2755         case IP_TTL:
2756         case IP_HDRINCL:
2757         case IP_ROUTER_ALERT:
2758         case IP_RECVOPTS:
2759         case IP_RETOPTS:
2760         case IP_PKTINFO:
2761         case IP_MTU_DISCOVER:
2762         case IP_RECVERR:
2763         case IP_RECVTOS:
2764 #ifdef IP_FREEBIND
2765         case IP_FREEBIND:
2766 #endif
2767         case IP_MULTICAST_TTL:
2768         case IP_MULTICAST_LOOP:
2769             if (get_user_u32(len, optlen))
2770                 return -TARGET_EFAULT;
2771             if (len < 0)
2772                 return -TARGET_EINVAL;
2773             lv = sizeof(lv);
2774             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2775             if (ret < 0)
2776                 return ret;
2777             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2778                 len = 1;
2779                 if (put_user_u32(len, optlen)
2780                     || put_user_u8(val, optval_addr))
2781                     return -TARGET_EFAULT;
2782             } else {
2783                 if (len > sizeof(int))
2784                     len = sizeof(int);
2785                 if (put_user_u32(len, optlen)
2786                     || put_user_u32(val, optval_addr))
2787                     return -TARGET_EFAULT;
2788             }
2789             break;
2790         default:
2791             ret = -TARGET_ENOPROTOOPT;
2792             break;
2793         }
2794         break;
2795     case SOL_IPV6:
2796         switch (optname) {
2797         case IPV6_MTU_DISCOVER:
2798         case IPV6_MTU:
2799         case IPV6_V6ONLY:
2800         case IPV6_RECVPKTINFO:
2801         case IPV6_UNICAST_HOPS:
2802         case IPV6_MULTICAST_HOPS:
2803         case IPV6_MULTICAST_LOOP:
2804         case IPV6_RECVERR:
2805         case IPV6_RECVHOPLIMIT:
2806         case IPV6_2292HOPLIMIT:
2807         case IPV6_CHECKSUM:
2808         case IPV6_ADDRFORM:
2809         case IPV6_2292PKTINFO:
2810         case IPV6_RECVTCLASS:
2811         case IPV6_RECVRTHDR:
2812         case IPV6_2292RTHDR:
2813         case IPV6_RECVHOPOPTS:
2814         case IPV6_2292HOPOPTS:
2815         case IPV6_RECVDSTOPTS:
2816         case IPV6_2292DSTOPTS:
2817         case IPV6_TCLASS:
2818         case IPV6_ADDR_PREFERENCES:
2819 #ifdef IPV6_RECVPATHMTU
2820         case IPV6_RECVPATHMTU:
2821 #endif
2822 #ifdef IPV6_TRANSPARENT
2823         case IPV6_TRANSPARENT:
2824 #endif
2825 #ifdef IPV6_FREEBIND
2826         case IPV6_FREEBIND:
2827 #endif
2828 #ifdef IPV6_RECVORIGDSTADDR
2829         case IPV6_RECVORIGDSTADDR:
2830 #endif
2831             if (get_user_u32(len, optlen))
2832                 return -TARGET_EFAULT;
2833             if (len < 0)
2834                 return -TARGET_EINVAL;
2835             lv = sizeof(lv);
2836             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2837             if (ret < 0)
2838                 return ret;
2839             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2840                 len = 1;
2841                 if (put_user_u32(len, optlen)
2842                     || put_user_u8(val, optval_addr))
2843                     return -TARGET_EFAULT;
2844             } else {
2845                 if (len > sizeof(int))
2846                     len = sizeof(int);
2847                 if (put_user_u32(len, optlen)
2848                     || put_user_u32(val, optval_addr))
2849                     return -TARGET_EFAULT;
2850             }
2851             break;
2852         default:
2853             ret = -TARGET_ENOPROTOOPT;
2854             break;
2855         }
2856         break;
2857 #ifdef SOL_NETLINK
2858     case SOL_NETLINK:
2859         switch (optname) {
2860         case NETLINK_PKTINFO:
2861         case NETLINK_BROADCAST_ERROR:
2862         case NETLINK_NO_ENOBUFS:
2863 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2864         case NETLINK_LISTEN_ALL_NSID:
2865         case NETLINK_CAP_ACK:
2866 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2867 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2868         case NETLINK_EXT_ACK:
2869 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2871         case NETLINK_GET_STRICT_CHK:
2872 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2873             if (get_user_u32(len, optlen)) {
2874                 return -TARGET_EFAULT;
2875             }
2876             if (len != sizeof(val)) {
2877                 return -TARGET_EINVAL;
2878             }
2879             lv = len;
2880             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2881             if (ret < 0) {
2882                 return ret;
2883             }
2884             if (put_user_u32(lv, optlen)
2885                 || put_user_u32(val, optval_addr)) {
2886                 return -TARGET_EFAULT;
2887             }
2888             break;
2889 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2890         case NETLINK_LIST_MEMBERSHIPS:
2891         {
2892             uint32_t *results;
2893             int i;
2894             if (get_user_u32(len, optlen)) {
2895                 return -TARGET_EFAULT;
2896             }
2897             if (len < 0) {
2898                 return -TARGET_EINVAL;
2899             }
2900             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2901             if (!results && len > 0) {
2902                 return -TARGET_EFAULT;
2903             }
2904             lv = len;
2905             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2906             if (ret < 0) {
2907                 unlock_user(results, optval_addr, 0);
2908                 return ret;
2909             }
2910             /* swap host endianness to target endianness. */
2911             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2912                 results[i] = tswap32(results[i]);
2913             }
2914             if (put_user_u32(lv, optlen)) {
2915                 return -TARGET_EFAULT;
2916             }
2917             unlock_user(results, optval_addr, 0);
2918             break;
2919         }
2920 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2921         default:
2922             goto unimplemented;
2923         }
2924         break;
2925 #endif /* SOL_NETLINK */
2926     default:
2927     unimplemented:
2928         qemu_log_mask(LOG_UNIMP,
2929                       "getsockopt level=%d optname=%d not yet supported\n",
2930                       level, optname);
2931         ret = -TARGET_EOPNOTSUPP;
2932         break;
2933     }
2934     return ret;
2935 }
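/*
 * Illustrative sketch (not part of the original file): the "len == 1" path in
 * the SOL_IP/SOL_IPV6 cases above matches a guest that reads a byte-sized
 * option such as IP_TOS.  The function name below is only an example.
 */
#if 0   /* example only */
static int guest_get_tos_example(int s)
{
    unsigned char tos = 0;
    socklen_t len = sizeof(tos);        /* guest asks for one byte */

    if (getsockopt(s, IPPROTO_IP, IP_TOS, &tos, &len) < 0) {
        return -1;
    }
    /* do_getsockopt() stores the value with put_user_u8() and writes
     * len = 1 back, rather than spilling a full 32-bit integer. */
    return tos;
}
#endif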
2936 
2937 /* Convert a target low/high pair representing a file offset into the host
2938  * low/high pair.  This function doesn't handle offsets bigger than 64 bits
2939  * as the kernel doesn't handle them either.
2940  */
2941 static void target_to_host_low_high(abi_ulong tlow,
2942                                     abi_ulong thigh,
2943                                     unsigned long *hlow,
2944                                     unsigned long *hhigh)
2945 {
2946     uint64_t off = tlow |
2947         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2948         TARGET_LONG_BITS / 2;
2949 
2950     *hlow = off;
2951     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2952 }
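/*
 * Illustrative sketch (not part of the original file): for a 32-bit guest,
 * tlow = 0x89abcdef and thigh = 0x01234567 combine to the 64-bit offset
 * 0x0123456789abcdef.  The shift is done in two halves because shifting by
 * the full word width would be undefined behaviour in C.  The function name
 * below is only an example.
 */
#if 0   /* example only */
static void low_high_example(void)
{
    unsigned long hlow, hhigh;

    target_to_host_low_high(0x89abcdef, 0x01234567, &hlow, &hhigh);
    /* 64-bit host: hlow == 0x0123456789abcdefUL, hhigh == 0          */
    /* 32-bit host: hlow == 0x89abcdefUL,         hhigh == 0x01234567 */
}
#endif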
2953 
2954 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2955                                 abi_ulong count, int copy)
2956 {
2957     struct target_iovec *target_vec;
2958     struct iovec *vec;
2959     abi_ulong total_len, max_len;
2960     int i;
2961     int err = 0;
2962     bool bad_address = false;
2963 
2964     if (count == 0) {
2965         errno = 0;
2966         return NULL;
2967     }
2968     if (count > IOV_MAX) {
2969         errno = EINVAL;
2970         return NULL;
2971     }
2972 
2973     vec = g_try_new0(struct iovec, count);
2974     if (vec == NULL) {
2975         errno = ENOMEM;
2976         return NULL;
2977     }
2978 
2979     target_vec = lock_user(VERIFY_READ, target_addr,
2980                            count * sizeof(struct target_iovec), 1);
2981     if (target_vec == NULL) {
2982         err = EFAULT;
2983         goto fail2;
2984     }
2985 
2986     /* ??? If host page size > target page size, this will result in a
2987        value larger than what we can actually support.  */
2988     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2989     total_len = 0;
2990 
2991     for (i = 0; i < count; i++) {
2992         abi_ulong base = tswapal(target_vec[i].iov_base);
2993         abi_long len = tswapal(target_vec[i].iov_len);
2994 
2995         if (len < 0) {
2996             err = EINVAL;
2997             goto fail;
2998         } else if (len == 0) {
2999             /* Zero length pointer is ignored.  */
3000             vec[i].iov_base = 0;
3001         } else {
3002             vec[i].iov_base = lock_user(type, base, len, copy);
3003             /* If the first buffer pointer is bad, this is a fault.  But
3004              * subsequent bad buffers will result in a partial write; this
3005              * is realized by filling the vector with null pointers and
3006              * zero lengths. */
3007             if (!vec[i].iov_base) {
3008                 if (i == 0) {
3009                     err = EFAULT;
3010                     goto fail;
3011                 } else {
3012                     bad_address = true;
3013                 }
3014             }
3015             if (bad_address) {
3016                 len = 0;
3017             }
3018             if (len > max_len - total_len) {
3019                 len = max_len - total_len;
3020             }
3021         }
3022         vec[i].iov_len = len;
3023         total_len += len;
3024     }
3025 
3026     unlock_user(target_vec, target_addr, 0);
3027     return vec;
3028 
3029  fail:
3030     while (--i >= 0) {
3031         if (tswapal(target_vec[i].iov_len) > 0) {
3032             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3033         }
3034     }
3035     unlock_user(target_vec, target_addr, 0);
3036  fail2:
3037     g_free(vec);
3038     errno = err;
3039     return NULL;
3040 }
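/*
 * Illustrative sketch (not part of the original file): how the partial-write
 * rule above plays out.  "good_buf" and "bad_addr" are hypothetical guest
 * addresses, the first mapped and the second not.
 */
#if 0   /* example only */
    struct target_iovec guest_iov[2] = {
        { .iov_base = good_buf, .iov_len = 64 },
        { .iov_base = bad_addr, .iov_len = 64 },
    };
    /* lock_iovec() locks the first buffer normally and records the second as
     * a NULL base with zero length, so the host writev() transfers only the
     * first 64 bytes and the guest sees a short write rather than an
     * immediate -EFAULT. */
#endif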
3041 
3042 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3043                          abi_ulong count, int copy)
3044 {
3045     struct target_iovec *target_vec;
3046     int i;
3047 
3048     target_vec = lock_user(VERIFY_READ, target_addr,
3049                            count * sizeof(struct target_iovec), 1);
3050     if (target_vec) {
3051         for (i = 0; i < count; i++) {
3052             abi_ulong base = tswapal(target_vec[i].iov_base);
3053             abi_long len = tswapal(target_vec[i].iov_len);
3054             if (len < 0) {
3055                 break;
3056             }
3057             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3058         }
3059         unlock_user(target_vec, target_addr, 0);
3060     }
3061 
3062     g_free(vec);
3063 }
3064 
3065 static inline int target_to_host_sock_type(int *type)
3066 {
3067     int host_type = 0;
3068     int target_type = *type;
3069 
3070     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3071     case TARGET_SOCK_DGRAM:
3072         host_type = SOCK_DGRAM;
3073         break;
3074     case TARGET_SOCK_STREAM:
3075         host_type = SOCK_STREAM;
3076         break;
3077     default:
3078         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3079         break;
3080     }
3081     if (target_type & TARGET_SOCK_CLOEXEC) {
3082 #if defined(SOCK_CLOEXEC)
3083         host_type |= SOCK_CLOEXEC;
3084 #else
3085         return -TARGET_EINVAL;
3086 #endif
3087     }
3088     if (target_type & TARGET_SOCK_NONBLOCK) {
3089 #if defined(SOCK_NONBLOCK)
3090         host_type |= SOCK_NONBLOCK;
3091 #elif !defined(O_NONBLOCK)
3092         return -TARGET_EINVAL;
3093 #endif
3094     }
3095     *type = host_type;
3096     return 0;
3097 }
3098 
3099 /* Try to emulate socket type flags after socket creation.  */
3100 static int sock_flags_fixup(int fd, int target_type)
3101 {
3102 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3103     if (target_type & TARGET_SOCK_NONBLOCK) {
3104         int flags = fcntl(fd, F_GETFL);
3105         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3106             close(fd);
3107             return -TARGET_EINVAL;
3108         }
3109     }
3110 #endif
3111     return fd;
3112 }
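
/*
 * Worked example (sketch, not from the original sources): a guest call
 * socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0) reaches
 * do_socket() with the target's encoding of those bits.
 * target_to_host_sock_type() maps the low bits to the host SOCK_STREAM
 * value and translates the two flag bits; if the host lacks SOCK_CLOEXEC
 * the call fails with -TARGET_EINVAL, and if it lacks SOCK_NONBLOCK but
 * has O_NONBLOCK, sock_flags_fixup() applies O_NONBLOCK via fcntl() on
 * the newly created descriptor instead.
 */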
3113 
3114 /* do_socket() Must return target values and target errnos. */
3115 static abi_long do_socket(int domain, int type, int protocol)
3116 {
3117     int target_type = type;
3118     int ret;
3119 
3120     ret = target_to_host_sock_type(&type);
3121     if (ret) {
3122         return ret;
3123     }
3124 
3125     if (domain == PF_NETLINK && !(
3126 #ifdef CONFIG_RTNETLINK
3127          protocol == NETLINK_ROUTE ||
3128 #endif
3129          protocol == NETLINK_KOBJECT_UEVENT ||
3130          protocol == NETLINK_AUDIT)) {
3131         return -TARGET_EPROTONOSUPPORT;
3132     }
3133 
3134     if (domain == AF_PACKET ||
3135         (domain == AF_INET && type == SOCK_PACKET)) {
3136         protocol = tswap16(protocol);
3137     }
3138 
3139     ret = get_errno(socket(domain, type, protocol));
3140     if (ret >= 0) {
3141         ret = sock_flags_fixup(ret, target_type);
3142         if (type == SOCK_PACKET) {
3143             /* Handle an obsolete case:
3144              * if the socket type is SOCK_PACKET, binding is done by name.
3145              */
3146             fd_trans_register(ret, &target_packet_trans);
3147         } else if (domain == PF_NETLINK) {
3148             switch (protocol) {
3149 #ifdef CONFIG_RTNETLINK
3150             case NETLINK_ROUTE:
3151                 fd_trans_register(ret, &target_netlink_route_trans);
3152                 break;
3153 #endif
3154             case NETLINK_KOBJECT_UEVENT:
3155                 /* nothing to do: messages are strings */
3156                 break;
3157             case NETLINK_AUDIT:
3158                 fd_trans_register(ret, &target_netlink_audit_trans);
3159                 break;
3160             default:
3161                 g_assert_not_reached();
3162             }
3163         }
3164     }
3165     return ret;
3166 }
3167 
3168 /* do_bind() Must return target values and target errnos. */
3169 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3170                         socklen_t addrlen)
3171 {
3172     void *addr;
3173     abi_long ret;
3174 
3175     if ((int)addrlen < 0) {
3176         return -TARGET_EINVAL;
3177     }
3178 
3179     addr = alloca(addrlen+1);
3180 
3181     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3182     if (ret)
3183         return ret;
3184 
3185     return get_errno(bind(sockfd, addr, addrlen));
3186 }
3187 
3188 /* do_connect() Must return target values and target errnos. */
3189 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3190                            socklen_t addrlen)
3191 {
3192     void *addr;
3193     abi_long ret;
3194 
3195     if ((int)addrlen < 0) {
3196         return -TARGET_EINVAL;
3197     }
3198 
3199     addr = alloca(addrlen+1);
3200 
3201     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3202     if (ret)
3203         return ret;
3204 
3205     return get_errno(safe_connect(sockfd, addr, addrlen));
3206 }
3207 
3208 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3209 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3210                                       int flags, int send)
3211 {
3212     abi_long ret, len;
3213     struct msghdr msg;
3214     abi_ulong count;
3215     struct iovec *vec;
3216     abi_ulong target_vec;
3217 
3218     if (msgp->msg_name) {
3219         msg.msg_namelen = tswap32(msgp->msg_namelen);
3220         msg.msg_name = alloca(msg.msg_namelen+1);
3221         ret = target_to_host_sockaddr(fd, msg.msg_name,
3222                                       tswapal(msgp->msg_name),
3223                                       msg.msg_namelen);
3224         if (ret == -TARGET_EFAULT) {
3225             /* For connected sockets msg_name and msg_namelen must
3226              * be ignored, so returning EFAULT immediately is wrong.
3227              * Instead, pass a bad msg_name to the host kernel, and
3228              * let it decide whether to return EFAULT or not.
3229              */
3230             msg.msg_name = (void *)-1;
3231         } else if (ret) {
3232             goto out2;
3233         }
3234     } else {
3235         msg.msg_name = NULL;
3236         msg.msg_namelen = 0;
3237     }
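    /*
     * Note (editorial assumption; the original code does not say why): the
     * host control buffer below is sized at twice the target msg_controllen,
     * presumably to leave room for host cmsg headers and alignment that may
     * be larger than the target's.
     */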
3238     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3239     msg.msg_control = alloca(msg.msg_controllen);
3240     memset(msg.msg_control, 0, msg.msg_controllen);
3241 
3242     msg.msg_flags = tswap32(msgp->msg_flags);
3243 
3244     count = tswapal(msgp->msg_iovlen);
3245     target_vec = tswapal(msgp->msg_iov);
3246 
3247     if (count > IOV_MAX) {
3248         /* sendmsg/recvmsg return a different errno for this condition than
3249          * readv/writev, so we must catch it here before lock_iovec() does.
3250          */
3251         ret = -TARGET_EMSGSIZE;
3252         goto out2;
3253     }
3254 
3255     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3256                      target_vec, count, send);
3257     if (vec == NULL) {
3258         ret = -host_to_target_errno(errno);
3259         goto out2;
3260     }
3261     msg.msg_iovlen = count;
3262     msg.msg_iov = vec;
3263 
3264     if (send) {
3265         if (fd_trans_target_to_host_data(fd)) {
3266             void *host_msg;
3267 
3268             host_msg = g_malloc(msg.msg_iov->iov_len);
3269             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3270             ret = fd_trans_target_to_host_data(fd)(host_msg,
3271                                                    msg.msg_iov->iov_len);
3272             if (ret >= 0) {
3273                 msg.msg_iov->iov_base = host_msg;
3274                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3275             }
3276             g_free(host_msg);
3277         } else {
3278             ret = target_to_host_cmsg(&msg, msgp);
3279             if (ret == 0) {
3280                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3281             }
3282         }
3283     } else {
3284         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3285         if (!is_error(ret)) {
3286             len = ret;
3287             if (fd_trans_host_to_target_data(fd)) {
3288                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3289                                                MIN(msg.msg_iov->iov_len, len));
3290             } else {
3291                 ret = host_to_target_cmsg(msgp, &msg);
3292             }
3293             if (!is_error(ret)) {
3294                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3295                 msgp->msg_flags = tswap32(msg.msg_flags);
3296                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3297                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3298                                     msg.msg_name, msg.msg_namelen);
3299                     if (ret) {
3300                         goto out;
3301                     }
3302                 }
3303 
3304                 ret = len;
3305             }
3306         }
3307     }
3308 
3309 out:
3310     unlock_iovec(vec, target_vec, count, !send);
3311 out2:
3312     return ret;
3313 }
3314 
3315 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3316                                int flags, int send)
3317 {
3318     abi_long ret;
3319     struct target_msghdr *msgp;
3320 
3321     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3322                           msgp,
3323                           target_msg,
3324                           send ? 1 : 0)) {
3325         return -TARGET_EFAULT;
3326     }
3327     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3328     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3329     return ret;
3330 }
3331 
3332 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3333  * so it might not have this *mmsg-specific flag either.
3334  */
3335 #ifndef MSG_WAITFORONE
3336 #define MSG_WAITFORONE 0x10000
3337 #endif
3338 
3339 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3340                                 unsigned int vlen, unsigned int flags,
3341                                 int send)
3342 {
3343     struct target_mmsghdr *mmsgp;
3344     abi_long ret = 0;
3345     int i;
3346 
3347     if (vlen > UIO_MAXIOV) {
3348         vlen = UIO_MAXIOV;
3349     }
3350 
3351     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3352     if (!mmsgp) {
3353         return -TARGET_EFAULT;
3354     }
3355 
3356     for (i = 0; i < vlen; i++) {
3357         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3358         if (is_error(ret)) {
3359             break;
3360         }
3361         mmsgp[i].msg_len = tswap32(ret);
3362         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3363         if (flags & MSG_WAITFORONE) {
3364             flags |= MSG_DONTWAIT;
3365         }
3366     }
3367 
3368     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3369 
3370     /* Return the number of datagrams sent or received if we transferred
3371      * any at all; otherwise return the error.
3372      */
3373     if (i) {
3374         return i;
3375     }
3376     return ret;
3377 }
3378 
3379 /* do_accept4() Must return target values and target errnos. */
3380 static abi_long do_accept4(int fd, abi_ulong target_addr,
3381                            abi_ulong target_addrlen_addr, int flags)
3382 {
3383     socklen_t addrlen, ret_addrlen;
3384     void *addr;
3385     abi_long ret;
3386     int host_flags;
3387 
3388     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3389 
3390     if (target_addr == 0) {
3391         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3392     }
3393 
3394     /* Linux returns EFAULT if the addrlen pointer is invalid */
3395     if (get_user_u32(addrlen, target_addrlen_addr))
3396         return -TARGET_EFAULT;
3397 
3398     if ((int)addrlen < 0) {
3399         return -TARGET_EINVAL;
3400     }
3401 
3402     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3403         return -TARGET_EFAULT;
3404     }
3405 
3406     addr = alloca(addrlen);
3407 
3408     ret_addrlen = addrlen;
3409     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3410     if (!is_error(ret)) {
3411         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3412         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3413             ret = -TARGET_EFAULT;
3414         }
3415     }
3416     return ret;
3417 }
3418 
3419 /* do_getpeername() Must return target values and target errnos. */
3420 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3421                                abi_ulong target_addrlen_addr)
3422 {
3423     socklen_t addrlen, ret_addrlen;
3424     void *addr;
3425     abi_long ret;
3426 
3427     if (get_user_u32(addrlen, target_addrlen_addr))
3428         return -TARGET_EFAULT;
3429 
3430     if ((int)addrlen < 0) {
3431         return -TARGET_EINVAL;
3432     }
3433 
3434     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3435         return -TARGET_EFAULT;
3436     }
3437 
3438     addr = alloca(addrlen);
3439 
3440     ret_addrlen = addrlen;
3441     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3442     if (!is_error(ret)) {
3443         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3444         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3445             ret = -TARGET_EFAULT;
3446         }
3447     }
3448     return ret;
3449 }
3450 
3451 /* do_getsockname() Must return target values and target errnos. */
3452 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3453                                abi_ulong target_addrlen_addr)
3454 {
3455     socklen_t addrlen, ret_addrlen;
3456     void *addr;
3457     abi_long ret;
3458 
3459     if (get_user_u32(addrlen, target_addrlen_addr))
3460         return -TARGET_EFAULT;
3461 
3462     if ((int)addrlen < 0) {
3463         return -TARGET_EINVAL;
3464     }
3465 
3466     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3467         return -TARGET_EFAULT;
3468     }
3469 
3470     addr = alloca(addrlen);
3471 
3472     ret_addrlen = addrlen;
3473     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3474     if (!is_error(ret)) {
3475         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3476         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3477             ret = -TARGET_EFAULT;
3478         }
3479     }
3480     return ret;
3481 }
3482 
3483 /* do_socketpair() Must return target values and target errnos. */
3484 static abi_long do_socketpair(int domain, int type, int protocol,
3485                               abi_ulong target_tab_addr)
3486 {
3487     int tab[2];
3488     abi_long ret;
3489 
3490     target_to_host_sock_type(&type);
3491 
3492     ret = get_errno(socketpair(domain, type, protocol, tab));
3493     if (!is_error(ret)) {
3494         if (put_user_s32(tab[0], target_tab_addr)
3495             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3496             ret = -TARGET_EFAULT;
3497     }
3498     return ret;
3499 }
3500 
3501 /* do_sendto() Must return target values and target errnos. */
3502 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3503                           abi_ulong target_addr, socklen_t addrlen)
3504 {
3505     void *addr;
3506     void *host_msg;
3507     void *copy_msg = NULL;
3508     abi_long ret;
3509 
3510     if ((int)addrlen < 0) {
3511         return -TARGET_EINVAL;
3512     }
3513 
3514     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3515     if (!host_msg)
3516         return -TARGET_EFAULT;
3517     if (fd_trans_target_to_host_data(fd)) {
3518         copy_msg = host_msg;
3519         host_msg = g_malloc(len);
3520         memcpy(host_msg, copy_msg, len);
3521         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3522         if (ret < 0) {
3523             goto fail;
3524         }
3525     }
3526     if (target_addr) {
3527         addr = alloca(addrlen+1);
3528         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3529         if (ret) {
3530             goto fail;
3531         }
3532         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3533     } else {
3534         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3535     }
3536 fail:
3537     if (copy_msg) {
3538         g_free(host_msg);
3539         host_msg = copy_msg;
3540     }
3541     unlock_user(host_msg, msg, 0);
3542     return ret;
3543 }
3544 
3545 /* do_recvfrom() Must return target values and target errnos. */
3546 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3547                             abi_ulong target_addr,
3548                             abi_ulong target_addrlen)
3549 {
3550     socklen_t addrlen, ret_addrlen;
3551     void *addr;
3552     void *host_msg;
3553     abi_long ret;
3554 
3555     if (!msg) {
3556         host_msg = NULL;
3557     } else {
3558         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3559         if (!host_msg) {
3560             return -TARGET_EFAULT;
3561         }
3562     }
3563     if (target_addr) {
3564         if (get_user_u32(addrlen, target_addrlen)) {
3565             ret = -TARGET_EFAULT;
3566             goto fail;
3567         }
3568         if ((int)addrlen < 0) {
3569             ret = -TARGET_EINVAL;
3570             goto fail;
3571         }
3572         addr = alloca(addrlen);
3573         ret_addrlen = addrlen;
3574         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3575                                       addr, &ret_addrlen));
3576     } else {
3577         addr = NULL; /* To keep compiler quiet.  */
3578         addrlen = 0; /* To keep compiler quiet.  */
3579         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3580     }
3581     if (!is_error(ret)) {
3582         if (fd_trans_host_to_target_data(fd)) {
3583             abi_long trans;
3584             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3585             if (is_error(trans)) {
3586                 ret = trans;
3587                 goto fail;
3588             }
3589         }
3590         if (target_addr) {
3591             host_to_target_sockaddr(target_addr, addr,
3592                                     MIN(addrlen, ret_addrlen));
3593             if (put_user_u32(ret_addrlen, target_addrlen)) {
3594                 ret = -TARGET_EFAULT;
3595                 goto fail;
3596             }
3597         }
3598         unlock_user(host_msg, msg, len);
3599     } else {
3600 fail:
3601         unlock_user(host_msg, msg, 0);
3602     }
3603     return ret;
3604 }
3605 
3606 #ifdef TARGET_NR_socketcall
3607 /* do_socketcall() must return target values and target errnos. */
3608 static abi_long do_socketcall(int num, abi_ulong vptr)
3609 {
3610     static const unsigned nargs[] = { /* number of arguments per operation */
3611         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3612         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3613         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3614         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3615         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3616         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3617         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3618         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3619         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3620         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3621         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3622         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3623         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3624         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3625         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3626         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3627         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3628         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3629         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3630         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3631     };
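    /*
     * Example (sketch): a guest bind(fd, addr, addrlen) issued through
     * socketcall arrives here as num == TARGET_SYS_BIND with vptr pointing
     * at an array of nargs[TARGET_SYS_BIND] == 3 abi_long slots in guest
     * memory, { fd, addr, addrlen }, which the loop below copies into a[].
     */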
3632     abi_long a[6]; /* max 6 args */
3633     unsigned i;
3634 
3635     /* check the range of the first argument num */
3636     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3637     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3638         return -TARGET_EINVAL;
3639     }
3640     /* ensure we have space for args */
3641     if (nargs[num] > ARRAY_SIZE(a)) {
3642         return -TARGET_EINVAL;
3643     }
3644     /* collect the arguments in a[] according to nargs[] */
3645     for (i = 0; i < nargs[num]; ++i) {
3646         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3647             return -TARGET_EFAULT;
3648         }
3649     }
3650     /* now that we have the args, invoke the appropriate underlying function */
3651     switch (num) {
3652     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3653         return do_socket(a[0], a[1], a[2]);
3654     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3655         return do_bind(a[0], a[1], a[2]);
3656     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3657         return do_connect(a[0], a[1], a[2]);
3658     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3659         return get_errno(listen(a[0], a[1]));
3660     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3661         return do_accept4(a[0], a[1], a[2], 0);
3662     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3663         return do_getsockname(a[0], a[1], a[2]);
3664     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3665         return do_getpeername(a[0], a[1], a[2]);
3666     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3667         return do_socketpair(a[0], a[1], a[2], a[3]);
3668     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3669         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3670     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3671         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3672     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3673         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3674     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3675         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3676     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3677         return get_errno(shutdown(a[0], a[1]));
3678     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3679         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3680     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3681         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3682     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3683         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3684     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3685         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3686     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3687         return do_accept4(a[0], a[1], a[2], a[3]);
3688     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3689         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3690     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3691         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3692     default:
3693         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3694         return -TARGET_EINVAL;
3695     }
3696 }
3697 #endif
3698 
3699 #define N_SHM_REGIONS	32
3700 
3701 static struct shm_region {
3702     abi_ulong start;
3703     abi_ulong size;
3704     bool in_use;
3705 } shm_regions[N_SHM_REGIONS];
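
/*
 * Bookkeeping for guest shmat() mappings: do_shmat() below records the
 * guest start address and size of each attached segment in a free slot,
 * so that do_shmdt() can later clear the corresponding page flags.  Only
 * N_SHM_REGIONS segments can be tracked at once.
 */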
3706 
3707 #ifndef TARGET_SEMID64_DS
3708 /* asm-generic version of this struct */
3709 struct target_semid64_ds
3710 {
3711   struct target_ipc_perm sem_perm;
3712   abi_ulong sem_otime;
3713 #if TARGET_ABI_BITS == 32
3714   abi_ulong __unused1;
3715 #endif
3716   abi_ulong sem_ctime;
3717 #if TARGET_ABI_BITS == 32
3718   abi_ulong __unused2;
3719 #endif
3720   abi_ulong sem_nsems;
3721   abi_ulong __unused3;
3722   abi_ulong __unused4;
3723 };
3724 #endif
3725 
3726 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3727                                                abi_ulong target_addr)
3728 {
3729     struct target_ipc_perm *target_ip;
3730     struct target_semid64_ds *target_sd;
3731 
3732     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3733         return -TARGET_EFAULT;
3734     target_ip = &(target_sd->sem_perm);
3735     host_ip->__key = tswap32(target_ip->__key);
3736     host_ip->uid = tswap32(target_ip->uid);
3737     host_ip->gid = tswap32(target_ip->gid);
3738     host_ip->cuid = tswap32(target_ip->cuid);
3739     host_ip->cgid = tswap32(target_ip->cgid);
3740 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3741     host_ip->mode = tswap32(target_ip->mode);
3742 #else
3743     host_ip->mode = tswap16(target_ip->mode);
3744 #endif
3745 #if defined(TARGET_PPC)
3746     host_ip->__seq = tswap32(target_ip->__seq);
3747 #else
3748     host_ip->__seq = tswap16(target_ip->__seq);
3749 #endif
3750     unlock_user_struct(target_sd, target_addr, 0);
3751     return 0;
3752 }
3753 
3754 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3755                                                struct ipc_perm *host_ip)
3756 {
3757     struct target_ipc_perm *target_ip;
3758     struct target_semid64_ds *target_sd;
3759 
3760     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3761         return -TARGET_EFAULT;
3762     target_ip = &(target_sd->sem_perm);
3763     target_ip->__key = tswap32(host_ip->__key);
3764     target_ip->uid = tswap32(host_ip->uid);
3765     target_ip->gid = tswap32(host_ip->gid);
3766     target_ip->cuid = tswap32(host_ip->cuid);
3767     target_ip->cgid = tswap32(host_ip->cgid);
3768 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3769     target_ip->mode = tswap32(host_ip->mode);
3770 #else
3771     target_ip->mode = tswap16(host_ip->mode);
3772 #endif
3773 #if defined(TARGET_PPC)
3774     target_ip->__seq = tswap32(host_ip->__seq);
3775 #else
3776     target_ip->__seq = tswap16(host_ip->__seq);
3777 #endif
3778     unlock_user_struct(target_sd, target_addr, 1);
3779     return 0;
3780 }
3781 
3782 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3783                                                abi_ulong target_addr)
3784 {
3785     struct target_semid64_ds *target_sd;
3786 
3787     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3788         return -TARGET_EFAULT;
3789     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3790         return -TARGET_EFAULT;
3791     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3792     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3793     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3794     unlock_user_struct(target_sd, target_addr, 0);
3795     return 0;
3796 }
3797 
3798 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3799                                                struct semid_ds *host_sd)
3800 {
3801     struct target_semid64_ds *target_sd;
3802 
3803     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3804         return -TARGET_EFAULT;
3805     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3806         return -TARGET_EFAULT;
3807     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3808     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3809     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3810     unlock_user_struct(target_sd, target_addr, 1);
3811     return 0;
3812 }
3813 
3814 struct target_seminfo {
3815     int semmap;
3816     int semmni;
3817     int semmns;
3818     int semmnu;
3819     int semmsl;
3820     int semopm;
3821     int semume;
3822     int semusz;
3823     int semvmx;
3824     int semaem;
3825 };
3826 
3827 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3828                                               struct seminfo *host_seminfo)
3829 {
3830     struct target_seminfo *target_seminfo;
3831     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3832         return -TARGET_EFAULT;
3833     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3834     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3835     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3836     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3837     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3838     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3839     __put_user(host_seminfo->semume, &target_seminfo->semume);
3840     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3841     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3842     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3843     unlock_user_struct(target_seminfo, target_addr, 1);
3844     return 0;
3845 }
3846 
3847 union semun {
3848 	int val;
3849 	struct semid_ds *buf;
3850 	unsigned short *array;
3851 	struct seminfo *__buf;
3852 };
3853 
3854 union target_semun {
3855 	int val;
3856 	abi_ulong buf;
3857 	abi_ulong array;
3858 	abi_ulong __buf;
3859 };
3860 
3861 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3862                                                abi_ulong target_addr)
3863 {
3864     int nsems;
3865     unsigned short *array;
3866     union semun semun;
3867     struct semid_ds semid_ds;
3868     int i, ret;
3869 
3870     semun.buf = &semid_ds;
3871 
3872     ret = semctl(semid, 0, IPC_STAT, semun);
3873     if (ret == -1)
3874         return get_errno(ret);
3875 
3876     nsems = semid_ds.sem_nsems;
3877 
3878     *host_array = g_try_new(unsigned short, nsems);
3879     if (!*host_array) {
3880         return -TARGET_ENOMEM;
3881     }
3882     array = lock_user(VERIFY_READ, target_addr,
3883                       nsems*sizeof(unsigned short), 1);
3884     if (!array) {
3885         g_free(*host_array);
3886         return -TARGET_EFAULT;
3887     }
3888 
3889     for (i = 0; i < nsems; i++) {
3890         __get_user((*host_array)[i], &array[i]);
3891     }
3892     unlock_user(array, target_addr, 0);
3893 
3894     return 0;
3895 }
3896 
3897 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3898                                                unsigned short **host_array)
3899 {
3900     int nsems;
3901     unsigned short *array;
3902     union semun semun;
3903     struct semid_ds semid_ds;
3904     int i, ret;
3905 
3906     semun.buf = &semid_ds;
3907 
3908     ret = semctl(semid, 0, IPC_STAT, semun);
3909     if (ret == -1)
3910         return get_errno(ret);
3911 
3912     nsems = semid_ds.sem_nsems;
3913 
3914     array = lock_user(VERIFY_WRITE, target_addr,
3915                       nsems*sizeof(unsigned short), 0);
3916     if (!array)
3917         return -TARGET_EFAULT;
3918 
3919     for (i = 0; i < nsems; i++) {
3920         __put_user((*host_array)[i], &array[i]);
3921     }
3922     g_free(*host_array);
3923     unlock_user(array, target_addr, 1);
3924 
3925     return 0;
3926 }
3927 
3928 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3929                                  abi_ulong target_arg)
3930 {
3931     union target_semun target_su = { .buf = target_arg };
3932     union semun arg;
3933     struct semid_ds dsarg;
3934     unsigned short *array = NULL;
3935     struct seminfo seminfo;
3936     abi_long ret = -TARGET_EINVAL;
3937     abi_long err;
3938     cmd &= 0xff;
3939 
3940     switch( cmd ) {
3941 	case GETVAL:
3942 	case SETVAL:
3943             /* In 64-bit cross-endian situations, we will erroneously pick up
3944              * the wrong half of the union for the "val" element.  To rectify
3945              * this, the entire 8-byte structure is byteswapped, followed by
3946              * a swap of the 4-byte val field.  In other cases, the data is
3947              * already in proper host byte order. */
3948 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3949 		target_su.buf = tswapal(target_su.buf);
3950 		arg.val = tswap32(target_su.val);
3951 	    } else {
3952 		arg.val = target_su.val;
3953 	    }
3954             ret = get_errno(semctl(semid, semnum, cmd, arg));
3955             break;
3956 	case GETALL:
3957 	case SETALL:
3958             err = target_to_host_semarray(semid, &array, target_su.array);
3959             if (err)
3960                 return err;
3961             arg.array = array;
3962             ret = get_errno(semctl(semid, semnum, cmd, arg));
3963             err = host_to_target_semarray(semid, target_su.array, &array);
3964             if (err)
3965                 return err;
3966             break;
3967 	case IPC_STAT:
3968 	case IPC_SET:
3969 	case SEM_STAT:
3970             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3971             if (err)
3972                 return err;
3973             arg.buf = &dsarg;
3974             ret = get_errno(semctl(semid, semnum, cmd, arg));
3975             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3976             if (err)
3977                 return err;
3978             break;
3979 	case IPC_INFO:
3980 	case SEM_INFO:
3981             arg.__buf = &seminfo;
3982             ret = get_errno(semctl(semid, semnum, cmd, arg));
3983             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3984             if (err)
3985                 return err;
3986             break;
3987 	case IPC_RMID:
3988 	case GETPID:
3989 	case GETNCNT:
3990 	case GETZCNT:
3991             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3992             break;
3993     }
3994 
3995     return ret;
3996 }
3997 
3998 struct target_sembuf {
3999     unsigned short sem_num;
4000     short sem_op;
4001     short sem_flg;
4002 };
4003 
4004 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4005                                              abi_ulong target_addr,
4006                                              unsigned nsops)
4007 {
4008     struct target_sembuf *target_sembuf;
4009     int i;
4010 
4011     target_sembuf = lock_user(VERIFY_READ, target_addr,
4012                               nsops*sizeof(struct target_sembuf), 1);
4013     if (!target_sembuf)
4014         return -TARGET_EFAULT;
4015 
4016     for (i = 0; i < nsops; i++) {
4017         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4018         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4019         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4020     }
4021 
4022     unlock_user(target_sembuf, target_addr, 0);
4023 
4024     return 0;
4025 }
4026 
4027 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4028     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4029 
4030 /*
4031  * This macro is required to handle the s390 variants, which pass the
4032  * arguments in a different order than the default.
4033  */
4034 #ifdef __s390x__
4035 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4036   (__nsops), (__timeout), (__sops)
4037 #else
4038 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4039   (__nsops), 0, (__sops), (__timeout)
4040 #endif
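
/*
 * Illustrative expansion (sketch): on most hosts the fallback below becomes
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, timeout)
 * while on s390x it becomes the five-argument form
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, timeout, sops)
 * matching that architecture's sys_ipc calling convention.
 */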
4041 
4042 static inline abi_long do_semtimedop(int semid,
4043                                      abi_long ptr,
4044                                      unsigned nsops,
4045                                      abi_long timeout, bool time64)
4046 {
4047     struct sembuf *sops;
4048     struct timespec ts, *pts = NULL;
4049     abi_long ret;
4050 
4051     if (timeout) {
4052         pts = &ts;
4053         if (time64) {
4054             if (target_to_host_timespec64(pts, timeout)) {
4055                 return -TARGET_EFAULT;
4056             }
4057         } else {
4058             if (target_to_host_timespec(pts, timeout)) {
4059                 return -TARGET_EFAULT;
4060             }
4061         }
4062     }
4063 
4064     if (nsops > TARGET_SEMOPM) {
4065         return -TARGET_E2BIG;
4066     }
4067 
4068     sops = g_new(struct sembuf, nsops);
4069 
4070     if (target_to_host_sembuf(sops, ptr, nsops)) {
4071         g_free(sops);
4072         return -TARGET_EFAULT;
4073     }
4074 
4075     ret = -TARGET_ENOSYS;
4076 #ifdef __NR_semtimedop
4077     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4078 #endif
4079 #ifdef __NR_ipc
4080     if (ret == -TARGET_ENOSYS) {
4081         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4082                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4083     }
4084 #endif
4085     g_free(sops);
4086     return ret;
4087 }
4088 #endif
4089 
4090 struct target_msqid_ds
4091 {
4092     struct target_ipc_perm msg_perm;
4093     abi_ulong msg_stime;
4094 #if TARGET_ABI_BITS == 32
4095     abi_ulong __unused1;
4096 #endif
4097     abi_ulong msg_rtime;
4098 #if TARGET_ABI_BITS == 32
4099     abi_ulong __unused2;
4100 #endif
4101     abi_ulong msg_ctime;
4102 #if TARGET_ABI_BITS == 32
4103     abi_ulong __unused3;
4104 #endif
4105     abi_ulong __msg_cbytes;
4106     abi_ulong msg_qnum;
4107     abi_ulong msg_qbytes;
4108     abi_ulong msg_lspid;
4109     abi_ulong msg_lrpid;
4110     abi_ulong __unused4;
4111     abi_ulong __unused5;
4112 };
4113 
4114 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4115                                                abi_ulong target_addr)
4116 {
4117     struct target_msqid_ds *target_md;
4118 
4119     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4120         return -TARGET_EFAULT;
4121     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4122         return -TARGET_EFAULT;
4123     host_md->msg_stime = tswapal(target_md->msg_stime);
4124     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4125     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4126     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4127     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4128     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4129     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4130     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4131     unlock_user_struct(target_md, target_addr, 0);
4132     return 0;
4133 }
4134 
4135 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4136                                                struct msqid_ds *host_md)
4137 {
4138     struct target_msqid_ds *target_md;
4139 
4140     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4141         return -TARGET_EFAULT;
4142     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4143         return -TARGET_EFAULT;
4144     target_md->msg_stime = tswapal(host_md->msg_stime);
4145     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4146     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4147     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4148     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4149     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4150     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4151     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4152     unlock_user_struct(target_md, target_addr, 1);
4153     return 0;
4154 }
4155 
4156 struct target_msginfo {
4157     int msgpool;
4158     int msgmap;
4159     int msgmax;
4160     int msgmnb;
4161     int msgmni;
4162     int msgssz;
4163     int msgtql;
4164     unsigned short int msgseg;
4165 };
4166 
4167 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4168                                               struct msginfo *host_msginfo)
4169 {
4170     struct target_msginfo *target_msginfo;
4171     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4172         return -TARGET_EFAULT;
4173     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4174     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4175     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4176     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4177     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4178     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4179     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4180     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4181     unlock_user_struct(target_msginfo, target_addr, 1);
4182     return 0;
4183 }
4184 
4185 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4186 {
4187     struct msqid_ds dsarg;
4188     struct msginfo msginfo;
4189     abi_long ret = -TARGET_EINVAL;
4190 
4191     cmd &= 0xff;
4192 
4193     switch (cmd) {
4194     case IPC_STAT:
4195     case IPC_SET:
4196     case MSG_STAT:
4197         if (target_to_host_msqid_ds(&dsarg,ptr))
4198             return -TARGET_EFAULT;
4199         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4200         if (host_to_target_msqid_ds(ptr,&dsarg))
4201             return -TARGET_EFAULT;
4202         break;
4203     case IPC_RMID:
4204         ret = get_errno(msgctl(msgid, cmd, NULL));
4205         break;
4206     case IPC_INFO:
4207     case MSG_INFO:
4208         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4209         if (host_to_target_msginfo(ptr, &msginfo))
4210             return -TARGET_EFAULT;
4211         break;
4212     }
4213 
4214     return ret;
4215 }
4216 
4217 struct target_msgbuf {
4218     abi_long mtype;
4219     char	mtext[1];
4220 };
4221 
4222 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4223                                  ssize_t msgsz, int msgflg)
4224 {
4225     struct target_msgbuf *target_mb;
4226     struct msgbuf *host_mb;
4227     abi_long ret = 0;
4228 
4229     if (msgsz < 0) {
4230         return -TARGET_EINVAL;
4231     }
4232 
4233     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4234         return -TARGET_EFAULT;
4235     host_mb = g_try_malloc(msgsz + sizeof(long));
4236     if (!host_mb) {
4237         unlock_user_struct(target_mb, msgp, 0);
4238         return -TARGET_ENOMEM;
4239     }
4240     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4241     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4242     ret = -TARGET_ENOSYS;
4243 #ifdef __NR_msgsnd
4244     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4245 #endif
4246 #ifdef __NR_ipc
4247     if (ret == -TARGET_ENOSYS) {
4248 #ifdef __s390x__
4249         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4250                                  host_mb));
4251 #else
4252         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4253                                  host_mb, 0));
4254 #endif
4255     }
4256 #endif
4257     g_free(host_mb);
4258     unlock_user_struct(target_mb, msgp, 0);
4259 
4260     return ret;
4261 }
4262 
4263 #ifdef __NR_ipc
4264 #if defined(__sparc__)
4265 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4266 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4267 #elif defined(__s390x__)
4268 /* The s390 sys_ipc variant has only five parameters.  */
4269 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4270     ((long int[]){(long int)__msgp, __msgtyp})
4271 #else
4272 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4273     ((long int[]){(long int)__msgp, __msgtyp}), 0
4274 #endif
4275 #endif
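
/*
 * Illustrative expansion (sketch): with the generic definition the call in
 * do_msgrcv() passes a two-element long array { (long)host_mb, msgtyp }
 * plus a trailing 0, matching the historic sys_ipc "kludge" in which msgp
 * and msgtyp are bundled together; SPARC passes the two values directly,
 * and s390x passes the array without the trailing argument.
 */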
4276 
4277 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4278                                  ssize_t msgsz, abi_long msgtyp,
4279                                  int msgflg)
4280 {
4281     struct target_msgbuf *target_mb;
4282     char *target_mtext;
4283     struct msgbuf *host_mb;
4284     abi_long ret = 0;
4285 
4286     if (msgsz < 0) {
4287         return -TARGET_EINVAL;
4288     }
4289 
4290     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4291         return -TARGET_EFAULT;
4292 
4293     host_mb = g_try_malloc(msgsz + sizeof(long));
4294     if (!host_mb) {
4295         ret = -TARGET_ENOMEM;
4296         goto end;
4297     }
4298     ret = -TARGET_ENOSYS;
4299 #ifdef __NR_msgrcv
4300     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4301 #endif
4302 #ifdef __NR_ipc
4303     if (ret == -TARGET_ENOSYS) {
4304         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4305                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4306     }
4307 #endif
4308 
4309     if (ret > 0) {
4310         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4311         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4312         if (!target_mtext) {
4313             ret = -TARGET_EFAULT;
4314             goto end;
4315         }
4316         memcpy(target_mb->mtext, host_mb->mtext, ret);
4317         unlock_user(target_mtext, target_mtext_addr, ret);
4318     }
4319 
4320     target_mb->mtype = tswapal(host_mb->mtype);
4321 
4322 end:
4323     if (target_mb)
4324         unlock_user_struct(target_mb, msgp, 1);
4325     g_free(host_mb);
4326     return ret;
4327 }
4328 
4329 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4330                                                abi_ulong target_addr)
4331 {
4332     struct target_shmid_ds *target_sd;
4333 
4334     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4335         return -TARGET_EFAULT;
4336     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4337         return -TARGET_EFAULT;
4338     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4339     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4340     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4341     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4342     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4343     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4344     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4345     unlock_user_struct(target_sd, target_addr, 0);
4346     return 0;
4347 }
4348 
4349 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4350                                                struct shmid_ds *host_sd)
4351 {
4352     struct target_shmid_ds *target_sd;
4353 
4354     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4355         return -TARGET_EFAULT;
4356     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4357         return -TARGET_EFAULT;
4358     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4359     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4360     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4361     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4362     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4363     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4364     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4365     unlock_user_struct(target_sd, target_addr, 1);
4366     return 0;
4367 }
4368 
4369 struct  target_shminfo {
4370     abi_ulong shmmax;
4371     abi_ulong shmmin;
4372     abi_ulong shmmni;
4373     abi_ulong shmseg;
4374     abi_ulong shmall;
4375 };
4376 
4377 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4378                                               struct shminfo *host_shminfo)
4379 {
4380     struct target_shminfo *target_shminfo;
4381     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4382         return -TARGET_EFAULT;
4383     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4384     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4385     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4386     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4387     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4388     unlock_user_struct(target_shminfo, target_addr, 1);
4389     return 0;
4390 }
4391 
4392 struct target_shm_info {
4393     int used_ids;
4394     abi_ulong shm_tot;
4395     abi_ulong shm_rss;
4396     abi_ulong shm_swp;
4397     abi_ulong swap_attempts;
4398     abi_ulong swap_successes;
4399 };
4400 
4401 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4402                                                struct shm_info *host_shm_info)
4403 {
4404     struct target_shm_info *target_shm_info;
4405     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4406         return -TARGET_EFAULT;
4407     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4408     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4409     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4410     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4411     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4412     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4413     unlock_user_struct(target_shm_info, target_addr, 1);
4414     return 0;
4415 }
4416 
4417 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4418 {
4419     struct shmid_ds dsarg;
4420     struct shminfo shminfo;
4421     struct shm_info shm_info;
4422     abi_long ret = -TARGET_EINVAL;
4423 
4424     cmd &= 0xff;
4425 
4426     switch(cmd) {
4427     case IPC_STAT:
4428     case IPC_SET:
4429     case SHM_STAT:
4430         if (target_to_host_shmid_ds(&dsarg, buf))
4431             return -TARGET_EFAULT;
4432         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4433         if (host_to_target_shmid_ds(buf, &dsarg))
4434             return -TARGET_EFAULT;
4435         break;
4436     case IPC_INFO:
4437         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4438         if (host_to_target_shminfo(buf, &shminfo))
4439             return -TARGET_EFAULT;
4440         break;
4441     case SHM_INFO:
4442         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4443         if (host_to_target_shm_info(buf, &shm_info))
4444             return -TARGET_EFAULT;
4445         break;
4446     case IPC_RMID:
4447     case SHM_LOCK:
4448     case SHM_UNLOCK:
4449         ret = get_errno(shmctl(shmid, cmd, NULL));
4450         break;
4451     }
4452 
4453     return ret;
4454 }
4455 
4456 #ifndef TARGET_FORCE_SHMLBA
4457 /* For most architectures, SHMLBA is the same as the page size;
4458  * some architectures have larger values, in which case they should
4459  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4460  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4461  * and defining its own value for SHMLBA.
4462  *
4463  * The kernel also permits SHMLBA to be set by the architecture to a
4464  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4465  * this means that addresses are rounded to the large size if
4466  * SHM_RND is set but addresses not aligned to that size are not rejected
4467  * as long as they are at least page-aligned. Since the only architecture
4468  * which uses this is ia64, this code doesn't provide for that oddity.
4469  */
4470 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4471 {
4472     return TARGET_PAGE_SIZE;
4473 }
4474 #endif
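
/*
 * Example (sketch): if target_shmlba() returns 0x4000 and the guest calls
 * shmat() with an address that is not 0x4000-aligned, do_shmat() below
 * returns -TARGET_EINVAL unless SHM_RND was passed, in which case the
 * address is rounded down to a 0x4000 boundary
 * (shmaddr &= ~(shmlba - 1)) before attaching.
 */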
4475 
4476 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4477                                  int shmid, abi_ulong shmaddr, int shmflg)
4478 {
4479     CPUState *cpu = env_cpu(cpu_env);
4480     abi_long raddr;
4481     void *host_raddr;
4482     struct shmid_ds shm_info;
4483     int i, ret;
4484     abi_ulong shmlba;
4485 
4486     /* shmat pointers are always untagged */
4487 
4488     /* find out the length of the shared memory segment */
4489     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4490     if (is_error(ret)) {
4491         /* can't get length, bail out */
4492         return ret;
4493     }
4494 
4495     shmlba = target_shmlba(cpu_env);
4496 
4497     if (shmaddr & (shmlba - 1)) {
4498         if (shmflg & SHM_RND) {
4499             shmaddr &= ~(shmlba - 1);
4500         } else {
4501             return -TARGET_EINVAL;
4502         }
4503     }
4504     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4505         return -TARGET_EINVAL;
4506     }
4507 
4508     mmap_lock();
4509 
4510     /*
4511      * We're mapping shared memory, so ensure we generate code for parallel
4512      * execution and flush old translations.  This will work up to the level
4513      * supported by the host -- anything that requires EXCP_ATOMIC will not
4514      * be atomic with respect to an external process.
4515      */
4516     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4517         cpu->tcg_cflags |= CF_PARALLEL;
4518         tb_flush(cpu);
4519     }
4520 
4521     if (shmaddr)
4522         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4523     else {
4524         abi_ulong mmap_start;
4525 
4526         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4527         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4528 
4529         if (mmap_start == -1) {
4530             errno = ENOMEM;
4531             host_raddr = (void *)-1;
4532         } else
4533             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4534                                shmflg | SHM_REMAP);
4535     }
4536 
4537     if (host_raddr == (void *)-1) {
4538         mmap_unlock();
4539         return get_errno((long)host_raddr);
4540     }
4541     raddr = h2g((unsigned long)host_raddr);
4542 
4543     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4544                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4545                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4546 
4547     for (i = 0; i < N_SHM_REGIONS; i++) {
4548         if (!shm_regions[i].in_use) {
4549             shm_regions[i].in_use = true;
4550             shm_regions[i].start = raddr;
4551             shm_regions[i].size = shm_info.shm_segsz;
4552             break;
4553         }
4554     }
4555 
4556     mmap_unlock();
4557     return raddr;
4558 
4559 }
4560 
4561 static inline abi_long do_shmdt(abi_ulong shmaddr)
4562 {
4563     int i;
4564     abi_long rv;
4565 
4566     /* shmdt pointers are always untagged */
4567 
4568     mmap_lock();
4569 
4570     for (i = 0; i < N_SHM_REGIONS; ++i) {
4571         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4572             shm_regions[i].in_use = false;
4573             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4574             break;
4575         }
4576     }
4577     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4578 
4579     mmap_unlock();
4580 
4581     return rv;
4582 }
4583 
4584 #ifdef TARGET_NR_ipc
4585 /* ??? This only works with linear mappings.  */
4586 /* do_ipc() must return target values and target errnos. */
4587 static abi_long do_ipc(CPUArchState *cpu_env,
4588                        unsigned int call, abi_long first,
4589                        abi_long second, abi_long third,
4590                        abi_long ptr, abi_long fifth)
4591 {
4592     int version;
4593     abi_long ret = 0;
4594 
4595     version = call >> 16;
4596     call &= 0xffff;
4597 
4598     switch (call) {
4599     case IPCOP_semop:
4600         ret = do_semtimedop(first, ptr, second, 0, false);
4601         break;
4602     case IPCOP_semtimedop:
4603     /*
4604      * The s390 sys_ipc variant has only five parameters instead of six
4605      * (as in the default variant); the only difference is the handling of
4606      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4607      * to a struct timespec while the generic variant uses the fifth parameter.
4608      */
4609 #if defined(TARGET_S390X)
4610         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4611 #else
4612         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4613 #endif
4614         break;
4615 
4616     case IPCOP_semget:
4617         ret = get_errno(semget(first, second, third));
4618         break;
4619 
4620     case IPCOP_semctl: {
4621         /* The semun argument to semctl is passed by value, so dereference the
4622          * ptr argument. */
4623         abi_ulong atptr;
4624         get_user_ual(atptr, ptr);
4625         ret = do_semctl(first, second, third, atptr);
4626         break;
4627     }
4628 
4629     case IPCOP_msgget:
4630         ret = get_errno(msgget(first, second));
4631         break;
4632 
4633     case IPCOP_msgsnd:
4634         ret = do_msgsnd(first, ptr, second, third);
4635         break;
4636 
4637     case IPCOP_msgctl:
4638         ret = do_msgctl(first, second, ptr);
4639         break;
4640 
4641     case IPCOP_msgrcv:
4642         switch (version) {
4643         case 0:
4644             {
4645                 struct target_ipc_kludge {
4646                     abi_long msgp;
4647                     abi_long msgtyp;
4648                 } *tmp;
4649 
4650                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4651                     ret = -TARGET_EFAULT;
4652                     break;
4653                 }
4654 
4655                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4656 
4657                 unlock_user_struct(tmp, ptr, 0);
4658                 break;
4659             }
4660         default:
4661             ret = do_msgrcv(first, ptr, second, fifth, third);
4662         }
4663         break;
4664 
4665     case IPCOP_shmat:
4666         switch (version) {
4667         default:
4668         {
4669             abi_ulong raddr;
4670             raddr = do_shmat(cpu_env, first, ptr, second);
4671             if (is_error(raddr))
4672                 return get_errno(raddr);
4673             if (put_user_ual(raddr, third))
4674                 return -TARGET_EFAULT;
4675             break;
4676         }
4677         case 1:
4678             ret = -TARGET_EINVAL;
4679             break;
4680         }
4681         break;
4682     case IPCOP_shmdt:
4683         ret = do_shmdt(ptr);
4684         break;
4685 
4686     case IPCOP_shmget:
4687         /* IPC_* flag values are the same on all linux platforms */
4688         ret = get_errno(shmget(first, second, third));
4689         break;
4690 
4691     /* IPC_* and SHM_* command values are the same on all linux platforms */
4692     case IPCOP_shmctl:
4693         ret = do_shmctl(first, second, ptr);
4694         break;
4695     default:
4696         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4697                       call, version);
4698         ret = -TARGET_ENOSYS;
4699         break;
4700     }
4701     return ret;
4702 }
4703 #endif
4704 
4705 /* kernel structure types definitions */
4706 
4707 #define STRUCT(name, ...) STRUCT_ ## name,
4708 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4709 enum {
4710 #include "syscall_types.h"
4711 STRUCT_MAX
4712 };
4713 #undef STRUCT
4714 #undef STRUCT_SPECIAL
4715 
4716 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4717 #define STRUCT_SPECIAL(name)
4718 #include "syscall_types.h"
4719 #undef STRUCT
4720 #undef STRUCT_SPECIAL
4721 
4722 #define MAX_STRUCT_SIZE 4096
4723 
4724 #ifdef CONFIG_FIEMAP
4725 /* So fiemap access checks don't overflow on 32 bit systems.
4726  * This is very slightly smaller than the limit imposed by
4727  * the underlying kernel.
4728  */
4729 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4730                             / sizeof(struct fiemap_extent))
4731 
4732 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4733                                        int fd, int cmd, abi_long arg)
4734 {
4735     /* The parameter for this ioctl is a struct fiemap followed
4736      * by an array of struct fiemap_extent whose size is set
4737      * in fiemap->fm_extent_count. The array is filled in by the
4738      * ioctl.
4739      */
4740     int target_size_in, target_size_out;
4741     struct fiemap *fm;
4742     const argtype *arg_type = ie->arg_type;
4743     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4744     void *argptr, *p;
4745     abi_long ret;
4746     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4747     uint32_t outbufsz;
4748     int free_fm = 0;
4749 
4750     assert(arg_type[0] == TYPE_PTR);
4751     assert(ie->access == IOC_RW);
4752     arg_type++;
4753     target_size_in = thunk_type_size(arg_type, 0);
4754     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4755     if (!argptr) {
4756         return -TARGET_EFAULT;
4757     }
4758     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4759     unlock_user(argptr, arg, 0);
4760     fm = (struct fiemap *)buf_temp;
4761     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4762         return -TARGET_EINVAL;
4763     }
4764 
4765     outbufsz = sizeof (*fm) +
4766         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4767 
4768     if (outbufsz > MAX_STRUCT_SIZE) {
4769         /* We can't fit all the extents into the fixed size buffer.
4770          * Allocate one that is large enough and use it instead.
4771          */
4772         fm = g_try_malloc(outbufsz);
4773         if (!fm) {
4774             return -TARGET_ENOMEM;
4775         }
4776         memcpy(fm, buf_temp, sizeof(struct fiemap));
4777         free_fm = 1;
4778     }
4779     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4780     if (!is_error(ret)) {
4781         target_size_out = target_size_in;
4782         /* An extent_count of 0 means we were only counting the extents
4783          * so there are no structs to copy
4784          */
4785         if (fm->fm_extent_count != 0) {
4786             target_size_out += fm->fm_mapped_extents * extent_size;
4787         }
4788         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4789         if (!argptr) {
4790             ret = -TARGET_EFAULT;
4791         } else {
4792             /* Convert the struct fiemap */
4793             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4794             if (fm->fm_extent_count != 0) {
4795                 p = argptr + target_size_in;
4796                 /* ...and then all the struct fiemap_extents */
4797                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4798                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4799                                   THUNK_TARGET);
4800                     p += extent_size;
4801                 }
4802             }
4803             unlock_user(argptr, arg, target_size_out);
4804         }
4805     }
4806     if (free_fm) {
4807         g_free(fm);
4808     }
4809     return ret;
4810 }
4811 #endif
4812 
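/*
 * Handler for ioctls whose argument is a struct ifconf (SIOCGIFCONF):
 * convert the guest struct ifconf and, when ifc_buf is non-NULL, its array
 * of struct ifreq into host format, issue the ioctl, then convert the
 * returned length and ifreq entries back to the guest layout.
 */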
4813 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4814                                 int fd, int cmd, abi_long arg)
4815 {
4816     const argtype *arg_type = ie->arg_type;
4817     int target_size;
4818     void *argptr;
4819     int ret;
4820     struct ifconf *host_ifconf;
4821     uint32_t outbufsz;
4822     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4823     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4824     int target_ifreq_size;
4825     int nb_ifreq;
4826     int free_buf = 0;
4827     int i;
4828     int target_ifc_len;
4829     abi_long target_ifc_buf;
4830     int host_ifc_len;
4831     char *host_ifc_buf;
4832 
4833     assert(arg_type[0] == TYPE_PTR);
4834     assert(ie->access == IOC_RW);
4835 
4836     arg_type++;
4837     target_size = thunk_type_size(arg_type, 0);
4838 
4839     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4840     if (!argptr)
4841         return -TARGET_EFAULT;
4842     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4843     unlock_user(argptr, arg, 0);
4844 
4845     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4846     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4847     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4848 
4849     if (target_ifc_buf != 0) {
4850         target_ifc_len = host_ifconf->ifc_len;
4851         nb_ifreq = target_ifc_len / target_ifreq_size;
4852         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4853 
4854         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4855         if (outbufsz > MAX_STRUCT_SIZE) {
4856             /*
4857              * We can't fit all the ifreq entries into the fixed size buffer.
4858              * Allocate one that is large enough and use it instead.
4859              */
4860             host_ifconf = g_try_malloc(outbufsz);
4861             if (!host_ifconf) {
4862                 return -TARGET_ENOMEM;
4863             }
4864             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4865             free_buf = 1;
4866         }
4867         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4868 
4869         host_ifconf->ifc_len = host_ifc_len;
4870     } else {
4871         host_ifc_buf = NULL;
4872     }
4873     host_ifconf->ifc_buf = host_ifc_buf;
4874 
4875     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4876     if (!is_error(ret)) {
4877         /* convert host ifc_len to target ifc_len */
4878 
4879         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4880         target_ifc_len = nb_ifreq * target_ifreq_size;
4881         host_ifconf->ifc_len = target_ifc_len;
4882 
4883         /* restore target ifc_buf */
4884 
4885         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4886 
4887         /* copy struct ifconf to target user */
4888 
4889         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4890         if (!argptr)
4891             return -TARGET_EFAULT;
4892         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4893         unlock_user(argptr, arg, target_size);
4894 
4895         if (target_ifc_buf != 0) {
4896             /* copy ifreq[] to target user */
4897             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4898             for (i = 0; i < nb_ifreq ; i++) {
4899                 thunk_convert(argptr + i * target_ifreq_size,
4900                               host_ifc_buf + i * sizeof(struct ifreq),
4901                               ifreq_arg_type, THUNK_TARGET);
4902             }
4903             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4904         }
4905     }
4906 
4907     if (free_buf) {
4908         g_free(host_ifconf);
4909     }
4910 
4911     return ret;
4912 }
4913 
4914 #if defined(CONFIG_USBFS)
4915 #if HOST_LONG_BITS > 64
4916 #error USBDEVFS thunks do not support >64 bit hosts yet.
4917 #endif
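/*
 * Host-side bookkeeping for one guest-submitted USB URB: the guest addresses
 * of the urb and of its data buffer, the locked host pointer for that buffer,
 * and the host copy of the urb that is actually handed to the kernel.
 */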
4918 struct live_urb {
4919     uint64_t target_urb_adr;
4920     uint64_t target_buf_adr;
4921     char *target_buf_ptr;
4922     struct usbdevfs_urb host_urb;
4923 };
4924 
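/*
 * In-flight URBs are tracked in a hash table keyed on target_urb_adr (as the
 * first field of struct live_urb it is what g_int64_hash sees), so a guest
 * urb address can be mapped back to its struct live_urb, e.g. by
 * do_ioctl_usbdevfs_discardurb().
 */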
4925 static GHashTable *usbdevfs_urb_hashtable(void)
4926 {
4927     static GHashTable *urb_hashtable;
4928 
4929     if (!urb_hashtable) {
4930         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4931     }
4932     return urb_hashtable;
4933 }
4934 
4935 static void urb_hashtable_insert(struct live_urb *urb)
4936 {
4937     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4938     g_hash_table_insert(urb_hashtable, urb, urb);
4939 }
4940 
4941 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4942 {
4943     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4944     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4945 }
4946 
4947 static void urb_hashtable_remove(struct live_urb *urb)
4948 {
4949     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4950     g_hash_table_remove(urb_hashtable, urb);
4951 }
4952 
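/*
 * Reap a completed URB: the kernel hands back the pointer we submitted,
 * which is &lurb->host_urb.  Recover the enclosing struct live_urb, release
 * the guest data buffer, copy the completed urb back to the guest, and store
 * the guest urb address into the reap argument.
 */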
4953 static abi_long
4954 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4955                           int fd, int cmd, abi_long arg)
4956 {
4957     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4958     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4959     struct live_urb *lurb;
4960     void *argptr;
4961     uint64_t hurb;
4962     int target_size;
4963     uintptr_t target_urb_adr;
4964     abi_long ret;
4965 
4966     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4967 
4968     memset(buf_temp, 0, sizeof(uint64_t));
4969     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4970     if (is_error(ret)) {
4971         return ret;
4972     }
4973 
4974     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4975     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4976     if (!lurb->target_urb_adr) {
4977         return -TARGET_EFAULT;
4978     }
4979     urb_hashtable_remove(lurb);
4980     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4981         lurb->host_urb.buffer_length);
4982     lurb->target_buf_ptr = NULL;
4983 
4984     /* restore the guest buffer pointer */
4985     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4986 
4987     /* update the guest urb struct */
4988     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4989     if (!argptr) {
4990         g_free(lurb);
4991         return -TARGET_EFAULT;
4992     }
4993     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4994     unlock_user(argptr, lurb->target_urb_adr, target_size);
4995 
4996     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4997     /* write back the urb handle */
4998     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4999     if (!argptr) {
5000         g_free(lurb);
5001         return -TARGET_EFAULT;
5002     }
5003 
5004     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5005     target_urb_adr = lurb->target_urb_adr;
5006     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5007     unlock_user(argptr, arg, target_size);
5008 
5009     g_free(lurb);
5010     return ret;
5011 }
5012 
5013 static abi_long
5014 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5015                              uint8_t *buf_temp __attribute__((unused)),
5016                              int fd, int cmd, abi_long arg)
5017 {
5018     struct live_urb *lurb;
5019 
5020     /* map target address back to host URB with metadata. */
5021     lurb = urb_hashtable_lookup(arg);
5022     if (!lurb) {
5023         return -TARGET_EFAULT;
5024     }
5025     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5026 }
5027 
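/*
 * Submit a URB on behalf of the guest: build a host copy in a freshly
 * allocated struct live_urb, lock the guest data buffer for the duration of
 * the transfer, and remember the mapping so the urb can later be reaped or
 * discarded.
 */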
5028 static abi_long
5029 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5030                             int fd, int cmd, abi_long arg)
5031 {
5032     const argtype *arg_type = ie->arg_type;
5033     int target_size;
5034     abi_long ret;
5035     void *argptr;
5036     int rw_dir;
5037     struct live_urb *lurb;
5038 
5039     /*
5040      * Each submitted URB needs to map to a unique ID for the
5041      * kernel, and that unique ID needs to be a pointer to
5042      * host memory.  Hence, we need to malloc for each URB.
5043      * Isochronous transfers have a variable length struct.
5044      */
5045     arg_type++;
5046     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5047 
5048     /* construct host copy of urb and metadata */
5049     lurb = g_try_new0(struct live_urb, 1);
5050     if (!lurb) {
5051         return -TARGET_ENOMEM;
5052     }
5053 
5054     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5055     if (!argptr) {
5056         g_free(lurb);
5057         return -TARGET_EFAULT;
5058     }
5059     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5060     unlock_user(argptr, arg, 0);
5061 
5062     lurb->target_urb_adr = arg;
5063     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5064 
5065     /* buffer space used depends on endpoint type so lock the entire buffer */
5066     /* control type urbs should check the buffer contents for true direction */
5067     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5068     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5069         lurb->host_urb.buffer_length, 1);
5070     if (lurb->target_buf_ptr == NULL) {
5071         g_free(lurb);
5072         return -TARGET_EFAULT;
5073     }
5074 
5075     /* update buffer pointer in host copy */
5076     lurb->host_urb.buffer = lurb->target_buf_ptr;
5077 
5078     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5079     if (is_error(ret)) {
5080         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5081         g_free(lurb);
5082     } else {
5083         urb_hashtable_insert(lurb);
5084     }
5085 
5086     return ret;
5087 }
5088 #endif /* CONFIG_USBFS */
5089 
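/*
 * Device-mapper ioctls: struct dm_ioctl is followed by a variable-sized
 * payload whose layout depends on the command.  Copy the header and any
 * input payload into a host buffer, run the ioctl, then convert the
 * command-specific output payload back to the guest.
 */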
5090 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5091                             int cmd, abi_long arg)
5092 {
5093     void *argptr;
5094     struct dm_ioctl *host_dm;
5095     abi_long guest_data;
5096     uint32_t guest_data_size;
5097     int target_size;
5098     const argtype *arg_type = ie->arg_type;
5099     abi_long ret;
5100     void *big_buf = NULL;
5101     char *host_data;
5102 
5103     arg_type++;
5104     target_size = thunk_type_size(arg_type, 0);
5105     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5106     if (!argptr) {
5107         ret = -TARGET_EFAULT;
5108         goto out;
5109     }
5110     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5111     unlock_user(argptr, arg, 0);
5112 
5113     /* buf_temp is too small, so fetch things into a bigger buffer */
5114     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5115     memcpy(big_buf, buf_temp, target_size);
5116     buf_temp = big_buf;
5117     host_dm = big_buf;
5118 
5119     guest_data = arg + host_dm->data_start;
5120     if ((guest_data - arg) < 0) {
5121         ret = -TARGET_EINVAL;
5122         goto out;
5123     }
5124     guest_data_size = host_dm->data_size - host_dm->data_start;
5125     host_data = (char*)host_dm + host_dm->data_start;
5126 
5127     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5128     if (!argptr) {
5129         ret = -TARGET_EFAULT;
5130         goto out;
5131     }
5132 
5133     switch (ie->host_cmd) {
5134     case DM_REMOVE_ALL:
5135     case DM_LIST_DEVICES:
5136     case DM_DEV_CREATE:
5137     case DM_DEV_REMOVE:
5138     case DM_DEV_SUSPEND:
5139     case DM_DEV_STATUS:
5140     case DM_DEV_WAIT:
5141     case DM_TABLE_STATUS:
5142     case DM_TABLE_CLEAR:
5143     case DM_TABLE_DEPS:
5144     case DM_LIST_VERSIONS:
5145         /* no input data */
5146         break;
5147     case DM_DEV_RENAME:
5148     case DM_DEV_SET_GEOMETRY:
5149         /* data contains only strings */
5150         memcpy(host_data, argptr, guest_data_size);
5151         break;
5152     case DM_TARGET_MSG:
5153         memcpy(host_data, argptr, guest_data_size);
5154         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5155         break;
5156     case DM_TABLE_LOAD:
5157     {
5158         void *gspec = argptr;
5159         void *cur_data = host_data;
5160         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5161         int spec_size = thunk_type_size(arg_type, 0);
5162         int i;
5163 
5164         for (i = 0; i < host_dm->target_count; i++) {
5165             struct dm_target_spec *spec = cur_data;
5166             uint32_t next;
5167             int slen;
5168 
5169             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5170             slen = strlen((char*)gspec + spec_size) + 1;
5171             next = spec->next;
5172             spec->next = sizeof(*spec) + slen;
5173             strcpy((char*)&spec[1], gspec + spec_size);
5174             gspec += next;
5175             cur_data += spec->next;
5176         }
5177         break;
5178     }
5179     default:
5180         ret = -TARGET_EINVAL;
5181         unlock_user(argptr, guest_data, 0);
5182         goto out;
5183     }
5184     unlock_user(argptr, guest_data, 0);
5185 
5186     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5187     if (!is_error(ret)) {
5188         guest_data = arg + host_dm->data_start;
5189         guest_data_size = host_dm->data_size - host_dm->data_start;
5190         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5191         switch (ie->host_cmd) {
5192         case DM_REMOVE_ALL:
5193         case DM_DEV_CREATE:
5194         case DM_DEV_REMOVE:
5195         case DM_DEV_RENAME:
5196         case DM_DEV_SUSPEND:
5197         case DM_DEV_STATUS:
5198         case DM_TABLE_LOAD:
5199         case DM_TABLE_CLEAR:
5200         case DM_TARGET_MSG:
5201         case DM_DEV_SET_GEOMETRY:
5202             /* no return data */
5203             break;
5204         case DM_LIST_DEVICES:
5205         {
5206             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5207             uint32_t remaining_data = guest_data_size;
5208             void *cur_data = argptr;
5209             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5210             int nl_size = 12; /* can't use thunk_size due to alignment */
5211 
5212             while (1) {
5213                 uint32_t next = nl->next;
5214                 if (next) {
5215                     nl->next = nl_size + (strlen(nl->name) + 1);
5216                 }
5217                 if (remaining_data < nl->next) {
5218                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5219                     break;
5220                 }
5221                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5222                 strcpy(cur_data + nl_size, nl->name);
5223                 cur_data += nl->next;
5224                 remaining_data -= nl->next;
5225                 if (!next) {
5226                     break;
5227                 }
5228                 nl = (void*)nl + next;
5229             }
5230             break;
5231         }
5232         case DM_DEV_WAIT:
5233         case DM_TABLE_STATUS:
5234         {
5235             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5236             void *cur_data = argptr;
5237             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5238             int spec_size = thunk_type_size(arg_type, 0);
5239             int i;
5240 
5241             for (i = 0; i < host_dm->target_count; i++) {
5242                 uint32_t next = spec->next;
5243                 int slen = strlen((char*)&spec[1]) + 1;
5244                 spec->next = (cur_data - argptr) + spec_size + slen;
5245                 if (guest_data_size < spec->next) {
5246                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5247                     break;
5248                 }
5249                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5250                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5251                 cur_data = argptr + spec->next;
5252                 spec = (void*)host_dm + host_dm->data_start + next;
5253             }
5254             break;
5255         }
5256         case DM_TABLE_DEPS:
5257         {
5258             void *hdata = (void*)host_dm + host_dm->data_start;
5259             int count = *(uint32_t*)hdata;
5260             uint64_t *hdev = hdata + 8;
5261             uint64_t *gdev = argptr + 8;
5262             int i;
5263 
5264             *(uint32_t*)argptr = tswap32(count);
5265             for (i = 0; i < count; i++) {
5266                 *gdev = tswap64(*hdev);
5267                 gdev++;
5268                 hdev++;
5269             }
5270             break;
5271         }
5272         case DM_LIST_VERSIONS:
5273         {
5274             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5275             uint32_t remaining_data = guest_data_size;
5276             void *cur_data = argptr;
5277             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5278             int vers_size = thunk_type_size(arg_type, 0);
5279 
5280             while (1) {
5281                 uint32_t next = vers->next;
5282                 if (next) {
5283                     vers->next = vers_size + (strlen(vers->name) + 1);
5284                 }
5285                 if (remaining_data < vers->next) {
5286                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5287                     break;
5288                 }
5289                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5290                 strcpy(cur_data + vers_size, vers->name);
5291                 cur_data += vers->next;
5292                 remaining_data -= vers->next;
5293                 if (!next) {
5294                     break;
5295                 }
5296                 vers = (void*)vers + next;
5297             }
5298             break;
5299         }
5300         default:
5301             unlock_user(argptr, guest_data, 0);
5302             ret = -TARGET_EINVAL;
5303             goto out;
5304         }
5305         unlock_user(argptr, guest_data, guest_data_size);
5306 
5307         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5308         if (!argptr) {
5309             ret = -TARGET_EFAULT;
5310             goto out;
5311         }
5312         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5313         unlock_user(argptr, arg, target_size);
5314     }
5315 out:
5316     g_free(big_buf);
5317     return ret;
5318 }
5319 
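/*
 * BLKPG: struct blkpg_ioctl_arg carries a data pointer to a
 * struct blkpg_partition, so both levels must be converted before the host
 * ioctl can be issued.
 */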
5320 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5321                                int cmd, abi_long arg)
5322 {
5323     void *argptr;
5324     int target_size;
5325     const argtype *arg_type = ie->arg_type;
5326     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5327     abi_long ret;
5328 
5329     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5330     struct blkpg_partition host_part;
5331 
5332     /* Read and convert blkpg */
5333     arg_type++;
5334     target_size = thunk_type_size(arg_type, 0);
5335     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5336     if (!argptr) {
5337         ret = -TARGET_EFAULT;
5338         goto out;
5339     }
5340     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5341     unlock_user(argptr, arg, 0);
5342 
5343     switch (host_blkpg->op) {
5344     case BLKPG_ADD_PARTITION:
5345     case BLKPG_DEL_PARTITION:
5346         /* payload is struct blkpg_partition */
5347         break;
5348     default:
5349         /* Unknown opcode */
5350         ret = -TARGET_EINVAL;
5351         goto out;
5352     }
5353 
5354     /* Read and convert blkpg->data */
5355     arg = (abi_long)(uintptr_t)host_blkpg->data;
5356     target_size = thunk_type_size(part_arg_type, 0);
5357     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5358     if (!argptr) {
5359         ret = -TARGET_EFAULT;
5360         goto out;
5361     }
5362     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5363     unlock_user(argptr, arg, 0);
5364 
5365     /* Swizzle the data pointer to our local copy and call! */
5366     host_blkpg->data = &host_part;
5367     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5368 
5369 out:
5370     return ret;
5371 }
5372 
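/*
 * Routing table ioctls taking a struct rtentry: most fields can be
 * thunk-converted directly, but rt_dev is a pointer to a device name string
 * in guest memory and has to be locked separately for the duration of the
 * call.
 */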
5373 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5374                                 int fd, int cmd, abi_long arg)
5375 {
5376     const argtype *arg_type = ie->arg_type;
5377     const StructEntry *se;
5378     const argtype *field_types;
5379     const int *dst_offsets, *src_offsets;
5380     int target_size;
5381     void *argptr;
5382     abi_ulong *target_rt_dev_ptr = NULL;
5383     unsigned long *host_rt_dev_ptr = NULL;
5384     abi_long ret;
5385     int i;
5386 
5387     assert(ie->access == IOC_W);
5388     assert(*arg_type == TYPE_PTR);
5389     arg_type++;
5390     assert(*arg_type == TYPE_STRUCT);
5391     target_size = thunk_type_size(arg_type, 0);
5392     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5393     if (!argptr) {
5394         return -TARGET_EFAULT;
5395     }
5396     arg_type++;
5397     assert(*arg_type == (int)STRUCT_rtentry);
5398     se = struct_entries + *arg_type++;
5399     assert(se->convert[0] == NULL);
5400     /* convert struct here to be able to catch rt_dev string */
5401     field_types = se->field_types;
5402     dst_offsets = se->field_offsets[THUNK_HOST];
5403     src_offsets = se->field_offsets[THUNK_TARGET];
5404     for (i = 0; i < se->nb_fields; i++) {
5405         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5406             assert(*field_types == TYPE_PTRVOID);
5407             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5408             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5409             if (*target_rt_dev_ptr != 0) {
5410                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5411                                                   tswapal(*target_rt_dev_ptr));
5412                 if (!*host_rt_dev_ptr) {
5413                     unlock_user(argptr, arg, 0);
5414                     return -TARGET_EFAULT;
5415                 }
5416             } else {
5417                 *host_rt_dev_ptr = 0;
5418             }
5419             field_types++;
5420             continue;
5421         }
5422         field_types = thunk_convert(buf_temp + dst_offsets[i],
5423                                     argptr + src_offsets[i],
5424                                     field_types, THUNK_HOST);
5425     }
5426     unlock_user(argptr, arg, 0);
5427 
5428     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5429 
5430     assert(host_rt_dev_ptr != NULL);
5431     assert(target_rt_dev_ptr != NULL);
5432     if (*host_rt_dev_ptr != 0) {
5433         unlock_user((void *)*host_rt_dev_ptr,
5434                     *target_rt_dev_ptr, 0);
5435     }
5436     return ret;
5437 }
5438 
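/*
 * KDSIGACCEPT carries a signal number, which must be translated to the host
 * numbering before the ioctl is issued.
 */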
5439 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5440                                      int fd, int cmd, abi_long arg)
5441 {
5442     int sig = target_to_host_signal(arg);
5443     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5444 }
5445 
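/*
 * SIOCGSTAMP: fetch the last-packet timestamp from the host and copy it to
 * the guest either as an old-style timeval or as a 64-bit timeval, depending
 * on which command variant the guest used.
 */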
5446 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5447                                     int fd, int cmd, abi_long arg)
5448 {
5449     struct timeval tv;
5450     abi_long ret;
5451 
5452     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5453     if (is_error(ret)) {
5454         return ret;
5455     }
5456 
5457     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5458         if (copy_to_user_timeval(arg, &tv)) {
5459             return -TARGET_EFAULT;
5460         }
5461     } else {
5462         if (copy_to_user_timeval64(arg, &tv)) {
5463             return -TARGET_EFAULT;
5464         }
5465     }
5466 
5467     return ret;
5468 }
5469 
5470 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5471                                       int fd, int cmd, abi_long arg)
5472 {
5473     struct timespec ts;
5474     abi_long ret;
5475 
5476     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5477     if (is_error(ret)) {
5478         return ret;
5479     }
5480 
5481     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5482         if (host_to_target_timespec(arg, &ts)) {
5483             return -TARGET_EFAULT;
5484         }
5485     } else {
5486         if (host_to_target_timespec64(arg, &ts)) {
5487             return -TARGET_EFAULT;
5488         }
5489     }
5490 
5491     return ret;
5492 }
5493 
5494 #ifdef TIOCGPTPEER
5495 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5496                                      int fd, int cmd, abi_long arg)
5497 {
5498     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5499     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5500 }
5501 #endif
5502 
5503 #ifdef HAVE_DRM_H
5504 
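/*
 * DRM_IOCTL_VERSION helpers: the guest supplies buffers for the name, date
 * and desc strings.  These are locked while the host fills in struct
 * drm_version, and the version numbers, lengths and strings are copied back
 * to the guest afterwards.
 */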
5505 static void unlock_drm_version(struct drm_version *host_ver,
5506                                struct target_drm_version *target_ver,
5507                                bool copy)
5508 {
5509     unlock_user(host_ver->name, target_ver->name,
5510                                 copy ? host_ver->name_len : 0);
5511     unlock_user(host_ver->date, target_ver->date,
5512                                 copy ? host_ver->date_len : 0);
5513     unlock_user(host_ver->desc, target_ver->desc,
5514                                 copy ? host_ver->desc_len : 0);
5515 }
5516 
5517 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5518                                           struct target_drm_version *target_ver)
5519 {
5520     memset(host_ver, 0, sizeof(*host_ver));
5521 
5522     __get_user(host_ver->name_len, &target_ver->name_len);
5523     if (host_ver->name_len) {
5524         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5525                                    target_ver->name_len, 0);
5526         if (!host_ver->name) {
5527             return -EFAULT;
5528         }
5529     }
5530 
5531     __get_user(host_ver->date_len, &target_ver->date_len);
5532     if (host_ver->date_len) {
5533         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5534                                    target_ver->date_len, 0);
5535         if (!host_ver->date) {
5536             goto err;
5537         }
5538     }
5539 
5540     __get_user(host_ver->desc_len, &target_ver->desc_len);
5541     if (host_ver->desc_len) {
5542         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5543                                    target_ver->desc_len, 0);
5544         if (!host_ver->desc) {
5545             goto err;
5546         }
5547     }
5548 
5549     return 0;
5550 err:
5551     unlock_drm_version(host_ver, target_ver, false);
5552     return -EFAULT;
5553 }
5554 
5555 static inline void host_to_target_drmversion(
5556                                           struct target_drm_version *target_ver,
5557                                           struct drm_version *host_ver)
5558 {
5559     __put_user(host_ver->version_major, &target_ver->version_major);
5560     __put_user(host_ver->version_minor, &target_ver->version_minor);
5561     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5562     __put_user(host_ver->name_len, &target_ver->name_len);
5563     __put_user(host_ver->date_len, &target_ver->date_len);
5564     __put_user(host_ver->desc_len, &target_ver->desc_len);
5565     unlock_drm_version(host_ver, target_ver, true);
5566 }
5567 
5568 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5569                              int fd, int cmd, abi_long arg)
5570 {
5571     struct drm_version *ver;
5572     struct target_drm_version *target_ver;
5573     abi_long ret;
5574 
5575     switch (ie->host_cmd) {
5576     case DRM_IOCTL_VERSION:
5577         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5578             return -TARGET_EFAULT;
5579         }
5580         ver = (struct drm_version *)buf_temp;
5581         ret = target_to_host_drmversion(ver, target_ver);
5582         if (!is_error(ret)) {
5583             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5584             if (is_error(ret)) {
5585                 unlock_drm_version(ver, target_ver, false);
5586             } else {
5587                 host_to_target_drmversion(target_ver, ver);
5588             }
5589         }
5590         unlock_user_struct(target_ver, arg, 0);
5591         return ret;
5592     }
5593     return -TARGET_ENOSYS;
5594 }
5595 
5596 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5597                                            struct drm_i915_getparam *gparam,
5598                                            int fd, abi_long arg)
5599 {
5600     abi_long ret;
5601     int value;
5602     struct target_drm_i915_getparam *target_gparam;
5603 
5604     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5605         return -TARGET_EFAULT;
5606     }
5607 
5608     __get_user(gparam->param, &target_gparam->param);
5609     gparam->value = &value;
5610     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5611     put_user_s32(value, target_gparam->value);
5612 
5613     unlock_user_struct(target_gparam, arg, 0);
5614     return ret;
5615 }
5616 
5617 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5618                                   int fd, int cmd, abi_long arg)
5619 {
5620     switch (ie->host_cmd) {
5621     case DRM_IOCTL_I915_GETPARAM:
5622         return do_ioctl_drm_i915_getparam(ie,
5623                                           (struct drm_i915_getparam *)buf_temp,
5624                                           fd, arg);
5625     default:
5626         return -TARGET_ENOSYS;
5627     }
5628 }
5629 
5630 #endif
5631 
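/*
 * TUNSETTXFILTER takes a variable-length struct tun_filter: copy the fixed
 * header, then the filter->count MAC addresses that follow it in guest
 * memory, before issuing the host ioctl.
 */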
5632 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5633                                         int fd, int cmd, abi_long arg)
5634 {
5635     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5636     struct tun_filter *target_filter;
5637     char *target_addr;
5638 
5639     assert(ie->access == IOC_W);
5640 
5641     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5642     if (!target_filter) {
5643         return -TARGET_EFAULT;
5644     }
5645     filter->flags = tswap16(target_filter->flags);
5646     filter->count = tswap16(target_filter->count);
5647     unlock_user(target_filter, arg, 0);
5648 
5649     if (filter->count) {
5650         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5651             MAX_STRUCT_SIZE) {
5652             return -TARGET_EFAULT;
5653         }
5654 
5655         target_addr = lock_user(VERIFY_READ,
5656                                 arg + offsetof(struct tun_filter, addr),
5657                                 filter->count * ETH_ALEN, 1);
5658         if (!target_addr) {
5659             return -TARGET_EFAULT;
5660         }
5661         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5662         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5663     }
5664 
5665     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5666 }
5667 
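/*
 * Table of supported ioctls, generated from ioctls.h.  Plain IOCTL() entries
 * are converted generically from their argtype description in do_ioctl();
 * IOCTL_SPECIAL() entries name one of the do_ioctl_*() helpers above;
 * IOCTL_IGNORE() entries have no host counterpart and are rejected with
 * -TARGET_ENOSYS.
 */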
5668 IOCTLEntry ioctl_entries[] = {
5669 #define IOCTL(cmd, access, ...) \
5670     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5671 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5672     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5673 #define IOCTL_IGNORE(cmd) \
5674     { TARGET_ ## cmd, 0, #cmd },
5675 #include "ioctls.h"
5676     { 0, 0, },
5677 };
5678 
5679 /* ??? Implement proper locking for ioctls.  */
5680 /* do_ioctl() must return target values and target errnos. */
5681 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5682 {
5683     const IOCTLEntry *ie;
5684     const argtype *arg_type;
5685     abi_long ret;
5686     uint8_t buf_temp[MAX_STRUCT_SIZE];
5687     int target_size;
5688     void *argptr;
5689 
5690     ie = ioctl_entries;
5691     for (;;) {
5692         if (ie->target_cmd == 0) {
5693             qemu_log_mask(
5694                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5695             return -TARGET_ENOSYS;
5696         }
5697         if (ie->target_cmd == cmd)
5698             break;
5699         ie++;
5700     }
5701     arg_type = ie->arg_type;
5702     if (ie->do_ioctl) {
5703         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5704     } else if (!ie->host_cmd) {
5705         /* Some architectures define BSD ioctls in their headers
5706            that are not implemented in Linux.  */
5707         return -TARGET_ENOSYS;
5708     }
5709 
5710     switch (arg_type[0]) {
5711     case TYPE_NULL:
5712         /* no argument */
5713         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5714         break;
5715     case TYPE_PTRVOID:
5716     case TYPE_INT:
5717     case TYPE_LONG:
5718     case TYPE_ULONG:
5719         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5720         break;
5721     case TYPE_PTR:
5722         arg_type++;
5723         target_size = thunk_type_size(arg_type, 0);
5724         switch (ie->access) {
5725         case IOC_R:
5726             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5727             if (!is_error(ret)) {
5728                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5729                 if (!argptr)
5730                     return -TARGET_EFAULT;
5731                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5732                 unlock_user(argptr, arg, target_size);
5733             }
5734             break;
5735         case IOC_W:
5736             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5737             if (!argptr)
5738                 return -TARGET_EFAULT;
5739             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5740             unlock_user(argptr, arg, 0);
5741             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5742             break;
5743         default:
5744         case IOC_RW:
5745             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5746             if (!argptr)
5747                 return -TARGET_EFAULT;
5748             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5749             unlock_user(argptr, arg, 0);
5750             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5751             if (!is_error(ret)) {
5752                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5753                 if (!argptr)
5754                     return -TARGET_EFAULT;
5755                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5756                 unlock_user(argptr, arg, target_size);
5757             }
5758             break;
5759         }
5760         break;
5761     default:
5762         qemu_log_mask(LOG_UNIMP,
5763                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5764                       (long)cmd, arg_type[0]);
5765         ret = -TARGET_ENOSYS;
5766         break;
5767     }
5768     return ret;
5769 }
5770 
5771 static const bitmask_transtbl iflag_tbl[] = {
5772         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5773         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5774         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5775         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5776         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5777         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5778         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5779         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5780         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5781         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5782         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5783         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5784         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5785         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5786         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5787         { 0, 0, 0, 0 }
5788 };
5789 
5790 static const bitmask_transtbl oflag_tbl[] = {
5791 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5792 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5793 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5794 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5795 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5796 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5797 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5798 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5799 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5800 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5801 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5802 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5803 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5804 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5805 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5806 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5807 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5808 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5809 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5810 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5811 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5812 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5813 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5814 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5815 	{ 0, 0, 0, 0 }
5816 };
5817 
5818 static const bitmask_transtbl cflag_tbl[] = {
5819 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5820 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5821 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5822 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5823 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5824 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5825 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5826 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5827 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5828 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5829 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5830 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5831 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5832 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5833 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5834 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5835 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5836 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5837 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5838 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5839 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5840 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5841 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5842 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5843 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5844 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5845 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5846 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5847 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5848 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5849 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5850 	{ 0, 0, 0, 0 }
5851 };
5852 
5853 static const bitmask_transtbl lflag_tbl[] = {
5854   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5855   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5856   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5857   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5858   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5859   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5860   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5861   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5862   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5863   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5864   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5865   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5866   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5867   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5868   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5869   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5870   { 0, 0, 0, 0 }
5871 };
5872 
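/*
 * Convert a guest struct termios to the host layout: translate the iflag,
 * oflag, cflag and lflag bitmasks through the tables above and copy the
 * individual control characters.
 */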
5873 static void target_to_host_termios (void *dst, const void *src)
5874 {
5875     struct host_termios *host = dst;
5876     const struct target_termios *target = src;
5877 
5878     host->c_iflag =
5879         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5880     host->c_oflag =
5881         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5882     host->c_cflag =
5883         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5884     host->c_lflag =
5885         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5886     host->c_line = target->c_line;
5887 
5888     memset(host->c_cc, 0, sizeof(host->c_cc));
5889     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5890     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5891     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5892     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5893     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5894     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5895     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5896     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5897     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5898     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5899     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5900     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5901     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5902     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5903     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5904     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5905     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5906 }
5907 
5908 static void host_to_target_termios (void *dst, const void *src)
5909 {
5910     struct target_termios *target = dst;
5911     const struct host_termios *host = src;
5912 
5913     target->c_iflag =
5914         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5915     target->c_oflag =
5916         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5917     target->c_cflag =
5918         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5919     target->c_lflag =
5920         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5921     target->c_line = host->c_line;
5922 
5923     memset(target->c_cc, 0, sizeof(target->c_cc));
5924     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5925     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5926     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5927     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5928     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5929     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5930     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5931     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5932     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5933     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5934     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5935     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5936     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5937     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5938     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5939     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5940     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5941 }
5942 
5943 static const StructEntry struct_termios_def = {
5944     .convert = { host_to_target_termios, target_to_host_termios },
5945     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5946     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5947     .print = print_termios,
5948 };
5949 
5950 static const bitmask_transtbl mmap_flags_tbl[] = {
5951     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5952     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5953     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5954     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5955       MAP_ANONYMOUS, MAP_ANONYMOUS },
5956     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5957       MAP_GROWSDOWN, MAP_GROWSDOWN },
5958     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5959       MAP_DENYWRITE, MAP_DENYWRITE },
5960     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5961       MAP_EXECUTABLE, MAP_EXECUTABLE },
5962     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5963     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5964       MAP_NORESERVE, MAP_NORESERVE },
5965     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5966     /* MAP_STACK had been ignored by the kernel for quite some time.
5967        Recognize it for the target insofar as we do not want to pass
5968        it through to the host.  */
5969     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5970     { 0, 0, 0, 0 }
5971 };
5972 
5973 /*
5974  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
5975  *       TARGET_I386 is defined if TARGET_X86_64 is defined.
5976  */
5977 #if defined(TARGET_I386)
5978 
5979 /* NOTE: there is really one LDT for all the threads */
5980 static uint8_t *ldt_table;
5981 
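/*
 * modify_ldt() with func == 0: copy the emulated LDT back to guest memory,
 * truncated to bytecount.  Returns the number of bytes copied, or 0 if no
 * LDT has been allocated yet.
 */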
5982 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5983 {
5984     int size;
5985     void *p;
5986 
5987     if (!ldt_table)
5988         return 0;
5989     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5990     if (size > bytecount)
5991         size = bytecount;
5992     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5993     if (!p)
5994         return -TARGET_EFAULT;
5995     /* ??? Should this be byteswapped?  */
5996     memcpy(p, ldt_table, size);
5997     unlock_user(p, ptr, size);
5998     return size;
5999 }
6000 
6001 /* XXX: add locking support */
6002 static abi_long write_ldt(CPUX86State *env,
6003                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6004 {
6005     struct target_modify_ldt_ldt_s ldt_info;
6006     struct target_modify_ldt_ldt_s *target_ldt_info;
6007     int seg_32bit, contents, read_exec_only, limit_in_pages;
6008     int seg_not_present, useable, lm;
6009     uint32_t *lp, entry_1, entry_2;
6010 
6011     if (bytecount != sizeof(ldt_info))
6012         return -TARGET_EINVAL;
6013     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6014         return -TARGET_EFAULT;
6015     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6016     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6017     ldt_info.limit = tswap32(target_ldt_info->limit);
6018     ldt_info.flags = tswap32(target_ldt_info->flags);
6019     unlock_user_struct(target_ldt_info, ptr, 0);
6020 
6021     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6022         return -TARGET_EINVAL;
6023     seg_32bit = ldt_info.flags & 1;
6024     contents = (ldt_info.flags >> 1) & 3;
6025     read_exec_only = (ldt_info.flags >> 3) & 1;
6026     limit_in_pages = (ldt_info.flags >> 4) & 1;
6027     seg_not_present = (ldt_info.flags >> 5) & 1;
6028     useable = (ldt_info.flags >> 6) & 1;
6029 #ifdef TARGET_ABI32
6030     lm = 0;
6031 #else
6032     lm = (ldt_info.flags >> 7) & 1;
6033 #endif
6034     if (contents == 3) {
6035         if (oldmode)
6036             return -TARGET_EINVAL;
6037         if (seg_not_present == 0)
6038             return -TARGET_EINVAL;
6039     }
6040     /* allocate the LDT */
6041     if (!ldt_table) {
6042         env->ldt.base = target_mmap(0,
6043                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6044                                     PROT_READ|PROT_WRITE,
6045                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6046         if (env->ldt.base == -1)
6047             return -TARGET_ENOMEM;
6048         memset(g2h_untagged(env->ldt.base), 0,
6049                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6050         env->ldt.limit = 0xffff;
6051         ldt_table = g2h_untagged(env->ldt.base);
6052     }
6053 
6054     /* NOTE: same code as Linux kernel */
6055     /* Allow LDTs to be cleared by the user. */
6056     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6057         if (oldmode ||
6058             (contents == 0             &&
6059              read_exec_only == 1       &&
6060              seg_32bit == 0            &&
6061              limit_in_pages == 0       &&
6062              seg_not_present == 1      &&
6063              useable == 0 )) {
6064             entry_1 = 0;
6065             entry_2 = 0;
6066             goto install;
6067         }
6068     }
6069 
6070     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6071         (ldt_info.limit & 0x0ffff);
6072     entry_2 = (ldt_info.base_addr & 0xff000000) |
6073         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6074         (ldt_info.limit & 0xf0000) |
6075         ((read_exec_only ^ 1) << 9) |
6076         (contents << 10) |
6077         ((seg_not_present ^ 1) << 15) |
6078         (seg_32bit << 22) |
6079         (limit_in_pages << 23) |
6080         (lm << 21) |
6081         0x7000;
6082     if (!oldmode)
6083         entry_2 |= (useable << 20);
6084 
6085     /* Install the new entry ...  */
6086 install:
6087     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6088     lp[0] = tswap32(entry_1);
6089     lp[1] = tswap32(entry_2);
6090     return 0;
6091 }
6092 
6093 /* specific and weird i386 syscalls */
6094 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6095                               unsigned long bytecount)
6096 {
6097     abi_long ret;
6098 
6099     switch (func) {
6100     case 0:
6101         ret = read_ldt(ptr, bytecount);
6102         break;
6103     case 1:
6104         ret = write_ldt(env, ptr, bytecount, 1);
6105         break;
6106     case 0x11:
6107         ret = write_ldt(env, ptr, bytecount, 0);
6108         break;
6109     default:
6110         ret = -TARGET_ENOSYS;
6111         break;
6112     }
6113     return ret;
6114 }
6115 
6116 #if defined(TARGET_ABI32)
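/*
 * set_thread_area: install a TLS descriptor into the emulated GDT.  An
 * entry_number of -1 asks us to pick a free slot in the TLS range and
 * report the chosen index back to the guest.
 */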
6117 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6118 {
6119     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6120     struct target_modify_ldt_ldt_s ldt_info;
6121     struct target_modify_ldt_ldt_s *target_ldt_info;
6122     int seg_32bit, contents, read_exec_only, limit_in_pages;
6123     int seg_not_present, useable, lm;
6124     uint32_t *lp, entry_1, entry_2;
6125     int i;
6126 
6127     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6128     if (!target_ldt_info)
6129         return -TARGET_EFAULT;
6130     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6131     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6132     ldt_info.limit = tswap32(target_ldt_info->limit);
6133     ldt_info.flags = tswap32(target_ldt_info->flags);
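    /*
     * An entry_number of -1 asks us to allocate the first free TLS slot
     * in the GDT and report the chosen index back to the guest, matching
     * the kernel's set_thread_area() behaviour.
     */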
6134     if (ldt_info.entry_number == -1) {
6135         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6136             if (gdt_table[i] == 0) {
6137                 ldt_info.entry_number = i;
6138                 target_ldt_info->entry_number = tswap32(i);
6139                 break;
6140             }
6141         }
6142     }
6143     unlock_user_struct(target_ldt_info, ptr, 1);
6144 
6145     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6146         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6147            return -TARGET_EINVAL;
6148     seg_32bit = ldt_info.flags & 1;
6149     contents = (ldt_info.flags >> 1) & 3;
6150     read_exec_only = (ldt_info.flags >> 3) & 1;
6151     limit_in_pages = (ldt_info.flags >> 4) & 1;
6152     seg_not_present = (ldt_info.flags >> 5) & 1;
6153     useable = (ldt_info.flags >> 6) & 1;
6154 #ifdef TARGET_ABI32
6155     lm = 0;
6156 #else
6157     lm = (ldt_info.flags >> 7) & 1;
6158 #endif
6159 
6160     if (contents == 3) {
6161         if (seg_not_present == 0)
6162             return -TARGET_EINVAL;
6163     }
6164 
6165     /* NOTE: same code as Linux kernel */
6166     /* Allow LDTs to be cleared by the user. */
6167     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6168         if ((contents == 0             &&
6169              read_exec_only == 1       &&
6170              seg_32bit == 0            &&
6171              limit_in_pages == 0       &&
6172              seg_not_present == 1      &&
6173              useable == 0 )) {
6174             entry_1 = 0;
6175             entry_2 = 0;
6176             goto install;
6177         }
6178     }
6179 
6180     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6181         (ldt_info.limit & 0x0ffff);
6182     entry_2 = (ldt_info.base_addr & 0xff000000) |
6183         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6184         (ldt_info.limit & 0xf0000) |
6185         ((read_exec_only ^ 1) << 9) |
6186         (contents << 10) |
6187         ((seg_not_present ^ 1) << 15) |
6188         (seg_32bit << 22) |
6189         (limit_in_pages << 23) |
6190         (useable << 20) |
6191         (lm << 21) |
6192         0x7000;
6193 
6194     /* Install the new entry ...  */
6195 install:
6196     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6197     lp[0] = tswap32(entry_1);
6198     lp[1] = tswap32(entry_2);
6199     return 0;
6200 }
6201 
6202 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6203 {
6204     struct target_modify_ldt_ldt_s *target_ldt_info;
6205     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6206     uint32_t base_addr, limit, flags;
6207     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6208     int seg_not_present, useable, lm;
6209     uint32_t *lp, entry_1, entry_2;
6210 
6211     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6212     if (!target_ldt_info)
6213         return -TARGET_EFAULT;
6214     idx = tswap32(target_ldt_info->entry_number);
6215     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6216         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6217         unlock_user_struct(target_ldt_info, ptr, 1);
6218         return -TARGET_EINVAL;
6219     }
6220     lp = (uint32_t *)(gdt_table + idx);
6221     entry_1 = tswap32(lp[0]);
6222     entry_2 = tswap32(lp[1]);
6223 
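    /* Unpack the descriptor words back into user_desc-style fields. */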
6224     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6225     contents = (entry_2 >> 10) & 3;
6226     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6227     seg_32bit = (entry_2 >> 22) & 1;
6228     limit_in_pages = (entry_2 >> 23) & 1;
6229     useable = (entry_2 >> 20) & 1;
6230 #ifdef TARGET_ABI32
6231     lm = 0;
6232 #else
6233     lm = (entry_2 >> 21) & 1;
6234 #endif
6235     flags = (seg_32bit << 0) | (contents << 1) |
6236         (read_exec_only << 3) | (limit_in_pages << 4) |
6237         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6238     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6239     base_addr = (entry_1 >> 16) |
6240         (entry_2 & 0xff000000) |
6241         ((entry_2 & 0xff) << 16);
6242     target_ldt_info->base_addr = tswapal(base_addr);
6243     target_ldt_info->limit = tswap32(limit);
6244     target_ldt_info->flags = tswap32(flags);
6245     unlock_user_struct(target_ldt_info, ptr, 1);
6246     return 0;
6247 }
6248 
6249 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6250 {
6251     return -TARGET_ENOSYS;
6252 }
6253 #else
6254 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6255 {
6256     abi_long ret = 0;
6257     abi_ulong val;
6258     int idx;
6259 
6260     switch(code) {
6261     case TARGET_ARCH_SET_GS:
6262     case TARGET_ARCH_SET_FS:
6263         if (code == TARGET_ARCH_SET_GS)
6264             idx = R_GS;
6265         else
6266             idx = R_FS;
6267         cpu_x86_load_seg(env, idx, 0);
6268         env->segs[idx].base = addr;
6269         break;
6270     case TARGET_ARCH_GET_GS:
6271     case TARGET_ARCH_GET_FS:
6272         if (code == TARGET_ARCH_GET_GS)
6273             idx = R_GS;
6274         else
6275             idx = R_FS;
6276         val = env->segs[idx].base;
6277         if (put_user(val, addr, abi_ulong))
6278             ret = -TARGET_EFAULT;
6279         break;
6280     default:
6281         ret = -TARGET_EINVAL;
6282         break;
6283     }
6284     return ret;
6285 }
6286 #endif /* defined(TARGET_ABI32) */
6287 #endif /* defined(TARGET_I386) */
6288 
6289 /*
6290  * These constants are generic.  Supply any that are missing from the host.
6291  */
6292 #ifndef PR_SET_NAME
6293 # define PR_SET_NAME    15
6294 # define PR_GET_NAME    16
6295 #endif
6296 #ifndef PR_SET_FP_MODE
6297 # define PR_SET_FP_MODE 45
6298 # define PR_GET_FP_MODE 46
6299 # define PR_FP_MODE_FR   (1 << 0)
6300 # define PR_FP_MODE_FRE  (1 << 1)
6301 #endif
6302 #ifndef PR_SVE_SET_VL
6303 # define PR_SVE_SET_VL  50
6304 # define PR_SVE_GET_VL  51
6305 # define PR_SVE_VL_LEN_MASK  0xffff
6306 # define PR_SVE_VL_INHERIT   (1 << 17)
6307 #endif
6308 #ifndef PR_PAC_RESET_KEYS
6309 # define PR_PAC_RESET_KEYS  54
6310 # define PR_PAC_APIAKEY   (1 << 0)
6311 # define PR_PAC_APIBKEY   (1 << 1)
6312 # define PR_PAC_APDAKEY   (1 << 2)
6313 # define PR_PAC_APDBKEY   (1 << 3)
6314 # define PR_PAC_APGAKEY   (1 << 4)
6315 #endif
6316 #ifndef PR_SET_TAGGED_ADDR_CTRL
6317 # define PR_SET_TAGGED_ADDR_CTRL 55
6318 # define PR_GET_TAGGED_ADDR_CTRL 56
6319 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6320 #endif
6321 #ifndef PR_MTE_TCF_SHIFT
6322 # define PR_MTE_TCF_SHIFT       1
6323 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6324 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6325 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6326 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6327 # define PR_MTE_TAG_SHIFT       3
6328 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6329 #endif
6330 #ifndef PR_SET_IO_FLUSHER
6331 # define PR_SET_IO_FLUSHER 57
6332 # define PR_GET_IO_FLUSHER 58
6333 #endif
6334 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6335 # define PR_SET_SYSCALL_USER_DISPATCH 59
6336 #endif
6337 #ifndef PR_SME_SET_VL
6338 # define PR_SME_SET_VL  63
6339 # define PR_SME_GET_VL  64
6340 # define PR_SME_VL_LEN_MASK  0xffff
6341 # define PR_SME_VL_INHERIT   (1 << 17)
6342 #endif
6343 
6344 #include "target_prctl.h"
6345 
6346 static abi_long do_prctl_inval0(CPUArchState *env)
6347 {
6348     return -TARGET_EINVAL;
6349 }
6350 
6351 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6352 {
6353     return -TARGET_EINVAL;
6354 }
6355 
6356 #ifndef do_prctl_get_fp_mode
6357 #define do_prctl_get_fp_mode do_prctl_inval0
6358 #endif
6359 #ifndef do_prctl_set_fp_mode
6360 #define do_prctl_set_fp_mode do_prctl_inval1
6361 #endif
6362 #ifndef do_prctl_sve_get_vl
6363 #define do_prctl_sve_get_vl do_prctl_inval0
6364 #endif
6365 #ifndef do_prctl_sve_set_vl
6366 #define do_prctl_sve_set_vl do_prctl_inval1
6367 #endif
6368 #ifndef do_prctl_reset_keys
6369 #define do_prctl_reset_keys do_prctl_inval1
6370 #endif
6371 #ifndef do_prctl_set_tagged_addr_ctrl
6372 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6373 #endif
6374 #ifndef do_prctl_get_tagged_addr_ctrl
6375 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6376 #endif
6377 #ifndef do_prctl_get_unalign
6378 #define do_prctl_get_unalign do_prctl_inval1
6379 #endif
6380 #ifndef do_prctl_set_unalign
6381 #define do_prctl_set_unalign do_prctl_inval1
6382 #endif
6383 #ifndef do_prctl_sme_get_vl
6384 #define do_prctl_sme_get_vl do_prctl_inval0
6385 #endif
6386 #ifndef do_prctl_sme_set_vl
6387 #define do_prctl_sme_set_vl do_prctl_inval1
6388 #endif
6389 
6390 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6391                          abi_long arg3, abi_long arg4, abi_long arg5)
6392 {
6393     abi_long ret;
6394 
6395     switch (option) {
6396     case PR_GET_PDEATHSIG:
6397         {
6398             int deathsig;
6399             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6400                                   arg3, arg4, arg5));
6401             if (!is_error(ret) &&
6402                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6403                 return -TARGET_EFAULT;
6404             }
6405             return ret;
6406         }
6407     case PR_SET_PDEATHSIG:
6408         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6409                                arg3, arg4, arg5));
6410     case PR_GET_NAME:
6411         {
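            /*
             * The kernel task name (comm) is at most 16 bytes including the
             * terminating NUL, hence the fixed-size buffer.
             */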
6412             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6413             if (!name) {
6414                 return -TARGET_EFAULT;
6415             }
6416             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6417                                   arg3, arg4, arg5));
6418             unlock_user(name, arg2, 16);
6419             return ret;
6420         }
6421     case PR_SET_NAME:
6422         {
6423             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6424             if (!name) {
6425                 return -TARGET_EFAULT;
6426             }
6427             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6428                                   arg3, arg4, arg5));
6429             unlock_user(name, arg2, 0);
6430             return ret;
6431         }
6432     case PR_GET_FP_MODE:
6433         return do_prctl_get_fp_mode(env);
6434     case PR_SET_FP_MODE:
6435         return do_prctl_set_fp_mode(env, arg2);
6436     case PR_SVE_GET_VL:
6437         return do_prctl_sve_get_vl(env);
6438     case PR_SVE_SET_VL:
6439         return do_prctl_sve_set_vl(env, arg2);
6440     case PR_SME_GET_VL:
6441         return do_prctl_sme_get_vl(env);
6442     case PR_SME_SET_VL:
6443         return do_prctl_sme_set_vl(env, arg2);
6444     case PR_PAC_RESET_KEYS:
6445         if (arg3 || arg4 || arg5) {
6446             return -TARGET_EINVAL;
6447         }
6448         return do_prctl_reset_keys(env, arg2);
6449     case PR_SET_TAGGED_ADDR_CTRL:
6450         if (arg3 || arg4 || arg5) {
6451             return -TARGET_EINVAL;
6452         }
6453         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6454     case PR_GET_TAGGED_ADDR_CTRL:
6455         if (arg2 || arg3 || arg4 || arg5) {
6456             return -TARGET_EINVAL;
6457         }
6458         return do_prctl_get_tagged_addr_ctrl(env);
6459 
6460     case PR_GET_UNALIGN:
6461         return do_prctl_get_unalign(env, arg2);
6462     case PR_SET_UNALIGN:
6463         return do_prctl_set_unalign(env, arg2);
6464 
6465     case PR_CAP_AMBIENT:
6466     case PR_CAPBSET_READ:
6467     case PR_CAPBSET_DROP:
6468     case PR_GET_DUMPABLE:
6469     case PR_SET_DUMPABLE:
6470     case PR_GET_KEEPCAPS:
6471     case PR_SET_KEEPCAPS:
6472     case PR_GET_SECUREBITS:
6473     case PR_SET_SECUREBITS:
6474     case PR_GET_TIMING:
6475     case PR_SET_TIMING:
6476     case PR_GET_TIMERSLACK:
6477     case PR_SET_TIMERSLACK:
6478     case PR_MCE_KILL:
6479     case PR_MCE_KILL_GET:
6480     case PR_GET_NO_NEW_PRIVS:
6481     case PR_SET_NO_NEW_PRIVS:
6482     case PR_GET_IO_FLUSHER:
6483     case PR_SET_IO_FLUSHER:
6484         /* These prctl options take no pointer arguments; pass them through. */
6485         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6486 
6487     case PR_GET_CHILD_SUBREAPER:
6488     case PR_SET_CHILD_SUBREAPER:
6489     case PR_GET_SPECULATION_CTRL:
6490     case PR_SET_SPECULATION_CTRL:
6491     case PR_GET_TID_ADDRESS:
6492         /* TODO */
6493         return -TARGET_EINVAL;
6494 
6495     case PR_GET_FPEXC:
6496     case PR_SET_FPEXC:
6497         /* Was used for SPE on PowerPC. */
6498         return -TARGET_EINVAL;
6499 
6500     case PR_GET_ENDIAN:
6501     case PR_SET_ENDIAN:
6502     case PR_GET_FPEMU:
6503     case PR_SET_FPEMU:
6504     case PR_SET_MM:
6505     case PR_GET_SECCOMP:
6506     case PR_SET_SECCOMP:
6507     case PR_SET_SYSCALL_USER_DISPATCH:
6508     case PR_GET_THP_DISABLE:
6509     case PR_SET_THP_DISABLE:
6510     case PR_GET_TSC:
6511     case PR_SET_TSC:
6512         /* Disable to prevent the target disabling stuff we need. */
6513         return -TARGET_EINVAL;
6514 
6515     default:
6516         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6517                       option);
6518         return -TARGET_EINVAL;
6519     }
6520 }
6521 
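/* Host stack size used for each new guest thread's pthread: 256 KiB. */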
6522 #define NEW_STACK_SIZE 0x40000
6523 
6524 
6525 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6526 typedef struct {
6527     CPUArchState *env;
6528     pthread_mutex_t mutex;
6529     pthread_cond_t cond;
6530     pthread_t thread;
6531     uint32_t tid;
6532     abi_ulong child_tidptr;
6533     abi_ulong parent_tidptr;
6534     sigset_t sigmask;
6535 } new_thread_info;
6536 
6537 static void *clone_func(void *arg)
6538 {
6539     new_thread_info *info = arg;
6540     CPUArchState *env;
6541     CPUState *cpu;
6542     TaskState *ts;
6543 
6544     rcu_register_thread();
6545     tcg_register_thread();
6546     env = info->env;
6547     cpu = env_cpu(env);
6548     thread_cpu = cpu;
6549     ts = (TaskState *)cpu->opaque;
6550     info->tid = sys_gettid();
6551     task_settid(ts);
6552     if (info->child_tidptr)
6553         put_user_u32(info->tid, info->child_tidptr);
6554     if (info->parent_tidptr)
6555         put_user_u32(info->tid, info->parent_tidptr);
6556     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6557     /* Enable signals.  */
6558     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6559     /* Signal to the parent that we're ready.  */
6560     pthread_mutex_lock(&info->mutex);
6561     pthread_cond_broadcast(&info->cond);
6562     pthread_mutex_unlock(&info->mutex);
6563     /* Wait until the parent has finished initializing the tls state.  */
6564     pthread_mutex_lock(&clone_lock);
6565     pthread_mutex_unlock(&clone_lock);
6566     cpu_loop(env);
6567     /* never exits */
6568     return NULL;
6569 }
6570 
6571 /* do_fork() must return host values and target errnos (unlike most
6572    do_*() functions). */
6573 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6574                    abi_ulong parent_tidptr, target_ulong newtls,
6575                    abi_ulong child_tidptr)
6576 {
6577     CPUState *cpu = env_cpu(env);
6578     int ret;
6579     TaskState *ts;
6580     CPUState *new_cpu;
6581     CPUArchState *new_env;
6582     sigset_t sigmask;
6583 
6584     flags &= ~CLONE_IGNORED_FLAGS;
6585 
6586     /* Emulate vfork() with fork() */
6587     if (flags & CLONE_VFORK)
6588         flags &= ~(CLONE_VFORK | CLONE_VM);
6589 
6590     if (flags & CLONE_VM) {
6591         TaskState *parent_ts = (TaskState *)cpu->opaque;
6592         new_thread_info info;
6593         pthread_attr_t attr;
6594 
6595         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6596             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6597             return -TARGET_EINVAL;
6598         }
6599 
6600         ts = g_new0(TaskState, 1);
6601         init_task_state(ts);
6602 
6603         /* Grab a mutex so that thread setup appears atomic.  */
6604         pthread_mutex_lock(&clone_lock);
6605 
6606         /*
6607          * If this is our first additional thread, we need to ensure we
6608          * generate code for parallel execution and flush old translations.
6609          * Do this now so that the copy gets CF_PARALLEL too.
6610          */
6611         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6612             cpu->tcg_cflags |= CF_PARALLEL;
6613             tb_flush(cpu);
6614         }
6615 
6616         /* we create a new CPU instance. */
6617         new_env = cpu_copy(env);
6618         /* Init regs that differ from the parent.  */
6619         cpu_clone_regs_child(new_env, newsp, flags);
6620         cpu_clone_regs_parent(env, flags);
6621         new_cpu = env_cpu(new_env);
6622         new_cpu->opaque = ts;
6623         ts->bprm = parent_ts->bprm;
6624         ts->info = parent_ts->info;
6625         ts->signal_mask = parent_ts->signal_mask;
6626 
6627         if (flags & CLONE_CHILD_CLEARTID) {
6628             ts->child_tidptr = child_tidptr;
6629         }
6630 
6631         if (flags & CLONE_SETTLS) {
6632             cpu_set_tls (new_env, newtls);
6633         }
6634 
6635         memset(&info, 0, sizeof(info));
6636         pthread_mutex_init(&info.mutex, NULL);
6637         pthread_mutex_lock(&info.mutex);
6638         pthread_cond_init(&info.cond, NULL);
6639         info.env = new_env;
6640         if (flags & CLONE_CHILD_SETTID) {
6641             info.child_tidptr = child_tidptr;
6642         }
6643         if (flags & CLONE_PARENT_SETTID) {
6644             info.parent_tidptr = parent_tidptr;
6645         }
6646 
6647         ret = pthread_attr_init(&attr);
6648         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6649         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6650         /* It is not safe to deliver signals until the child has finished
6651            initializing, so temporarily block all signals.  */
6652         sigfillset(&sigmask);
6653         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6654         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6655 
6656         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6657         /* TODO: Free new CPU state if thread creation failed.  */
6658 
6659         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6660         pthread_attr_destroy(&attr);
6661         if (ret == 0) {
6662             /* Wait for the child to initialize.  */
6663             pthread_cond_wait(&info.cond, &info.mutex);
6664             ret = info.tid;
6665         } else {
6666             ret = -1;
6667         }
6668         pthread_mutex_unlock(&info.mutex);
6669         pthread_cond_destroy(&info.cond);
6670         pthread_mutex_destroy(&info.mutex);
6671         pthread_mutex_unlock(&clone_lock);
6672     } else {
6673         /* Without CLONE_VM, we treat this as a plain fork. */
6674         if (flags & CLONE_INVALID_FORK_FLAGS) {
6675             return -TARGET_EINVAL;
6676         }
6677 
6678         /* We can't support custom termination signals */
6679         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6680             return -TARGET_EINVAL;
6681         }
6682 
6683         if (block_signals()) {
6684             return -QEMU_ERESTARTSYS;
6685         }
6686 
6687         fork_start();
6688         ret = fork();
6689         if (ret == 0) {
6690             /* Child Process.  */
6691             cpu_clone_regs_child(env, newsp, flags);
6692             fork_end(1);
6693             /* There is a race condition here.  The parent process could
6694                theoretically read the TID in the child process before the child
6695                tid is set.  This would require using either ptrace
6696                (not implemented) or having *_tidptr point at a shared memory
6697                mapping.  We can't repeat the spinlock hack used above because
6698                the child process gets its own copy of the lock.  */
6699             if (flags & CLONE_CHILD_SETTID)
6700                 put_user_u32(sys_gettid(), child_tidptr);
6701             if (flags & CLONE_PARENT_SETTID)
6702                 put_user_u32(sys_gettid(), parent_tidptr);
6703             ts = (TaskState *)cpu->opaque;
6704             if (flags & CLONE_SETTLS)
6705                 cpu_set_tls (env, newtls);
6706             if (flags & CLONE_CHILD_CLEARTID)
6707                 ts->child_tidptr = child_tidptr;
6708         } else {
6709             cpu_clone_regs_parent(env, flags);
6710             fork_end(0);
6711         }
6712     }
6713     return ret;
6714 }
6715 
6716 /* Warning: doesn't handle Linux-specific flags... */
6717 static int target_to_host_fcntl_cmd(int cmd)
6718 {
6719     int ret;
6720 
6721     switch(cmd) {
6722     case TARGET_F_DUPFD:
6723     case TARGET_F_GETFD:
6724     case TARGET_F_SETFD:
6725     case TARGET_F_GETFL:
6726     case TARGET_F_SETFL:
6727     case TARGET_F_OFD_GETLK:
6728     case TARGET_F_OFD_SETLK:
6729     case TARGET_F_OFD_SETLKW:
6730         ret = cmd;
6731         break;
6732     case TARGET_F_GETLK:
6733         ret = F_GETLK64;
6734         break;
6735     case TARGET_F_SETLK:
6736         ret = F_SETLK64;
6737         break;
6738     case TARGET_F_SETLKW:
6739         ret = F_SETLKW64;
6740         break;
6741     case TARGET_F_GETOWN:
6742         ret = F_GETOWN;
6743         break;
6744     case TARGET_F_SETOWN:
6745         ret = F_SETOWN;
6746         break;
6747     case TARGET_F_GETSIG:
6748         ret = F_GETSIG;
6749         break;
6750     case TARGET_F_SETSIG:
6751         ret = F_SETSIG;
6752         break;
6753 #if TARGET_ABI_BITS == 32
6754     case TARGET_F_GETLK64:
6755         ret = F_GETLK64;
6756         break;
6757     case TARGET_F_SETLK64:
6758         ret = F_SETLK64;
6759         break;
6760     case TARGET_F_SETLKW64:
6761         ret = F_SETLKW64;
6762         break;
6763 #endif
6764     case TARGET_F_SETLEASE:
6765         ret = F_SETLEASE;
6766         break;
6767     case TARGET_F_GETLEASE:
6768         ret = F_GETLEASE;
6769         break;
6770 #ifdef F_DUPFD_CLOEXEC
6771     case TARGET_F_DUPFD_CLOEXEC:
6772         ret = F_DUPFD_CLOEXEC;
6773         break;
6774 #endif
6775     case TARGET_F_NOTIFY:
6776         ret = F_NOTIFY;
6777         break;
6778 #ifdef F_GETOWN_EX
6779     case TARGET_F_GETOWN_EX:
6780         ret = F_GETOWN_EX;
6781         break;
6782 #endif
6783 #ifdef F_SETOWN_EX
6784     case TARGET_F_SETOWN_EX:
6785         ret = F_SETOWN_EX;
6786         break;
6787 #endif
6788 #ifdef F_SETPIPE_SZ
6789     case TARGET_F_SETPIPE_SZ:
6790         ret = F_SETPIPE_SZ;
6791         break;
6792     case TARGET_F_GETPIPE_SZ:
6793         ret = F_GETPIPE_SZ;
6794         break;
6795 #endif
6796 #ifdef F_ADD_SEALS
6797     case TARGET_F_ADD_SEALS:
6798         ret = F_ADD_SEALS;
6799         break;
6800     case TARGET_F_GET_SEALS:
6801         ret = F_GET_SEALS;
6802         break;
6803 #endif
6804     default:
6805         ret = -TARGET_EINVAL;
6806         break;
6807     }
6808 
6809 #if defined(__powerpc64__)
6810     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6811      * is not supported by kernel. The glibc fcntl call actually adjusts
6812      * them to 5, 6 and 7 before making the syscall(). Since we make the
6813      * syscall directly, adjust to what is supported by the kernel.
6814      */
6815     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6816         ret -= F_GETLK64 - 5;
6817     }
6818 #endif
6819 
6820     return ret;
6821 }
6822 
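/*
 * X-macro style table: FLOCK_TRANSTBL is expanded twice below, with
 * TRANSTBL_CONVERT defined once for the target-to-host direction and once
 * for host-to-target.
 */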
6823 #define FLOCK_TRANSTBL \
6824     switch (type) { \
6825     TRANSTBL_CONVERT(F_RDLCK); \
6826     TRANSTBL_CONVERT(F_WRLCK); \
6827     TRANSTBL_CONVERT(F_UNLCK); \
6828     }
6829 
6830 static int target_to_host_flock(int type)
6831 {
6832 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6833     FLOCK_TRANSTBL
6834 #undef  TRANSTBL_CONVERT
6835     return -TARGET_EINVAL;
6836 }
6837 
6838 static int host_to_target_flock(int type)
6839 {
6840 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6841     FLOCK_TRANSTBL
6842 #undef  TRANSTBL_CONVERT
6843     /* If we don't know how to convert the value coming from the host,
6844      * we copy it to the target field as-is.
6845      */
6846     return type;
6847 }
6848 
6849 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6850                                             abi_ulong target_flock_addr)
6851 {
6852     struct target_flock *target_fl;
6853     int l_type;
6854 
6855     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6856         return -TARGET_EFAULT;
6857     }
6858 
6859     __get_user(l_type, &target_fl->l_type);
6860     l_type = target_to_host_flock(l_type);
6861     if (l_type < 0) {
6862         return l_type;
6863     }
6864     fl->l_type = l_type;
6865     __get_user(fl->l_whence, &target_fl->l_whence);
6866     __get_user(fl->l_start, &target_fl->l_start);
6867     __get_user(fl->l_len, &target_fl->l_len);
6868     __get_user(fl->l_pid, &target_fl->l_pid);
6869     unlock_user_struct(target_fl, target_flock_addr, 0);
6870     return 0;
6871 }
6872 
6873 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6874                                           const struct flock64 *fl)
6875 {
6876     struct target_flock *target_fl;
6877     short l_type;
6878 
6879     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6880         return -TARGET_EFAULT;
6881     }
6882 
6883     l_type = host_to_target_flock(fl->l_type);
6884     __put_user(l_type, &target_fl->l_type);
6885     __put_user(fl->l_whence, &target_fl->l_whence);
6886     __put_user(fl->l_start, &target_fl->l_start);
6887     __put_user(fl->l_len, &target_fl->l_len);
6888     __put_user(fl->l_pid, &target_fl->l_pid);
6889     unlock_user_struct(target_fl, target_flock_addr, 1);
6890     return 0;
6891 }
6892 
6893 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6894 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6895 
6896 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6897 struct target_oabi_flock64 {
6898     abi_short l_type;
6899     abi_short l_whence;
6900     abi_llong l_start;
6901     abi_llong l_len;
6902     abi_int   l_pid;
6903 } QEMU_PACKED;
6904 
6905 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6906                                                    abi_ulong target_flock_addr)
6907 {
6908     struct target_oabi_flock64 *target_fl;
6909     int l_type;
6910 
6911     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6912         return -TARGET_EFAULT;
6913     }
6914 
6915     __get_user(l_type, &target_fl->l_type);
6916     l_type = target_to_host_flock(l_type);
6917     if (l_type < 0) {
6918         return l_type;
6919     }
6920     fl->l_type = l_type;
6921     __get_user(fl->l_whence, &target_fl->l_whence);
6922     __get_user(fl->l_start, &target_fl->l_start);
6923     __get_user(fl->l_len, &target_fl->l_len);
6924     __get_user(fl->l_pid, &target_fl->l_pid);
6925     unlock_user_struct(target_fl, target_flock_addr, 0);
6926     return 0;
6927 }
6928 
6929 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6930                                                  const struct flock64 *fl)
6931 {
6932     struct target_oabi_flock64 *target_fl;
6933     short l_type;
6934 
6935     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6936         return -TARGET_EFAULT;
6937     }
6938 
6939     l_type = host_to_target_flock(fl->l_type);
6940     __put_user(l_type, &target_fl->l_type);
6941     __put_user(fl->l_whence, &target_fl->l_whence);
6942     __put_user(fl->l_start, &target_fl->l_start);
6943     __put_user(fl->l_len, &target_fl->l_len);
6944     __put_user(fl->l_pid, &target_fl->l_pid);
6945     unlock_user_struct(target_fl, target_flock_addr, 1);
6946     return 0;
6947 }
6948 #endif
6949 
6950 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6951                                               abi_ulong target_flock_addr)
6952 {
6953     struct target_flock64 *target_fl;
6954     int l_type;
6955 
6956     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6957         return -TARGET_EFAULT;
6958     }
6959 
6960     __get_user(l_type, &target_fl->l_type);
6961     l_type = target_to_host_flock(l_type);
6962     if (l_type < 0) {
6963         return l_type;
6964     }
6965     fl->l_type = l_type;
6966     __get_user(fl->l_whence, &target_fl->l_whence);
6967     __get_user(fl->l_start, &target_fl->l_start);
6968     __get_user(fl->l_len, &target_fl->l_len);
6969     __get_user(fl->l_pid, &target_fl->l_pid);
6970     unlock_user_struct(target_fl, target_flock_addr, 0);
6971     return 0;
6972 }
6973 
6974 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6975                                             const struct flock64 *fl)
6976 {
6977     struct target_flock64 *target_fl;
6978     short l_type;
6979 
6980     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6981         return -TARGET_EFAULT;
6982     }
6983 
6984     l_type = host_to_target_flock(fl->l_type);
6985     __put_user(l_type, &target_fl->l_type);
6986     __put_user(fl->l_whence, &target_fl->l_whence);
6987     __put_user(fl->l_start, &target_fl->l_start);
6988     __put_user(fl->l_len, &target_fl->l_len);
6989     __put_user(fl->l_pid, &target_fl->l_pid);
6990     unlock_user_struct(target_fl, target_flock_addr, 1);
6991     return 0;
6992 }
6993 
6994 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6995 {
6996     struct flock64 fl64;
6997 #ifdef F_GETOWN_EX
6998     struct f_owner_ex fox;
6999     struct target_f_owner_ex *target_fox;
7000 #endif
7001     abi_long ret;
7002     int host_cmd = target_to_host_fcntl_cmd(cmd);
7003 
7004     if (host_cmd == -TARGET_EINVAL)
7005         return host_cmd;
7006 
7007     switch(cmd) {
7008     case TARGET_F_GETLK:
7009         ret = copy_from_user_flock(&fl64, arg);
7010         if (ret) {
7011             return ret;
7012         }
7013         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7014         if (ret == 0) {
7015             ret = copy_to_user_flock(arg, &fl64);
7016         }
7017         break;
7018 
7019     case TARGET_F_SETLK:
7020     case TARGET_F_SETLKW:
7021         ret = copy_from_user_flock(&fl64, arg);
7022         if (ret) {
7023             return ret;
7024         }
7025         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7026         break;
7027 
7028     case TARGET_F_GETLK64:
7029     case TARGET_F_OFD_GETLK:
7030         ret = copy_from_user_flock64(&fl64, arg);
7031         if (ret) {
7032             return ret;
7033         }
7034         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7035         if (ret == 0) {
7036             ret = copy_to_user_flock64(arg, &fl64);
7037         }
7038         break;
7039     case TARGET_F_SETLK64:
7040     case TARGET_F_SETLKW64:
7041     case TARGET_F_OFD_SETLK:
7042     case TARGET_F_OFD_SETLKW:
7043         ret = copy_from_user_flock64(&fl64, arg);
7044         if (ret) {
7045             return ret;
7046         }
7047         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7048         break;
7049 
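    /*
     * The O_* flag encodings differ between target and host, so the GETFL
     * result and the SETFL argument are translated via fcntl_flags_tbl.
     */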
7050     case TARGET_F_GETFL:
7051         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7052         if (ret >= 0) {
7053             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7054         }
7055         break;
7056 
7057     case TARGET_F_SETFL:
7058         ret = get_errno(safe_fcntl(fd, host_cmd,
7059                                    target_to_host_bitmask(arg,
7060                                                           fcntl_flags_tbl)));
7061         break;
7062 
7063 #ifdef F_GETOWN_EX
7064     case TARGET_F_GETOWN_EX:
7065         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7066         if (ret >= 0) {
7067             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7068                 return -TARGET_EFAULT;
7069             target_fox->type = tswap32(fox.type);
7070             target_fox->pid = tswap32(fox.pid);
7071             unlock_user_struct(target_fox, arg, 1);
7072         }
7073         break;
7074 #endif
7075 
7076 #ifdef F_SETOWN_EX
7077     case TARGET_F_SETOWN_EX:
7078         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7079             return -TARGET_EFAULT;
7080         fox.type = tswap32(target_fox->type);
7081         fox.pid = tswap32(target_fox->pid);
7082         unlock_user_struct(target_fox, arg, 0);
7083         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7084         break;
7085 #endif
7086 
7087     case TARGET_F_SETSIG:
7088         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7089         break;
7090 
7091     case TARGET_F_GETSIG:
7092         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7093         break;
7094 
7095     case TARGET_F_SETOWN:
7096     case TARGET_F_GETOWN:
7097     case TARGET_F_SETLEASE:
7098     case TARGET_F_GETLEASE:
7099     case TARGET_F_SETPIPE_SZ:
7100     case TARGET_F_GETPIPE_SZ:
7101     case TARGET_F_ADD_SEALS:
7102     case TARGET_F_GET_SEALS:
7103         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7104         break;
7105 
7106     default:
7107         ret = get_errno(safe_fcntl(fd, cmd, arg));
7108         break;
7109     }
7110     return ret;
7111 }
7112 
7113 #ifdef USE_UID16
7114 
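/*
 * The 16-bit UID/GID syscalls cannot represent IDs above 65535, so such
 * values are reported to the guest as 65534 (the overflow ID), while -1
 * must be preserved unchanged when converting in the other direction.
 */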
7115 static inline int high2lowuid(int uid)
7116 {
7117     if (uid > 65535)
7118         return 65534;
7119     else
7120         return uid;
7121 }
7122 
7123 static inline int high2lowgid(int gid)
7124 {
7125     if (gid > 65535)
7126         return 65534;
7127     else
7128         return gid;
7129 }
7130 
7131 static inline int low2highuid(int uid)
7132 {
7133     if ((int16_t)uid == -1)
7134         return -1;
7135     else
7136         return uid;
7137 }
7138 
7139 static inline int low2highgid(int gid)
7140 {
7141     if ((int16_t)gid == -1)
7142         return -1;
7143     else
7144         return gid;
7145 }
7146 static inline int tswapid(int id)
7147 {
7148     return tswap16(id);
7149 }
7150 
7151 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7152 
7153 #else /* !USE_UID16 */
7154 static inline int high2lowuid(int uid)
7155 {
7156     return uid;
7157 }
7158 static inline int high2lowgid(int gid)
7159 {
7160     return gid;
7161 }
7162 static inline int low2highuid(int uid)
7163 {
7164     return uid;
7165 }
7166 static inline int low2highgid(int gid)
7167 {
7168     return gid;
7169 }
7170 static inline int tswapid(int id)
7171 {
7172     return tswap32(id);
7173 }
7174 
7175 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7176 
7177 #endif /* USE_UID16 */
7178 
7179 /* We must do direct syscalls for setting UID/GID, because we want to
7180  * implement the Linux system call semantics of "change only for this thread",
7181  * not the libc/POSIX semantics of "change for all threads in process".
7182  * (See http://ewontfix.com/17/ for more details.)
7183  * We use the 32-bit version of the syscalls if present; if it is not
7184  * then either the host architecture supports 32-bit UIDs natively with
7185  * the standard syscall, or the 16-bit UID is the best we can do.
7186  */
7187 #ifdef __NR_setuid32
7188 #define __NR_sys_setuid __NR_setuid32
7189 #else
7190 #define __NR_sys_setuid __NR_setuid
7191 #endif
7192 #ifdef __NR_setgid32
7193 #define __NR_sys_setgid __NR_setgid32
7194 #else
7195 #define __NR_sys_setgid __NR_setgid
7196 #endif
7197 #ifdef __NR_setresuid32
7198 #define __NR_sys_setresuid __NR_setresuid32
7199 #else
7200 #define __NR_sys_setresuid __NR_setresuid
7201 #endif
7202 #ifdef __NR_setresgid32
7203 #define __NR_sys_setresgid __NR_setresgid32
7204 #else
7205 #define __NR_sys_setresgid __NR_setresgid
7206 #endif
7207 
7208 _syscall1(int, sys_setuid, uid_t, uid)
7209 _syscall1(int, sys_setgid, gid_t, gid)
7210 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7211 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7212 
7213 void syscall_init(void)
7214 {
7215     IOCTLEntry *ie;
7216     const argtype *arg_type;
7217     int size;
7218 
7219     thunk_init(STRUCT_MAX);
7220 
7221 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7222 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7223 #include "syscall_types.h"
7224 #undef STRUCT
7225 #undef STRUCT_SPECIAL
7226 
7227     /* we patch the ioctl size if necessary. We rely on the fact that
7228        no ioctl has all the bits at '1' in the size field */
7229     ie = ioctl_entries;
7230     while (ie->target_cmd != 0) {
7231         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7232             TARGET_IOC_SIZEMASK) {
7233             arg_type = ie->arg_type;
7234             if (arg_type[0] != TYPE_PTR) {
7235                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7236                         ie->target_cmd);
7237                 exit(1);
7238             }
7239             arg_type++;
7240             size = thunk_type_size(arg_type, 0);
7241             ie->target_cmd = (ie->target_cmd &
7242                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7243                 (size << TARGET_IOC_SIZESHIFT);
7244         }
7245 
7246         /* automatic consistency check if same arch */
7247 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7248     (defined(__x86_64__) && defined(TARGET_X86_64))
7249         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7250             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7251                     ie->name, ie->target_cmd, ie->host_cmd);
7252         }
7253 #endif
7254         ie++;
7255     }
7256 }
7257 
7258 #ifdef TARGET_NR_truncate64
7259 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7260                                          abi_long arg2,
7261                                          abi_long arg3,
7262                                          abi_long arg4)
7263 {
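    /*
     * Some ABIs pass 64-bit syscall arguments in aligned register pairs;
     * in that case the offset halves arrive in arg3/arg4 rather than
     * arg2/arg3.
     */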
7264     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7265         arg2 = arg3;
7266         arg3 = arg4;
7267     }
7268     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7269 }
7270 #endif
7271 
7272 #ifdef TARGET_NR_ftruncate64
7273 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7274                                           abi_long arg2,
7275                                           abi_long arg3,
7276                                           abi_long arg4)
7277 {
7278     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7279         arg2 = arg3;
7280         arg3 = arg4;
7281     }
7282     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7283 }
7284 #endif
7285 
7286 #if defined(TARGET_NR_timer_settime) || \
7287     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7288 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7289                                                  abi_ulong target_addr)
7290 {
7291     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7292                                 offsetof(struct target_itimerspec,
7293                                          it_interval)) ||
7294         target_to_host_timespec(&host_its->it_value, target_addr +
7295                                 offsetof(struct target_itimerspec,
7296                                          it_value))) {
7297         return -TARGET_EFAULT;
7298     }
7299 
7300     return 0;
7301 }
7302 #endif
7303 
7304 #if defined(TARGET_NR_timer_settime64) || \
7305     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7306 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7307                                                    abi_ulong target_addr)
7308 {
7309     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7310                                   offsetof(struct target__kernel_itimerspec,
7311                                            it_interval)) ||
7312         target_to_host_timespec64(&host_its->it_value, target_addr +
7313                                   offsetof(struct target__kernel_itimerspec,
7314                                            it_value))) {
7315         return -TARGET_EFAULT;
7316     }
7317 
7318     return 0;
7319 }
7320 #endif
7321 
7322 #if ((defined(TARGET_NR_timerfd_gettime) || \
7323       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7324       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7325 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7326                                                  struct itimerspec *host_its)
7327 {
7328     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7329                                                        it_interval),
7330                                 &host_its->it_interval) ||
7331         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7332                                                        it_value),
7333                                 &host_its->it_value)) {
7334         return -TARGET_EFAULT;
7335     }
7336     return 0;
7337 }
7338 #endif
7339 
7340 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7341       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7342       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7343 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7344                                                    struct itimerspec *host_its)
7345 {
7346     if (host_to_target_timespec64(target_addr +
7347                                   offsetof(struct target__kernel_itimerspec,
7348                                            it_interval),
7349                                   &host_its->it_interval) ||
7350         host_to_target_timespec64(target_addr +
7351                                   offsetof(struct target__kernel_itimerspec,
7352                                            it_value),
7353                                   &host_its->it_value)) {
7354         return -TARGET_EFAULT;
7355     }
7356     return 0;
7357 }
7358 #endif
7359 
7360 #if defined(TARGET_NR_adjtimex) || \
7361     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7362 static inline abi_long target_to_host_timex(struct timex *host_tx,
7363                                             abi_long target_addr)
7364 {
7365     struct target_timex *target_tx;
7366 
7367     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7368         return -TARGET_EFAULT;
7369     }
7370 
7371     __get_user(host_tx->modes, &target_tx->modes);
7372     __get_user(host_tx->offset, &target_tx->offset);
7373     __get_user(host_tx->freq, &target_tx->freq);
7374     __get_user(host_tx->maxerror, &target_tx->maxerror);
7375     __get_user(host_tx->esterror, &target_tx->esterror);
7376     __get_user(host_tx->status, &target_tx->status);
7377     __get_user(host_tx->constant, &target_tx->constant);
7378     __get_user(host_tx->precision, &target_tx->precision);
7379     __get_user(host_tx->tolerance, &target_tx->tolerance);
7380     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7381     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7382     __get_user(host_tx->tick, &target_tx->tick);
7383     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7384     __get_user(host_tx->jitter, &target_tx->jitter);
7385     __get_user(host_tx->shift, &target_tx->shift);
7386     __get_user(host_tx->stabil, &target_tx->stabil);
7387     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7388     __get_user(host_tx->calcnt, &target_tx->calcnt);
7389     __get_user(host_tx->errcnt, &target_tx->errcnt);
7390     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7391     __get_user(host_tx->tai, &target_tx->tai);
7392 
7393     unlock_user_struct(target_tx, target_addr, 0);
7394     return 0;
7395 }
7396 
7397 static inline abi_long host_to_target_timex(abi_long target_addr,
7398                                             struct timex *host_tx)
7399 {
7400     struct target_timex *target_tx;
7401 
7402     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7403         return -TARGET_EFAULT;
7404     }
7405 
7406     __put_user(host_tx->modes, &target_tx->modes);
7407     __put_user(host_tx->offset, &target_tx->offset);
7408     __put_user(host_tx->freq, &target_tx->freq);
7409     __put_user(host_tx->maxerror, &target_tx->maxerror);
7410     __put_user(host_tx->esterror, &target_tx->esterror);
7411     __put_user(host_tx->status, &target_tx->status);
7412     __put_user(host_tx->constant, &target_tx->constant);
7413     __put_user(host_tx->precision, &target_tx->precision);
7414     __put_user(host_tx->tolerance, &target_tx->tolerance);
7415     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7416     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7417     __put_user(host_tx->tick, &target_tx->tick);
7418     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7419     __put_user(host_tx->jitter, &target_tx->jitter);
7420     __put_user(host_tx->shift, &target_tx->shift);
7421     __put_user(host_tx->stabil, &target_tx->stabil);
7422     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7423     __put_user(host_tx->calcnt, &target_tx->calcnt);
7424     __put_user(host_tx->errcnt, &target_tx->errcnt);
7425     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7426     __put_user(host_tx->tai, &target_tx->tai);
7427 
7428     unlock_user_struct(target_tx, target_addr, 1);
7429     return 0;
7430 }
7431 #endif
7432 
7433 
7434 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7435 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7436                                               abi_long target_addr)
7437 {
7438     struct target__kernel_timex *target_tx;
7439 
7440     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7441                                  offsetof(struct target__kernel_timex,
7442                                           time))) {
7443         return -TARGET_EFAULT;
7444     }
7445 
7446     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7447         return -TARGET_EFAULT;
7448     }
7449 
7450     __get_user(host_tx->modes, &target_tx->modes);
7451     __get_user(host_tx->offset, &target_tx->offset);
7452     __get_user(host_tx->freq, &target_tx->freq);
7453     __get_user(host_tx->maxerror, &target_tx->maxerror);
7454     __get_user(host_tx->esterror, &target_tx->esterror);
7455     __get_user(host_tx->status, &target_tx->status);
7456     __get_user(host_tx->constant, &target_tx->constant);
7457     __get_user(host_tx->precision, &target_tx->precision);
7458     __get_user(host_tx->tolerance, &target_tx->tolerance);
7459     __get_user(host_tx->tick, &target_tx->tick);
7460     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7461     __get_user(host_tx->jitter, &target_tx->jitter);
7462     __get_user(host_tx->shift, &target_tx->shift);
7463     __get_user(host_tx->stabil, &target_tx->stabil);
7464     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7465     __get_user(host_tx->calcnt, &target_tx->calcnt);
7466     __get_user(host_tx->errcnt, &target_tx->errcnt);
7467     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7468     __get_user(host_tx->tai, &target_tx->tai);
7469 
7470     unlock_user_struct(target_tx, target_addr, 0);
7471     return 0;
7472 }
7473 
7474 static inline abi_long host_to_target_timex64(abi_long target_addr,
7475                                               struct timex *host_tx)
7476 {
7477     struct target__kernel_timex *target_tx;
7478 
7479     if (copy_to_user_timeval64(target_addr +
7480                                offsetof(struct target__kernel_timex, time),
7481                                &host_tx->time)) {
7482         return -TARGET_EFAULT;
7483     }
7484 
7485     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7486         return -TARGET_EFAULT;
7487     }
7488 
7489     __put_user(host_tx->modes, &target_tx->modes);
7490     __put_user(host_tx->offset, &target_tx->offset);
7491     __put_user(host_tx->freq, &target_tx->freq);
7492     __put_user(host_tx->maxerror, &target_tx->maxerror);
7493     __put_user(host_tx->esterror, &target_tx->esterror);
7494     __put_user(host_tx->status, &target_tx->status);
7495     __put_user(host_tx->constant, &target_tx->constant);
7496     __put_user(host_tx->precision, &target_tx->precision);
7497     __put_user(host_tx->tolerance, &target_tx->tolerance);
7498     __put_user(host_tx->tick, &target_tx->tick);
7499     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7500     __put_user(host_tx->jitter, &target_tx->jitter);
7501     __put_user(host_tx->shift, &target_tx->shift);
7502     __put_user(host_tx->stabil, &target_tx->stabil);
7503     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7504     __put_user(host_tx->calcnt, &target_tx->calcnt);
7505     __put_user(host_tx->errcnt, &target_tx->errcnt);
7506     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7507     __put_user(host_tx->tai, &target_tx->tai);
7508 
7509     unlock_user_struct(target_tx, target_addr, 1);
7510     return 0;
7511 }
7512 #endif
7513 
7514 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7515 #define sigev_notify_thread_id _sigev_un._tid
7516 #endif
7517 
7518 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7519                                                abi_ulong target_addr)
7520 {
7521     struct target_sigevent *target_sevp;
7522 
7523     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7524         return -TARGET_EFAULT;
7525     }
7526 
7527     /* This union is awkward on 64 bit systems because it has a 32 bit
7528      * integer and a pointer in it; we follow the conversion approach
7529      * used for handling sigval types in signal.c so the guest should get
7530      * the correct value back even if we did a 64 bit byteswap and it's
7531      * using the 32 bit integer.
7532      */
7533     host_sevp->sigev_value.sival_ptr =
7534         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7535     host_sevp->sigev_signo =
7536         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7537     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7538     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7539 
7540     unlock_user_struct(target_sevp, target_addr, 1);
7541     return 0;
7542 }
7543 
7544 #if defined(TARGET_NR_mlockall)
7545 static inline int target_to_host_mlockall_arg(int arg)
7546 {
7547     int result = 0;
7548 
7549     if (arg & TARGET_MCL_CURRENT) {
7550         result |= MCL_CURRENT;
7551     }
7552     if (arg & TARGET_MCL_FUTURE) {
7553         result |= MCL_FUTURE;
7554     }
7555 #ifdef MCL_ONFAULT
7556     if (arg & TARGET_MCL_ONFAULT) {
7557         result |= MCL_ONFAULT;
7558     }
7559 #endif
7560 
7561     return result;
7562 }
7563 #endif
7564 
7565 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7566      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7567      defined(TARGET_NR_newfstatat))
7568 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7569                                              abi_ulong target_addr,
7570                                              struct stat *host_st)
7571 {
7572 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7573     if (cpu_env->eabi) {
7574         struct target_eabi_stat64 *target_st;
7575 
7576         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7577             return -TARGET_EFAULT;
7578         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7579         __put_user(host_st->st_dev, &target_st->st_dev);
7580         __put_user(host_st->st_ino, &target_st->st_ino);
7581 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7582         __put_user(host_st->st_ino, &target_st->__st_ino);
7583 #endif
7584         __put_user(host_st->st_mode, &target_st->st_mode);
7585         __put_user(host_st->st_nlink, &target_st->st_nlink);
7586         __put_user(host_st->st_uid, &target_st->st_uid);
7587         __put_user(host_st->st_gid, &target_st->st_gid);
7588         __put_user(host_st->st_rdev, &target_st->st_rdev);
7589         __put_user(host_st->st_size, &target_st->st_size);
7590         __put_user(host_st->st_blksize, &target_st->st_blksize);
7591         __put_user(host_st->st_blocks, &target_st->st_blocks);
7592         __put_user(host_st->st_atime, &target_st->target_st_atime);
7593         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7594         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7595 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7596         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7597         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7598         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7599 #endif
7600         unlock_user_struct(target_st, target_addr, 1);
7601     } else
7602 #endif
7603     {
7604 #if defined(TARGET_HAS_STRUCT_STAT64)
7605         struct target_stat64 *target_st;
7606 #else
7607         struct target_stat *target_st;
7608 #endif
7609 
7610         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7611             return -TARGET_EFAULT;
7612         memset(target_st, 0, sizeof(*target_st));
7613         __put_user(host_st->st_dev, &target_st->st_dev);
7614         __put_user(host_st->st_ino, &target_st->st_ino);
7615 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7616         __put_user(host_st->st_ino, &target_st->__st_ino);
7617 #endif
7618         __put_user(host_st->st_mode, &target_st->st_mode);
7619         __put_user(host_st->st_nlink, &target_st->st_nlink);
7620         __put_user(host_st->st_uid, &target_st->st_uid);
7621         __put_user(host_st->st_gid, &target_st->st_gid);
7622         __put_user(host_st->st_rdev, &target_st->st_rdev);
7623         /* XXX: better use of kernel struct */
7624         __put_user(host_st->st_size, &target_st->st_size);
7625         __put_user(host_st->st_blksize, &target_st->st_blksize);
7626         __put_user(host_st->st_blocks, &target_st->st_blocks);
7627         __put_user(host_st->st_atime, &target_st->target_st_atime);
7628         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7629         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7630 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7631         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7632         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7633         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7634 #endif
7635         unlock_user_struct(target_st, target_addr, 1);
7636     }
7637 
7638     return 0;
7639 }
7640 #endif
7641 
7642 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7643 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7644                                             abi_ulong target_addr)
7645 {
7646     struct target_statx *target_stx;
7647 
7648     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7649         return -TARGET_EFAULT;
7650     }
7651     memset(target_stx, 0, sizeof(*target_stx));
7652 
7653     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7654     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7655     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7656     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7657     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7658     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7659     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7660     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7661     __put_user(host_stx->stx_size, &target_stx->stx_size);
7662     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7663     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7664     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7665     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7666     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7667     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7668     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7669     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7670     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7671     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7672     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7673     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7674     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7675     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7676 
7677     unlock_user_struct(target_stx, target_addr, 1);
7678 
7679     return 0;
7680 }
7681 #endif
7682 
7683 static int do_sys_futex(int *uaddr, int op, int val,
7684                          const struct timespec *timeout, int *uaddr2,
7685                          int val3)
7686 {
7687 #if HOST_LONG_BITS == 64
7688 #if defined(__NR_futex)
7689     /* a 64-bit host always has a 64-bit time_t and defines no _time64 variant */
7690     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7691 
7692 #endif
7693 #else /* HOST_LONG_BITS == 64 */
7694 #if defined(__NR_futex_time64)
7695     if (sizeof(timeout->tv_sec) == 8) {
7696         /* _time64 function on 32bit arch */
7697         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7698     }
7699 #endif
7700 #if defined(__NR_futex)
7701     /* old function on 32bit arch */
7702     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7703 #endif
7704 #endif /* HOST_LONG_BITS == 64 */
7705     g_assert_not_reached();
7706 }
7707 
7708 static int do_safe_futex(int *uaddr, int op, int val,
7709                          const struct timespec *timeout, int *uaddr2,
7710                          int val3)
7711 {
7712 #if HOST_LONG_BITS == 64
7713 #if defined(__NR_futex)
7714     /* a 64-bit host always has a 64-bit time_t and defines no _time64 variant */
7715     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7716 #endif
7717 #else /* HOST_LONG_BITS == 64 */
7718 #if defined(__NR_futex_time64)
7719     if (sizeof(timeout->tv_sec) == 8) {
7720         /* _time64 function on 32bit arch */
7721         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7722                                            val3));
7723     }
7724 #endif
7725 #if defined(__NR_futex)
7726     /* old function on 32bit arch */
7727     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7728 #endif
7729 #endif /* HOST_LONG_BITS == 64 */
7730     return -TARGET_ENOSYS;
7731 }
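
/*
 * Both helpers above select the host futex syscall that matches the
 * width of the host timespec: a 64-bit host uses __NR_futex, which
 * already takes a 64-bit time_t, while a 32-bit host prefers
 * __NR_futex_time64 when the host tv_sec is 64 bits wide and falls
 * back to the legacy __NR_futex otherwise.
 */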
7732 
7733 /* ??? Using host futex calls even when target atomic operations
7734    are not really atomic probably breaks things.  However, implementing
7735    futexes locally would make futexes shared between multiple processes
7736    tricky.  Then again, they're probably useless in that case because
7737    guest atomic operations won't work either.  */
7738 #if defined(TARGET_NR_futex)
7739 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7740                     target_ulong timeout, target_ulong uaddr2, int val3)
7741 {
7742     struct timespec ts, *pts;
7743     int base_op;
7744 
7745     /* ??? We assume FUTEX_* constants are the same on both host
7746        and target.  */
7747 #ifdef FUTEX_CMD_MASK
7748     base_op = op & FUTEX_CMD_MASK;
7749 #else
7750     base_op = op;
7751 #endif
7752     switch (base_op) {
7753     case FUTEX_WAIT:
7754     case FUTEX_WAIT_BITSET:
7755         if (timeout) {
7756             pts = &ts;
7757             target_to_host_timespec(pts, timeout);
7758         } else {
7759             pts = NULL;
7760         }
7761         return do_safe_futex(g2h(cpu, uaddr),
7762                              op, tswap32(val), pts, NULL, val3);
7763     case FUTEX_WAKE:
7764         return do_safe_futex(g2h(cpu, uaddr),
7765                              op, val, NULL, NULL, 0);
7766     case FUTEX_FD:
7767         return do_safe_futex(g2h(cpu, uaddr),
7768                              op, val, NULL, NULL, 0);
7769     case FUTEX_REQUEUE:
7770     case FUTEX_CMP_REQUEUE:
7771     case FUTEX_WAKE_OP:
7772         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7773            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7774            But the prototype takes a `struct timespec *'; insert casts
7775            to satisfy the compiler.  We do not need to tswap TIMEOUT
7776            since it's not compared to guest memory.  */
7777         pts = (struct timespec *)(uintptr_t) timeout;
7778         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7779                              (base_op == FUTEX_CMP_REQUEUE
7780                               ? tswap32(val3) : val3));
7781     default:
7782         return -TARGET_ENOSYS;
7783     }
7784 }
7785 #endif
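
/*
 * Note that for FUTEX_REQUEUE, FUTEX_CMP_REQUEUE and FUTEX_WAKE_OP the
 * kernel reinterprets the timeout slot as a plain count (for the
 * requeue operations it bounds how many waiters may be moved to
 * uaddr2), which is why the guest value is forwarded without any
 * timespec conversion.
 */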
7786 
7787 #if defined(TARGET_NR_futex_time64)
7788 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7789                            int val, target_ulong timeout,
7790                            target_ulong uaddr2, int val3)
7791 {
7792     struct timespec ts, *pts;
7793     int base_op;
7794 
7795     /* ??? We assume FUTEX_* constants are the same on both host
7796        and target.  */
7797 #ifdef FUTEX_CMD_MASK
7798     base_op = op & FUTEX_CMD_MASK;
7799 #else
7800     base_op = op;
7801 #endif
7802     switch (base_op) {
7803     case FUTEX_WAIT:
7804     case FUTEX_WAIT_BITSET:
7805         if (timeout) {
7806             pts = &ts;
7807             if (target_to_host_timespec64(pts, timeout)) {
7808                 return -TARGET_EFAULT;
7809             }
7810         } else {
7811             pts = NULL;
7812         }
7813         return do_safe_futex(g2h(cpu, uaddr), op,
7814                              tswap32(val), pts, NULL, val3);
7815     case FUTEX_WAKE:
7816         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7817     case FUTEX_FD:
7818         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7819     case FUTEX_REQUEUE:
7820     case FUTEX_CMP_REQUEUE:
7821     case FUTEX_WAKE_OP:
7822         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7823            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7824            But the prototype takes a `struct timespec *'; insert casts
7825            to satisfy the compiler.  We do not need to tswap TIMEOUT
7826            since it's not compared to guest memory.  */
7827         pts = (struct timespec *)(uintptr_t) timeout;
7828         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7829                              (base_op == FUTEX_CMP_REQUEUE
7830                               ? tswap32(val3) : val3));
7831     default:
7832         return -TARGET_ENOSYS;
7833     }
7834 }
7835 #endif
7836 
7837 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7838 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7839                                      abi_long handle, abi_long mount_id,
7840                                      abi_long flags)
7841 {
7842     struct file_handle *target_fh;
7843     struct file_handle *fh;
7844     int mid = 0;
7845     abi_long ret;
7846     char *name;
7847     unsigned int size, total_size;
7848 
7849     if (get_user_s32(size, handle)) {
7850         return -TARGET_EFAULT;
7851     }
7852 
7853     name = lock_user_string(pathname);
7854     if (!name) {
7855         return -TARGET_EFAULT;
7856     }
7857 
7858     total_size = sizeof(struct file_handle) + size;
7859     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7860     if (!target_fh) {
7861         unlock_user(name, pathname, 0);
7862         return -TARGET_EFAULT;
7863     }
7864 
7865     fh = g_malloc0(total_size);
7866     fh->handle_bytes = size;
7867 
7868     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7869     unlock_user(name, pathname, 0);
7870 
7871     /* man name_to_handle_at(2):
7872      * Other than the use of the handle_bytes field, the caller should treat
7873      * the file_handle structure as an opaque data type
7874      */
7875 
7876     memcpy(target_fh, fh, total_size);
7877     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7878     target_fh->handle_type = tswap32(fh->handle_type);
7879     g_free(fh);
7880     unlock_user(target_fh, handle, total_size);
7881 
7882     if (put_user_s32(mid, mount_id)) {
7883         return -TARGET_EFAULT;
7884     }
7885 
7886     return ret;
7887 
7888 }
7889 #endif
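
/*
 * In do_name_to_handle_at() above only handle_bytes and handle_type are
 * byte-swapped for the guest; the variable-length f_handle payload that
 * follows them is opaque and is copied out verbatim.
 */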
7890 
7891 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7892 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7893                                      abi_long flags)
7894 {
7895     struct file_handle *target_fh;
7896     struct file_handle *fh;
7897     unsigned int size, total_size;
7898     abi_long ret;
7899 
7900     if (get_user_s32(size, handle)) {
7901         return -TARGET_EFAULT;
7902     }
7903 
7904     total_size = sizeof(struct file_handle) + size;
7905     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7906     if (!target_fh) {
7907         return -TARGET_EFAULT;
7908     }
7909 
7910     fh = g_memdup(target_fh, total_size);
7911     fh->handle_bytes = size;
7912     fh->handle_type = tswap32(target_fh->handle_type);
7913 
7914     ret = get_errno(open_by_handle_at(mount_fd, fh,
7915                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7916 
7917     g_free(fh);
7918 
7919     unlock_user(target_fh, handle, total_size);
7920 
7921     return ret;
7922 }
7923 #endif
7924 
7925 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7926 
7927 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7928 {
7929     int host_flags;
7930     target_sigset_t *target_mask;
7931     sigset_t host_mask;
7932     abi_long ret;
7933 
7934     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7935         return -TARGET_EINVAL;
7936     }
7937     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7938         return -TARGET_EFAULT;
7939     }
7940 
7941     target_to_host_sigset(&host_mask, target_mask);
7942 
7943     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7944 
7945     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7946     if (ret >= 0) {
7947         fd_trans_register(ret, &target_signalfd_trans);
7948     }
7949 
7950     unlock_user_struct(target_mask, mask, 0);
7951 
7952     return ret;
7953 }
7954 #endif
7955 
7956 /* Map host to target signal numbers for the wait family of syscalls.
7957    Assume all other status bits are the same.  */
7958 int host_to_target_waitstatus(int status)
7959 {
7960     if (WIFSIGNALED(status)) {
7961         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7962     }
7963     if (WIFSTOPPED(status)) {
7964         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7965                | (status & 0xff);
7966     }
7967     return status;
7968 }
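
/*
 * For example, for a child terminated by a signal only the low seven
 * bits (the terminating signal number) are remapped to the target
 * numbering; the core-dump flag and any exit-status bits are passed
 * through unchanged.
 */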
7969 
7970 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7971 {
7972     CPUState *cpu = env_cpu(cpu_env);
7973     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7974     int i;
7975 
7976     for (i = 0; i < bprm->argc; i++) {
7977         size_t len = strlen(bprm->argv[i]) + 1;
7978 
7979         if (write(fd, bprm->argv[i], len) != len) {
7980             return -1;
7981         }
7982     }
7983 
7984     return 0;
7985 }
7986 
7987 static int open_self_maps(CPUArchState *cpu_env, int fd)
7988 {
7989     CPUState *cpu = env_cpu(cpu_env);
7990     TaskState *ts = cpu->opaque;
7991     GSList *map_info = read_self_maps();
7992     GSList *s;
7993     int count;
7994 
7995     for (s = map_info; s; s = g_slist_next(s)) {
7996         MapInfo *e = (MapInfo *) s->data;
7997 
7998         if (h2g_valid(e->start)) {
7999             unsigned long min = e->start;
8000             unsigned long max = e->end;
8001             int flags = page_get_flags(h2g(min));
8002             const char *path;
8003 
8004             max = h2g_valid(max - 1) ?
8005                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8006 
8007             if (page_check_range(h2g(min), max - min, flags) == -1) {
8008                 continue;
8009             }
8010 
8011             if (h2g(min) == ts->info->stack_limit) {
8012                 path = "[stack]";
8013             } else {
8014                 path = e->path;
8015             }
8016 
8017             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8018                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8019                             h2g(min), h2g(max - 1) + 1,
8020                             (flags & PAGE_READ) ? 'r' : '-',
8021                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8022                             (flags & PAGE_EXEC) ? 'x' : '-',
8023                             e->is_priv ? 'p' : 's',
8024                             (uint64_t) e->offset, e->dev, e->inode);
8025             if (path) {
8026                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8027             } else {
8028                 dprintf(fd, "\n");
8029             }
8030         }
8031     }
8032 
8033     free_self_maps(map_info);
8034 
8035 #ifdef TARGET_VSYSCALL_PAGE
8036     /*
8037      * We only support execution from the vsyscall page.
8038      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8039      */
8040     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8041                     " --xp 00000000 00:00 0",
8042                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8043     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8044 #endif
8045 
8046     return 0;
8047 }
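
/*
 * The synthesized listing mirrors the host /proc/self/maps format but
 * reports guest virtual addresses and guest page protections, i.e. each
 * mapping is described as the guest sees it rather than where it sits
 * in QEMU's own address space.
 */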
8048 
8049 static int open_self_stat(CPUArchState *cpu_env, int fd)
8050 {
8051     CPUState *cpu = env_cpu(cpu_env);
8052     TaskState *ts = cpu->opaque;
8053     g_autoptr(GString) buf = g_string_new(NULL);
8054     int i;
8055 
8056     for (i = 0; i < 44; i++) {
8057         if (i == 0) {
8058             /* pid */
8059             g_string_printf(buf, FMT_pid " ", getpid());
8060         } else if (i == 1) {
8061             /* app name */
8062             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8063             bin = bin ? bin + 1 : ts->bprm->argv[0];
8064             g_string_printf(buf, "(%.15s) ", bin);
8065         } else if (i == 3) {
8066             /* ppid */
8067             g_string_printf(buf, FMT_pid " ", getppid());
8068         } else if (i == 21) {
8069             /* starttime */
8070             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8071         } else if (i == 27) {
8072             /* stack bottom */
8073             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8074         } else {
8075             /* all remaining fields are not emulated and read as 0 */
8076             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8077         }
8078 
8079         if (write(fd, buf->str, buf->len) != buf->len) {
8080             return -1;
8081         }
8082     }
8083 
8084     return 0;
8085 }
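
/*
 * Only the /proc/self/stat fields that guest code commonly inspects are
 * synthesized above: pid, the executable name, ppid, starttime and the
 * stack start address; every other field reads as 0.
 */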
8086 
8087 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8088 {
8089     CPUState *cpu = env_cpu(cpu_env);
8090     TaskState *ts = cpu->opaque;
8091     abi_ulong auxv = ts->info->saved_auxv;
8092     abi_ulong len = ts->info->auxv_len;
8093     char *ptr;
8094 
8095     /*
8096      * The auxiliary vector is stored on the target process stack.
8097      * Read the whole auxv vector and copy it to the file.
8098      */
8099     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8100     if (ptr != NULL) {
8101         while (len > 0) {
8102             ssize_t r;
8103             r = write(fd, ptr, len);
8104             if (r <= 0) {
8105                 break;
8106             }
8107             len -= r;
8108             ptr += r;
8109         }
8110         lseek(fd, 0, SEEK_SET);
8111         unlock_user(ptr, auxv, len);
8112     }
8113 
8114     return 0;
8115 }
8116 
8117 static int is_proc_myself(const char *filename, const char *entry)
8118 {
8119     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8120         filename += strlen("/proc/");
8121         if (!strncmp(filename, "self/", strlen("self/"))) {
8122             filename += strlen("self/");
8123         } else if (*filename >= '1' && *filename <= '9') {
8124             char myself[80];
8125             snprintf(myself, sizeof(myself), "%d/", getpid());
8126             if (!strncmp(filename, myself, strlen(myself))) {
8127                 filename += strlen(myself);
8128             } else {
8129                 return 0;
8130             }
8131         } else {
8132             return 0;
8133         }
8134         if (!strcmp(filename, entry)) {
8135             return 1;
8136         }
8137     }
8138     return 0;
8139 }
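
/*
 * For example, is_proc_myself("/proc/self/maps", "maps") and
 * is_proc_myself("/proc/<own pid>/maps", "maps") both return 1, while a
 * path naming any other pid, or any other /proc entry, returns 0.
 */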
8140 
8141 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8142     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8143 static int is_proc(const char *filename, const char *entry)
8144 {
8145     return strcmp(filename, entry) == 0;
8146 }
8147 #endif
8148 
8149 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8150 static int open_net_route(CPUArchState *cpu_env, int fd)
8151 {
8152     FILE *fp;
8153     char *line = NULL;
8154     size_t len = 0;
8155     ssize_t read;
8156 
8157     fp = fopen("/proc/net/route", "r");
8158     if (fp == NULL) {
8159         return -1;
8160     }
8161 
8162     /* read header */
8163 
8164     read = getline(&line, &len, fp);
8165     dprintf(fd, "%s", line);
8166 
8167     /* read routes */
8168 
8169     while ((read = getline(&line, &len, fp)) != -1) {
8170         char iface[16];
8171         uint32_t dest, gw, mask;
8172         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8173         int fields;
8174 
8175         fields = sscanf(line,
8176                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8177                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8178                         &mask, &mtu, &window, &irtt);
8179         if (fields != 11) {
8180             continue;
8181         }
8182         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8183                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8184                 metric, tswap32(mask), mtu, window, irtt);
8185     }
8186 
8187     free(line);
8188     fclose(fp);
8189 
8190     return 0;
8191 }
8192 #endif
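
/*
 * /proc/net/route presents addresses as hexadecimal words; when host and
 * target endianness differ, open_net_route() byte-swaps the destination,
 * gateway and mask columns so the guest sees them in its own byte order,
 * while the remaining numeric columns are endian-neutral text.
 */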
8193 
8194 #if defined(TARGET_SPARC)
8195 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8196 {
8197     dprintf(fd, "type\t\t: sun4u\n");
8198     return 0;
8199 }
8200 #endif
8201 
8202 #if defined(TARGET_HPPA)
8203 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8204 {
8205     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8206     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8207     dprintf(fd, "capabilities\t: os32\n");
8208     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8209     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8210     return 0;
8211 }
8212 #endif
8213 
8214 #if defined(TARGET_M68K)
8215 static int open_hardware(CPUArchState *cpu_env, int fd)
8216 {
8217     dprintf(fd, "Model:\t\tqemu-m68k\n");
8218     return 0;
8219 }
8220 #endif
8221 
8222 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8223 {
8224     struct fake_open {
8225         const char *filename;
8226         int (*fill)(CPUArchState *cpu_env, int fd);
8227         int (*cmp)(const char *s1, const char *s2);
8228     };
8229     const struct fake_open *fake_open;
8230     static const struct fake_open fakes[] = {
8231         { "maps", open_self_maps, is_proc_myself },
8232         { "stat", open_self_stat, is_proc_myself },
8233         { "auxv", open_self_auxv, is_proc_myself },
8234         { "cmdline", open_self_cmdline, is_proc_myself },
8235 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8236         { "/proc/net/route", open_net_route, is_proc },
8237 #endif
8238 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8239         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8240 #endif
8241 #if defined(TARGET_M68K)
8242         { "/proc/hardware", open_hardware, is_proc },
8243 #endif
8244         { NULL, NULL, NULL }
8245     };
8246 
8247     if (is_proc_myself(pathname, "exe")) {
8248         int execfd = qemu_getauxval(AT_EXECFD);
8249         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8250     }
8251 
8252     for (fake_open = fakes; fake_open->filename; fake_open++) {
8253         if (fake_open->cmp(pathname, fake_open->filename)) {
8254             break;
8255         }
8256     }
8257 
8258     if (fake_open->filename) {
8259         const char *tmpdir;
8260         char filename[PATH_MAX];
8261         int fd, r;
8262 
8263         /* create a temporary file to hold the synthesized /proc contents */
8264         tmpdir = getenv("TMPDIR");
8265         if (!tmpdir)
8266             tmpdir = "/tmp";
8267         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8268         fd = mkstemp(filename);
8269         if (fd < 0) {
8270             return fd;
8271         }
8272         unlink(filename);
8273 
8274         if ((r = fake_open->fill(cpu_env, fd))) {
8275             int e = errno;
8276             close(fd);
8277             errno = e;
8278             return r;
8279         }
8280         lseek(fd, 0, SEEK_SET);
8281 
8282         return fd;
8283     }
8284 
8285     return safe_openat(dirfd, path(pathname), flags, mode);
8286 }
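
/*
 * The net effect is that a guest open of e.g. /proc/self/maps never
 * touches the corresponding host file: the matching fill() callback
 * writes a synthesized view into an unlinked temporary file and that
 * descriptor is handed back to the guest instead.
 */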
8287 
8288 #define TIMER_MAGIC 0x0caf0000
8289 #define TIMER_MAGIC_MASK 0xffff0000
8290 
8291 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8292 static target_timer_t get_timer_id(abi_long arg)
8293 {
8294     target_timer_t timerid = arg;
8295 
8296     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8297         return -TARGET_EINVAL;
8298     }
8299 
8300     timerid &= 0xffff;
8301 
8302     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8303         return -TARGET_EINVAL;
8304     }
8305 
8306     return timerid;
8307 }
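
/*
 * For example, a guest-visible timer ID of 0x0caf0003 decodes to index 3
 * of g_posix_timers, while any value lacking the TIMER_MAGIC tag, or
 * indexing past the table, is rejected with -TARGET_EINVAL.
 */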
8308 
8309 static int target_to_host_cpu_mask(unsigned long *host_mask,
8310                                    size_t host_size,
8311                                    abi_ulong target_addr,
8312                                    size_t target_size)
8313 {
8314     unsigned target_bits = sizeof(abi_ulong) * 8;
8315     unsigned host_bits = sizeof(*host_mask) * 8;
8316     abi_ulong *target_mask;
8317     unsigned i, j;
8318 
8319     assert(host_size >= target_size);
8320 
8321     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8322     if (!target_mask) {
8323         return -TARGET_EFAULT;
8324     }
8325     memset(host_mask, 0, host_size);
8326 
8327     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8328         unsigned bit = i * target_bits;
8329         abi_ulong val;
8330 
8331         __get_user(val, &target_mask[i]);
8332         for (j = 0; j < target_bits; j++, bit++) {
8333             if (val & (1UL << j)) {
8334                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8335             }
8336         }
8337     }
8338 
8339     unlock_user(target_mask, target_addr, 0);
8340     return 0;
8341 }
8342 
8343 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8344                                    size_t host_size,
8345                                    abi_ulong target_addr,
8346                                    size_t target_size)
8347 {
8348     unsigned target_bits = sizeof(abi_ulong) * 8;
8349     unsigned host_bits = sizeof(*host_mask) * 8;
8350     abi_ulong *target_mask;
8351     unsigned i, j;
8352 
8353     assert(host_size >= target_size);
8354 
8355     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8356     if (!target_mask) {
8357         return -TARGET_EFAULT;
8358     }
8359 
8360     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8361         unsigned bit = i * target_bits;
8362         abi_ulong val = 0;
8363 
8364         for (j = 0; j < target_bits; j++, bit++) {
8365             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8366                 val |= 1UL << j;
8367             }
8368         }
8369         __put_user(val, &target_mask[i]);
8370     }
8371 
8372     unlock_user(target_mask, target_addr, target_size);
8373     return 0;
8374 }
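
/*
 * The two converters above copy the CPU affinity bitmap bit by bit, so
 * the result stays correct even when abi_ulong and the host unsigned
 * long differ in width or byte order: guest CPU bit N always maps to
 * host CPU bit N and vice versa.
 */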
8375 
8376 #ifdef TARGET_NR_getdents
8377 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8378 {
8379     g_autofree void *hdirp = NULL;
8380     void *tdirp;
8381     int hlen, hoff, toff;
8382     int hreclen, treclen;
8383     off64_t prev_diroff = 0;
8384 
8385     hdirp = g_try_malloc(count);
8386     if (!hdirp) {
8387         return -TARGET_ENOMEM;
8388     }
8389 
8390 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8391     hlen = sys_getdents(dirfd, hdirp, count);
8392 #else
8393     hlen = sys_getdents64(dirfd, hdirp, count);
8394 #endif
8395 
8396     hlen = get_errno(hlen);
8397     if (is_error(hlen)) {
8398         return hlen;
8399     }
8400 
8401     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8402     if (!tdirp) {
8403         return -TARGET_EFAULT;
8404     }
8405 
8406     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8407 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8408         struct linux_dirent *hde = hdirp + hoff;
8409 #else
8410         struct linux_dirent64 *hde = hdirp + hoff;
8411 #endif
8412         struct target_dirent *tde = tdirp + toff;
8413         int namelen;
8414         uint8_t type;
8415 
8416         namelen = strlen(hde->d_name);
8417         hreclen = hde->d_reclen;
8418         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8419         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8420 
8421         if (toff + treclen > count) {
8422             /*
8423              * If the host struct is smaller than the target struct, or
8424              * requires less alignment and thus packs into less space,
8425              * then the host can return more entries than we can pass
8426              * on to the guest.
8427              */
8428             if (toff == 0) {
8429                 toff = -TARGET_EINVAL; /* result buffer is too small */
8430                 break;
8431             }
8432             /*
8433              * Return what we have, resetting the file pointer to the
8434              * location of the first record not returned.
8435              */
8436             lseek64(dirfd, prev_diroff, SEEK_SET);
8437             break;
8438         }
8439 
8440         prev_diroff = hde->d_off;
8441         tde->d_ino = tswapal(hde->d_ino);
8442         tde->d_off = tswapal(hde->d_off);
8443         tde->d_reclen = tswap16(treclen);
8444         memcpy(tde->d_name, hde->d_name, namelen + 1);
8445 
8446         /*
8447          * The getdents type is in what was formerly a padding byte at the
8448          * end of the structure.
8449          */
8450 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8451         type = *((uint8_t *)hde + hreclen - 1);
8452 #else
8453         type = hde->d_type;
8454 #endif
8455         *((uint8_t *)tde + treclen - 1) = type;
8456     }
8457 
8458     unlock_user(tdirp, arg2, toff);
8459     return toff;
8460 }
8461 #endif /* TARGET_NR_getdents */
8462 
8463 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8464 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8465 {
8466     g_autofree void *hdirp = NULL;
8467     void *tdirp;
8468     int hlen, hoff, toff;
8469     int hreclen, treclen;
8470     off64_t prev_diroff = 0;
8471 
8472     hdirp = g_try_malloc(count);
8473     if (!hdirp) {
8474         return -TARGET_ENOMEM;
8475     }
8476 
8477     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8478     if (is_error(hlen)) {
8479         return hlen;
8480     }
8481 
8482     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8483     if (!tdirp) {
8484         return -TARGET_EFAULT;
8485     }
8486 
8487     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8488         struct linux_dirent64 *hde = hdirp + hoff;
8489         struct target_dirent64 *tde = tdirp + toff;
8490         int namelen;
8491 
8492         namelen = strlen(hde->d_name) + 1;
8493         hreclen = hde->d_reclen;
8494         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8495         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8496 
8497         if (toff + treclen > count) {
8498             /*
8499              * If the host struct is smaller than the target struct, or
8500              * requires less alignment and thus packs into less space,
8501              * then the host can return more entries than we can pass
8502              * on to the guest.
8503              */
8504             if (toff == 0) {
8505                 toff = -TARGET_EINVAL; /* result buffer is too small */
8506                 break;
8507             }
8508             /*
8509              * Return what we have, resetting the file pointer to the
8510              * location of the first record not returned.
8511              */
8512             lseek64(dirfd, prev_diroff, SEEK_SET);
8513             break;
8514         }
8515 
8516         prev_diroff = hde->d_off;
8517         tde->d_ino = tswap64(hde->d_ino);
8518         tde->d_off = tswap64(hde->d_off);
8519         tde->d_reclen = tswap16(treclen);
8520         tde->d_type = hde->d_type;
8521         memcpy(tde->d_name, hde->d_name, namelen);
8522     }
8523 
8524     unlock_user(tdirp, arg2, toff);
8525     return toff;
8526 }
8527 #endif /* TARGET_NR_getdents64 */
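
/*
 * Both getdents conversions above repack each host record into the
 * target layout: the target d_reclen is recomputed from the target
 * structure's own offsets and alignment rather than copied from the
 * host, and when the repacked records no longer fit in the guest buffer
 * the directory offset is rewound so the guest's next call resumes at
 * the first record that was not returned.
 */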
8528 
8529 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8530 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8531 #endif
8532 
8533 /* This is an internal helper for do_syscall that provides a single
8534  * return point, so that actions such as logging of syscall results
8535  * can be performed in one place.
8536  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8537  */
8538 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8539                             abi_long arg2, abi_long arg3, abi_long arg4,
8540                             abi_long arg5, abi_long arg6, abi_long arg7,
8541                             abi_long arg8)
8542 {
8543     CPUState *cpu = env_cpu(cpu_env);
8544     abi_long ret;
8545 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8546     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8547     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8548     || defined(TARGET_NR_statx)
8549     struct stat st;
8550 #endif
8551 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8552     || defined(TARGET_NR_fstatfs)
8553     struct statfs stfs;
8554 #endif
8555     void *p;
8556 
8557     switch(num) {
8558     case TARGET_NR_exit:
8559         /* In old applications this may be used to implement _exit(2).
8560            However, in threaded applications it is used for thread termination,
8561            and _exit_group is used for application termination.
8562            Do thread termination if we have more than one thread.  */
8563 
8564         if (block_signals()) {
8565             return -QEMU_ERESTARTSYS;
8566         }
8567 
8568         pthread_mutex_lock(&clone_lock);
8569 
8570         if (CPU_NEXT(first_cpu)) {
8571             TaskState *ts = cpu->opaque;
8572 
8573             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8574             object_unref(OBJECT(cpu));
8575             /*
8576              * At this point the CPU should be unrealized and removed
8577              * from cpu lists. We can clean-up the rest of the thread
8578              * data without the lock held.
8579              */
8580 
8581             pthread_mutex_unlock(&clone_lock);
8582 
8583             if (ts->child_tidptr) {
8584                 put_user_u32(0, ts->child_tidptr);
8585                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8586                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8587             }
8588             thread_cpu = NULL;
8589             g_free(ts);
8590             rcu_unregister_thread();
8591             pthread_exit(NULL);
8592         }
8593 
8594         pthread_mutex_unlock(&clone_lock);
8595         preexit_cleanup(cpu_env, arg1);
8596         _exit(arg1);
8597         return 0; /* avoid warning */
8598     case TARGET_NR_read:
8599         if (arg2 == 0 && arg3 == 0) {
8600             return get_errno(safe_read(arg1, 0, 0));
8601         } else {
8602             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8603                 return -TARGET_EFAULT;
8604             ret = get_errno(safe_read(arg1, p, arg3));
8605             if (ret >= 0 &&
8606                 fd_trans_host_to_target_data(arg1)) {
8607                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8608             }
8609             unlock_user(p, arg2, ret);
8610         }
8611         return ret;
8612     case TARGET_NR_write:
8613         if (arg2 == 0 && arg3 == 0) {
8614             return get_errno(safe_write(arg1, 0, 0));
8615         }
8616         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8617             return -TARGET_EFAULT;
8618         if (fd_trans_target_to_host_data(arg1)) {
8619             void *copy = g_malloc(arg3);
8620             memcpy(copy, p, arg3);
8621             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8622             if (ret >= 0) {
8623                 ret = get_errno(safe_write(arg1, copy, ret));
8624             }
8625             g_free(copy);
8626         } else {
8627             ret = get_errno(safe_write(arg1, p, arg3));
8628         }
8629         unlock_user(p, arg2, 0);
8630         return ret;
8631 
8632 #ifdef TARGET_NR_open
8633     case TARGET_NR_open:
8634         if (!(p = lock_user_string(arg1)))
8635             return -TARGET_EFAULT;
8636         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8637                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8638                                   arg3));
8639         fd_trans_unregister(ret);
8640         unlock_user(p, arg1, 0);
8641         return ret;
8642 #endif
8643     case TARGET_NR_openat:
8644         if (!(p = lock_user_string(arg2)))
8645             return -TARGET_EFAULT;
8646         ret = get_errno(do_openat(cpu_env, arg1, p,
8647                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8648                                   arg4));
8649         fd_trans_unregister(ret);
8650         unlock_user(p, arg2, 0);
8651         return ret;
8652 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8653     case TARGET_NR_name_to_handle_at:
8654         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8655         return ret;
8656 #endif
8657 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8658     case TARGET_NR_open_by_handle_at:
8659         ret = do_open_by_handle_at(arg1, arg2, arg3);
8660         fd_trans_unregister(ret);
8661         return ret;
8662 #endif
8663     case TARGET_NR_close:
8664         fd_trans_unregister(arg1);
8665         return get_errno(close(arg1));
8666 
8667     case TARGET_NR_brk:
8668         return do_brk(arg1);
8669 #ifdef TARGET_NR_fork
8670     case TARGET_NR_fork:
8671         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8672 #endif
8673 #ifdef TARGET_NR_waitpid
8674     case TARGET_NR_waitpid:
8675         {
8676             int status;
8677             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8678             if (!is_error(ret) && arg2 && ret
8679                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8680                 return -TARGET_EFAULT;
8681         }
8682         return ret;
8683 #endif
8684 #ifdef TARGET_NR_waitid
8685     case TARGET_NR_waitid:
8686         {
8687             siginfo_t info;
8688             info.si_pid = 0;
8689             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8690             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8691                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8692                     return -TARGET_EFAULT;
8693                 host_to_target_siginfo(p, &info);
8694                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8695             }
8696         }
8697         return ret;
8698 #endif
8699 #ifdef TARGET_NR_creat /* not on alpha */
8700     case TARGET_NR_creat:
8701         if (!(p = lock_user_string(arg1)))
8702             return -TARGET_EFAULT;
8703         ret = get_errno(creat(p, arg2));
8704         fd_trans_unregister(ret);
8705         unlock_user(p, arg1, 0);
8706         return ret;
8707 #endif
8708 #ifdef TARGET_NR_link
8709     case TARGET_NR_link:
8710         {
8711             void * p2;
8712             p = lock_user_string(arg1);
8713             p2 = lock_user_string(arg2);
8714             if (!p || !p2)
8715                 ret = -TARGET_EFAULT;
8716             else
8717                 ret = get_errno(link(p, p2));
8718             unlock_user(p2, arg2, 0);
8719             unlock_user(p, arg1, 0);
8720         }
8721         return ret;
8722 #endif
8723 #if defined(TARGET_NR_linkat)
8724     case TARGET_NR_linkat:
8725         {
8726             void * p2 = NULL;
8727             if (!arg2 || !arg4)
8728                 return -TARGET_EFAULT;
8729             p  = lock_user_string(arg2);
8730             p2 = lock_user_string(arg4);
8731             if (!p || !p2)
8732                 ret = -TARGET_EFAULT;
8733             else
8734                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8735             unlock_user(p, arg2, 0);
8736             unlock_user(p2, arg4, 0);
8737         }
8738         return ret;
8739 #endif
8740 #ifdef TARGET_NR_unlink
8741     case TARGET_NR_unlink:
8742         if (!(p = lock_user_string(arg1)))
8743             return -TARGET_EFAULT;
8744         ret = get_errno(unlink(p));
8745         unlock_user(p, arg1, 0);
8746         return ret;
8747 #endif
8748 #if defined(TARGET_NR_unlinkat)
8749     case TARGET_NR_unlinkat:
8750         if (!(p = lock_user_string(arg2)))
8751             return -TARGET_EFAULT;
8752         ret = get_errno(unlinkat(arg1, p, arg3));
8753         unlock_user(p, arg2, 0);
8754         return ret;
8755 #endif
8756     case TARGET_NR_execve:
8757         {
8758             char **argp, **envp;
8759             int argc, envc;
8760             abi_ulong gp;
8761             abi_ulong guest_argp;
8762             abi_ulong guest_envp;
8763             abi_ulong addr;
8764             char **q;
8765 
8766             argc = 0;
8767             guest_argp = arg2;
8768             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8769                 if (get_user_ual(addr, gp))
8770                     return -TARGET_EFAULT;
8771                 if (!addr)
8772                     break;
8773                 argc++;
8774             }
8775             envc = 0;
8776             guest_envp = arg3;
8777             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8778                 if (get_user_ual(addr, gp))
8779                     return -TARGET_EFAULT;
8780                 if (!addr)
8781                     break;
8782                 envc++;
8783             }
8784 
8785             argp = g_new0(char *, argc + 1);
8786             envp = g_new0(char *, envc + 1);
8787 
8788             for (gp = guest_argp, q = argp; gp;
8789                   gp += sizeof(abi_ulong), q++) {
8790                 if (get_user_ual(addr, gp))
8791                     goto execve_efault;
8792                 if (!addr)
8793                     break;
8794                 if (!(*q = lock_user_string(addr)))
8795                     goto execve_efault;
8796             }
8797             *q = NULL;
8798 
8799             for (gp = guest_envp, q = envp; gp;
8800                   gp += sizeof(abi_ulong), q++) {
8801                 if (get_user_ual(addr, gp))
8802                     goto execve_efault;
8803                 if (!addr)
8804                     break;
8805                 if (!(*q = lock_user_string(addr)))
8806                     goto execve_efault;
8807             }
8808             *q = NULL;
8809 
8810             if (!(p = lock_user_string(arg1)))
8811                 goto execve_efault;
8812             /* Although execve() is not an interruptible syscall it is
8813              * a special case where we must use the safe_syscall wrapper:
8814              * if we allow a signal to happen before we make the host
8815              * syscall then we will 'lose' it, because at the point of
8816              * execve the process leaves QEMU's control. So we use the
8817              * safe syscall wrapper to ensure that we either take the
8818              * signal as a guest signal, or else it does not happen
8819              * before the execve completes and makes it the other
8820              * program's problem.
8821              */
8822             ret = get_errno(safe_execve(p, argp, envp));
8823             unlock_user(p, arg1, 0);
8824 
8825             goto execve_end;
8826 
8827         execve_efault:
8828             ret = -TARGET_EFAULT;
8829 
8830         execve_end:
8831             for (gp = guest_argp, q = argp; *q;
8832                   gp += sizeof(abi_ulong), q++) {
8833                 if (get_user_ual(addr, gp)
8834                     || !addr)
8835                     break;
8836                 unlock_user(*q, addr, 0);
8837             }
8838             for (gp = guest_envp, q = envp; *q;
8839                   gp += sizeof(abi_ulong), q++) {
8840                 if (get_user_ual(addr, gp)
8841                     || !addr)
8842                     break;
8843                 unlock_user(*q, addr, 0);
8844             }
8845 
8846             g_free(argp);
8847             g_free(envp);
8848         }
8849         return ret;
8850     case TARGET_NR_chdir:
8851         if (!(p = lock_user_string(arg1)))
8852             return -TARGET_EFAULT;
8853         ret = get_errno(chdir(p));
8854         unlock_user(p, arg1, 0);
8855         return ret;
8856 #ifdef TARGET_NR_time
8857     case TARGET_NR_time:
8858         {
8859             time_t host_time;
8860             ret = get_errno(time(&host_time));
8861             if (!is_error(ret)
8862                 && arg1
8863                 && put_user_sal(host_time, arg1))
8864                 return -TARGET_EFAULT;
8865         }
8866         return ret;
8867 #endif
8868 #ifdef TARGET_NR_mknod
8869     case TARGET_NR_mknod:
8870         if (!(p = lock_user_string(arg1)))
8871             return -TARGET_EFAULT;
8872         ret = get_errno(mknod(p, arg2, arg3));
8873         unlock_user(p, arg1, 0);
8874         return ret;
8875 #endif
8876 #if defined(TARGET_NR_mknodat)
8877     case TARGET_NR_mknodat:
8878         if (!(p = lock_user_string(arg2)))
8879             return -TARGET_EFAULT;
8880         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8881         unlock_user(p, arg2, 0);
8882         return ret;
8883 #endif
8884 #ifdef TARGET_NR_chmod
8885     case TARGET_NR_chmod:
8886         if (!(p = lock_user_string(arg1)))
8887             return -TARGET_EFAULT;
8888         ret = get_errno(chmod(p, arg2));
8889         unlock_user(p, arg1, 0);
8890         return ret;
8891 #endif
8892 #ifdef TARGET_NR_lseek
8893     case TARGET_NR_lseek:
8894         return get_errno(lseek(arg1, arg2, arg3));
8895 #endif
8896 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8897     /* Alpha specific */
8898     case TARGET_NR_getxpid:
8899         cpu_env->ir[IR_A4] = getppid();
8900         return get_errno(getpid());
8901 #endif
8902 #ifdef TARGET_NR_getpid
8903     case TARGET_NR_getpid:
8904         return get_errno(getpid());
8905 #endif
8906     case TARGET_NR_mount:
8907         {
8908             /* need to look at the data field */
8909             void *p2, *p3;
8910 
8911             if (arg1) {
8912                 p = lock_user_string(arg1);
8913                 if (!p) {
8914                     return -TARGET_EFAULT;
8915                 }
8916             } else {
8917                 p = NULL;
8918             }
8919 
8920             p2 = lock_user_string(arg2);
8921             if (!p2) {
8922                 if (arg1) {
8923                     unlock_user(p, arg1, 0);
8924                 }
8925                 return -TARGET_EFAULT;
8926             }
8927 
8928             if (arg3) {
8929                 p3 = lock_user_string(arg3);
8930                 if (!p3) {
8931                     if (arg1) {
8932                         unlock_user(p, arg1, 0);
8933                     }
8934                     unlock_user(p2, arg2, 0);
8935                     return -TARGET_EFAULT;
8936                 }
8937             } else {
8938                 p3 = NULL;
8939             }
8940 
8941             /* FIXME - arg5 should be locked, but it isn't clear how to
8942              * do that since it's not guaranteed to be a NULL-terminated
8943              * string.
8944              */
8945             if (!arg5) {
8946                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8947             } else {
8948                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8949             }
8950             ret = get_errno(ret);
8951 
8952             if (arg1) {
8953                 unlock_user(p, arg1, 0);
8954             }
8955             unlock_user(p2, arg2, 0);
8956             if (arg3) {
8957                 unlock_user(p3, arg3, 0);
8958             }
8959         }
8960         return ret;
8961 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8962 #if defined(TARGET_NR_umount)
8963     case TARGET_NR_umount:
8964 #endif
8965 #if defined(TARGET_NR_oldumount)
8966     case TARGET_NR_oldumount:
8967 #endif
8968         if (!(p = lock_user_string(arg1)))
8969             return -TARGET_EFAULT;
8970         ret = get_errno(umount(p));
8971         unlock_user(p, arg1, 0);
8972         return ret;
8973 #endif
8974 #ifdef TARGET_NR_stime /* not on alpha */
8975     case TARGET_NR_stime:
8976         {
8977             struct timespec ts;
8978             ts.tv_nsec = 0;
8979             if (get_user_sal(ts.tv_sec, arg1)) {
8980                 return -TARGET_EFAULT;
8981             }
8982             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8983         }
8984 #endif
8985 #ifdef TARGET_NR_alarm /* not on alpha */
8986     case TARGET_NR_alarm:
8987         return alarm(arg1);
8988 #endif
8989 #ifdef TARGET_NR_pause /* not on alpha */
8990     case TARGET_NR_pause:
8991         if (!block_signals()) {
8992             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8993         }
8994         return -TARGET_EINTR;
8995 #endif
8996 #ifdef TARGET_NR_utime
8997     case TARGET_NR_utime:
8998         {
8999             struct utimbuf tbuf, *host_tbuf;
9000             struct target_utimbuf *target_tbuf;
9001             if (arg2) {
9002                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9003                     return -TARGET_EFAULT;
9004                 tbuf.actime = tswapal(target_tbuf->actime);
9005                 tbuf.modtime = tswapal(target_tbuf->modtime);
9006                 unlock_user_struct(target_tbuf, arg2, 0);
9007                 host_tbuf = &tbuf;
9008             } else {
9009                 host_tbuf = NULL;
9010             }
9011             if (!(p = lock_user_string(arg1)))
9012                 return -TARGET_EFAULT;
9013             ret = get_errno(utime(p, host_tbuf));
9014             unlock_user(p, arg1, 0);
9015         }
9016         return ret;
9017 #endif
9018 #ifdef TARGET_NR_utimes
9019     case TARGET_NR_utimes:
9020         {
9021             struct timeval *tvp, tv[2];
9022             if (arg2) {
9023                 if (copy_from_user_timeval(&tv[0], arg2)
9024                     || copy_from_user_timeval(&tv[1],
9025                                               arg2 + sizeof(struct target_timeval)))
9026                     return -TARGET_EFAULT;
9027                 tvp = tv;
9028             } else {
9029                 tvp = NULL;
9030             }
9031             if (!(p = lock_user_string(arg1)))
9032                 return -TARGET_EFAULT;
9033             ret = get_errno(utimes(p, tvp));
9034             unlock_user(p, arg1, 0);
9035         }
9036         return ret;
9037 #endif
9038 #if defined(TARGET_NR_futimesat)
9039     case TARGET_NR_futimesat:
9040         {
9041             struct timeval *tvp, tv[2];
9042             if (arg3) {
9043                 if (copy_from_user_timeval(&tv[0], arg3)
9044                     || copy_from_user_timeval(&tv[1],
9045                                               arg3 + sizeof(struct target_timeval)))
9046                     return -TARGET_EFAULT;
9047                 tvp = tv;
9048             } else {
9049                 tvp = NULL;
9050             }
9051             if (!(p = lock_user_string(arg2))) {
9052                 return -TARGET_EFAULT;
9053             }
9054             ret = get_errno(futimesat(arg1, path(p), tvp));
9055             unlock_user(p, arg2, 0);
9056         }
9057         return ret;
9058 #endif
9059 #ifdef TARGET_NR_access
9060     case TARGET_NR_access:
9061         if (!(p = lock_user_string(arg1))) {
9062             return -TARGET_EFAULT;
9063         }
9064         ret = get_errno(access(path(p), arg2));
9065         unlock_user(p, arg1, 0);
9066         return ret;
9067 #endif
9068 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9069     case TARGET_NR_faccessat:
9070         if (!(p = lock_user_string(arg2))) {
9071             return -TARGET_EFAULT;
9072         }
9073         ret = get_errno(faccessat(arg1, p, arg3, 0));
9074         unlock_user(p, arg2, 0);
9075         return ret;
9076 #endif
9077 #ifdef TARGET_NR_nice /* not on alpha */
9078     case TARGET_NR_nice:
9079         return get_errno(nice(arg1));
9080 #endif
9081     case TARGET_NR_sync:
9082         sync();
9083         return 0;
9084 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9085     case TARGET_NR_syncfs:
9086         return get_errno(syncfs(arg1));
9087 #endif
9088     case TARGET_NR_kill:
9089         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9090 #ifdef TARGET_NR_rename
9091     case TARGET_NR_rename:
9092         {
9093             void *p2;
9094             p = lock_user_string(arg1);
9095             p2 = lock_user_string(arg2);
9096             if (!p || !p2)
9097                 ret = -TARGET_EFAULT;
9098             else
9099                 ret = get_errno(rename(p, p2));
9100             unlock_user(p2, arg2, 0);
9101             unlock_user(p, arg1, 0);
9102         }
9103         return ret;
9104 #endif
9105 #if defined(TARGET_NR_renameat)
9106     case TARGET_NR_renameat:
9107         {
9108             void *p2;
9109             p  = lock_user_string(arg2);
9110             p2 = lock_user_string(arg4);
9111             if (!p || !p2)
9112                 ret = -TARGET_EFAULT;
9113             else
9114                 ret = get_errno(renameat(arg1, p, arg3, p2));
9115             unlock_user(p2, arg4, 0);
9116             unlock_user(p, arg2, 0);
9117         }
9118         return ret;
9119 #endif
9120 #if defined(TARGET_NR_renameat2)
9121     case TARGET_NR_renameat2:
9122         {
9123             void *p2;
9124             p  = lock_user_string(arg2);
9125             p2 = lock_user_string(arg4);
9126             if (!p || !p2) {
9127                 ret = -TARGET_EFAULT;
9128             } else {
9129                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9130             }
9131             unlock_user(p2, arg4, 0);
9132             unlock_user(p, arg2, 0);
9133         }
9134         return ret;
9135 #endif
9136 #ifdef TARGET_NR_mkdir
9137     case TARGET_NR_mkdir:
9138         if (!(p = lock_user_string(arg1)))
9139             return -TARGET_EFAULT;
9140         ret = get_errno(mkdir(p, arg2));
9141         unlock_user(p, arg1, 0);
9142         return ret;
9143 #endif
9144 #if defined(TARGET_NR_mkdirat)
9145     case TARGET_NR_mkdirat:
9146         if (!(p = lock_user_string(arg2)))
9147             return -TARGET_EFAULT;
9148         ret = get_errno(mkdirat(arg1, p, arg3));
9149         unlock_user(p, arg2, 0);
9150         return ret;
9151 #endif
9152 #ifdef TARGET_NR_rmdir
9153     case TARGET_NR_rmdir:
9154         if (!(p = lock_user_string(arg1)))
9155             return -TARGET_EFAULT;
9156         ret = get_errno(rmdir(p));
9157         unlock_user(p, arg1, 0);
9158         return ret;
9159 #endif
9160     case TARGET_NR_dup:
9161         ret = get_errno(dup(arg1));
9162         if (ret >= 0) {
9163             fd_trans_dup(arg1, ret);
9164         }
9165         return ret;
9166 #ifdef TARGET_NR_pipe
9167     case TARGET_NR_pipe:
9168         return do_pipe(cpu_env, arg1, 0, 0);
9169 #endif
9170 #ifdef TARGET_NR_pipe2
9171     case TARGET_NR_pipe2:
9172         return do_pipe(cpu_env, arg1,
9173                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9174 #endif
9175     case TARGET_NR_times:
9176         {
9177             struct target_tms *tmsp;
9178             struct tms tms;
9179             ret = get_errno(times(&tms));
9180             if (arg1) {
9181                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9182                 if (!tmsp)
9183                     return -TARGET_EFAULT;
9184                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9185                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9186                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9187                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9188             }
9189             if (!is_error(ret))
9190                 ret = host_to_target_clock_t(ret);
9191         }
9192         return ret;
9193     case TARGET_NR_acct:
9194         if (arg1 == 0) {
9195             ret = get_errno(acct(NULL));
9196         } else {
9197             if (!(p = lock_user_string(arg1))) {
9198                 return -TARGET_EFAULT;
9199             }
9200             ret = get_errno(acct(path(p)));
9201             unlock_user(p, arg1, 0);
9202         }
9203         return ret;
9204 #ifdef TARGET_NR_umount2
9205     case TARGET_NR_umount2:
9206         if (!(p = lock_user_string(arg1)))
9207             return -TARGET_EFAULT;
9208         ret = get_errno(umount2(p, arg2));
9209         unlock_user(p, arg1, 0);
9210         return ret;
9211 #endif
9212     case TARGET_NR_ioctl:
9213         return do_ioctl(arg1, arg2, arg3);
9214 #ifdef TARGET_NR_fcntl
9215     case TARGET_NR_fcntl:
9216         return do_fcntl(arg1, arg2, arg3);
9217 #endif
9218     case TARGET_NR_setpgid:
9219         return get_errno(setpgid(arg1, arg2));
9220     case TARGET_NR_umask:
9221         return get_errno(umask(arg1));
9222     case TARGET_NR_chroot:
9223         if (!(p = lock_user_string(arg1)))
9224             return -TARGET_EFAULT;
9225         ret = get_errno(chroot(p));
9226         unlock_user(p, arg1, 0);
9227         return ret;
9228 #ifdef TARGET_NR_dup2
9229     case TARGET_NR_dup2:
9230         ret = get_errno(dup2(arg1, arg2));
9231         if (ret >= 0) {
9232             fd_trans_dup(arg1, arg2);
9233         }
9234         return ret;
9235 #endif
9236 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9237     case TARGET_NR_dup3:
9238     {
9239         int host_flags;
9240 
9241         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9242             return -EINVAL;
9243         }
9244         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9245         ret = get_errno(dup3(arg1, arg2, host_flags));
9246         if (ret >= 0) {
9247             fd_trans_dup(arg1, arg2);
9248         }
9249         return ret;
9250     }
9251 #endif
9252 #ifdef TARGET_NR_getppid /* not on alpha */
9253     case TARGET_NR_getppid:
9254         return get_errno(getppid());
9255 #endif
9256 #ifdef TARGET_NR_getpgrp
9257     case TARGET_NR_getpgrp:
9258         return get_errno(getpgrp());
9259 #endif
9260     case TARGET_NR_setsid:
9261         return get_errno(setsid());
9262 #ifdef TARGET_NR_sigaction
9263     case TARGET_NR_sigaction:
9264         {
9265 #if defined(TARGET_MIPS)
9266             struct target_sigaction act, oact, *pact, *old_act;
9267 
9268             if (arg2) {
9269                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9270                     return -TARGET_EFAULT;
9271                 act._sa_handler = old_act->_sa_handler;
9272                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9273                 act.sa_flags = old_act->sa_flags;
9274                 unlock_user_struct(old_act, arg2, 0);
9275                 pact = &act;
9276             } else {
9277                 pact = NULL;
9278             }
9279 
9280             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9281 
9282             if (!is_error(ret) && arg3) {
9283                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9284                     return -TARGET_EFAULT;
9285                 old_act->_sa_handler = oact._sa_handler;
9286                 old_act->sa_flags = oact.sa_flags;
9287                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9288                 old_act->sa_mask.sig[1] = 0;
9289                 old_act->sa_mask.sig[2] = 0;
9290                 old_act->sa_mask.sig[3] = 0;
9291                 unlock_user_struct(old_act, arg3, 1);
9292             }
9293 #else
9294             struct target_old_sigaction *old_act;
9295             struct target_sigaction act, oact, *pact;
9296             if (arg2) {
9297                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9298                     return -TARGET_EFAULT;
9299                 act._sa_handler = old_act->_sa_handler;
9300                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9301                 act.sa_flags = old_act->sa_flags;
9302 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9303                 act.sa_restorer = old_act->sa_restorer;
9304 #endif
9305                 unlock_user_struct(old_act, arg2, 0);
9306                 pact = &act;
9307             } else {
9308                 pact = NULL;
9309             }
9310             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9311             if (!is_error(ret) && arg3) {
9312                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9313                     return -TARGET_EFAULT;
9314                 old_act->_sa_handler = oact._sa_handler;
9315                 old_act->sa_mask = oact.sa_mask.sig[0];
9316                 old_act->sa_flags = oact.sa_flags;
9317 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9318                 old_act->sa_restorer = oact.sa_restorer;
9319 #endif
9320                 unlock_user_struct(old_act, arg3, 1);
9321             }
9322 #endif
9323         }
9324         return ret;
9325 #endif
9326     case TARGET_NR_rt_sigaction:
9327         {
9328             /*
9329              * For Alpha and SPARC this is a 5 argument syscall, with
9330              * a 'restorer' parameter which must be copied into the
9331              * sa_restorer field of the sigaction struct.
9332              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9333              * and arg5 is the sigsetsize.
9334              */
9335 #if defined(TARGET_ALPHA)
9336             target_ulong sigsetsize = arg4;
9337             target_ulong restorer = arg5;
9338 #elif defined(TARGET_SPARC)
9339             target_ulong restorer = arg4;
9340             target_ulong sigsetsize = arg5;
9341 #else
9342             target_ulong sigsetsize = arg4;
9343             target_ulong restorer = 0;
9344 #endif
9345             struct target_sigaction *act = NULL;
9346             struct target_sigaction *oact = NULL;
9347 
9348             if (sigsetsize != sizeof(target_sigset_t)) {
9349                 return -TARGET_EINVAL;
9350             }
9351             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9352                 return -TARGET_EFAULT;
9353             }
9354             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9355                 ret = -TARGET_EFAULT;
9356             } else {
9357                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9358                 if (oact) {
9359                     unlock_user_struct(oact, arg3, 1);
9360                 }
9361             }
9362             if (act) {
9363                 unlock_user_struct(act, arg2, 0);
9364             }
9365         }
9366         return ret;
9367 #ifdef TARGET_NR_sgetmask /* not on alpha */
9368     case TARGET_NR_sgetmask:
9369         {
9370             sigset_t cur_set;
9371             abi_ulong target_set;
9372             ret = do_sigprocmask(0, NULL, &cur_set);
9373             if (!ret) {
9374                 host_to_target_old_sigset(&target_set, &cur_set);
9375                 ret = target_set;
9376             }
9377         }
9378         return ret;
9379 #endif
9380 #ifdef TARGET_NR_ssetmask /* not on alpha */
9381     case TARGET_NR_ssetmask:
9382         {
9383             sigset_t set, oset;
9384             abi_ulong target_set = arg1;
9385             target_to_host_old_sigset(&set, &target_set);
9386             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9387             if (!ret) {
9388                 host_to_target_old_sigset(&target_set, &oset);
9389                 ret = target_set;
9390             }
9391         }
9392         return ret;
9393 #endif
9394 #ifdef TARGET_NR_sigprocmask
9395     case TARGET_NR_sigprocmask:
9396         {
9397 #if defined(TARGET_ALPHA)
9398             sigset_t set, oldset;
9399             abi_ulong mask;
9400             int how;
9401 
9402             switch (arg1) {
9403             case TARGET_SIG_BLOCK:
9404                 how = SIG_BLOCK;
9405                 break;
9406             case TARGET_SIG_UNBLOCK:
9407                 how = SIG_UNBLOCK;
9408                 break;
9409             case TARGET_SIG_SETMASK:
9410                 how = SIG_SETMASK;
9411                 break;
9412             default:
9413                 return -TARGET_EINVAL;
9414             }
9415             mask = arg2;
9416             target_to_host_old_sigset(&set, &mask);
9417 
9418             ret = do_sigprocmask(how, &set, &oldset);
9419             if (!is_error(ret)) {
9420                 host_to_target_old_sigset(&mask, &oldset);
9421                 ret = mask;
9422                 cpu_env->ir[IR_V0] = 0; /* force no error */
9423             }
9424 #else
9425             sigset_t set, oldset, *set_ptr;
9426             int how;
9427 
9428             if (arg2) {
9429                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9430                 if (!p) {
9431                     return -TARGET_EFAULT;
9432                 }
9433                 target_to_host_old_sigset(&set, p);
9434                 unlock_user(p, arg2, 0);
9435                 set_ptr = &set;
9436                 switch (arg1) {
9437                 case TARGET_SIG_BLOCK:
9438                     how = SIG_BLOCK;
9439                     break;
9440                 case TARGET_SIG_UNBLOCK:
9441                     how = SIG_UNBLOCK;
9442                     break;
9443                 case TARGET_SIG_SETMASK:
9444                     how = SIG_SETMASK;
9445                     break;
9446                 default:
9447                     return -TARGET_EINVAL;
9448                 }
9449             } else {
9450                 how = 0;
9451                 set_ptr = NULL;
9452             }
9453             ret = do_sigprocmask(how, set_ptr, &oldset);
9454             if (!is_error(ret) && arg3) {
9455                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9456                     return -TARGET_EFAULT;
9457                 host_to_target_old_sigset(p, &oldset);
9458                 unlock_user(p, arg3, sizeof(target_sigset_t));
9459             }
9460 #endif
9461         }
9462         return ret;
9463 #endif
9464     case TARGET_NR_rt_sigprocmask:
9465         {
9466             int how = arg1;
9467             sigset_t set, oldset, *set_ptr;
9468 
9469             if (arg4 != sizeof(target_sigset_t)) {
9470                 return -TARGET_EINVAL;
9471             }
9472 
9473             if (arg2) {
9474                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9475                 if (!p) {
9476                     return -TARGET_EFAULT;
9477                 }
9478                 target_to_host_sigset(&set, p);
9479                 unlock_user(p, arg2, 0);
9480                 set_ptr = &set;
9481                 switch(how) {
9482                 case TARGET_SIG_BLOCK:
9483                     how = SIG_BLOCK;
9484                     break;
9485                 case TARGET_SIG_UNBLOCK:
9486                     how = SIG_UNBLOCK;
9487                     break;
9488                 case TARGET_SIG_SETMASK:
9489                     how = SIG_SETMASK;
9490                     break;
9491                 default:
9492                     return -TARGET_EINVAL;
9493                 }
9494             } else {
9495                 how = 0;
9496                 set_ptr = NULL;
9497             }
9498             ret = do_sigprocmask(how, set_ptr, &oldset);
9499             if (!is_error(ret) && arg3) {
9500                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9501                     return -TARGET_EFAULT;
9502                 host_to_target_sigset(p, &oldset);
9503                 unlock_user(p, arg3, sizeof(target_sigset_t));
9504             }
9505         }
9506         return ret;
9507 #ifdef TARGET_NR_sigpending
9508     case TARGET_NR_sigpending:
9509         {
9510             sigset_t set;
9511             ret = get_errno(sigpending(&set));
9512             if (!is_error(ret)) {
9513                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9514                     return -TARGET_EFAULT;
9515                 host_to_target_old_sigset(p, &set);
9516                 unlock_user(p, arg1, sizeof(target_sigset_t));
9517             }
9518         }
9519         return ret;
9520 #endif
9521     case TARGET_NR_rt_sigpending:
9522         {
9523             sigset_t set;
9524 
9525             /* Yes, this check is >, not != like most. We follow the kernel's
9526              * logic and it does it like this because it implements
9527              * NR_sigpending through the same code path, and in that case
9528              * the old_sigset_t is smaller in size.
9529              */
9530             if (arg2 > sizeof(target_sigset_t)) {
9531                 return -TARGET_EINVAL;
9532             }
9533 
9534             ret = get_errno(sigpending(&set));
9535             if (!is_error(ret)) {
9536                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9537                     return -TARGET_EFAULT;
9538                 host_to_target_sigset(p, &set);
9539                 unlock_user(p, arg1, sizeof(target_sigset_t));
9540             }
9541         }
9542         return ret;
9543 #ifdef TARGET_NR_sigsuspend
9544     case TARGET_NR_sigsuspend:
9545         {
9546             sigset_t *set;
9547 
9548 #if defined(TARGET_ALPHA)
9549             TaskState *ts = cpu->opaque;
9550             /* target_to_host_old_sigset will bswap back */
9551             abi_ulong mask = tswapal(arg1);
9552             set = &ts->sigsuspend_mask;
9553             target_to_host_old_sigset(set, &mask);
9554 #else
9555             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9556             if (ret != 0) {
9557                 return ret;
9558             }
9559 #endif
9560             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9561             finish_sigsuspend_mask(ret);
9562         }
9563         return ret;
9564 #endif
9565     case TARGET_NR_rt_sigsuspend:
9566         {
9567             sigset_t *set;
9568 
9569             ret = process_sigsuspend_mask(&set, arg1, arg2);
9570             if (ret != 0) {
9571                 return ret;
9572             }
9573             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9574             finish_sigsuspend_mask(ret);
9575         }
9576         return ret;
9577 #ifdef TARGET_NR_rt_sigtimedwait
9578     case TARGET_NR_rt_sigtimedwait:
9579         {
9580             sigset_t set;
9581             struct timespec uts, *puts;
9582             siginfo_t uinfo;
9583 
9584             if (arg4 != sizeof(target_sigset_t)) {
9585                 return -TARGET_EINVAL;
9586             }
9587 
9588             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9589                 return -TARGET_EFAULT;
9590             target_to_host_sigset(&set, p);
9591             unlock_user(p, arg1, 0);
9592             if (arg3) {
9593                 puts = &uts;
9594                 if (target_to_host_timespec(puts, arg3)) {
9595                     return -TARGET_EFAULT;
9596                 }
9597             } else {
9598                 puts = NULL;
9599             }
9600             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9601                                                  SIGSET_T_SIZE));
9602             if (!is_error(ret)) {
9603                 if (arg2) {
9604                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9605                                   0);
9606                     if (!p) {
9607                         return -TARGET_EFAULT;
9608                     }
9609                     host_to_target_siginfo(p, &uinfo);
9610                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9611                 }
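                /*
                 * The host syscall returns a host signal number on success;
                 * translate it back into the target's signal numbering.
                 */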
9612                 ret = host_to_target_signal(ret);
9613             }
9614         }
9615         return ret;
9616 #endif
9617 #ifdef TARGET_NR_rt_sigtimedwait_time64
9618     case TARGET_NR_rt_sigtimedwait_time64:
9619         {
9620             sigset_t set;
9621             struct timespec uts, *puts;
9622             siginfo_t uinfo;
9623 
9624             if (arg4 != sizeof(target_sigset_t)) {
9625                 return -TARGET_EINVAL;
9626             }
9627 
9628             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9629             if (!p) {
9630                 return -TARGET_EFAULT;
9631             }
9632             target_to_host_sigset(&set, p);
9633             unlock_user(p, arg1, 0);
9634             if (arg3) {
9635                 puts = &uts;
9636                 if (target_to_host_timespec64(puts, arg3)) {
9637                     return -TARGET_EFAULT;
9638                 }
9639             } else {
9640                 puts = NULL;
9641             }
9642             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9643                                                  SIGSET_T_SIZE));
9644             if (!is_error(ret)) {
9645                 if (arg2) {
9646                     p = lock_user(VERIFY_WRITE, arg2,
9647                                   sizeof(target_siginfo_t), 0);
9648                     if (!p) {
9649                         return -TARGET_EFAULT;
9650                     }
9651                     host_to_target_siginfo(p, &uinfo);
9652                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9653                 }
9654                 ret = host_to_target_signal(ret);
9655             }
9656         }
9657         return ret;
9658 #endif
9659     case TARGET_NR_rt_sigqueueinfo:
9660         {
9661             siginfo_t uinfo;
9662 
9663             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9664             if (!p) {
9665                 return -TARGET_EFAULT;
9666             }
9667             target_to_host_siginfo(&uinfo, p);
9668             unlock_user(p, arg3, 0);
9669             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9670         }
9671         return ret;
9672     case TARGET_NR_rt_tgsigqueueinfo:
9673         {
9674             siginfo_t uinfo;
9675 
9676             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9677             if (!p) {
9678                 return -TARGET_EFAULT;
9679             }
9680             target_to_host_siginfo(&uinfo, p);
9681             unlock_user(p, arg4, 0);
9682             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9683         }
9684         return ret;
9685 #ifdef TARGET_NR_sigreturn
9686     case TARGET_NR_sigreturn:
9687         if (block_signals()) {
9688             return -QEMU_ERESTARTSYS;
9689         }
9690         return do_sigreturn(cpu_env);
9691 #endif
9692     case TARGET_NR_rt_sigreturn:
9693         if (block_signals()) {
9694             return -QEMU_ERESTARTSYS;
9695         }
9696         return do_rt_sigreturn(cpu_env);
9697     case TARGET_NR_sethostname:
9698         if (!(p = lock_user_string(arg1)))
9699             return -TARGET_EFAULT;
9700         ret = get_errno(sethostname(p, arg2));
9701         unlock_user(p, arg1, 0);
9702         return ret;
9703 #ifdef TARGET_NR_setrlimit
9704     case TARGET_NR_setrlimit:
9705         {
9706             int resource = target_to_host_resource(arg1);
9707             struct target_rlimit *target_rlim;
9708             struct rlimit rlim;
9709             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9710                 return -TARGET_EFAULT;
9711             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9712             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9713             unlock_user_struct(target_rlim, arg2, 0);
9714             /*
9715              * If we just passed through resource limit settings for memory then
9716              * they would also apply to QEMU's own allocations, and QEMU will
9717              * crash or hang or die if its allocations fail. Ideally we would
9718              * track the guest allocations in QEMU and apply the limits ourselves.
9719              * For now, just tell the guest the call succeeded but don't actually
9720              * limit anything.
9721              */
9722             if (resource != RLIMIT_AS &&
9723                 resource != RLIMIT_DATA &&
9724                 resource != RLIMIT_STACK) {
9725                 return get_errno(setrlimit(resource, &rlim));
9726             } else {
9727                 return 0;
9728             }
9729         }
9730 #endif
9731 #ifdef TARGET_NR_getrlimit
9732     case TARGET_NR_getrlimit:
9733         {
9734             int resource = target_to_host_resource(arg1);
9735             struct target_rlimit *target_rlim;
9736             struct rlimit rlim;
9737 
9738             ret = get_errno(getrlimit(resource, &rlim));
9739             if (!is_error(ret)) {
9740                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9741                     return -TARGET_EFAULT;
9742                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9743                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9744                 unlock_user_struct(target_rlim, arg2, 1);
9745             }
9746         }
9747         return ret;
9748 #endif
9749     case TARGET_NR_getrusage:
9750         {
9751             struct rusage rusage;
9752             ret = get_errno(getrusage(arg1, &rusage));
9753             if (!is_error(ret)) {
9754                 ret = host_to_target_rusage(arg2, &rusage);
9755             }
9756         }
9757         return ret;
9758 #if defined(TARGET_NR_gettimeofday)
9759     case TARGET_NR_gettimeofday:
9760         {
9761             struct timeval tv;
9762             struct timezone tz;
9763 
9764             ret = get_errno(gettimeofday(&tv, &tz));
9765             if (!is_error(ret)) {
9766                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9767                     return -TARGET_EFAULT;
9768                 }
9769                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9770                     return -TARGET_EFAULT;
9771                 }
9772             }
9773         }
9774         return ret;
9775 #endif
9776 #if defined(TARGET_NR_settimeofday)
9777     case TARGET_NR_settimeofday:
9778         {
9779             struct timeval tv, *ptv = NULL;
9780             struct timezone tz, *ptz = NULL;
9781 
9782             if (arg1) {
9783                 if (copy_from_user_timeval(&tv, arg1)) {
9784                     return -TARGET_EFAULT;
9785                 }
9786                 ptv = &tv;
9787             }
9788 
9789             if (arg2) {
9790                 if (copy_from_user_timezone(&tz, arg2)) {
9791                     return -TARGET_EFAULT;
9792                 }
9793                 ptz = &tz;
9794             }
9795 
9796             return get_errno(settimeofday(ptv, ptz));
9797         }
9798 #endif
9799 #if defined(TARGET_NR_select)
9800     case TARGET_NR_select:
9801 #if defined(TARGET_WANT_NI_OLD_SELECT)
9802         /* Some architectures used to have old_select here,
9803          * but now return ENOSYS for it.
9804          */
9805         ret = -TARGET_ENOSYS;
9806 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9807         ret = do_old_select(arg1);
9808 #else
9809         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9810 #endif
9811         return ret;
9812 #endif
9813 #ifdef TARGET_NR_pselect6
9814     case TARGET_NR_pselect6:
9815         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9816 #endif
9817 #ifdef TARGET_NR_pselect6_time64
9818     case TARGET_NR_pselect6_time64:
9819         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9820 #endif
9821 #ifdef TARGET_NR_symlink
9822     case TARGET_NR_symlink:
9823         {
9824             void *p2;
9825             p = lock_user_string(arg1);
9826             p2 = lock_user_string(arg2);
9827             if (!p || !p2)
9828                 ret = -TARGET_EFAULT;
9829             else
9830                 ret = get_errno(symlink(p, p2));
9831             unlock_user(p2, arg2, 0);
9832             unlock_user(p, arg1, 0);
9833         }
9834         return ret;
9835 #endif
9836 #if defined(TARGET_NR_symlinkat)
9837     case TARGET_NR_symlinkat:
9838         {
9839             void *p2;
9840             p  = lock_user_string(arg1);
9841             p2 = lock_user_string(arg3);
9842             if (!p || !p2)
9843                 ret = -TARGET_EFAULT;
9844             else
9845                 ret = get_errno(symlinkat(p, arg2, p2));
9846             unlock_user(p2, arg3, 0);
9847             unlock_user(p, arg1, 0);
9848         }
9849         return ret;
9850 #endif
9851 #ifdef TARGET_NR_readlink
9852     case TARGET_NR_readlink:
9853         {
9854             void *p2;
9855             p = lock_user_string(arg1);
9856             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9857             if (!p || !p2) {
9858                 ret = -TARGET_EFAULT;
9859             } else if (!arg3) {
9860                 /* Short circuit this for the magic exe check. */
9861                 ret = -TARGET_EINVAL;
9862             } else if (is_proc_myself((const char *)p, "exe")) {
9863                 char real[PATH_MAX], *temp;
9864                 temp = realpath(exec_path, real);
9865                 /* Return value is # of bytes that we wrote to the buffer. */
9866                 if (temp == NULL) {
9867                     ret = get_errno(-1);
9868                 } else {
9869                     /* Don't worry about sign mismatch as earlier mapping
9870                      * logic would have thrown a bad address error. */
9871                     ret = MIN(strlen(real), arg3);
9872                     /* We cannot NUL terminate the string. */
9873                     memcpy(p2, real, ret);
9874                 }
9875             } else {
9876                 ret = get_errno(readlink(path(p), p2, arg3));
9877             }
9878             unlock_user(p2, arg2, ret);
9879             unlock_user(p, arg1, 0);
9880         }
9881         return ret;
9882 #endif
9883 #if defined(TARGET_NR_readlinkat)
9884     case TARGET_NR_readlinkat:
9885         {
9886             void *p2;
9887             p  = lock_user_string(arg2);
9888             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9889             if (!p || !p2) {
9890                 ret = -TARGET_EFAULT;
9891             } else if (is_proc_myself((const char *)p, "exe")) {
9892                 char real[PATH_MAX], *temp;
9893                 temp = realpath(exec_path, real);
9894             ret = temp == NULL ? get_errno(-1) : strlen(real);
9895                 snprintf((char *)p2, arg4, "%s", real);
9896             } else {
9897                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9898             }
9899             unlock_user(p2, arg3, ret);
9900             unlock_user(p, arg2, 0);
9901         }
9902         return ret;
9903 #endif
9904 #ifdef TARGET_NR_swapon
9905     case TARGET_NR_swapon:
9906         if (!(p = lock_user_string(arg1)))
9907             return -TARGET_EFAULT;
9908         ret = get_errno(swapon(p, arg2));
9909         unlock_user(p, arg1, 0);
9910         return ret;
9911 #endif
9912     case TARGET_NR_reboot:
9913         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9914             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2; ignore it otherwise */
9915             p = lock_user_string(arg4);
9916             if (!p) {
9917                 return -TARGET_EFAULT;
9918             }
9919             ret = get_errno(reboot(arg1, arg2, arg3, p));
9920             unlock_user(p, arg4, 0);
9921         } else {
9922             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9923         }
9924         return ret;
9925 #ifdef TARGET_NR_mmap
9926     case TARGET_NR_mmap:
9927 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9928     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9929     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9930     || defined(TARGET_S390X)
9931         {
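            /*
             * On these 32-bit targets the old mmap syscall takes a single
             * guest pointer to a block of six arguments; fetch and byte-swap
             * them before handing the values to target_mmap().
             */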
9932             abi_ulong *v;
9933             abi_ulong v1, v2, v3, v4, v5, v6;
9934             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9935                 return -TARGET_EFAULT;
9936             v1 = tswapal(v[0]);
9937             v2 = tswapal(v[1]);
9938             v3 = tswapal(v[2]);
9939             v4 = tswapal(v[3]);
9940             v5 = tswapal(v[4]);
9941             v6 = tswapal(v[5]);
9942             unlock_user(v, arg1, 0);
9943             ret = get_errno(target_mmap(v1, v2, v3,
9944                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9945                                         v5, v6));
9946         }
9947 #else
9948         /* mmap pointers are always untagged */
9949         ret = get_errno(target_mmap(arg1, arg2, arg3,
9950                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9951                                     arg5,
9952                                     arg6));
9953 #endif
9954         return ret;
9955 #endif
9956 #ifdef TARGET_NR_mmap2
9957     case TARGET_NR_mmap2:
9958 #ifndef MMAP_SHIFT
9959 #define MMAP_SHIFT 12
9960 #endif
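        /*
         * For mmap2 the file offset (arg6) is given in units of
         * 1 << MMAP_SHIFT (4096 bytes unless the target overrides it),
         * so shift it into a byte offset here.
         */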
9961         ret = target_mmap(arg1, arg2, arg3,
9962                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9963                           arg5, arg6 << MMAP_SHIFT);
9964         return get_errno(ret);
9965 #endif
9966     case TARGET_NR_munmap:
9967         arg1 = cpu_untagged_addr(cpu, arg1);
9968         return get_errno(target_munmap(arg1, arg2));
9969     case TARGET_NR_mprotect:
9970         arg1 = cpu_untagged_addr(cpu, arg1);
9971         {
9972             TaskState *ts = cpu->opaque;
9973             /* Special hack to detect libc making the stack executable.  */
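            /*
             * PROT_GROWSDOWN normally tells the kernel to apply the
             * protection down to the bottom of a stack mapping; since the
             * flag is stripped below, widen the requested range by hand so
             * it covers the guest stack down to its limit instead.
             */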
9974             if ((arg3 & PROT_GROWSDOWN)
9975                 && arg1 >= ts->info->stack_limit
9976                 && arg1 <= ts->info->start_stack) {
9977                 arg3 &= ~PROT_GROWSDOWN;
9978                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9979                 arg1 = ts->info->stack_limit;
9980             }
9981         }
9982         return get_errno(target_mprotect(arg1, arg2, arg3));
9983 #ifdef TARGET_NR_mremap
9984     case TARGET_NR_mremap:
9985         arg1 = cpu_untagged_addr(cpu, arg1);
9986         /* mremap new_addr (arg5) is always untagged */
9987         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9988 #endif
9989         /* ??? msync/mlock/munlock are broken for softmmu.  */
9990 #ifdef TARGET_NR_msync
9991     case TARGET_NR_msync:
9992         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9993 #endif
9994 #ifdef TARGET_NR_mlock
9995     case TARGET_NR_mlock:
9996         return get_errno(mlock(g2h(cpu, arg1), arg2));
9997 #endif
9998 #ifdef TARGET_NR_munlock
9999     case TARGET_NR_munlock:
10000         return get_errno(munlock(g2h(cpu, arg1), arg2));
10001 #endif
10002 #ifdef TARGET_NR_mlockall
10003     case TARGET_NR_mlockall:
10004         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10005 #endif
10006 #ifdef TARGET_NR_munlockall
10007     case TARGET_NR_munlockall:
10008         return get_errno(munlockall());
10009 #endif
10010 #ifdef TARGET_NR_truncate
10011     case TARGET_NR_truncate:
10012         if (!(p = lock_user_string(arg1)))
10013             return -TARGET_EFAULT;
10014         ret = get_errno(truncate(p, arg2));
10015         unlock_user(p, arg1, 0);
10016         return ret;
10017 #endif
10018 #ifdef TARGET_NR_ftruncate
10019     case TARGET_NR_ftruncate:
10020         return get_errno(ftruncate(arg1, arg2));
10021 #endif
10022     case TARGET_NR_fchmod:
10023         return get_errno(fchmod(arg1, arg2));
10024 #if defined(TARGET_NR_fchmodat)
10025     case TARGET_NR_fchmodat:
10026         if (!(p = lock_user_string(arg2)))
10027             return -TARGET_EFAULT;
10028         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10029         unlock_user(p, arg2, 0);
10030         return ret;
10031 #endif
10032     case TARGET_NR_getpriority:
10033         /* Note that negative values are valid for getpriority, so we must
10034            differentiate based on errno settings.  */
10035         errno = 0;
10036         ret = getpriority(arg1, arg2);
10037         if (ret == -1 && errno != 0) {
10038             return -host_to_target_errno(errno);
10039         }
10040 #ifdef TARGET_ALPHA
10041         /* Return value is the unbiased priority.  Signal no error.  */
10042         cpu_env->ir[IR_V0] = 0;
10043 #else
10044         /* Return value is a biased priority to avoid negative numbers.  */
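        /*
         * The host libc getpriority() already un-biases the kernel's result
         * back to a plain nice value, so re-apply the kernel's 20 - nice
         * encoding that the guest expects from the raw syscall.
         */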
10045         ret = 20 - ret;
10046 #endif
10047         return ret;
10048     case TARGET_NR_setpriority:
10049         return get_errno(setpriority(arg1, arg2, arg3));
10050 #ifdef TARGET_NR_statfs
10051     case TARGET_NR_statfs:
10052         if (!(p = lock_user_string(arg1))) {
10053             return -TARGET_EFAULT;
10054         }
10055         ret = get_errno(statfs(path(p), &stfs));
10056         unlock_user(p, arg1, 0);
10057     convert_statfs:
10058         if (!is_error(ret)) {
10059             struct target_statfs *target_stfs;
10060 
10061             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10062                 return -TARGET_EFAULT;
10063             __put_user(stfs.f_type, &target_stfs->f_type);
10064             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10065             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10066             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10067             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10068             __put_user(stfs.f_files, &target_stfs->f_files);
10069             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10070             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10071             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10072             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10073             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10074 #ifdef _STATFS_F_FLAGS
10075             __put_user(stfs.f_flags, &target_stfs->f_flags);
10076 #else
10077             __put_user(0, &target_stfs->f_flags);
10078 #endif
10079             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10080             unlock_user_struct(target_stfs, arg2, 1);
10081         }
10082         return ret;
10083 #endif
10084 #ifdef TARGET_NR_fstatfs
10085     case TARGET_NR_fstatfs:
10086         ret = get_errno(fstatfs(arg1, &stfs));
10087         goto convert_statfs;
10088 #endif
10089 #ifdef TARGET_NR_statfs64
10090     case TARGET_NR_statfs64:
10091         if (!(p = lock_user_string(arg1))) {
10092             return -TARGET_EFAULT;
10093         }
10094         ret = get_errno(statfs(path(p), &stfs));
10095         unlock_user(p, arg1, 0);
10096     convert_statfs64:
10097         if (!is_error(ret)) {
10098             struct target_statfs64 *target_stfs;
10099 
10100             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10101                 return -TARGET_EFAULT;
10102             __put_user(stfs.f_type, &target_stfs->f_type);
10103             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10104             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10105             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10106             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10107             __put_user(stfs.f_files, &target_stfs->f_files);
10108             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10109             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10110             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10111             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10112             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10113 #ifdef _STATFS_F_FLAGS
10114             __put_user(stfs.f_flags, &target_stfs->f_flags);
10115 #else
10116             __put_user(0, &target_stfs->f_flags);
10117 #endif
10118             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10119             unlock_user_struct(target_stfs, arg3, 1);
10120         }
10121         return ret;
10122     case TARGET_NR_fstatfs64:
10123         ret = get_errno(fstatfs(arg1, &stfs));
10124         goto convert_statfs64;
10125 #endif
10126 #ifdef TARGET_NR_socketcall
10127     case TARGET_NR_socketcall:
10128         return do_socketcall(arg1, arg2);
10129 #endif
10130 #ifdef TARGET_NR_accept
10131     case TARGET_NR_accept:
10132         return do_accept4(arg1, arg2, arg3, 0);
10133 #endif
10134 #ifdef TARGET_NR_accept4
10135     case TARGET_NR_accept4:
10136         return do_accept4(arg1, arg2, arg3, arg4);
10137 #endif
10138 #ifdef TARGET_NR_bind
10139     case TARGET_NR_bind:
10140         return do_bind(arg1, arg2, arg3);
10141 #endif
10142 #ifdef TARGET_NR_connect
10143     case TARGET_NR_connect:
10144         return do_connect(arg1, arg2, arg3);
10145 #endif
10146 #ifdef TARGET_NR_getpeername
10147     case TARGET_NR_getpeername:
10148         return do_getpeername(arg1, arg2, arg3);
10149 #endif
10150 #ifdef TARGET_NR_getsockname
10151     case TARGET_NR_getsockname:
10152         return do_getsockname(arg1, arg2, arg3);
10153 #endif
10154 #ifdef TARGET_NR_getsockopt
10155     case TARGET_NR_getsockopt:
10156         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10157 #endif
10158 #ifdef TARGET_NR_listen
10159     case TARGET_NR_listen:
10160         return get_errno(listen(arg1, arg2));
10161 #endif
10162 #ifdef TARGET_NR_recv
10163     case TARGET_NR_recv:
10164         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10165 #endif
10166 #ifdef TARGET_NR_recvfrom
10167     case TARGET_NR_recvfrom:
10168         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10169 #endif
10170 #ifdef TARGET_NR_recvmsg
10171     case TARGET_NR_recvmsg:
10172         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10173 #endif
10174 #ifdef TARGET_NR_send
10175     case TARGET_NR_send:
10176         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10177 #endif
10178 #ifdef TARGET_NR_sendmsg
10179     case TARGET_NR_sendmsg:
10180         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10181 #endif
10182 #ifdef TARGET_NR_sendmmsg
10183     case TARGET_NR_sendmmsg:
10184         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10185 #endif
10186 #ifdef TARGET_NR_recvmmsg
10187     case TARGET_NR_recvmmsg:
10188         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10189 #endif
10190 #ifdef TARGET_NR_sendto
10191     case TARGET_NR_sendto:
10192         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10193 #endif
10194 #ifdef TARGET_NR_shutdown
10195     case TARGET_NR_shutdown:
10196         return get_errno(shutdown(arg1, arg2));
10197 #endif
10198 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10199     case TARGET_NR_getrandom:
10200         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10201         if (!p) {
10202             return -TARGET_EFAULT;
10203         }
10204         ret = get_errno(getrandom(p, arg2, arg3));
10205         unlock_user(p, arg1, ret);
10206         return ret;
10207 #endif
10208 #ifdef TARGET_NR_socket
10209     case TARGET_NR_socket:
10210         return do_socket(arg1, arg2, arg3);
10211 #endif
10212 #ifdef TARGET_NR_socketpair
10213     case TARGET_NR_socketpair:
10214         return do_socketpair(arg1, arg2, arg3, arg4);
10215 #endif
10216 #ifdef TARGET_NR_setsockopt
10217     case TARGET_NR_setsockopt:
10218         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10219 #endif
10220 #if defined(TARGET_NR_syslog)
10221     case TARGET_NR_syslog:
10222         {
10223             int len = arg2;
10224 
10225             switch (arg1) {
10226             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10227             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10228             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10229             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10230             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10231             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10232             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10233             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10234                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10235             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10236             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10237             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10238                 {
10239                     if (len < 0) {
10240                         return -TARGET_EINVAL;
10241                     }
10242                     if (len == 0) {
10243                         return 0;
10244                     }
10245                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10246                     if (!p) {
10247                         return -TARGET_EFAULT;
10248                     }
10249                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10250                     unlock_user(p, arg2, arg3);
10251                 }
10252                 return ret;
10253             default:
10254                 return -TARGET_EINVAL;
10255             }
10256         }
10257         break;
10258 #endif
10259     case TARGET_NR_setitimer:
10260         {
10261             struct itimerval value, ovalue, *pvalue;
10262 
10263             if (arg2) {
10264                 pvalue = &value;
10265                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10266                     || copy_from_user_timeval(&pvalue->it_value,
10267                                               arg2 + sizeof(struct target_timeval)))
10268                     return -TARGET_EFAULT;
10269             } else {
10270                 pvalue = NULL;
10271             }
10272             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10273             if (!is_error(ret) && arg3) {
10274                 if (copy_to_user_timeval(arg3,
10275                                          &ovalue.it_interval)
10276                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10277                                             &ovalue.it_value))
10278                     return -TARGET_EFAULT;
10279             }
10280         }
10281         return ret;
10282     case TARGET_NR_getitimer:
10283         {
10284             struct itimerval value;
10285 
10286             ret = get_errno(getitimer(arg1, &value));
10287             if (!is_error(ret) && arg2) {
10288                 if (copy_to_user_timeval(arg2,
10289                                          &value.it_interval)
10290                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10291                                             &value.it_value))
10292                     return -TARGET_EFAULT;
10293             }
10294         }
10295         return ret;
10296 #ifdef TARGET_NR_stat
10297     case TARGET_NR_stat:
10298         if (!(p = lock_user_string(arg1))) {
10299             return -TARGET_EFAULT;
10300         }
10301         ret = get_errno(stat(path(p), &st));
10302         unlock_user(p, arg1, 0);
10303         goto do_stat;
10304 #endif
10305 #ifdef TARGET_NR_lstat
10306     case TARGET_NR_lstat:
10307         if (!(p = lock_user_string(arg1))) {
10308             return -TARGET_EFAULT;
10309         }
10310         ret = get_errno(lstat(path(p), &st));
10311         unlock_user(p, arg1, 0);
10312         goto do_stat;
10313 #endif
10314 #ifdef TARGET_NR_fstat
10315     case TARGET_NR_fstat:
10316         {
10317             ret = get_errno(fstat(arg1, &st));
10318 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10319         do_stat:
10320 #endif
10321             if (!is_error(ret)) {
10322                 struct target_stat *target_st;
10323 
10324                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10325                     return -TARGET_EFAULT;
10326                 memset(target_st, 0, sizeof(*target_st));
10327                 __put_user(st.st_dev, &target_st->st_dev);
10328                 __put_user(st.st_ino, &target_st->st_ino);
10329                 __put_user(st.st_mode, &target_st->st_mode);
10330                 __put_user(st.st_uid, &target_st->st_uid);
10331                 __put_user(st.st_gid, &target_st->st_gid);
10332                 __put_user(st.st_nlink, &target_st->st_nlink);
10333                 __put_user(st.st_rdev, &target_st->st_rdev);
10334                 __put_user(st.st_size, &target_st->st_size);
10335                 __put_user(st.st_blksize, &target_st->st_blksize);
10336                 __put_user(st.st_blocks, &target_st->st_blocks);
10337                 __put_user(st.st_atime, &target_st->target_st_atime);
10338                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10339                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10340 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10341                 __put_user(st.st_atim.tv_nsec,
10342                            &target_st->target_st_atime_nsec);
10343                 __put_user(st.st_mtim.tv_nsec,
10344                            &target_st->target_st_mtime_nsec);
10345                 __put_user(st.st_ctim.tv_nsec,
10346                            &target_st->target_st_ctime_nsec);
10347 #endif
10348                 unlock_user_struct(target_st, arg2, 1);
10349             }
10350         }
10351         return ret;
10352 #endif
10353     case TARGET_NR_vhangup:
10354         return get_errno(vhangup());
10355 #ifdef TARGET_NR_syscall
10356     case TARGET_NR_syscall:
10357         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10358                           arg6, arg7, arg8, 0);
10359 #endif
10360 #if defined(TARGET_NR_wait4)
10361     case TARGET_NR_wait4:
10362         {
10363             int status;
10364             abi_long status_ptr = arg2;
10365             struct rusage rusage, *rusage_ptr;
10366             abi_ulong target_rusage = arg4;
10367             abi_long rusage_err;
10368             if (target_rusage)
10369                 rusage_ptr = &rusage;
10370             else
10371                 rusage_ptr = NULL;
10372             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10373             if (!is_error(ret)) {
10374                 if (status_ptr && ret) {
10375                     status = host_to_target_waitstatus(status);
10376                     if (put_user_s32(status, status_ptr))
10377                         return -TARGET_EFAULT;
10378                 }
10379                 if (target_rusage) {
10380                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10381                     if (rusage_err) {
10382                         ret = rusage_err;
10383                     }
10384                 }
10385             }
10386         }
10387         return ret;
10388 #endif
10389 #ifdef TARGET_NR_swapoff
10390     case TARGET_NR_swapoff:
10391         if (!(p = lock_user_string(arg1)))
10392             return -TARGET_EFAULT;
10393         ret = get_errno(swapoff(p));
10394         unlock_user(p, arg1, 0);
10395         return ret;
10396 #endif
10397     case TARGET_NR_sysinfo:
10398         {
10399             struct target_sysinfo *target_value;
10400             struct sysinfo value;
10401             ret = get_errno(sysinfo(&value));
10402             if (!is_error(ret) && arg1)
10403             {
10404                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10405                     return -TARGET_EFAULT;
10406                 __put_user(value.uptime, &target_value->uptime);
10407                 __put_user(value.loads[0], &target_value->loads[0]);
10408                 __put_user(value.loads[1], &target_value->loads[1]);
10409                 __put_user(value.loads[2], &target_value->loads[2]);
10410                 __put_user(value.totalram, &target_value->totalram);
10411                 __put_user(value.freeram, &target_value->freeram);
10412                 __put_user(value.sharedram, &target_value->sharedram);
10413                 __put_user(value.bufferram, &target_value->bufferram);
10414                 __put_user(value.totalswap, &target_value->totalswap);
10415                 __put_user(value.freeswap, &target_value->freeswap);
10416                 __put_user(value.procs, &target_value->procs);
10417                 __put_user(value.totalhigh, &target_value->totalhigh);
10418                 __put_user(value.freehigh, &target_value->freehigh);
10419                 __put_user(value.mem_unit, &target_value->mem_unit);
10420                 unlock_user_struct(target_value, arg1, 1);
10421             }
10422         }
10423         return ret;
10424 #ifdef TARGET_NR_ipc
10425     case TARGET_NR_ipc:
10426         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10427 #endif
10428 #ifdef TARGET_NR_semget
10429     case TARGET_NR_semget:
10430         return get_errno(semget(arg1, arg2, arg3));
10431 #endif
10432 #ifdef TARGET_NR_semop
10433     case TARGET_NR_semop:
10434         return do_semtimedop(arg1, arg2, arg3, 0, false);
10435 #endif
10436 #ifdef TARGET_NR_semtimedop
10437     case TARGET_NR_semtimedop:
10438         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10439 #endif
10440 #ifdef TARGET_NR_semtimedop_time64
10441     case TARGET_NR_semtimedop_time64:
10442         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10443 #endif
10444 #ifdef TARGET_NR_semctl
10445     case TARGET_NR_semctl:
10446         return do_semctl(arg1, arg2, arg3, arg4);
10447 #endif
10448 #ifdef TARGET_NR_msgctl
10449     case TARGET_NR_msgctl:
10450         return do_msgctl(arg1, arg2, arg3);
10451 #endif
10452 #ifdef TARGET_NR_msgget
10453     case TARGET_NR_msgget:
10454         return get_errno(msgget(arg1, arg2));
10455 #endif
10456 #ifdef TARGET_NR_msgrcv
10457     case TARGET_NR_msgrcv:
10458         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10459 #endif
10460 #ifdef TARGET_NR_msgsnd
10461     case TARGET_NR_msgsnd:
10462         return do_msgsnd(arg1, arg2, arg3, arg4);
10463 #endif
10464 #ifdef TARGET_NR_shmget
10465     case TARGET_NR_shmget:
10466         return get_errno(shmget(arg1, arg2, arg3));
10467 #endif
10468 #ifdef TARGET_NR_shmctl
10469     case TARGET_NR_shmctl:
10470         return do_shmctl(arg1, arg2, arg3);
10471 #endif
10472 #ifdef TARGET_NR_shmat
10473     case TARGET_NR_shmat:
10474         return do_shmat(cpu_env, arg1, arg2, arg3);
10475 #endif
10476 #ifdef TARGET_NR_shmdt
10477     case TARGET_NR_shmdt:
10478         return do_shmdt(arg1);
10479 #endif
10480     case TARGET_NR_fsync:
10481         return get_errno(fsync(arg1));
10482     case TARGET_NR_clone:
10483         /* Linux manages to have three different orderings for its
10484          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10485          * match the kernel's CONFIG_CLONE_* settings.
10486          * Microblaze is further special in that it uses a sixth
10487          * implicit argument to clone for the TLS pointer.
10488          */
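        /*
         * do_fork() takes one canonical argument order; each variant below
         * just reshuffles the raw syscall arguments into that order.
         */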
10489 #if defined(TARGET_MICROBLAZE)
10490         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10491 #elif defined(TARGET_CLONE_BACKWARDS)
10492         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10493 #elif defined(TARGET_CLONE_BACKWARDS2)
10494         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10495 #else
10496         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10497 #endif
10498         return ret;
10499 #ifdef __NR_exit_group
10500         /* new thread calls */
10501     case TARGET_NR_exit_group:
10502         preexit_cleanup(cpu_env, arg1);
10503         return get_errno(exit_group(arg1));
10504 #endif
10505     case TARGET_NR_setdomainname:
10506         if (!(p = lock_user_string(arg1)))
10507             return -TARGET_EFAULT;
10508         ret = get_errno(setdomainname(p, arg2));
10509         unlock_user(p, arg1, 0);
10510         return ret;
10511     case TARGET_NR_uname:
10512         /* no need to transcode because we use the Linux syscall */
10513         {
10514             struct new_utsname * buf;
10515 
10516             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10517                 return -TARGET_EFAULT;
10518             ret = get_errno(sys_uname(buf));
10519             if (!is_error(ret)) {
10520                 /* Overwrite the native machine name with whatever is being
10521                    emulated. */
10522                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10523                           sizeof(buf->machine));
10524                 /* Allow the user to override the reported release.  */
10525                 if (qemu_uname_release && *qemu_uname_release) {
10526                     g_strlcpy(buf->release, qemu_uname_release,
10527                               sizeof(buf->release));
10528                 }
10529             }
10530             unlock_user_struct(buf, arg1, 1);
10531         }
10532         return ret;
10533 #ifdef TARGET_I386
10534     case TARGET_NR_modify_ldt:
10535         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10536 #if !defined(TARGET_X86_64)
10537     case TARGET_NR_vm86:
10538         return do_vm86(cpu_env, arg1, arg2);
10539 #endif
10540 #endif
10541 #if defined(TARGET_NR_adjtimex)
10542     case TARGET_NR_adjtimex:
10543         {
10544             struct timex host_buf;
10545 
10546             if (target_to_host_timex(&host_buf, arg1) != 0) {
10547                 return -TARGET_EFAULT;
10548             }
10549             ret = get_errno(adjtimex(&host_buf));
10550             if (!is_error(ret)) {
10551                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10552                     return -TARGET_EFAULT;
10553                 }
10554             }
10555         }
10556         return ret;
10557 #endif
10558 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10559     case TARGET_NR_clock_adjtime:
10560         {
10561             struct timex htx, *phtx = &htx;
10562 
10563             if (target_to_host_timex(phtx, arg2) != 0) {
10564                 return -TARGET_EFAULT;
10565             }
10566             ret = get_errno(clock_adjtime(arg1, phtx));
10567             if (!is_error(ret) && phtx) {
10568                 if (host_to_target_timex(arg2, phtx) != 0) {
10569                     return -TARGET_EFAULT;
10570                 }
10571             }
10572         }
10573         return ret;
10574 #endif
10575 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10576     case TARGET_NR_clock_adjtime64:
10577         {
10578             struct timex htx;
10579 
10580             if (target_to_host_timex64(&htx, arg2) != 0) {
10581                 return -TARGET_EFAULT;
10582             }
10583             ret = get_errno(clock_adjtime(arg1, &htx));
10584             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10585                     return -TARGET_EFAULT;
10586             }
10587         }
10588         return ret;
10589 #endif
10590     case TARGET_NR_getpgid:
10591         return get_errno(getpgid(arg1));
10592     case TARGET_NR_fchdir:
10593         return get_errno(fchdir(arg1));
10594     case TARGET_NR_personality:
10595         return get_errno(personality(arg1));
10596 #ifdef TARGET_NR__llseek /* Not on alpha */
10597     case TARGET_NR__llseek:
10598         {
10599             int64_t res;
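            /*
             * arg2 and arg3 carry the high and low halves of the 64-bit
             * offset and the result is written back to the guest at arg4;
             * if the host has no _llseek (typically a 64-bit host), a plain
             * lseek() with the combined offset does the job.
             */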
10600 #if !defined(__NR_llseek)
10601             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10602             if (res == -1) {
10603                 ret = get_errno(res);
10604             } else {
10605                 ret = 0;
10606             }
10607 #else
10608             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10609 #endif
10610             if ((ret == 0) && put_user_s64(res, arg4)) {
10611                 return -TARGET_EFAULT;
10612             }
10613         }
10614         return ret;
10615 #endif
10616 #ifdef TARGET_NR_getdents
10617     case TARGET_NR_getdents:
10618         return do_getdents(arg1, arg2, arg3);
10619 #endif /* TARGET_NR_getdents */
10620 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10621     case TARGET_NR_getdents64:
10622         return do_getdents64(arg1, arg2, arg3);
10623 #endif /* TARGET_NR_getdents64 */
10624 #if defined(TARGET_NR__newselect)
10625     case TARGET_NR__newselect:
10626         return do_select(arg1, arg2, arg3, arg4, arg5);
10627 #endif
10628 #ifdef TARGET_NR_poll
10629     case TARGET_NR_poll:
10630         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10631 #endif
10632 #ifdef TARGET_NR_ppoll
10633     case TARGET_NR_ppoll:
10634         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10635 #endif
10636 #ifdef TARGET_NR_ppoll_time64
10637     case TARGET_NR_ppoll_time64:
10638         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10639 #endif
10640     case TARGET_NR_flock:
10641         /* NOTE: the flock constants seem to be the same on every
10642            Linux platform */
10643         return get_errno(safe_flock(arg1, arg2));
10644     case TARGET_NR_readv:
10645         {
10646             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10647             if (vec != NULL) {
10648                 ret = get_errno(safe_readv(arg1, vec, arg3));
10649                 unlock_iovec(vec, arg2, arg3, 1);
10650             } else {
10651                 ret = -host_to_target_errno(errno);
10652             }
10653         }
10654         return ret;
10655     case TARGET_NR_writev:
10656         {
10657             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10658             if (vec != NULL) {
10659                 ret = get_errno(safe_writev(arg1, vec, arg3));
10660                 unlock_iovec(vec, arg2, arg3, 0);
10661             } else {
10662                 ret = -host_to_target_errno(errno);
10663             }
10664         }
10665         return ret;
10666 #if defined(TARGET_NR_preadv)
10667     case TARGET_NR_preadv:
10668         {
10669             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10670             if (vec != NULL) {
10671                 unsigned long low, high;
10672 
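                      /*
                       * arg4/arg5 carry the two halves of the 64-bit file
                       * offset; target_to_host_low_high() turns them into the
                       * low/high words the host preadv/pwritev syscalls expect.
                       */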
10673                 target_to_host_low_high(arg4, arg5, &low, &high);
10674                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10675                 unlock_iovec(vec, arg2, arg3, 1);
10676             } else {
10677                 ret = -host_to_target_errno(errno);
10678             }
10679         }
10680         return ret;
10681 #endif
10682 #if defined(TARGET_NR_pwritev)
10683     case TARGET_NR_pwritev:
10684         {
10685             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10686             if (vec != NULL) {
10687                 unsigned long low, high;
10688 
10689                 target_to_host_low_high(arg4, arg5, &low, &high);
10690                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10691                 unlock_iovec(vec, arg2, arg3, 0);
10692             } else {
10693                 ret = -host_to_target_errno(errno);
10694             }
10695         }
10696         return ret;
10697 #endif
10698     case TARGET_NR_getsid:
10699         return get_errno(getsid(arg1));
10700 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10701     case TARGET_NR_fdatasync:
10702         return get_errno(fdatasync(arg1));
10703 #endif
10704     case TARGET_NR_sched_getaffinity:
10705         {
10706             unsigned int mask_size;
10707             unsigned long *mask;
10708 
10709             /*
10710              * sched_getaffinity needs multiples of ulong, so need to take
10711              * care of mismatches between target ulong and host ulong sizes.
10712              */
10713             if (arg2 & (sizeof(abi_ulong) - 1)) {
10714                 return -TARGET_EINVAL;
10715             }
10716             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10717 
10718             mask = alloca(mask_size);
10719             memset(mask, 0, mask_size);
10720             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10721 
10722             if (!is_error(ret)) {
10723                 if (ret > arg2) {
10724                     /* More data returned than the caller's buffer will fit.
10725                      * This only happens if sizeof(abi_long) < sizeof(long)
10726                      * and the caller passed us a buffer holding an odd number
10727                      * of abi_longs. If the host kernel is actually using the
10728                      * extra 4 bytes then fail EINVAL; otherwise we can just
10729                      * ignore them and only copy the interesting part.
10730                      */
10731                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10732                     if (numcpus > arg2 * 8) {
10733                         return -TARGET_EINVAL;
10734                     }
10735                     ret = arg2;
10736                 }
10737 
10738                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10739                     return -TARGET_EFAULT;
10740                 }
10741             }
10742         }
10743         return ret;
10744     case TARGET_NR_sched_setaffinity:
10745         {
10746             unsigned int mask_size;
10747             unsigned long *mask;
10748 
10749             /*
10750              * sched_setaffinity needs multiples of ulong, so need to take
10751              * care of mismatches between target ulong and host ulong sizes.
10752              */
10753             if (arg2 & (sizeof(abi_ulong) - 1)) {
10754                 return -TARGET_EINVAL;
10755             }
10756             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10757             mask = alloca(mask_size);
10758 
10759             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10760             if (ret) {
10761                 return ret;
10762             }
10763 
10764             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10765         }
10766     case TARGET_NR_getcpu:
10767         {
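                  /*
                   * Only query the fields the guest asked for; the third
                   * (tcache) argument of getcpu is unused by the kernel and
                   * is always passed as NULL.
                   */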
10768             unsigned cpu, node;
10769             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10770                                        arg2 ? &node : NULL,
10771                                        NULL));
10772             if (is_error(ret)) {
10773                 return ret;
10774             }
10775             if (arg1 && put_user_u32(cpu, arg1)) {
10776                 return -TARGET_EFAULT;
10777             }
10778             if (arg2 && put_user_u32(node, arg2)) {
10779                 return -TARGET_EFAULT;
10780             }
10781         }
10782         return ret;
10783     case TARGET_NR_sched_setparam:
10784         {
10785             struct target_sched_param *target_schp;
10786             struct sched_param schp;
10787 
10788             if (arg2 == 0) {
10789                 return -TARGET_EINVAL;
10790             }
10791             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10792                 return -TARGET_EFAULT;
10793             }
10794             schp.sched_priority = tswap32(target_schp->sched_priority);
10795             unlock_user_struct(target_schp, arg2, 0);
10796             return get_errno(sys_sched_setparam(arg1, &schp));
10797         }
10798     case TARGET_NR_sched_getparam:
10799         {
10800             struct target_sched_param *target_schp;
10801             struct sched_param schp;
10802 
10803             if (arg2 == 0) {
10804                 return -TARGET_EINVAL;
10805             }
10806             ret = get_errno(sys_sched_getparam(arg1, &schp));
10807             if (!is_error(ret)) {
10808                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10809                     return -TARGET_EFAULT;
10810                 }
10811                 target_schp->sched_priority = tswap32(schp.sched_priority);
10812                 unlock_user_struct(target_schp, arg2, 1);
10813             }
10814         }
10815         return ret;
10816     case TARGET_NR_sched_setscheduler:
10817         {
10818             struct target_sched_param *target_schp;
10819             struct sched_param schp;
10820             if (arg3 == 0) {
10821                 return -TARGET_EINVAL;
10822             }
10823             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10824                 return -TARGET_EFAULT;
10825             }
10826             schp.sched_priority = tswap32(target_schp->sched_priority);
10827             unlock_user_struct(target_schp, arg3, 0);
10828             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10829         }
10830     case TARGET_NR_sched_getscheduler:
10831         return get_errno(sys_sched_getscheduler(arg1));
10832     case TARGET_NR_sched_getattr:
10833         {
10834             struct target_sched_attr *target_scha;
10835             struct sched_attr scha;
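                  /*
                   * arg3 is the size of the guest's buffer; clamp it to our
                   * sched_attr before calling the kernel, and copy the
                   * utilization clamp fields back only if the size reported
                   * by the kernel says they are present.
                   */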
10836             if (arg2 == 0) {
10837                 return -TARGET_EINVAL;
10838             }
10839             if (arg3 > sizeof(scha)) {
10840                 arg3 = sizeof(scha);
10841             }
10842             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10843             if (!is_error(ret)) {
10844                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10845                 if (!target_scha) {
10846                     return -TARGET_EFAULT;
10847                 }
10848                 target_scha->size = tswap32(scha.size);
10849                 target_scha->sched_policy = tswap32(scha.sched_policy);
10850                 target_scha->sched_flags = tswap64(scha.sched_flags);
10851                 target_scha->sched_nice = tswap32(scha.sched_nice);
10852                 target_scha->sched_priority = tswap32(scha.sched_priority);
10853                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10854                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10855                 target_scha->sched_period = tswap64(scha.sched_period);
10856                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10857                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10858                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10859                 }
10860                 unlock_user(target_scha, arg2, arg3);
10861             }
10862             return ret;
10863         }
10864     case TARGET_NR_sched_setattr:
10865         {
10866             struct target_sched_attr *target_scha;
10867             struct sched_attr scha;
10868             uint32_t size;
10869             int zeroed;
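                  /*
                   * The first field of sched_attr is its own size.  Mirror the
                   * kernel's checks: size 0 means the original layout, a size
                   * smaller than that is rejected with E2BIG (after reporting
                   * the size we support), and any bytes beyond the fields we
                   * know about must be zero.
                   */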
10870             if (arg2 == 0) {
10871                 return -TARGET_EINVAL;
10872             }
10873             if (get_user_u32(size, arg2)) {
10874                 return -TARGET_EFAULT;
10875             }
10876             if (!size) {
10877                 size = offsetof(struct target_sched_attr, sched_util_min);
10878             }
10879             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10880                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10881                     return -TARGET_EFAULT;
10882                 }
10883                 return -TARGET_E2BIG;
10884             }
10885 
10886             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10887             if (zeroed < 0) {
10888                 return zeroed;
10889             } else if (zeroed == 0) {
10890                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10891                     return -TARGET_EFAULT;
10892                 }
10893                 return -TARGET_E2BIG;
10894             }
10895             if (size > sizeof(struct target_sched_attr)) {
10896                 size = sizeof(struct target_sched_attr);
10897             }
10898 
10899             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10900             if (!target_scha) {
10901                 return -TARGET_EFAULT;
10902             }
10903             scha.size = size;
10904             scha.sched_policy = tswap32(target_scha->sched_policy);
10905             scha.sched_flags = tswap64(target_scha->sched_flags);
10906             scha.sched_nice = tswap32(target_scha->sched_nice);
10907             scha.sched_priority = tswap32(target_scha->sched_priority);
10908             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10909             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10910             scha.sched_period = tswap64(target_scha->sched_period);
10911             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10912                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10913                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10914             }
10915             unlock_user(target_scha, arg2, 0);
10916             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10917         }
10918     case TARGET_NR_sched_yield:
10919         return get_errno(sched_yield());
10920     case TARGET_NR_sched_get_priority_max:
10921         return get_errno(sched_get_priority_max(arg1));
10922     case TARGET_NR_sched_get_priority_min:
10923         return get_errno(sched_get_priority_min(arg1));
10924 #ifdef TARGET_NR_sched_rr_get_interval
10925     case TARGET_NR_sched_rr_get_interval:
10926         {
10927             struct timespec ts;
10928             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10929             if (!is_error(ret)) {
10930                 ret = host_to_target_timespec(arg2, &ts);
10931             }
10932         }
10933         return ret;
10934 #endif
10935 #ifdef TARGET_NR_sched_rr_get_interval_time64
10936     case TARGET_NR_sched_rr_get_interval_time64:
10937         {
10938             struct timespec ts;
10939             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10940             if (!is_error(ret)) {
10941                 ret = host_to_target_timespec64(arg2, &ts);
10942             }
10943         }
10944         return ret;
10945 #endif
10946 #if defined(TARGET_NR_nanosleep)
10947     case TARGET_NR_nanosleep:
10948         {
10949             struct timespec req, rem;
10950             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10951             ret = get_errno(safe_nanosleep(&req, &rem));
10952             if (is_error(ret) && arg2) {
10953                 host_to_target_timespec(arg2, &rem);
10954             }
10955         }
10956         return ret;
10957 #endif
10958     case TARGET_NR_prctl:
10959         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10961 #ifdef TARGET_NR_arch_prctl
10962     case TARGET_NR_arch_prctl:
10963         return do_arch_prctl(cpu_env, arg1, arg2);
10964 #endif
10965 #ifdef TARGET_NR_pread64
10966     case TARGET_NR_pread64:
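              /*
               * On 32-bit ABIs that pass 64-bit values in aligned register
               * pairs the offset arrives in arg5/arg6, so shift the arguments
               * down before rebuilding it with target_offset64().
               */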
10967         if (regpairs_aligned(cpu_env, num)) {
10968             arg4 = arg5;
10969             arg5 = arg6;
10970         }
10971         if (arg2 == 0 && arg3 == 0) {
10972             /* Special-case NULL buffer and zero length, which should succeed */
10973             p = 0;
10974         } else {
10975             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10976             if (!p) {
10977                 return -TARGET_EFAULT;
10978             }
10979         }
10980         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10981         unlock_user(p, arg2, ret);
10982         return ret;
10983     case TARGET_NR_pwrite64:
10984         if (regpairs_aligned(cpu_env, num)) {
10985             arg4 = arg5;
10986             arg5 = arg6;
10987         }
10988         if (arg2 == 0 && arg3 == 0) {
10989             /* Special-case NULL buffer and zero length, which should succeed */
10990             p = 0;
10991         } else {
10992             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10993             if (!p) {
10994                 return -TARGET_EFAULT;
10995             }
10996         }
10997         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10998         unlock_user(p, arg2, 0);
10999         return ret;
11000 #endif
11001     case TARGET_NR_getcwd:
11002         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11003             return -TARGET_EFAULT;
11004         ret = get_errno(sys_getcwd1(p, arg2));
11005         unlock_user(p, arg1, ret);
11006         return ret;
11007     case TARGET_NR_capget:
11008     case TARGET_NR_capset:
11009     {
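              /*
               * capget and capset share this code.  The header is always read
               * and written back (the kernel updates its version field even on
               * failure); the data array is copied in only for capset and out
               * only for capget, and may be absent (arg2 == 0).
               */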
11010         struct target_user_cap_header *target_header;
11011         struct target_user_cap_data *target_data = NULL;
11012         struct __user_cap_header_struct header;
11013         struct __user_cap_data_struct data[2];
11014         struct __user_cap_data_struct *dataptr = NULL;
11015         int i, target_datalen;
11016         int data_items = 1;
11017 
11018         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11019             return -TARGET_EFAULT;
11020         }
11021         header.version = tswap32(target_header->version);
11022         header.pid = tswap32(target_header->pid);
11023 
11024         if (header.version != _LINUX_CAPABILITY_VERSION) {
11025             /* Versions 2 and up take a pointer to two user_data structs */
11026             data_items = 2;
11027         }
11028 
11029         target_datalen = sizeof(*target_data) * data_items;
11030 
11031         if (arg2) {
11032             if (num == TARGET_NR_capget) {
11033                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11034             } else {
11035                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11036             }
11037             if (!target_data) {
11038                 unlock_user_struct(target_header, arg1, 0);
11039                 return -TARGET_EFAULT;
11040             }
11041 
11042             if (num == TARGET_NR_capset) {
11043                 for (i = 0; i < data_items; i++) {
11044                     data[i].effective = tswap32(target_data[i].effective);
11045                     data[i].permitted = tswap32(target_data[i].permitted);
11046                     data[i].inheritable = tswap32(target_data[i].inheritable);
11047                 }
11048             }
11049 
11050             dataptr = data;
11051         }
11052 
11053         if (num == TARGET_NR_capget) {
11054             ret = get_errno(capget(&header, dataptr));
11055         } else {
11056             ret = get_errno(capset(&header, dataptr));
11057         }
11058 
11059         /* The kernel always updates version for both capget and capset */
11060         target_header->version = tswap32(header.version);
11061         unlock_user_struct(target_header, arg1, 1);
11062 
11063         if (arg2) {
11064             if (num == TARGET_NR_capget) {
11065                 for (i = 0; i < data_items; i++) {
11066                     target_data[i].effective = tswap32(data[i].effective);
11067                     target_data[i].permitted = tswap32(data[i].permitted);
11068                     target_data[i].inheritable = tswap32(data[i].inheritable);
11069                 }
11070                 unlock_user(target_data, arg2, target_datalen);
11071             } else {
11072                 unlock_user(target_data, arg2, 0);
11073             }
11074         }
11075         return ret;
11076     }
11077     case TARGET_NR_sigaltstack:
11078         return do_sigaltstack(arg1, arg2, cpu_env);
11079 
11080 #ifdef CONFIG_SENDFILE
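          /*
           * sendfile and sendfile64 differ only in how the guest passes the
           * optional offset (abi_long vs. 64 bit); both call the host
           * sendfile() and copy the advanced offset back on success.
           */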
11081 #ifdef TARGET_NR_sendfile
11082     case TARGET_NR_sendfile:
11083     {
11084         off_t *offp = NULL;
11085         off_t off;
11086         if (arg3) {
11087             ret = get_user_sal(off, arg3);
11088             if (is_error(ret)) {
11089                 return ret;
11090             }
11091             offp = &off;
11092         }
11093         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11094         if (!is_error(ret) && arg3) {
11095             abi_long ret2 = put_user_sal(off, arg3);
11096             if (is_error(ret2)) {
11097                 ret = ret2;
11098             }
11099         }
11100         return ret;
11101     }
11102 #endif
11103 #ifdef TARGET_NR_sendfile64
11104     case TARGET_NR_sendfile64:
11105     {
11106         off_t *offp = NULL;
11107         off_t off;
11108         if (arg3) {
11109             ret = get_user_s64(off, arg3);
11110             if (is_error(ret)) {
11111                 return ret;
11112             }
11113             offp = &off;
11114         }
11115         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11116         if (!is_error(ret) && arg3) {
11117             abi_long ret2 = put_user_s64(off, arg3);
11118             if (is_error(ret2)) {
11119                 ret = ret2;
11120             }
11121         }
11122         return ret;
11123     }
11124 #endif
11125 #endif
11126 #ifdef TARGET_NR_vfork
11127     case TARGET_NR_vfork:
11128         return get_errno(do_fork(cpu_env,
11129                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11130                          0, 0, 0, 0));
11131 #endif
11132 #ifdef TARGET_NR_ugetrlimit
11133     case TARGET_NR_ugetrlimit:
11134     {
11135         struct rlimit rlim;
11136         int resource = target_to_host_resource(arg1);
11137         ret = get_errno(getrlimit(resource, &rlim));
11138         if (!is_error(ret)) {
11139             struct target_rlimit *target_rlim;
11140             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11141                 return -TARGET_EFAULT;
11142             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11143             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11144             unlock_user_struct(target_rlim, arg2, 1);
11145         }
11146         return ret;
11147     }
11148 #endif
11149 #ifdef TARGET_NR_truncate64
11150     case TARGET_NR_truncate64:
11151         if (!(p = lock_user_string(arg1)))
11152             return -TARGET_EFAULT;
11153         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11154         unlock_user(p, arg1, 0);
11155         return ret;
11156 #endif
11157 #ifdef TARGET_NR_ftruncate64
11158     case TARGET_NR_ftruncate64:
11159         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11160 #endif
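          /*
           * The stat64 family calls the host's native stat()/lstat()/fstat()/
           * fstatat() and converts the result into the target's stat64 layout
           * with host_to_target_stat64().
           */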
11161 #ifdef TARGET_NR_stat64
11162     case TARGET_NR_stat64:
11163         if (!(p = lock_user_string(arg1))) {
11164             return -TARGET_EFAULT;
11165         }
11166         ret = get_errno(stat(path(p), &st));
11167         unlock_user(p, arg1, 0);
11168         if (!is_error(ret))
11169             ret = host_to_target_stat64(cpu_env, arg2, &st);
11170         return ret;
11171 #endif
11172 #ifdef TARGET_NR_lstat64
11173     case TARGET_NR_lstat64:
11174         if (!(p = lock_user_string(arg1))) {
11175             return -TARGET_EFAULT;
11176         }
11177         ret = get_errno(lstat(path(p), &st));
11178         unlock_user(p, arg1, 0);
11179         if (!is_error(ret))
11180             ret = host_to_target_stat64(cpu_env, arg2, &st);
11181         return ret;
11182 #endif
11183 #ifdef TARGET_NR_fstat64
11184     case TARGET_NR_fstat64:
11185         ret = get_errno(fstat(arg1, &st));
11186         if (!is_error(ret))
11187             ret = host_to_target_stat64(cpu_env, arg2, &st);
11188         return ret;
11189 #endif
11190 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11191 #ifdef TARGET_NR_fstatat64
11192     case TARGET_NR_fstatat64:
11193 #endif
11194 #ifdef TARGET_NR_newfstatat
11195     case TARGET_NR_newfstatat:
11196 #endif
11197         if (!(p = lock_user_string(arg2))) {
11198             return -TARGET_EFAULT;
11199         }
11200         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11201         unlock_user(p, arg2, 0);
11202         if (!is_error(ret))
11203             ret = host_to_target_stat64(cpu_env, arg3, &st);
11204         return ret;
11205 #endif
11206 #if defined(TARGET_NR_statx)
11207     case TARGET_NR_statx:
11208         {
11209             struct target_statx *target_stx;
11210             int dirfd = arg1;
11211             int flags = arg3;
11212 
11213             p = lock_user_string(arg2);
11214             if (p == NULL) {
11215                 return -TARGET_EFAULT;
11216             }
11217 #if defined(__NR_statx)
11218             {
11219                 /*
11220                  * It is assumed that struct statx is architecture independent.
11221                  */
11222                 struct target_statx host_stx;
11223                 int mask = arg4;
11224 
11225                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11226                 if (!is_error(ret)) {
11227                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11228                         unlock_user(p, arg2, 0);
11229                         return -TARGET_EFAULT;
11230                     }
11231                 }
11232 
11233                 if (ret != -TARGET_ENOSYS) {
11234                     unlock_user(p, arg2, 0);
11235                     return ret;
11236                 }
11237             }
11238 #endif
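                  /*
                   * Either the host has no statx() or it returned ENOSYS;
                   * fall back to fstatat() and fill in the statx fields that
                   * a plain struct stat can provide.
                   */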
11239             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11240             unlock_user(p, arg2, 0);
11241 
11242             if (!is_error(ret)) {
11243                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11244                     return -TARGET_EFAULT;
11245                 }
11246                 memset(target_stx, 0, sizeof(*target_stx));
11247                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11248                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11249                 __put_user(st.st_ino, &target_stx->stx_ino);
11250                 __put_user(st.st_mode, &target_stx->stx_mode);
11251                 __put_user(st.st_uid, &target_stx->stx_uid);
11252                 __put_user(st.st_gid, &target_stx->stx_gid);
11253                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11254                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11255                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11256                 __put_user(st.st_size, &target_stx->stx_size);
11257                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11258                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11259                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11260                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11261                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11262                 unlock_user_struct(target_stx, arg5, 1);
11263             }
11264         }
11265         return ret;
11266 #endif
11267 #ifdef TARGET_NR_lchown
11268     case TARGET_NR_lchown:
11269         if (!(p = lock_user_string(arg1)))
11270             return -TARGET_EFAULT;
11271         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11272         unlock_user(p, arg1, 0);
11273         return ret;
11274 #endif
11275 #ifdef TARGET_NR_getuid
11276     case TARGET_NR_getuid:
11277         return get_errno(high2lowuid(getuid()));
11278 #endif
11279 #ifdef TARGET_NR_getgid
11280     case TARGET_NR_getgid:
11281         return get_errno(high2lowgid(getgid()));
11282 #endif
11283 #ifdef TARGET_NR_geteuid
11284     case TARGET_NR_geteuid:
11285         return get_errno(high2lowuid(geteuid()));
11286 #endif
11287 #ifdef TARGET_NR_getegid
11288     case TARGET_NR_getegid:
11289         return get_errno(high2lowgid(getegid()));
11290 #endif
11291     case TARGET_NR_setreuid:
11292         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11293     case TARGET_NR_setregid:
11294         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
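          /*
           * getgroups/setgroups convert each entry between the host gid_t and
           * the guest's target_id (possibly the legacy 16-bit type) with
           * high2lowgid()/low2highgid() and tswapid().
           */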
11295     case TARGET_NR_getgroups:
11296         {
11297             int gidsetsize = arg1;
11298             target_id *target_grouplist;
11299             gid_t *grouplist;
11300             int i;
11301 
11302             grouplist = alloca(gidsetsize * sizeof(gid_t));
11303             ret = get_errno(getgroups(gidsetsize, grouplist));
11304             if (gidsetsize == 0)
11305                 return ret;
11306             if (!is_error(ret)) {
11307                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11308                 if (!target_grouplist)
11309                     return -TARGET_EFAULT;
11310                 for (i = 0; i < ret; i++)
11311                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11312                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11313             }
11314         }
11315         return ret;
11316     case TARGET_NR_setgroups:
11317         {
11318             int gidsetsize = arg1;
11319             target_id *target_grouplist;
11320             gid_t *grouplist = NULL;
11321             int i;
11322             if (gidsetsize) {
11323                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11324                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11325                 if (!target_grouplist) {
11326                     return -TARGET_EFAULT;
11327                 }
11328                 for (i = 0; i < gidsetsize; i++) {
11329                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11330                 }
11331                 unlock_user(target_grouplist, arg2, 0);
11332             }
11333             return get_errno(setgroups(gidsetsize, grouplist));
11334         }
11335     case TARGET_NR_fchown:
11336         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11337 #if defined(TARGET_NR_fchownat)
11338     case TARGET_NR_fchownat:
11339         if (!(p = lock_user_string(arg2)))
11340             return -TARGET_EFAULT;
11341         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11342                                  low2highgid(arg4), arg5));
11343         unlock_user(p, arg2, 0);
11344         return ret;
11345 #endif
11346 #ifdef TARGET_NR_setresuid
11347     case TARGET_NR_setresuid:
11348         return get_errno(sys_setresuid(low2highuid(arg1),
11349                                        low2highuid(arg2),
11350                                        low2highuid(arg3)));
11351 #endif
11352 #ifdef TARGET_NR_getresuid
11353     case TARGET_NR_getresuid:
11354         {
11355             uid_t ruid, euid, suid;
11356             ret = get_errno(getresuid(&ruid, &euid, &suid));
11357             if (!is_error(ret)) {
11358                 if (put_user_id(high2lowuid(ruid), arg1)
11359                     || put_user_id(high2lowuid(euid), arg2)
11360                     || put_user_id(high2lowuid(suid), arg3))
11361                     return -TARGET_EFAULT;
11362             }
11363         }
11364         return ret;
11365 #endif
11366 #ifdef TARGET_NR_getresgid
11367     case TARGET_NR_setresgid:
11368         return get_errno(sys_setresgid(low2highgid(arg1),
11369                                        low2highgid(arg2),
11370                                        low2highgid(arg3)));
11371 #endif
11372 #ifdef TARGET_NR_getresgid
11373     case TARGET_NR_getresgid:
11374         {
11375             gid_t rgid, egid, sgid;
11376             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11377             if (!is_error(ret)) {
11378                 if (put_user_id(high2lowgid(rgid), arg1)
11379                     || put_user_id(high2lowgid(egid), arg2)
11380                     || put_user_id(high2lowgid(sgid), arg3))
11381                     return -TARGET_EFAULT;
11382             }
11383         }
11384         return ret;
11385 #endif
11386 #ifdef TARGET_NR_chown
11387     case TARGET_NR_chown:
11388         if (!(p = lock_user_string(arg1)))
11389             return -TARGET_EFAULT;
11390         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11391         unlock_user(p, arg1, 0);
11392         return ret;
11393 #endif
11394     case TARGET_NR_setuid:
11395         return get_errno(sys_setuid(low2highuid(arg1)));
11396     case TARGET_NR_setgid:
11397         return get_errno(sys_setgid(low2highgid(arg1)));
11398     case TARGET_NR_setfsuid:
11399         return get_errno(setfsuid(arg1));
11400     case TARGET_NR_setfsgid:
11401         return get_errno(setfsgid(arg1));
11402 
11403 #ifdef TARGET_NR_lchown32
11404     case TARGET_NR_lchown32:
11405         if (!(p = lock_user_string(arg1)))
11406             return -TARGET_EFAULT;
11407         ret = get_errno(lchown(p, arg2, arg3));
11408         unlock_user(p, arg1, 0);
11409         return ret;
11410 #endif
11411 #ifdef TARGET_NR_getuid32
11412     case TARGET_NR_getuid32:
11413         return get_errno(getuid());
11414 #endif
11415 
11416 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11417    /* Alpha specific */
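          /* Returns the real id in v0 and the effective id in register a4. */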
11418     case TARGET_NR_getxuid:
11419         {
11420             uid_t euid;
11421             euid = geteuid();
11422             cpu_env->ir[IR_A4] = euid;
11423         }
11424         return get_errno(getuid());
11425 #endif
11426 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11427    /* Alpha specific */
11428     case TARGET_NR_getxgid:
11429         {
11430             gid_t egid;
11431             egid = getegid();
11432             cpu_env->ir[IR_A4] = egid;
11433         }
11434         return get_errno(getgid());
11435 #endif
11436 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11437     /* Alpha specific */
11438     case TARGET_NR_osf_getsysinfo:
11439         ret = -TARGET_EOPNOTSUPP;
11440         switch (arg1) {
11441           case TARGET_GSI_IEEE_FP_CONTROL:
11442             {
11443                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11444                 uint64_t swcr = cpu_env->swcr;
11445 
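                      /*
                       * The status bits are kept only in the fpcr (see the
                       * osf_setsysinfo handler below), so fold them back into
                       * the software completion control word before returning
                       * it to the guest.
                       */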
11446                 swcr &= ~SWCR_STATUS_MASK;
11447                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11448 
11449                 if (put_user_u64(swcr, arg2))
11450                     return -TARGET_EFAULT;
11451                 ret = 0;
11452             }
11453             break;
11454 
11455           /* case GSI_IEEE_STATE_AT_SIGNAL:
11456              -- Not implemented in linux kernel.
11457              case GSI_UACPROC:
11458              -- Retrieves current unaligned access state; not much used.
11459              case GSI_PROC_TYPE:
11460              -- Retrieves implver information; surely not used.
11461              case GSI_GET_HWRPB:
11462              -- Grabs a copy of the HWRPB; surely not used.
11463           */
11464         }
11465         return ret;
11466 #endif
11467 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11468     /* Alpha specific */
11469     case TARGET_NR_osf_setsysinfo:
11470         ret = -TARGET_EOPNOTSUPP;
11471         switch (arg1) {
11472           case TARGET_SSI_IEEE_FP_CONTROL:
11473             {
11474                 uint64_t swcr, fpcr;
11475 
11476                 if (get_user_u64 (swcr, arg2)) {
11477                     return -TARGET_EFAULT;
11478                 }
11479 
11480                 /*
11481                  * The kernel calls swcr_update_status to update the
11482                  * status bits from the fpcr at every point that it
11483                  * could be queried.  Therefore, we store the status
11484                  * bits only in FPCR.
11485                  */
11486                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11487 
11488                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11489                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11490                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11491                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11492                 ret = 0;
11493             }
11494             break;
11495 
11496           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11497             {
11498                 uint64_t exc, fpcr, fex;
11499 
11500                 if (get_user_u64(exc, arg2)) {
11501                     return -TARGET_EFAULT;
11502                 }
11503                 exc &= SWCR_STATUS_MASK;
11504                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11505 
11506                 /* Old exceptions are not signaled.  */
11507                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11508                 fex = exc & ~fex;
11509                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11510                 fex &= (cpu_env)->swcr;
11511 
11512                 /* Update the hardware fpcr.  */
11513                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11514                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11515 
11516                 if (fex) {
11517                     int si_code = TARGET_FPE_FLTUNK;
11518                     target_siginfo_t info;
11519 
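                          /*
                           * The tests below run in order, so if several
                           * enabled exceptions are raised at once the last
                           * match wins; invalid operation therefore has the
                           * highest priority for si_code.
                           */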
11520                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11521                         si_code = TARGET_FPE_FLTUND;
11522                     }
11523                     if (fex & SWCR_TRAP_ENABLE_INE) {
11524                         si_code = TARGET_FPE_FLTRES;
11525                     }
11526                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11527                         si_code = TARGET_FPE_FLTUND;
11528                     }
11529                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11530                         si_code = TARGET_FPE_FLTOVF;
11531                     }
11532                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11533                         si_code = TARGET_FPE_FLTDIV;
11534                     }
11535                     if (fex & SWCR_TRAP_ENABLE_INV) {
11536                         si_code = TARGET_FPE_FLTINV;
11537                     }
11538 
11539                     info.si_signo = SIGFPE;
11540                     info.si_errno = 0;
11541                     info.si_code = si_code;
11542                     info._sifields._sigfault._addr = (cpu_env)->pc;
11543                     queue_signal(cpu_env, info.si_signo,
11544                                  QEMU_SI_FAULT, &info);
11545                 }
11546                 ret = 0;
11547             }
11548             break;
11549 
11550           /* case SSI_NVPAIRS:
11551              -- Used with SSIN_UACPROC to enable unaligned accesses.
11552              case SSI_IEEE_STATE_AT_SIGNAL:
11553              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11554              -- Not implemented in linux kernel
11555           */
11556         }
11557         return ret;
11558 #endif
11559 #ifdef TARGET_NR_osf_sigprocmask
11560     /* Alpha specific.  */
11561     case TARGET_NR_osf_sigprocmask:
11562         {
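                  /*
                   * The old OSF call takes the new mask by value in arg2 and
                   * returns the previous mask as the syscall result rather
                   * than through a pointer.
                   */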
11563             abi_ulong mask;
11564             int how;
11565             sigset_t set, oldset;
11566 
11567             switch (arg1) {
11568             case TARGET_SIG_BLOCK:
11569                 how = SIG_BLOCK;
11570                 break;
11571             case TARGET_SIG_UNBLOCK:
11572                 how = SIG_UNBLOCK;
11573                 break;
11574             case TARGET_SIG_SETMASK:
11575                 how = SIG_SETMASK;
11576                 break;
11577             default:
11578                 return -TARGET_EINVAL;
11579             }
11580             mask = arg2;
11581             target_to_host_old_sigset(&set, &mask);
11582             ret = do_sigprocmask(how, &set, &oldset);
11583             if (!ret) {
11584                 host_to_target_old_sigset(&mask, &oldset);
11585                 ret = mask;
11586             }
11587         }
11588         return ret;
11589 #endif
11590 
11591 #ifdef TARGET_NR_getgid32
11592     case TARGET_NR_getgid32:
11593         return get_errno(getgid());
11594 #endif
11595 #ifdef TARGET_NR_geteuid32
11596     case TARGET_NR_geteuid32:
11597         return get_errno(geteuid());
11598 #endif
11599 #ifdef TARGET_NR_getegid32
11600     case TARGET_NR_getegid32:
11601         return get_errno(getegid());
11602 #endif
11603 #ifdef TARGET_NR_setreuid32
11604     case TARGET_NR_setreuid32:
11605         return get_errno(setreuid(arg1, arg2));
11606 #endif
11607 #ifdef TARGET_NR_setregid32
11608     case TARGET_NR_setregid32:
11609         return get_errno(setregid(arg1, arg2));
11610 #endif
11611 #ifdef TARGET_NR_getgroups32
11612     case TARGET_NR_getgroups32:
11613         {
11614             int gidsetsize = arg1;
11615             uint32_t *target_grouplist;
11616             gid_t *grouplist;
11617             int i;
11618 
11619             grouplist = alloca(gidsetsize * sizeof(gid_t));
11620             ret = get_errno(getgroups(gidsetsize, grouplist));
11621             if (gidsetsize == 0)
11622                 return ret;
11623             if (!is_error(ret)) {
11624                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11625                 if (!target_grouplist) {
11626                     return -TARGET_EFAULT;
11627                 }
11628                 for (i = 0; i < ret; i++)
11629                     target_grouplist[i] = tswap32(grouplist[i]);
11630                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11631             }
11632         }
11633         return ret;
11634 #endif
11635 #ifdef TARGET_NR_setgroups32
11636     case TARGET_NR_setgroups32:
11637         {
11638             int gidsetsize = arg1;
11639             uint32_t *target_grouplist;
11640             gid_t *grouplist;
11641             int i;
11642 
11643             grouplist = alloca(gidsetsize * sizeof(gid_t));
11644             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11645             if (!target_grouplist) {
11646                 return -TARGET_EFAULT;
11647             }
11648             for (i = 0; i < gidsetsize; i++)
11649                 grouplist[i] = tswap32(target_grouplist[i]);
11650             unlock_user(target_grouplist, arg2, 0);
11651             return get_errno(setgroups(gidsetsize, grouplist));
11652         }
11653 #endif
11654 #ifdef TARGET_NR_fchown32
11655     case TARGET_NR_fchown32:
11656         return get_errno(fchown(arg1, arg2, arg3));
11657 #endif
11658 #ifdef TARGET_NR_setresuid32
11659     case TARGET_NR_setresuid32:
11660         return get_errno(sys_setresuid(arg1, arg2, arg3));
11661 #endif
11662 #ifdef TARGET_NR_getresuid32
11663     case TARGET_NR_getresuid32:
11664         {
11665             uid_t ruid, euid, suid;
11666             ret = get_errno(getresuid(&ruid, &euid, &suid));
11667             if (!is_error(ret)) {
11668                 if (put_user_u32(ruid, arg1)
11669                     || put_user_u32(euid, arg2)
11670                     || put_user_u32(suid, arg3))
11671                     return -TARGET_EFAULT;
11672             }
11673         }
11674         return ret;
11675 #endif
11676 #ifdef TARGET_NR_setresgid32
11677     case TARGET_NR_setresgid32:
11678         return get_errno(sys_setresgid(arg1, arg2, arg3));
11679 #endif
11680 #ifdef TARGET_NR_getresgid32
11681     case TARGET_NR_getresgid32:
11682         {
11683             gid_t rgid, egid, sgid;
11684             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11685             if (!is_error(ret)) {
11686                 if (put_user_u32(rgid, arg1)
11687                     || put_user_u32(egid, arg2)
11688                     || put_user_u32(sgid, arg3))
11689                     return -TARGET_EFAULT;
11690             }
11691         }
11692         return ret;
11693 #endif
11694 #ifdef TARGET_NR_chown32
11695     case TARGET_NR_chown32:
11696         if (!(p = lock_user_string(arg1)))
11697             return -TARGET_EFAULT;
11698         ret = get_errno(chown(p, arg2, arg3));
11699         unlock_user(p, arg1, 0);
11700         return ret;
11701 #endif
11702 #ifdef TARGET_NR_setuid32
11703     case TARGET_NR_setuid32:
11704         return get_errno(sys_setuid(arg1));
11705 #endif
11706 #ifdef TARGET_NR_setgid32
11707     case TARGET_NR_setgid32:
11708         return get_errno(sys_setgid(arg1));
11709 #endif
11710 #ifdef TARGET_NR_setfsuid32
11711     case TARGET_NR_setfsuid32:
11712         return get_errno(setfsuid(arg1));
11713 #endif
11714 #ifdef TARGET_NR_setfsgid32
11715     case TARGET_NR_setfsgid32:
11716         return get_errno(setfsgid(arg1));
11717 #endif
11718 #ifdef TARGET_NR_mincore
11719     case TARGET_NR_mincore:
11720         {
11721             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11722             if (!a) {
11723                 return -TARGET_ENOMEM;
11724             }
11725             p = lock_user_string(arg3);
11726             if (!p) {
11727                 ret = -TARGET_EFAULT;
11728             } else {
11729                 ret = get_errno(mincore(a, arg2, p));
11730                 unlock_user(p, arg3, ret);
11731             }
11732             unlock_user(a, arg1, 0);
11733         }
11734         return ret;
11735 #endif
11736 #ifdef TARGET_NR_arm_fadvise64_64
11737     case TARGET_NR_arm_fadvise64_64:
11738         /* arm_fadvise64_64 looks like fadvise64_64 but
11739          * with different argument order: fd, advice, offset, len
11740          * rather than the usual fd, offset, len, advice.
11741          * Note that offset and len are both 64-bit so appear as
11742          * pairs of 32-bit registers.
11743          */
11744         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11745                             target_offset64(arg5, arg6), arg2);
11746         return -host_to_target_errno(ret);
11747 #endif
11748 
11749 #if TARGET_ABI_BITS == 32
11750 
11751 #ifdef TARGET_NR_fadvise64_64
11752     case TARGET_NR_fadvise64_64:
11753 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11754         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11755         ret = arg2;
11756         arg2 = arg3;
11757         arg3 = arg4;
11758         arg4 = arg5;
11759         arg5 = arg6;
11760         arg6 = ret;
11761 #else
11762         /* 6 args: fd, offset (high, low), len (high, low), advice */
11763         if (regpairs_aligned(cpu_env, num)) {
11764             /* offset is in (3,4), len in (5,6) and advice in 7 */
11765             arg2 = arg3;
11766             arg3 = arg4;
11767             arg4 = arg5;
11768             arg5 = arg6;
11769             arg6 = arg7;
11770         }
11771 #endif
11772         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11773                             target_offset64(arg4, arg5), arg6);
11774         return -host_to_target_errno(ret);
11775 #endif
11776 
11777 #ifdef TARGET_NR_fadvise64
11778     case TARGET_NR_fadvise64:
11779         /* 5 args: fd, offset (high, low), len, advice */
11780         if (regpairs_aligned(cpu_env, num)) {
11781             /* offset is in (3,4), len in 5 and advice in 6 */
11782             arg2 = arg3;
11783             arg3 = arg4;
11784             arg4 = arg5;
11785             arg5 = arg6;
11786         }
11787         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11788         return -host_to_target_errno(ret);
11789 #endif
11790 
11791 #else /* not a 32-bit ABI */
11792 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11793 #ifdef TARGET_NR_fadvise64_64
11794     case TARGET_NR_fadvise64_64:
11795 #endif
11796 #ifdef TARGET_NR_fadvise64
11797     case TARGET_NR_fadvise64:
11798 #endif
11799 #ifdef TARGET_S390X
11800         switch (arg4) {
11801         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11802         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11803         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11804         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11805         default: break;
11806         }
11807 #endif
11808         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11809 #endif
11810 #endif /* end of 64-bit ABI fadvise handling */
11811 
11812 #ifdef TARGET_NR_madvise
11813     case TARGET_NR_madvise:
11814         return target_madvise(arg1, arg2, arg3);
11815 #endif
11816 #ifdef TARGET_NR_fcntl64
11817     case TARGET_NR_fcntl64:
11818     {
11819         int cmd;
11820         struct flock64 fl;
11821         from_flock64_fn *copyfrom = copy_from_user_flock64;
11822         to_flock64_fn *copyto = copy_to_user_flock64;
11823 
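              /*
               * The old ARM OABI lays out struct flock64 without the padding
               * that EABI inserts before the 64-bit fields, so it needs its
               * own copy helpers.
               */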
11824 #ifdef TARGET_ARM
11825         if (!cpu_env->eabi) {
11826             copyfrom = copy_from_user_oabi_flock64;
11827             copyto = copy_to_user_oabi_flock64;
11828         }
11829 #endif
11830 
11831         cmd = target_to_host_fcntl_cmd(arg2);
11832         if (cmd == -TARGET_EINVAL) {
11833             return cmd;
11834         }
11835 
11836         switch (arg2) {
11837         case TARGET_F_GETLK64:
11838             ret = copyfrom(&fl, arg3);
11839             if (ret) {
11840                 break;
11841             }
11842             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11843             if (ret == 0) {
11844                 ret = copyto(arg3, &fl);
11845             }
11846             break;
11847 
11848         case TARGET_F_SETLK64:
11849         case TARGET_F_SETLKW64:
11850             ret = copyfrom(&fl, arg3);
11851             if (ret) {
11852                 break;
11853             }
11854             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11855             break;
11856         default:
11857             ret = do_fcntl(arg1, arg2, arg3);
11858             break;
11859         }
11860         return ret;
11861     }
11862 #endif
11863 #ifdef TARGET_NR_cacheflush
11864     case TARGET_NR_cacheflush:
11865         /* self-modifying code is handled automatically, so nothing needed */
11866         return 0;
11867 #endif
11868 #ifdef TARGET_NR_getpagesize
11869     case TARGET_NR_getpagesize:
11870         return TARGET_PAGE_SIZE;
11871 #endif
11872     case TARGET_NR_gettid:
11873         return get_errno(sys_gettid());
11874 #ifdef TARGET_NR_readahead
11875     case TARGET_NR_readahead:
11876 #if TARGET_ABI_BITS == 32
11877         if (regpairs_aligned(cpu_env, num)) {
11878             arg2 = arg3;
11879             arg3 = arg4;
11880             arg4 = arg5;
11881         }
11882         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11883 #else
11884         ret = get_errno(readahead(arg1, arg2, arg3));
11885 #endif
11886         return ret;
11887 #endif
11888 #ifdef CONFIG_ATTR
11889 #ifdef TARGET_NR_setxattr
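          /*
           * For the xattr calls a zero value/list pointer is legitimate (the
           * size-probing convention), so those buffers are only locked when
           * the guest actually supplied one; path and name strings go through
           * lock_user_string().
           */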
11890     case TARGET_NR_listxattr:
11891     case TARGET_NR_llistxattr:
11892     {
11893         void *p, *b = 0;
11894         if (arg2) {
11895             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11896             if (!b) {
11897                 return -TARGET_EFAULT;
11898             }
11899         }
11900         p = lock_user_string(arg1);
11901         if (p) {
11902             if (num == TARGET_NR_listxattr) {
11903                 ret = get_errno(listxattr(p, b, arg3));
11904             } else {
11905                 ret = get_errno(llistxattr(p, b, arg3));
11906             }
11907         } else {
11908             ret = -TARGET_EFAULT;
11909         }
11910         unlock_user(p, arg1, 0);
11911         unlock_user(b, arg2, arg3);
11912         return ret;
11913     }
11914     case TARGET_NR_flistxattr:
11915     {
11916         void *b = 0;
11917         if (arg2) {
11918             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11919             if (!b) {
11920                 return -TARGET_EFAULT;
11921             }
11922         }
11923         ret = get_errno(flistxattr(arg1, b, arg3));
11924         unlock_user(b, arg2, arg3);
11925         return ret;
11926     }
11927     case TARGET_NR_setxattr:
11928     case TARGET_NR_lsetxattr:
11929         {
11930             void *p, *n, *v = 0;
11931             if (arg3) {
11932                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11933                 if (!v) {
11934                     return -TARGET_EFAULT;
11935                 }
11936             }
11937             p = lock_user_string(arg1);
11938             n = lock_user_string(arg2);
11939             if (p && n) {
11940                 if (num == TARGET_NR_setxattr) {
11941                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11942                 } else {
11943                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11944                 }
11945             } else {
11946                 ret = -TARGET_EFAULT;
11947             }
11948             unlock_user(p, arg1, 0);
11949             unlock_user(n, arg2, 0);
11950             unlock_user(v, arg3, 0);
11951         }
11952         return ret;
11953     case TARGET_NR_fsetxattr:
11954         {
11955             void *n, *v = 0;
11956             if (arg3) {
11957                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11958                 if (!v) {
11959                     return -TARGET_EFAULT;
11960                 }
11961             }
11962             n = lock_user_string(arg2);
11963             if (n) {
11964                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11965             } else {
11966                 ret = -TARGET_EFAULT;
11967             }
11968             unlock_user(n, arg2, 0);
11969             unlock_user(v, arg3, 0);
11970         }
11971         return ret;
11972     case TARGET_NR_getxattr:
11973     case TARGET_NR_lgetxattr:
11974         {
11975             void *p, *n, *v = 0;
11976             if (arg3) {
11977                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11978                 if (!v) {
11979                     return -TARGET_EFAULT;
11980                 }
11981             }
11982             p = lock_user_string(arg1);
11983             n = lock_user_string(arg2);
11984             if (p && n) {
11985                 if (num == TARGET_NR_getxattr) {
11986                     ret = get_errno(getxattr(p, n, v, arg4));
11987                 } else {
11988                     ret = get_errno(lgetxattr(p, n, v, arg4));
11989                 }
11990             } else {
11991                 ret = -TARGET_EFAULT;
11992             }
11993             unlock_user(p, arg1, 0);
11994             unlock_user(n, arg2, 0);
11995             unlock_user(v, arg3, arg4);
11996         }
11997         return ret;
11998     case TARGET_NR_fgetxattr:
11999         {
12000             void *n, *v = 0;
12001             if (arg3) {
12002                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12003                 if (!v) {
12004                     return -TARGET_EFAULT;
12005                 }
12006             }
12007             n = lock_user_string(arg2);
12008             if (n) {
12009                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12010             } else {
12011                 ret = -TARGET_EFAULT;
12012             }
12013             unlock_user(n, arg2, 0);
12014             unlock_user(v, arg3, arg4);
12015         }
12016         return ret;
12017     case TARGET_NR_removexattr:
12018     case TARGET_NR_lremovexattr:
12019         {
12020             void *p, *n;
12021             p = lock_user_string(arg1);
12022             n = lock_user_string(arg2);
12023             if (p && n) {
12024                 if (num == TARGET_NR_removexattr) {
12025                     ret = get_errno(removexattr(p, n));
12026                 } else {
12027                     ret = get_errno(lremovexattr(p, n));
12028                 }
12029             } else {
12030                 ret = -TARGET_EFAULT;
12031             }
12032             unlock_user(p, arg1, 0);
12033             unlock_user(n, arg2, 0);
12034         }
12035         return ret;
12036     case TARGET_NR_fremovexattr:
12037         {
12038             void *n;
12039             n = lock_user_string(arg2);
12040             if (n) {
12041                 ret = get_errno(fremovexattr(arg1, n));
12042             } else {
12043                 ret = -TARGET_EFAULT;
12044             }
12045             unlock_user(n, arg2, 0);
12046         }
12047         return ret;
12048 #endif
12049 #endif /* CONFIG_ATTR */
12050 #ifdef TARGET_NR_set_thread_area
12051     case TARGET_NR_set_thread_area:
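              /*
               * Each target stores the thread pointer wherever its kernel ABI
               * keeps it: the CP0 UserLocal register on MIPS, the PR_PID
               * special register on CRIS, a GDT entry (do_set_thread_area)
               * on 32-bit x86, and the per-thread TaskState on m68k.
               */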
12052 #if defined(TARGET_MIPS)
12053       cpu_env->active_tc.CP0_UserLocal = arg1;
12054       return 0;
12055 #elif defined(TARGET_CRIS)
12056       if (arg1 & 0xff)
12057           ret = -TARGET_EINVAL;
12058       else {
12059           cpu_env->pregs[PR_PID] = arg1;
12060           ret = 0;
12061       }
12062       return ret;
12063 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12064       return do_set_thread_area(cpu_env, arg1);
12065 #elif defined(TARGET_M68K)
12066       {
12067           TaskState *ts = cpu->opaque;
12068           ts->tp_value = arg1;
12069           return 0;
12070       }
12071 #else
12072       return -TARGET_ENOSYS;
12073 #endif
12074 #endif
12075 #ifdef TARGET_NR_get_thread_area
12076     case TARGET_NR_get_thread_area:
12077 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12078         return do_get_thread_area(cpu_env, arg1);
12079 #elif defined(TARGET_M68K)
12080         {
12081             TaskState *ts = cpu->opaque;
12082             return ts->tp_value;
12083         }
12084 #else
12085         return -TARGET_ENOSYS;
12086 #endif
12087 #endif
12088 #ifdef TARGET_NR_getdomainname
12089     case TARGET_NR_getdomainname:
12090         return -TARGET_ENOSYS;
12091 #endif
12092 
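          /*
           * The clock_* pairs below differ only in whether the guest timespec
           * carries a 32-bit or 64-bit time_t; the *64/_time64 variants use
           * the timespec64 conversion helpers.
           */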
12093 #ifdef TARGET_NR_clock_settime
12094     case TARGET_NR_clock_settime:
12095     {
12096         struct timespec ts;
12097 
12098         ret = target_to_host_timespec(&ts, arg2);
12099         if (!is_error(ret)) {
12100             ret = get_errno(clock_settime(arg1, &ts));
12101         }
12102         return ret;
12103     }
12104 #endif
12105 #ifdef TARGET_NR_clock_settime64
12106     case TARGET_NR_clock_settime64:
12107     {
12108         struct timespec ts;
12109 
12110         ret = target_to_host_timespec64(&ts, arg2);
12111         if (!is_error(ret)) {
12112             ret = get_errno(clock_settime(arg1, &ts));
12113         }
12114         return ret;
12115     }
12116 #endif
12117 #ifdef TARGET_NR_clock_gettime
12118     case TARGET_NR_clock_gettime:
12119     {
12120         struct timespec ts;
12121         ret = get_errno(clock_gettime(arg1, &ts));
12122         if (!is_error(ret)) {
12123             ret = host_to_target_timespec(arg2, &ts);
12124         }
12125         return ret;
12126     }
12127 #endif
12128 #ifdef TARGET_NR_clock_gettime64
12129     case TARGET_NR_clock_gettime64:
12130     {
12131         struct timespec ts;
12132         ret = get_errno(clock_gettime(arg1, &ts));
12133         if (!is_error(ret)) {
12134             ret = host_to_target_timespec64(arg2, &ts);
12135         }
12136         return ret;
12137     }
12138 #endif
12139 #ifdef TARGET_NR_clock_getres
12140     case TARGET_NR_clock_getres:
12141     {
12142         struct timespec ts;
12143         ret = get_errno(clock_getres(arg1, &ts));
12144         if (!is_error(ret)) {
12145             host_to_target_timespec(arg2, &ts);
12146         }
12147         return ret;
12148     }
12149 #endif
12150 #ifdef TARGET_NR_clock_getres_time64
12151     case TARGET_NR_clock_getres_time64:
12152     {
12153         struct timespec ts;
12154         ret = get_errno(clock_getres(arg1, &ts));
12155         if (!is_error(ret)) {
12156             host_to_target_timespec64(arg2, &ts);
12157         }
12158         return ret;
12159     }
12160 #endif
12161 #ifdef TARGET_NR_clock_nanosleep
12162     case TARGET_NR_clock_nanosleep:
12163     {
12164         struct timespec ts;
12165         if (target_to_host_timespec(&ts, arg3)) {
12166             return -TARGET_EFAULT;
12167         }
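              /*
               * &ts doubles as the "remaining time" buffer; it is copied back
               * to the guest below only if the sleep was interrupted.
               */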
12168         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12169                                              &ts, arg4 ? &ts : NULL));
12170         /*
12171          * If the call is interrupted by a signal handler, it fails with
12172          * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12173          * the remaining unslept time is reported back in arg4.
12174          */
12175         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12176             host_to_target_timespec(arg4, &ts)) {
12177               return -TARGET_EFAULT;
12178         }
12179 
12180         return ret;
12181     }
12182 #endif
12183 #ifdef TARGET_NR_clock_nanosleep_time64
12184     case TARGET_NR_clock_nanosleep_time64:
12185     {
12186         struct timespec ts;
12187 
12188         if (target_to_host_timespec64(&ts, arg3)) {
12189             return -TARGET_EFAULT;
12190         }
12191 
12192         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12193                                              &ts, arg4 ? &ts : NULL));
12194 
12195         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12196             host_to_target_timespec64(arg4, &ts)) {
12197             return -TARGET_EFAULT;
12198         }
12199         return ret;
12200     }
12201 #endif
12202 
12203 #if defined(TARGET_NR_set_tid_address)
12204     case TARGET_NR_set_tid_address:
12205     {
12206         TaskState *ts = cpu->opaque;
12207         ts->child_tidptr = arg1;
12208         /* Do not call host set_tid_address(); record the pointer and return the tid. */
12209         return get_errno(sys_gettid());
12210     }
12211 #endif
12212 
12213     case TARGET_NR_tkill:
12214         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12215 
12216     case TARGET_NR_tgkill:
12217         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12218                          target_to_host_signal(arg3)));
12219 
12220 #ifdef TARGET_NR_set_robust_list
12221     case TARGET_NR_set_robust_list:
12222     case TARGET_NR_get_robust_list:
12223         /* The ABI for supporting robust futexes has userspace pass
12224          * the kernel a pointer to a linked list which is updated by
12225          * userspace after the syscall; the list is walked by the kernel
12226          * when the thread exits. Since the linked list in QEMU guest
12227          * memory isn't a valid linked list for the host and we have
12228          * no way to reliably intercept the thread-death event, we can't
12229          * support these. Silently return ENOSYS so that guest userspace
12230          * falls back to a non-robust futex implementation (which should
12231          * be OK except in the corner case of the guest crashing while
12232          * holding a mutex that is shared with another process via
12233          * shared memory).
12234          */
12235         return -TARGET_ENOSYS;
12236 #endif
12237 
12238 #if defined(TARGET_NR_utimensat)
12239     case TARGET_NR_utimensat:
12240         {
12241             struct timespec *tsp, ts[2];
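                  /* A NULL times pointer means "set both timestamps to now". */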
12242             if (!arg3) {
12243                 tsp = NULL;
12244             } else {
12245                 if (target_to_host_timespec(ts, arg3)) {
12246                     return -TARGET_EFAULT;
12247                 }
12248                 if (target_to_host_timespec(ts + 1, arg3 +
12249                                             sizeof(struct target_timespec))) {
12250                     return -TARGET_EFAULT;
12251                 }
12252                 tsp = ts;
12253             }
12254             if (!arg2)
12255                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12256             else {
12257                 if (!(p = lock_user_string(arg2))) {
12258                     return -TARGET_EFAULT;
12259                 }
12260                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12261                 unlock_user(p, arg2, 0);
12262             }
12263         }
12264         return ret;
12265 #endif
12266 #ifdef TARGET_NR_utimensat_time64
12267     case TARGET_NR_utimensat_time64:
12268         {
12269             struct timespec *tsp, ts[2];
12270             if (!arg3) {
12271                 tsp = NULL;
12272             } else {
12273                 if (target_to_host_timespec64(ts, arg3)) {
12274                     return -TARGET_EFAULT;
12275                 }
12276                 if (target_to_host_timespec64(ts + 1, arg3 +
12277                                      sizeof(struct target__kernel_timespec))) {
12278                     return -TARGET_EFAULT;
12279                 }
12280                 tsp = ts;
12281             }
12282             if (!arg2)
12283                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12284             else {
12285                 p = lock_user_string(arg2);
12286                 if (!p) {
12287                     return -TARGET_EFAULT;
12288                 }
12289                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12290                 unlock_user(p, arg2, 0);
12291             }
12292         }
12293         return ret;
12294 #endif
12295 #ifdef TARGET_NR_futex
12296     case TARGET_NR_futex:
12297         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12298 #endif
12299 #ifdef TARGET_NR_futex_time64
12300     case TARGET_NR_futex_time64:
12301         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12302 #endif
12303 #ifdef CONFIG_INOTIFY
12304 #if defined(TARGET_NR_inotify_init)
12305     case TARGET_NR_inotify_init:
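              /*
               * On success, register an fd translator so that inotify_event
               * records read from the descriptor are converted to the guest
               * layout.
               */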
12306         ret = get_errno(inotify_init());
12307         if (ret >= 0) {
12308             fd_trans_register(ret, &target_inotify_trans);
12309         }
12310         return ret;
12311 #endif
12312 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12313     case TARGET_NR_inotify_init1:
12314         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12315                                           fcntl_flags_tbl)));
12316         if (ret >= 0) {
12317             fd_trans_register(ret, &target_inotify_trans);
12318         }
12319         return ret;
12320 #endif
12321 #if defined(TARGET_NR_inotify_add_watch)
12322     case TARGET_NR_inotify_add_watch:
12323         p = lock_user_string(arg2);
12324         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12325         unlock_user(p, arg2, 0);
12326         return ret;
12327 #endif
12328 #if defined(TARGET_NR_inotify_rm_watch)
12329     case TARGET_NR_inotify_rm_watch:
12330         return get_errno(inotify_rm_watch(arg1, arg2));
12331 #endif
12332 #endif
12333 
12334 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12335     case TARGET_NR_mq_open:
12336         {
12337             struct mq_attr posix_mq_attr;
12338             struct mq_attr *pposix_mq_attr;
12339             int host_flags;
12340 
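                  /* Convert the guest O_* open flags to their host values. */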
12341             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12342             pposix_mq_attr = NULL;
12343             if (arg4) {
12344                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12345                     return -TARGET_EFAULT;
12346                 }
12347                 pposix_mq_attr = &posix_mq_attr;
12348             }
12349             p = lock_user_string(arg1 - 1);
12350             if (!p) {
12351                 return -TARGET_EFAULT;
12352             }
12353             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12354             unlock_user(p, arg1, 0);
12355         }
12356         return ret;
12357 
12358     case TARGET_NR_mq_unlink:
12359         p = lock_user_string(arg1 - 1);
12360         if (!p) {
12361             return -TARGET_EFAULT;
12362         }
12363         ret = get_errno(mq_unlink(p));
12364         unlock_user(p, arg1, 0);
12365         return ret;
12366 
12367 #ifdef TARGET_NR_mq_timedsend
12368     case TARGET_NR_mq_timedsend:
12369         {
12370             struct timespec ts;
12371 
12372             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12373             if (arg5 != 0) {
12374                 if (target_to_host_timespec(&ts, arg5)) {
12375                     return -TARGET_EFAULT;
12376                 }
12377                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12378                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12379                     return -TARGET_EFAULT;
12380                 }
12381             } else {
12382                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12383             }
12384             unlock_user(p, arg2, arg3);
12385         }
12386         return ret;
12387 #endif
12388 #ifdef TARGET_NR_mq_timedsend_time64
12389     case TARGET_NR_mq_timedsend_time64:
12390         {
12391             struct timespec ts;
12392 
12393             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12394             if (arg5 != 0) {
12395                 if (target_to_host_timespec64(&ts, arg5)) {
12396                     return -TARGET_EFAULT;
12397                 }
12398                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12399                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12400                     return -TARGET_EFAULT;
12401                 }
12402             } else {
12403                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12404             }
12405             unlock_user(p, arg2, arg3);
12406         }
12407         return ret;
12408 #endif
12409 
12410 #ifdef TARGET_NR_mq_timedreceive
12411     case TARGET_NR_mq_timedreceive:
12412         {
12413             struct timespec ts;
12414             unsigned int prio;
12415 
12416             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12417             if (arg5 != 0) {
12418                 if (target_to_host_timespec(&ts, arg5)) {
12419                     return -TARGET_EFAULT;
12420                 }
12421                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12422                                                      &prio, &ts));
12423                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12424                     return -TARGET_EFAULT;
12425                 }
12426             } else {
12427                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12428                                                      &prio, NULL));
12429             }
12430             unlock_user(p, arg2, arg3);
12431             if (arg4 != 0)
12432                 put_user_u32(prio, arg4);
12433         }
12434         return ret;
12435 #endif
12436 #ifdef TARGET_NR_mq_timedreceive_time64
12437     case TARGET_NR_mq_timedreceive_time64:
12438         {
12439             struct timespec ts;
12440             unsigned int prio;
12441 
12442             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12443             if (arg5 != 0) {
12444                 if (target_to_host_timespec64(&ts, arg5)) {
12445                     return -TARGET_EFAULT;
12446                 }
12447                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12448                                                      &prio, &ts));
12449                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12450                     return -TARGET_EFAULT;
12451                 }
12452             } else {
12453                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12454                                                      &prio, NULL));
12455             }
12456             unlock_user(p, arg2, arg3);
12457             if (arg4 != 0) {
12458                 put_user_u32(prio, arg4);
12459             }
12460         }
12461         return ret;
12462 #endif
12463 
12464     /* Not implemented for now... */
12465 /*     case TARGET_NR_mq_notify: */
12466 /*         break; */
12467 
12468     case TARGET_NR_mq_getsetattr:
12469         {
12470             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
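                  /*
                   * mq_setattr() also reports the previous attributes, so arg3
                   * can be filled in from posix_mq_attr_out on both paths.
                   */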
12471             ret = 0;
12472             if (arg2 != 0) {
12473                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12474                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12475                                            &posix_mq_attr_out));
12476             } else if (arg3 != 0) {
12477                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12478             }
12479             if (ret == 0 && arg3 != 0) {
12480                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12481             }
12482         }
12483         return ret;
12484 #endif
12485 
12486 #ifdef CONFIG_SPLICE
12487 #ifdef TARGET_NR_tee
12488     case TARGET_NR_tee:
12489         {
12490             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12491         }
12492         return ret;
12493 #endif
12494 #ifdef TARGET_NR_splice
12495     case TARGET_NR_splice:
12496         {
12497             loff_t loff_in, loff_out;
12498             loff_t *ploff_in = NULL, *ploff_out = NULL;
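                  /*
                   * The in/out offsets are read from the guest before the call
                   * and written back afterwards so the guest sees the updated
                   * positions.
                   */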
12499             if (arg2) {
12500                 if (get_user_u64(loff_in, arg2)) {
12501                     return -TARGET_EFAULT;
12502                 }
12503                 ploff_in = &loff_in;
12504             }
12505             if (arg4) {
12506                 if (get_user_u64(loff_out, arg4)) {
12507                     return -TARGET_EFAULT;
12508                 }
12509                 ploff_out = &loff_out;
12510             }
12511             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12512             if (arg2) {
12513                 if (put_user_u64(loff_in, arg2)) {
12514                     return -TARGET_EFAULT;
12515                 }
12516             }
12517             if (arg4) {
12518                 if (put_user_u64(loff_out, arg4)) {
12519                     return -TARGET_EFAULT;
12520                 }
12521             }
12522         }
12523         return ret;
12524 #endif
12525 #ifdef TARGET_NR_vmsplice
12526     case TARGET_NR_vmsplice:
12527         {
12528             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12529             if (vec != NULL) {
12530                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12531                 unlock_iovec(vec, arg2, arg3, 0);
12532             } else {
12533                 ret = -host_to_target_errno(errno);
12534             }
12535         }
12536         return ret;
12537 #endif
12538 #endif /* CONFIG_SPLICE */
12539 #ifdef CONFIG_EVENTFD
12540 #if defined(TARGET_NR_eventfd)
12541     case TARGET_NR_eventfd:
12542         ret = get_errno(eventfd(arg1, 0));
12543         if (ret >= 0) {
12544             fd_trans_register(ret, &target_eventfd_trans);
12545         }
12546         return ret;
12547 #endif
12548 #if defined(TARGET_NR_eventfd2)
12549     case TARGET_NR_eventfd2:
12550     {
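              /*
               * Only O_NONBLOCK and O_CLOEXEC need translating; any remaining
               * bits are passed through for the host to validate.
               */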
12551         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12552         if (arg2 & TARGET_O_NONBLOCK) {
12553             host_flags |= O_NONBLOCK;
12554         }
12555         if (arg2 & TARGET_O_CLOEXEC) {
12556             host_flags |= O_CLOEXEC;
12557         }
12558         ret = get_errno(eventfd(arg1, host_flags));
12559         if (ret >= 0) {
12560             fd_trans_register(ret, &target_eventfd_trans);
12561         }
12562         return ret;
12563     }
12564 #endif
12565 #endif /* CONFIG_EVENTFD  */
12566 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12567     case TARGET_NR_fallocate:
12568 #if TARGET_ABI_BITS == 32
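              /* 64-bit offset and length arrive split across register pairs. */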
12569         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12570                                   target_offset64(arg5, arg6)));
12571 #else
12572         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12573 #endif
12574         return ret;
12575 #endif
12576 #if defined(CONFIG_SYNC_FILE_RANGE)
12577 #if defined(TARGET_NR_sync_file_range)
12578     case TARGET_NR_sync_file_range:
12579 #if TARGET_ABI_BITS == 32
12580 #if defined(TARGET_MIPS)
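              /*
               * MIPS o32 aligns the 64-bit offset on a register pair, which
               * shifts the remaining arguments along by one.
               */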
12581         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12582                                         target_offset64(arg5, arg6), arg7));
12583 #else
12584         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12585                                         target_offset64(arg4, arg5), arg6));
12586 #endif /* !TARGET_MIPS */
12587 #else
12588         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12589 #endif
12590         return ret;
12591 #endif
12592 #if defined(TARGET_NR_sync_file_range2) || \
12593     defined(TARGET_NR_arm_sync_file_range)
12594 #if defined(TARGET_NR_sync_file_range2)
12595     case TARGET_NR_sync_file_range2:
12596 #endif
12597 #if defined(TARGET_NR_arm_sync_file_range)
12598     case TARGET_NR_arm_sync_file_range:
12599 #endif
12600         /* This is like sync_file_range but the arguments are reordered */
12601 #if TARGET_ABI_BITS == 32
12602         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12603                                         target_offset64(arg5, arg6), arg2));
12604 #else
12605         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12606 #endif
12607         return ret;
12608 #endif
12609 #endif
12610 #if defined(TARGET_NR_signalfd4)
12611     case TARGET_NR_signalfd4:
12612         return do_signalfd4(arg1, arg2, arg4);
12613 #endif
12614 #if defined(TARGET_NR_signalfd)
12615     case TARGET_NR_signalfd:
12616         return do_signalfd4(arg1, arg2, 0);
12617 #endif
12618 #if defined(CONFIG_EPOLL)
12619 #if defined(TARGET_NR_epoll_create)
12620     case TARGET_NR_epoll_create:
12621         return get_errno(epoll_create(arg1));
12622 #endif
12623 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12624     case TARGET_NR_epoll_create1:
12625         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12626 #endif
12627 #if defined(TARGET_NR_epoll_ctl)
12628     case TARGET_NR_epoll_ctl:
12629     {
12630         struct epoll_event ep;
12631         struct epoll_event *epp = 0;
12632         if (arg4) {
12633             if (arg2 != EPOLL_CTL_DEL) {
12634                 struct target_epoll_event *target_ep;
12635                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12636                     return -TARGET_EFAULT;
12637                 }
12638                 ep.events = tswap32(target_ep->events);
12639                 /*
12640                  * The epoll_data_t union is just opaque data to the kernel,
12641                  * so we transfer all 64 bits across and need not worry what
12642                  * actual data type it is.
12643                  */
12644                 ep.data.u64 = tswap64(target_ep->data.u64);
12645                 unlock_user_struct(target_ep, arg4, 0);
12646             }
12647             /*
12648              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12649              * non-null pointer even though this argument is ignored, so a
12650              * valid pointer is passed whenever the guest supplied one.
12651              */
12652             epp = &ep;
12653         }
12654         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12655     }
12656 #endif
12657 
12658 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12659 #if defined(TARGET_NR_epoll_wait)
12660     case TARGET_NR_epoll_wait:
12661 #endif
12662 #if defined(TARGET_NR_epoll_pwait)
12663     case TARGET_NR_epoll_pwait:
12664 #endif
12665     {
12666         struct target_epoll_event *target_ep;
12667         struct epoll_event *ep;
12668         int epfd = arg1;
12669         int maxevents = arg3;
12670         int timeout = arg4;
12671 
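              /* Reject out-of-range maxevents before sizing the bounce buffers. */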
12672         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12673             return -TARGET_EINVAL;
12674         }
12675 
12676         target_ep = lock_user(VERIFY_WRITE, arg2,
12677                               maxevents * sizeof(struct target_epoll_event), 1);
12678         if (!target_ep) {
12679             return -TARGET_EFAULT;
12680         }
12681 
12682         ep = g_try_new(struct epoll_event, maxevents);
12683         if (!ep) {
12684             unlock_user(target_ep, arg2, 0);
12685             return -TARGET_ENOMEM;
12686         }
12687 
12688         switch (num) {
12689 #if defined(TARGET_NR_epoll_pwait)
12690         case TARGET_NR_epoll_pwait:
12691         {
12692             sigset_t *set = NULL;
12693 
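                  /*
                   * arg5/arg6 are the guest sigmask pointer and size; install
                   * the converted mask for the duration of the wait,
                   * sigsuspend-style.
                   */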
12694             if (arg5) {
12695                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12696                 if (ret != 0) {
12697                     break;
12698                 }
12699             }
12700 
12701             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12702                                              set, SIGSET_T_SIZE));
12703 
12704             if (set) {
12705                 finish_sigsuspend_mask(ret);
12706             }
12707             break;
12708         }
12709 #endif
12710 #if defined(TARGET_NR_epoll_wait)
12711         case TARGET_NR_epoll_wait:
12712             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12713                                              NULL, 0));
12714             break;
12715 #endif
12716         default:
12717             ret = -TARGET_ENOSYS;
12718         }
12719         if (!is_error(ret)) {
12720             int i;
12721             for (i = 0; i < ret; i++) {
12722                 target_ep[i].events = tswap32(ep[i].events);
12723                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12724             }
12725             unlock_user(target_ep, arg2,
12726                         ret * sizeof(struct target_epoll_event));
12727         } else {
12728             unlock_user(target_ep, arg2, 0);
12729         }
12730         g_free(ep);
12731         return ret;
12732     }
12733 #endif
12734 #endif
12735 #ifdef TARGET_NR_prlimit64
12736     case TARGET_NR_prlimit64:
12737     {
12738         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12739         struct target_rlimit64 *target_rnew, *target_rold;
12740         struct host_rlimit64 rnew, rold, *rnewp = 0;
12741         int resource = target_to_host_resource(arg2);
12742 
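              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
               * forwarded to the host, as they would also constrain the QEMU
               * process itself.
               */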
12743         if (arg3 && (resource != RLIMIT_AS &&
12744                      resource != RLIMIT_DATA &&
12745                      resource != RLIMIT_STACK)) {
12746             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12747                 return -TARGET_EFAULT;
12748             }
12749             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12750             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12751             unlock_user_struct(target_rnew, arg3, 0);
12752             rnewp = &rnew;
12753         }
12754 
12755         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12756         if (!is_error(ret) && arg4) {
12757             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12758                 return -TARGET_EFAULT;
12759             }
12760             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12761             target_rold->rlim_max = tswap64(rold.rlim_max);
12762             unlock_user_struct(target_rold, arg4, 1);
12763         }
12764         return ret;
12765     }
12766 #endif
12767 #ifdef TARGET_NR_gethostname
12768     case TARGET_NR_gethostname:
12769     {
12770         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12771         if (name) {
12772             ret = get_errno(gethostname(name, arg2));
12773             unlock_user(name, arg1, arg2);
12774         } else {
12775             ret = -TARGET_EFAULT;
12776         }
12777         return ret;
12778     }
12779 #endif
12780 #ifdef TARGET_NR_atomic_cmpxchg_32
12781     case TARGET_NR_atomic_cmpxchg_32:
12782     {
12783         /* should use start_exclusive from main.c */
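              /*
               * Compare the 32-bit word at guest address arg6 with arg2 and,
               * if equal, store arg1 there; the previous value is returned.
               */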
12784         abi_ulong mem_value;
12785         if (get_user_u32(mem_value, arg6)) {
12786             target_siginfo_t info;
12787             info.si_signo = SIGSEGV;
12788             info.si_errno = 0;
12789             info.si_code = TARGET_SEGV_MAPERR;
12790             info._sifields._sigfault._addr = arg6;
12791             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12792             ret = 0xdeadbeef;
12793 
12794         }
12795         if (mem_value == arg2)
12796             put_user_u32(arg1, arg6);
12797         return mem_value;
12798     }
12799 #endif
12800 #ifdef TARGET_NR_atomic_barrier
12801     case TARGET_NR_atomic_barrier:
12802         /* Like the kernel implementation and the QEMU ARM barrier,
12803            treat this as a no-op. */
12804         return 0;
12805 #endif
12806 
12807 #ifdef TARGET_NR_timer_create
12808     case TARGET_NR_timer_create:
12809     {
12810         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12811 
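              /*
               * The timer id returned to the guest is the g_posix_timers index
               * tagged with TIMER_MAGIC, which get_timer_id() later validates.
               */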
12812         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12813 
12814         int clkid = arg1;
12815         int timer_index = next_free_host_timer();
12816 
12817         if (timer_index < 0) {
12818             ret = -TARGET_EAGAIN;
12819         } else {
12820             timer_t *phtimer = g_posix_timers + timer_index;
12821 
12822             if (arg2) {
12823                 phost_sevp = &host_sevp;
12824                 ret = target_to_host_sigevent(phost_sevp, arg2);
12825                 if (ret != 0) {
12826                     return ret;
12827                 }
12828             }
12829 
12830             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12831             if (ret) {
12832                 phtimer = NULL;
12833             } else {
12834                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12835                     return -TARGET_EFAULT;
12836                 }
12837             }
12838         }
12839         return ret;
12840     }
12841 #endif
12842 
12843 #ifdef TARGET_NR_timer_settime
12844     case TARGET_NR_timer_settime:
12845     {
12846         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12847          * struct itimerspec * old_value */
12848         target_timer_t timerid = get_timer_id(arg1);
12849 
12850         if (timerid < 0) {
12851             ret = timerid;
12852         } else if (arg3 == 0) {
12853             ret = -TARGET_EINVAL;
12854         } else {
12855             timer_t htimer = g_posix_timers[timerid];
12856             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12857 
12858             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12859                 return -TARGET_EFAULT;
12860             }
12861             ret = get_errno(
12862                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12863             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12864                 return -TARGET_EFAULT;
12865             }
12866         }
12867         return ret;
12868     }
12869 #endif
12870 
12871 #ifdef TARGET_NR_timer_settime64
12872     case TARGET_NR_timer_settime64:
12873     {
12874         target_timer_t timerid = get_timer_id(arg1);
12875 
12876         if (timerid < 0) {
12877             ret = timerid;
12878         } else if (arg3 == 0) {
12879             ret = -TARGET_EINVAL;
12880         } else {
12881             timer_t htimer = g_posix_timers[timerid];
12882             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12883 
12884             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12885                 return -TARGET_EFAULT;
12886             }
12887             ret = get_errno(
12888                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12889             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12890                 return -TARGET_EFAULT;
12891             }
12892         }
12893         return ret;
12894     }
12895 #endif
12896 
12897 #ifdef TARGET_NR_timer_gettime
12898     case TARGET_NR_timer_gettime:
12899     {
12900         /* args: timer_t timerid, struct itimerspec *curr_value */
12901         target_timer_t timerid = get_timer_id(arg1);
12902 
12903         if (timerid < 0) {
12904             ret = timerid;
12905         } else if (!arg2) {
12906             ret = -TARGET_EFAULT;
12907         } else {
12908             timer_t htimer = g_posix_timers[timerid];
12909             struct itimerspec hspec;
12910             ret = get_errno(timer_gettime(htimer, &hspec));
12911 
12912             if (host_to_target_itimerspec(arg2, &hspec)) {
12913                 ret = -TARGET_EFAULT;
12914             }
12915         }
12916         return ret;
12917     }
12918 #endif
12919 
12920 #ifdef TARGET_NR_timer_gettime64
12921     case TARGET_NR_timer_gettime64:
12922     {
12923         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12924         target_timer_t timerid = get_timer_id(arg1);
12925 
12926         if (timerid < 0) {
12927             ret = timerid;
12928         } else if (!arg2) {
12929             ret = -TARGET_EFAULT;
12930         } else {
12931             timer_t htimer = g_posix_timers[timerid];
12932             struct itimerspec hspec;
12933             ret = get_errno(timer_gettime(htimer, &hspec));
12934 
12935             if (host_to_target_itimerspec64(arg2, &hspec)) {
12936                 ret = -TARGET_EFAULT;
12937             }
12938         }
12939         return ret;
12940     }
12941 #endif
12942 
12943 #ifdef TARGET_NR_timer_getoverrun
12944     case TARGET_NR_timer_getoverrun:
12945     {
12946         /* args: timer_t timerid */
12947         target_timer_t timerid = get_timer_id(arg1);
12948 
12949         if (timerid < 0) {
12950             ret = timerid;
12951         } else {
12952             timer_t htimer = g_posix_timers[timerid];
12953             ret = get_errno(timer_getoverrun(htimer));
12954         }
12955         return ret;
12956     }
12957 #endif
12958 
12959 #ifdef TARGET_NR_timer_delete
12960     case TARGET_NR_timer_delete:
12961     {
12962         /* args: timer_t timerid */
12963         target_timer_t timerid = get_timer_id(arg1);
12964 
12965         if (timerid < 0) {
12966             ret = timerid;
12967         } else {
12968             timer_t htimer = g_posix_timers[timerid];
12969             ret = get_errno(timer_delete(htimer));
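                  /* Mark the slot free for reuse by next_free_host_timer(). */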
12970             g_posix_timers[timerid] = 0;
12971         }
12972         return ret;
12973     }
12974 #endif
12975 
12976 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12977     case TARGET_NR_timerfd_create:
12978         return get_errno(timerfd_create(arg1,
12979                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12980 #endif
12981 
12982 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12983     case TARGET_NR_timerfd_gettime:
12984         {
12985             struct itimerspec its_curr;
12986 
12987             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12988 
12989             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12990                 return -TARGET_EFAULT;
12991             }
12992         }
12993         return ret;
12994 #endif
12995 
12996 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12997     case TARGET_NR_timerfd_gettime64:
12998         {
12999             struct itimerspec its_curr;
13000 
13001             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13002 
13003             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13004                 return -TARGET_EFAULT;
13005             }
13006         }
13007         return ret;
13008 #endif
13009 
13010 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13011     case TARGET_NR_timerfd_settime:
13012         {
13013             struct itimerspec its_new, its_old, *p_new;
13014 
13015             if (arg3) {
13016                 if (target_to_host_itimerspec(&its_new, arg3)) {
13017                     return -TARGET_EFAULT;
13018                 }
13019                 p_new = &its_new;
13020             } else {
13021                 p_new = NULL;
13022             }
13023 
13024             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13025 
13026             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13027                 return -TARGET_EFAULT;
13028             }
13029         }
13030         return ret;
13031 #endif
13032 
13033 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13034     case TARGET_NR_timerfd_settime64:
13035         {
13036             struct itimerspec its_new, its_old, *p_new;
13037 
13038             if (arg3) {
13039                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13040                     return -TARGET_EFAULT;
13041                 }
13042                 p_new = &its_new;
13043             } else {
13044                 p_new = NULL;
13045             }
13046 
13047             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13048 
13049             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13050                 return -TARGET_EFAULT;
13051             }
13052         }
13053         return ret;
13054 #endif
13055 
13056 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13057     case TARGET_NR_ioprio_get:
13058         return get_errno(ioprio_get(arg1, arg2));
13059 #endif
13060 
13061 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13062     case TARGET_NR_ioprio_set:
13063         return get_errno(ioprio_set(arg1, arg2, arg3));
13064 #endif
13065 
13066 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13067     case TARGET_NR_setns:
13068         return get_errno(setns(arg1, arg2));
13069 #endif
13070 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13071     case TARGET_NR_unshare:
13072         return get_errno(unshare(arg1));
13073 #endif
13074 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13075     case TARGET_NR_kcmp:
13076         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13077 #endif
13078 #ifdef TARGET_NR_swapcontext
13079     case TARGET_NR_swapcontext:
13080         /* PowerPC specific.  */
13081         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13082 #endif
13083 #ifdef TARGET_NR_memfd_create
13084     case TARGET_NR_memfd_create:
13085         p = lock_user_string(arg1);
13086         if (!p) {
13087             return -TARGET_EFAULT;
13088         }
13089         ret = get_errno(memfd_create(p, arg2));
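              /* Drop any stale translator registered for this fd number. */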
13090         fd_trans_unregister(ret);
13091         unlock_user(p, arg1, 0);
13092         return ret;
13093 #endif
13094 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13095     case TARGET_NR_membarrier:
13096         return get_errno(membarrier(arg1, arg2));
13097 #endif
13098 
13099 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13100     case TARGET_NR_copy_file_range:
13101         {
13102             loff_t inoff, outoff;
13103             loff_t *pinoff = NULL, *poutoff = NULL;
13104 
13105             if (arg2) {
13106                 if (get_user_u64(inoff, arg2)) {
13107                     return -TARGET_EFAULT;
13108                 }
13109                 pinoff = &inoff;
13110             }
13111             if (arg4) {
13112                 if (get_user_u64(outoff, arg4)) {
13113                     return -TARGET_EFAULT;
13114                 }
13115                 poutoff = &outoff;
13116             }
13117             /* Do not sign-extend the count parameter. */
13118             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13119                                                  (abi_ulong)arg5, arg6));
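                  /* Write the offsets back only if some bytes were copied. */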
13120             if (!is_error(ret) && ret > 0) {
13121                 if (arg2) {
13122                     if (put_user_u64(inoff, arg2)) {
13123                         return -TARGET_EFAULT;
13124                     }
13125                 }
13126                 if (arg4) {
13127                     if (put_user_u64(outoff, arg4)) {
13128                         return -TARGET_EFAULT;
13129                     }
13130                 }
13131             }
13132         }
13133         return ret;
13134 #endif
13135 
13136 #if defined(TARGET_NR_pivot_root)
13137     case TARGET_NR_pivot_root:
13138         {
13139             void *p2;
13140             p = lock_user_string(arg1); /* new_root */
13141             p2 = lock_user_string(arg2); /* put_old */
13142             if (!p || !p2) {
13143                 ret = -TARGET_EFAULT;
13144             } else {
13145                 ret = get_errno(pivot_root(p, p2));
13146             }
13147             unlock_user(p2, arg2, 0);
13148             unlock_user(p, arg1, 0);
13149         }
13150         return ret;
13151 #endif
13152 
13153     default:
13154         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13155         return -TARGET_ENOSYS;
13156     }
13157     return ret;
13158 }
13159 
13160 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13161                     abi_long arg2, abi_long arg3, abi_long arg4,
13162                     abi_long arg5, abi_long arg6, abi_long arg7,
13163                     abi_long arg8)
13164 {
13165     CPUState *cpu = env_cpu(cpu_env);
13166     abi_long ret;
13167 
13168 #ifdef DEBUG_ERESTARTSYS
13169     /* Debug-only code for exercising the syscall-restart code paths
13170      * in the per-architecture cpu main loops: restart every syscall
13171      * the guest makes once before letting it through.
13172      */
13173     {
13174         static bool flag;
13175         flag = !flag;
13176         if (flag) {
13177             return -QEMU_ERESTARTSYS;
13178         }
13179     }
13180 #endif
13181 
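          /* Let tracing/plugin hooks observe the syscall and, later, its result. */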
13182     record_syscall_start(cpu, num, arg1,
13183                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13184 
13185     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13186         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13187     }
13188 
13189     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13190                       arg5, arg6, arg7, arg8);
13191 
13192     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13193         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13194                           arg3, arg4, arg5, arg6);
13195     }
13196 
13197     record_syscall_return(cpu, num, ret);
13198     return ret;
13199 }
13200