xref: /qemu/linux-user/syscall.c (revision 785ea711)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
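
/*
 * Illustrative sketch (not part of the original source): how the masks
 * above would typically be applied to classify a set of guest clone flags.
 * The helper name and return convention are hypothetical; the real checks
 * are performed inside do_fork().
 */
#if 0
static int classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* pthread_create()-like: reject any bit outside the supported set */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -1 : 1;
    }
    if (!(flags & CLONE_THREAD_FLAGS)) {
        /* fork()-like: likewise reject unsupported bits */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? -1 : 0;
    }
    return -1; /* partial thread-flag combinations are not supported */
}
#endif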
187 
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
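/*
 * For illustration only (not from the original source): an invocation such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * expands to roughly
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * where __NR_sys_getdents is #defined just below to the host __NR_getdents.
 */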
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * Otherwise we emulate the guest getdents with the host getdents if available.
293  */
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
368 static const bitmask_transtbl fcntl_flags_tbl[] = {
369   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382 #if defined(O_DIRECT)
383   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384 #endif
385 #if defined(O_NOATIME)
386   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387 #endif
388 #if defined(O_CLOEXEC)
389   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390 #endif
391 #if defined(O_PATH)
392   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393 #endif
394 #if defined(O_TMPFILE)
395   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396 #endif
397   /* Don't terminate the list prematurely on 64-bit host+guest.  */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400 #endif
401   { 0, 0, 0, 0 }
402 };
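
/*
 * Hedged usage sketch (not part of the original source): the table above is
 * intended for the generic bitmask conversion helpers, along the lines of
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     int guest_flags = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 *
 * Each row gives (target_mask, target_bits, host_mask, host_bits); rows such
 * as the TARGET_O_ACCMODE entries translate multi-bit fields rather than
 * single flag bits.
 */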
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
412 static int sys_utimensat(int dirfd, const char *pathname,
413                          const struct timespec times[2], int flags)
414 {
415     errno = ENOSYS;
416     return -1;
417 }
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
427 static int sys_renameat2(int oldfd, const char *old,
428                          int newfd, const char *new, int flags)
429 {
430     if (flags == 0) {
431         return renameat(oldfd, old, newfd, new);
432     }
433     errno = ENOSYS;
434     return -1;
435 }
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443 static int sys_inotify_init(void)
444 {
445   return (inotify_init());
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
450 {
451   return (inotify_add_watch(fd, pathname, mask));
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455 static int sys_inotify_rm_watch(int fd, int32_t wd)
456 {
457   return (inotify_rm_watch(fd, wd));
458 }
459 #endif
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462 static int sys_inotify_init1(int flags)
463 {
464   return (inotify_init1(flags));
465 }
466 #endif
467 #endif
468 #else
469 /* Userspace can usually get by at runtime without inotify support */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be the one used by the underlying syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers[32] = { 0, } ;
495 
496 static inline int next_free_host_timer(void)
497 {
498     int k ;
499     /* FIXME: Does finding the next free slot require a lock? */
500     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501         if (g_posix_timers[k] == 0) {
502             g_posix_timers[k] = (timer_t) 1;
503             return k;
504         }
505     }
506     return -1;
507 }
508 #endif
509 
510 static inline int host_to_target_errno(int host_errno)
511 {
512     switch (host_errno) {
513 #define E(X)  case X: return TARGET_##X;
514 #include "errnos.c.inc"
515 #undef E
516     default:
517         return host_errno;
518     }
519 }
520 
521 static inline int target_to_host_errno(int target_errno)
522 {
523     switch (target_errno) {
524 #define E(X)  case TARGET_##X: return X;
525 #include "errnos.c.inc"
526 #undef E
527     default:
528         return target_errno;
529     }
530 }
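
/*
 * Illustrative note (assumption, not from the original source): errnos.c.inc
 * is expected to consist of one entry per errno value, e.g.
 *
 *     E(EPERM)
 *     E(ENOENT)
 *     ...
 *
 * so each inclusion above expands into one "case" per errno, mapping host
 * values to TARGET_* values in one function and back in the other.
 */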
531 
532 static inline abi_long get_errno(abi_long ret)
533 {
534     if (ret == -1)
535         return -host_to_target_errno(errno);
536     else
537         return ret;
538 }
539 
540 const char *target_strerror(int err)
541 {
542     if (err == TARGET_ERESTARTSYS) {
543         return "To be restarted";
544     }
545     if (err == TARGET_QEMU_ESIGRETURN) {
546         return "Successful exit from sigreturn";
547     }
548 
549     return strerror(target_to_host_errno(err));
550 }
551 
552 #define safe_syscall0(type, name) \
553 static type safe_##name(void) \
554 { \
555     return safe_syscall(__NR_##name); \
556 }
557 
558 #define safe_syscall1(type, name, type1, arg1) \
559 static type safe_##name(type1 arg1) \
560 { \
561     return safe_syscall(__NR_##name, arg1); \
562 }
563 
564 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
565 static type safe_##name(type1 arg1, type2 arg2) \
566 { \
567     return safe_syscall(__NR_##name, arg1, arg2); \
568 }
569 
570 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
571 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
572 { \
573     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
574 }
575 
576 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
577     type4, arg4) \
578 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
579 { \
580     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
581 }
582 
583 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
584     type4, arg4, type5, arg5) \
585 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
586     type5 arg5) \
587 { \
588     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
589 }
590 
591 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
592     type4, arg4, type5, arg5, type6, arg6) \
593 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
594     type5 arg5, type6 arg6) \
595 { \
596     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
597 }
598 
599 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
600 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
601 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
602               int, flags, mode_t, mode)
603 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
604 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
605               struct rusage *, rusage)
606 #endif
607 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
608               int, options, struct rusage *, rusage)
609 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
610 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
611     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
612 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
613               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
614 #endif
615 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
616 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
617               struct timespec *, tsp, const sigset_t *, sigmask,
618               size_t, sigsetsize)
619 #endif
620 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
621               int, maxevents, int, timeout, const sigset_t *, sigmask,
622               size_t, sigsetsize)
623 #if defined(__NR_futex)
624 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
625               const struct timespec *,timeout,int *,uaddr2,int,val3)
626 #endif
627 #if defined(__NR_futex_time64)
628 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
629               const struct timespec *,timeout,int *,uaddr2,int,val3)
630 #endif
631 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
632 safe_syscall2(int, kill, pid_t, pid, int, sig)
633 safe_syscall2(int, tkill, int, tid, int, sig)
634 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
635 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
636 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
637 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
638               unsigned long, pos_l, unsigned long, pos_h)
639 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
640               unsigned long, pos_l, unsigned long, pos_h)
641 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
642               socklen_t, addrlen)
643 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
644               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
645 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
646               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
647 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
648 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
649 safe_syscall2(int, flock, int, fd, int, operation)
650 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
651 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
652               const struct timespec *, uts, size_t, sigsetsize)
653 #endif
654 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
655               int, flags)
656 #if defined(TARGET_NR_nanosleep)
657 safe_syscall2(int, nanosleep, const struct timespec *, req,
658               struct timespec *, rem)
659 #endif
660 #if defined(TARGET_NR_clock_nanosleep) || \
661     defined(TARGET_NR_clock_nanosleep_time64)
662 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
663               const struct timespec *, req, struct timespec *, rem)
664 #endif
665 #ifdef __NR_ipc
666 #ifdef __s390x__
667 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
668               void *, ptr)
669 #else
670 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
671               void *, ptr, long, fifth)
672 #endif
673 #endif
674 #ifdef __NR_msgsnd
675 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
676               int, flags)
677 #endif
678 #ifdef __NR_msgrcv
679 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
680               long, msgtype, int, flags)
681 #endif
682 #ifdef __NR_semtimedop
683 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
684               unsigned, nsops, const struct timespec *, timeout)
685 #endif
686 #if defined(TARGET_NR_mq_timedsend) || \
687     defined(TARGET_NR_mq_timedsend_time64)
688 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
689               size_t, len, unsigned, prio, const struct timespec *, timeout)
690 #endif
691 #if defined(TARGET_NR_mq_timedreceive) || \
692     defined(TARGET_NR_mq_timedreceive_time64)
693 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
694               size_t, len, unsigned *, prio, const struct timespec *, timeout)
695 #endif
696 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
697 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
698               int, outfd, loff_t *, poutoff, size_t, length,
699               unsigned int, flags)
700 #endif
701 
702 /* We do ioctl like this rather than via safe_syscall3 to preserve the
703  * "third argument might be integer or pointer or not present" behaviour of
704  * the libc function.
705  */
706 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
707 /* Similarly for fcntl. Note that callers must always:
708  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
709  *  use the flock64 struct rather than unsuffixed flock
710  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
711  */
712 #ifdef __NR_fcntl64
713 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
714 #else
715 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
716 #endif
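
/*
 * Hedged usage sketch (not from the original source), following the rules in
 * the comment above: query a file lock through safe_fcntl() using the 64-bit
 * interface regardless of host word size.
 */
#if 0
static int example_get_lock(int fd)
{
    struct flock64 fl = {
        .l_type = F_RDLCK,
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 0,     /* 0 means "to end of file" */
    };
    return safe_fcntl(fd, F_GETLK64, &fl);
}
#endif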
717 
718 static inline int host_to_target_sock_type(int host_type)
719 {
720     int target_type;
721 
722     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
723     case SOCK_DGRAM:
724         target_type = TARGET_SOCK_DGRAM;
725         break;
726     case SOCK_STREAM:
727         target_type = TARGET_SOCK_STREAM;
728         break;
729     default:
730         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
731         break;
732     }
733 
734 #if defined(SOCK_CLOEXEC)
735     if (host_type & SOCK_CLOEXEC) {
736         target_type |= TARGET_SOCK_CLOEXEC;
737     }
738 #endif
739 
740 #if defined(SOCK_NONBLOCK)
741     if (host_type & SOCK_NONBLOCK) {
742         target_type |= TARGET_SOCK_NONBLOCK;
743     }
744 #endif
745 
746     return target_type;
747 }
748 
749 static abi_ulong target_brk;
750 static abi_ulong target_original_brk;
751 static abi_ulong brk_page;
752 
753 void target_set_brk(abi_ulong new_brk)
754 {
755     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
756     brk_page = HOST_PAGE_ALIGN(target_brk);
757 }
758 
759 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
760 #define DEBUGF_BRK(message, args...)
761 
762 /* do_brk() must return target values and target errnos. */
763 abi_long do_brk(abi_ulong new_brk)
764 {
765     abi_long mapped_addr;
766     abi_ulong new_alloc_size;
767 
768     /* brk pointers are always untagged */
769 
770     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
771 
772     if (!new_brk) {
773         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
774         return target_brk;
775     }
776     if (new_brk < target_original_brk) {
777         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
778                    target_brk);
779         return target_brk;
780     }
781 
782     /* If the new brk is less than the highest page reserved to the
783      * target heap allocation, set it and we're almost done...  */
784     if (new_brk <= brk_page) {
785         /* Heap contents are initialized to zero, as for anonymous
786          * mapped pages.  */
787         if (new_brk > target_brk) {
788             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
789         }
790         target_brk = new_brk;
791         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
792         return target_brk;
793     }
794 
795     /* We need to allocate more memory after the brk... Note that
796      * we don't use MAP_FIXED because that will map over the top of
797      * any existing mapping (like the one with the host libc or qemu
798      * itself); instead we treat "mapped but at wrong address" as
799      * a failure and unmap again.
800      */
801     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
802     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
803                                         PROT_READ|PROT_WRITE,
804                                         MAP_ANON|MAP_PRIVATE, 0, 0));
805 
806     if (mapped_addr == brk_page) {
807         /* Heap contents are initialized to zero, as for anonymous
808          * mapped pages.  Technically the new pages are already
809          * initialized to zero since they *are* anonymous mapped
810          * pages, however we have to take care with the contents that
811          * come from the remaining part of the previous page: it may
812          * contain garbage data from previous heap usage (the heap may
813          * have grown and then shrunk).  */
814         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
815 
816         target_brk = new_brk;
817         brk_page = HOST_PAGE_ALIGN(target_brk);
818         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
819             target_brk);
820         return target_brk;
821     } else if (mapped_addr != -1) {
822         /* Mapped but at wrong address, meaning there wasn't actually
823          * enough space for this brk.
824          */
825         target_munmap(mapped_addr, new_alloc_size);
826         mapped_addr = -1;
827         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
828     } else {
830         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
831     }
832 
833 #if defined(TARGET_ALPHA)
834     /* We (partially) emulate OSF/1 on Alpha, which requires we
835        return a proper errno, not an unchanged brk value.  */
836     return -TARGET_ENOMEM;
837 #endif
838     /* For everything else, return the previous break. */
839     return target_brk;
840 }
841 
842 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
843     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
844 static inline abi_long copy_from_user_fdset(fd_set *fds,
845                                             abi_ulong target_fds_addr,
846                                             int n)
847 {
848     int i, nw, j, k;
849     abi_ulong b, *target_fds;
850 
851     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
852     if (!(target_fds = lock_user(VERIFY_READ,
853                                  target_fds_addr,
854                                  sizeof(abi_ulong) * nw,
855                                  1)))
856         return -TARGET_EFAULT;
857 
858     FD_ZERO(fds);
859     k = 0;
860     for (i = 0; i < nw; i++) {
861         /* grab the abi_ulong */
862         __get_user(b, &target_fds[i]);
863         for (j = 0; j < TARGET_ABI_BITS; j++) {
864             /* check the bit inside the abi_ulong */
865             if ((b >> j) & 1)
866                 FD_SET(k, fds);
867             k++;
868         }
869     }
870 
871     unlock_user(target_fds, target_fds_addr, 0);
872 
873     return 0;
874 }
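
/*
 * Worked example (illustrative, not from the original source): for a 32-bit
 * guest (TARGET_ABI_BITS == 32) and n == 40, nw == DIV_ROUND_UP(40, 32) == 2
 * abi_ulong words are read from guest memory; guest fd 35 is bit 3 of the
 * second word (35 == 32 + 3), and __get_user() hides any byte-order
 * difference between guest and host.
 */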
875 
876 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
877                                                  abi_ulong target_fds_addr,
878                                                  int n)
879 {
880     if (target_fds_addr) {
881         if (copy_from_user_fdset(fds, target_fds_addr, n))
882             return -TARGET_EFAULT;
883         *fds_ptr = fds;
884     } else {
885         *fds_ptr = NULL;
886     }
887     return 0;
888 }
889 
890 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
891                                           const fd_set *fds,
892                                           int n)
893 {
894     int i, nw, j, k;
895     abi_long v;
896     abi_ulong *target_fds;
897 
898     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
899     if (!(target_fds = lock_user(VERIFY_WRITE,
900                                  target_fds_addr,
901                                  sizeof(abi_ulong) * nw,
902                                  0)))
903         return -TARGET_EFAULT;
904 
905     k = 0;
906     for (i = 0; i < nw; i++) {
907         v = 0;
908         for (j = 0; j < TARGET_ABI_BITS; j++) {
909             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
910             k++;
911         }
912         __put_user(v, &target_fds[i]);
913     }
914 
915     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
916 
917     return 0;
918 }
919 #endif
920 
921 #if defined(__alpha__)
922 #define HOST_HZ 1024
923 #else
924 #define HOST_HZ 100
925 #endif
926 
927 static inline abi_long host_to_target_clock_t(long ticks)
928 {
929 #if HOST_HZ == TARGET_HZ
930     return ticks;
931 #else
932     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
933 #endif
934 }
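
/*
 * Worked example (illustrative): with HOST_HZ == 100 and TARGET_HZ == 1024
 * (e.g. an Alpha guest on an x86 host), 250 host ticks convert to
 * 250 * 1024 / 100 == 2560 target ticks; the int64_t cast keeps the
 * intermediate product from overflowing a 32-bit long.
 */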
935 
936 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
937                                              const struct rusage *rusage)
938 {
939     struct target_rusage *target_rusage;
940 
941     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
942         return -TARGET_EFAULT;
943     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
944     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
945     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
946     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
947     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
948     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
949     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
950     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
951     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
952     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
953     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
954     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
955     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
956     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
957     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
958     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
959     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
960     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
961     unlock_user_struct(target_rusage, target_addr, 1);
962 
963     return 0;
964 }
965 
966 #ifdef TARGET_NR_setrlimit
967 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
968 {
969     abi_ulong target_rlim_swap;
970     rlim_t result;
971 
972     target_rlim_swap = tswapal(target_rlim);
973     if (target_rlim_swap == TARGET_RLIM_INFINITY)
974         return RLIM_INFINITY;
975 
976     result = target_rlim_swap;
977     if (target_rlim_swap != (rlim_t)result)
978         return RLIM_INFINITY;
979 
980     return result;
981 }
982 #endif
983 
984 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
985 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
986 {
987     abi_ulong target_rlim_swap;
988     abi_ulong result;
989 
990     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
991         target_rlim_swap = TARGET_RLIM_INFINITY;
992     else
993         target_rlim_swap = rlim;
994     result = tswapal(target_rlim_swap);
995 
996     return result;
997 }
998 #endif
999 
1000 static inline int target_to_host_resource(int code)
1001 {
1002     switch (code) {
1003     case TARGET_RLIMIT_AS:
1004         return RLIMIT_AS;
1005     case TARGET_RLIMIT_CORE:
1006         return RLIMIT_CORE;
1007     case TARGET_RLIMIT_CPU:
1008         return RLIMIT_CPU;
1009     case TARGET_RLIMIT_DATA:
1010         return RLIMIT_DATA;
1011     case TARGET_RLIMIT_FSIZE:
1012         return RLIMIT_FSIZE;
1013     case TARGET_RLIMIT_LOCKS:
1014         return RLIMIT_LOCKS;
1015     case TARGET_RLIMIT_MEMLOCK:
1016         return RLIMIT_MEMLOCK;
1017     case TARGET_RLIMIT_MSGQUEUE:
1018         return RLIMIT_MSGQUEUE;
1019     case TARGET_RLIMIT_NICE:
1020         return RLIMIT_NICE;
1021     case TARGET_RLIMIT_NOFILE:
1022         return RLIMIT_NOFILE;
1023     case TARGET_RLIMIT_NPROC:
1024         return RLIMIT_NPROC;
1025     case TARGET_RLIMIT_RSS:
1026         return RLIMIT_RSS;
1027     case TARGET_RLIMIT_RTPRIO:
1028         return RLIMIT_RTPRIO;
1029     case TARGET_RLIMIT_SIGPENDING:
1030         return RLIMIT_SIGPENDING;
1031     case TARGET_RLIMIT_STACK:
1032         return RLIMIT_STACK;
1033     default:
1034         return code;
1035     }
1036 }
1037 
1038 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1039                                               abi_ulong target_tv_addr)
1040 {
1041     struct target_timeval *target_tv;
1042 
1043     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1044         return -TARGET_EFAULT;
1045     }
1046 
1047     __get_user(tv->tv_sec, &target_tv->tv_sec);
1048     __get_user(tv->tv_usec, &target_tv->tv_usec);
1049 
1050     unlock_user_struct(target_tv, target_tv_addr, 0);
1051 
1052     return 0;
1053 }
1054 
1055 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1056                                             const struct timeval *tv)
1057 {
1058     struct target_timeval *target_tv;
1059 
1060     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1061         return -TARGET_EFAULT;
1062     }
1063 
1064     __put_user(tv->tv_sec, &target_tv->tv_sec);
1065     __put_user(tv->tv_usec, &target_tv->tv_usec);
1066 
1067     unlock_user_struct(target_tv, target_tv_addr, 1);
1068 
1069     return 0;
1070 }
1071 
1072 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1073 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1074                                                 abi_ulong target_tv_addr)
1075 {
1076     struct target__kernel_sock_timeval *target_tv;
1077 
1078     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1079         return -TARGET_EFAULT;
1080     }
1081 
1082     __get_user(tv->tv_sec, &target_tv->tv_sec);
1083     __get_user(tv->tv_usec, &target_tv->tv_usec);
1084 
1085     unlock_user_struct(target_tv, target_tv_addr, 0);
1086 
1087     return 0;
1088 }
1089 #endif
1090 
1091 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1092                                               const struct timeval *tv)
1093 {
1094     struct target__kernel_sock_timeval *target_tv;
1095 
1096     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1097         return -TARGET_EFAULT;
1098     }
1099 
1100     __put_user(tv->tv_sec, &target_tv->tv_sec);
1101     __put_user(tv->tv_usec, &target_tv->tv_usec);
1102 
1103     unlock_user_struct(target_tv, target_tv_addr, 1);
1104 
1105     return 0;
1106 }
1107 
1108 #if defined(TARGET_NR_futex) || \
1109     defined(TARGET_NR_rt_sigtimedwait) || \
1110     defined(TARGET_NR_pselect6) || \
1111     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1112     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1113     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1114     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1115     defined(TARGET_NR_timer_settime) || \
1116     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1117 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1118                                                abi_ulong target_addr)
1119 {
1120     struct target_timespec *target_ts;
1121 
1122     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1123         return -TARGET_EFAULT;
1124     }
1125     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1126     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1127     unlock_user_struct(target_ts, target_addr, 0);
1128     return 0;
1129 }
1130 #endif
1131 
1132 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1133     defined(TARGET_NR_timer_settime64) || \
1134     defined(TARGET_NR_mq_timedsend_time64) || \
1135     defined(TARGET_NR_mq_timedreceive_time64) || \
1136     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1137     defined(TARGET_NR_clock_nanosleep_time64) || \
1138     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1139     defined(TARGET_NR_utimensat) || \
1140     defined(TARGET_NR_utimensat_time64) || \
1141     defined(TARGET_NR_semtimedop_time64) || \
1142     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1143 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1144                                                  abi_ulong target_addr)
1145 {
1146     struct target__kernel_timespec *target_ts;
1147 
1148     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1149         return -TARGET_EFAULT;
1150     }
1151     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1152     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1153     /* in 32bit mode, this drops the padding */
1154     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1155     unlock_user_struct(target_ts, target_addr, 0);
1156     return 0;
1157 }
1158 #endif
1159 
1160 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1161                                                struct timespec *host_ts)
1162 {
1163     struct target_timespec *target_ts;
1164 
1165     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1166         return -TARGET_EFAULT;
1167     }
1168     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1169     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1170     unlock_user_struct(target_ts, target_addr, 1);
1171     return 0;
1172 }
1173 
1174 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1175                                                  struct timespec *host_ts)
1176 {
1177     struct target__kernel_timespec *target_ts;
1178 
1179     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1180         return -TARGET_EFAULT;
1181     }
1182     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1183     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1184     unlock_user_struct(target_ts, target_addr, 1);
1185     return 0;
1186 }
1187 
1188 #if defined(TARGET_NR_gettimeofday)
1189 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1190                                              struct timezone *tz)
1191 {
1192     struct target_timezone *target_tz;
1193 
1194     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1195         return -TARGET_EFAULT;
1196     }
1197 
1198     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1199     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1200 
1201     unlock_user_struct(target_tz, target_tz_addr, 1);
1202 
1203     return 0;
1204 }
1205 #endif
1206 
1207 #if defined(TARGET_NR_settimeofday)
1208 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1209                                                abi_ulong target_tz_addr)
1210 {
1211     struct target_timezone *target_tz;
1212 
1213     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1214         return -TARGET_EFAULT;
1215     }
1216 
1217     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1218     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1219 
1220     unlock_user_struct(target_tz, target_tz_addr, 0);
1221 
1222     return 0;
1223 }
1224 #endif
1225 
1226 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1227 #include <mqueue.h>
1228 
1229 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1230                                               abi_ulong target_mq_attr_addr)
1231 {
1232     struct target_mq_attr *target_mq_attr;
1233 
1234     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1235                           target_mq_attr_addr, 1))
1236         return -TARGET_EFAULT;
1237 
1238     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1239     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1240     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1241     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1242 
1243     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1244 
1245     return 0;
1246 }
1247 
1248 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1249                                             const struct mq_attr *attr)
1250 {
1251     struct target_mq_attr *target_mq_attr;
1252 
1253     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1254                           target_mq_attr_addr, 0))
1255         return -TARGET_EFAULT;
1256 
1257     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1258     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1259     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1260     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1261 
1262     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1263 
1264     return 0;
1265 }
1266 #endif
1267 
1268 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1269 /* do_select() must return target values and target errnos. */
1270 static abi_long do_select(int n,
1271                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1272                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1273 {
1274     fd_set rfds, wfds, efds;
1275     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1276     struct timeval tv;
1277     struct timespec ts, *ts_ptr;
1278     abi_long ret;
1279 
1280     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1281     if (ret) {
1282         return ret;
1283     }
1284     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1285     if (ret) {
1286         return ret;
1287     }
1288     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1289     if (ret) {
1290         return ret;
1291     }
1292 
1293     if (target_tv_addr) {
1294         if (copy_from_user_timeval(&tv, target_tv_addr))
1295             return -TARGET_EFAULT;
1296         ts.tv_sec = tv.tv_sec;
1297         ts.tv_nsec = tv.tv_usec * 1000;
1298         ts_ptr = &ts;
1299     } else {
1300         ts_ptr = NULL;
1301     }
1302 
1303     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1304                                   ts_ptr, NULL));
1305 
1306     if (!is_error(ret)) {
1307         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1308             return -TARGET_EFAULT;
1309         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1310             return -TARGET_EFAULT;
1311         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1312             return -TARGET_EFAULT;
1313 
1314         if (target_tv_addr) {
1315             tv.tv_sec = ts.tv_sec;
1316             tv.tv_usec = ts.tv_nsec / 1000;
1317             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1318                 return -TARGET_EFAULT;
1319             }
1320         }
1321     }
1322 
1323     return ret;
1324 }
1325 
1326 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1327 static abi_long do_old_select(abi_ulong arg1)
1328 {
1329     struct target_sel_arg_struct *sel;
1330     abi_ulong inp, outp, exp, tvp;
1331     long nsel;
1332 
1333     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1334         return -TARGET_EFAULT;
1335     }
1336 
1337     nsel = tswapal(sel->n);
1338     inp = tswapal(sel->inp);
1339     outp = tswapal(sel->outp);
1340     exp = tswapal(sel->exp);
1341     tvp = tswapal(sel->tvp);
1342 
1343     unlock_user_struct(sel, arg1, 0);
1344 
1345     return do_select(nsel, inp, outp, exp, tvp);
1346 }
1347 #endif
1348 #endif
1349 
1350 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1351 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1352                             abi_long arg4, abi_long arg5, abi_long arg6,
1353                             bool time64)
1354 {
1355     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1356     fd_set rfds, wfds, efds;
1357     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1358     struct timespec ts, *ts_ptr;
1359     abi_long ret;
1360 
1361     /*
1362      * The 6th arg is actually two args smashed together,
1363      * so we cannot use the C library.
1364      */
1365     sigset_t set;
1366     struct {
1367         sigset_t *set;
1368         size_t size;
1369     } sig, *sig_ptr;
1370 
1371     abi_ulong arg_sigset, arg_sigsize, *arg7;
1372     target_sigset_t *target_sigset;
1373 
1374     n = arg1;
1375     rfd_addr = arg2;
1376     wfd_addr = arg3;
1377     efd_addr = arg4;
1378     ts_addr = arg5;
1379 
1380     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1381     if (ret) {
1382         return ret;
1383     }
1384     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1385     if (ret) {
1386         return ret;
1387     }
1388     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1389     if (ret) {
1390         return ret;
1391     }
1392 
1393     /*
1394      * This takes a timespec, and not a timeval, so we cannot
1395      * use the do_select() helper ...
1396      */
1397     if (ts_addr) {
1398         if (time64) {
1399             if (target_to_host_timespec64(&ts, ts_addr)) {
1400                 return -TARGET_EFAULT;
1401             }
1402         } else {
1403             if (target_to_host_timespec(&ts, ts_addr)) {
1404                 return -TARGET_EFAULT;
1405             }
1406         }
1407         ts_ptr = &ts;
1408     } else {
1409         ts_ptr = NULL;
1410     }
1411 
1412     /* Extract the two packed args for the sigset */
1413     if (arg6) {
1414         sig_ptr = &sig;
1415         sig.size = SIGSET_T_SIZE;
1416 
1417         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1418         if (!arg7) {
1419             return -TARGET_EFAULT;
1420         }
1421         arg_sigset = tswapal(arg7[0]);
1422         arg_sigsize = tswapal(arg7[1]);
1423         unlock_user(arg7, arg6, 0);
1424 
1425         if (arg_sigset) {
1426             sig.set = &set;
1427             if (arg_sigsize != sizeof(*target_sigset)) {
1428                 /* Like the kernel, we enforce correct size sigsets */
1429                 return -TARGET_EINVAL;
1430             }
1431             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1432                                       sizeof(*target_sigset), 1);
1433             if (!target_sigset) {
1434                 return -TARGET_EFAULT;
1435             }
1436             target_to_host_sigset(&set, target_sigset);
1437             unlock_user(target_sigset, arg_sigset, 0);
1438         } else {
1439             sig.set = NULL;
1440         }
1441     } else {
1442         sig_ptr = NULL;
1443     }
1444 
1445     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1446                                   ts_ptr, sig_ptr));
1447 
1448     if (!is_error(ret)) {
1449         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1450             return -TARGET_EFAULT;
1451         }
1452         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1453             return -TARGET_EFAULT;
1454         }
1455         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1456             return -TARGET_EFAULT;
1457         }
1458         if (time64) {
1459             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1460                 return -TARGET_EFAULT;
1461             }
1462         } else {
1463             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1464                 return -TARGET_EFAULT;
1465             }
1466         }
1467     }
1468     return ret;
1469 }
1470 #endif
1471 
1472 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1473     defined(TARGET_NR_ppoll_time64)
1474 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1475                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1476 {
1477     struct target_pollfd *target_pfd;
1478     unsigned int nfds = arg2;
1479     struct pollfd *pfd;
1480     unsigned int i;
1481     abi_long ret;
1482 
1483     pfd = NULL;
1484     target_pfd = NULL;
1485     if (nfds) {
1486         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1487             return -TARGET_EINVAL;
1488         }
1489         target_pfd = lock_user(VERIFY_WRITE, arg1,
1490                                sizeof(struct target_pollfd) * nfds, 1);
1491         if (!target_pfd) {
1492             return -TARGET_EFAULT;
1493         }
1494 
1495         pfd = alloca(sizeof(struct pollfd) * nfds);
1496         for (i = 0; i < nfds; i++) {
1497             pfd[i].fd = tswap32(target_pfd[i].fd);
1498             pfd[i].events = tswap16(target_pfd[i].events);
1499         }
1500     }
1501     if (ppoll) {
1502         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1503         target_sigset_t *target_set;
1504         sigset_t _set, *set = &_set;
1505 
1506         if (arg3) {
1507             if (time64) {
1508                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1509                     unlock_user(target_pfd, arg1, 0);
1510                     return -TARGET_EFAULT;
1511                 }
1512             } else {
1513                 if (target_to_host_timespec(timeout_ts, arg3)) {
1514                     unlock_user(target_pfd, arg1, 0);
1515                     return -TARGET_EFAULT;
1516                 }
1517             }
1518         } else {
1519             timeout_ts = NULL;
1520         }
1521 
1522         if (arg4) {
1523             if (arg5 != sizeof(target_sigset_t)) {
1524                 unlock_user(target_pfd, arg1, 0);
1525                 return -TARGET_EINVAL;
1526             }
1527 
1528             target_set = lock_user(VERIFY_READ, arg4,
1529                                    sizeof(target_sigset_t), 1);
1530             if (!target_set) {
1531                 unlock_user(target_pfd, arg1, 0);
1532                 return -TARGET_EFAULT;
1533             }
1534             target_to_host_sigset(set, target_set);
1535         } else {
1536             set = NULL;
1537         }
1538 
1539         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1540                                    set, SIGSET_T_SIZE));
1541 
1542         if (!is_error(ret) && arg3) {
1543             if (time64) {
1544                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1545                     return -TARGET_EFAULT;
1546                 }
1547             } else {
1548                 if (host_to_target_timespec(arg3, timeout_ts)) {
1549                     return -TARGET_EFAULT;
1550                 }
1551             }
1552         }
1553         if (arg4) {
1554             unlock_user(target_set, arg4, 0);
1555         }
1556     } else {
1557         struct timespec ts, *pts;
1558
1559         if (arg3 >= 0) {
1560             /* Convert milliseconds to seconds and nanoseconds. */
1561             ts.tv_sec = arg3 / 1000;
1562             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1563             pts = &ts;
1564         } else {
1565             /* A negative poll() timeout means "infinite". */
1566             pts = NULL;
1567         }
1568         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1569     }
1570 
1571     if (!is_error(ret)) {
1572         for (i = 0; i < nfds; i++) {
1573             target_pfd[i].revents = tswap16(pfd[i].revents);
1574         }
1575     }
1576     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1577     return ret;
1578 }
1579 #endif
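/*
 * Illustrative example (not part of the build): a guest call such as
 *     poll(fds, nfds, 2500)
 * reaches do_ppoll() with ppoll == false, so the 2500 ms timeout is
 * converted to a host struct timespec of { .tv_sec = 2, .tv_nsec = 500000000 }
 * before safe_ppoll() is invoked; a negative timeout maps to a NULL
 * timespec, i.e. "wait forever".
 */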
1580 
1581 static abi_long do_pipe2(int host_pipe[], int flags)
1582 {
1583 #ifdef CONFIG_PIPE2
1584     return pipe2(host_pipe, flags);
1585 #else
1586     return -ENOSYS;
1587 #endif
1588 }
1589 
1590 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1591                         int flags, int is_pipe2)
1592 {
1593     int host_pipe[2];
1594     abi_long ret;
1595     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1596 
1597     if (is_error(ret))
1598         return get_errno(ret);
1599 
1600     /* Several targets have special calling conventions for the original
1601        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1602     if (!is_pipe2) {
1603 #if defined(TARGET_ALPHA)
1604         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1605         return host_pipe[0];
1606 #elif defined(TARGET_MIPS)
1607         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1608         return host_pipe[0];
1609 #elif defined(TARGET_SH4)
1610         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1611         return host_pipe[0];
1612 #elif defined(TARGET_SPARC)
1613         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1614         return host_pipe[0];
1615 #endif
1616     }
1617 
1618     if (put_user_s32(host_pipe[0], pipedes)
1619         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1620         return -TARGET_EFAULT;
1621     return get_errno(ret);
1622 }
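/*
 * Illustrative example (not part of the build): on most targets a guest
 *     int fds[2]; pipe(fds);
 * stores both descriptors through the pipedes pointer. On Alpha, MIPS, SH4
 * and SPARC the historical ABI instead returns the read end as the syscall
 * result and the write end in a second register (e.g. $v1 on MIPS), which is
 * why do_pipe() pokes host_pipe[1] directly into the CPU state for
 * !is_pipe2 and returns host_pipe[0].
 */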
1623 
1624 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1625                                               abi_ulong target_addr,
1626                                               socklen_t len)
1627 {
1628     struct target_ip_mreqn *target_smreqn;
1629 
1630     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1631     if (!target_smreqn)
1632         return -TARGET_EFAULT;
1633     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1634     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1635     if (len == sizeof(struct target_ip_mreqn))
1636         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1637     unlock_user(target_smreqn, target_addr, 0);
1638 
1639     return 0;
1640 }
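/*
 * Illustrative example (not part of the build): IP_ADD_MEMBERSHIP can be
 * given either the short struct ip_mreq (group plus local address) or the
 * longer struct ip_mreqn that adds an interface index. The helper above
 * copies the two addresses verbatim (they stay in network byte order) and
 * converts imr_ifindex to host order only when the guest actually passed
 * the longer layout.
 */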
1641 
1642 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1643                                                abi_ulong target_addr,
1644                                                socklen_t len)
1645 {
1646     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1647     sa_family_t sa_family;
1648     struct target_sockaddr *target_saddr;
1649 
1650     if (fd_trans_target_to_host_addr(fd)) {
1651         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1652     }
1653 
1654     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1655     if (!target_saddr)
1656         return -TARGET_EFAULT;
1657 
1658     sa_family = tswap16(target_saddr->sa_family);
1659 
1660     /* The caller might send an incomplete sun_path; sun_path
1661      * must be terminated by \0 (see the manual page), but
1662      * unfortunately it is quite common to specify sockaddr_un
1663      * length as "strlen(x->sun_path)" when it should be
1664      * "strlen(...) + 1". We fix that here if needed.
1665      * The Linux kernel has a similar feature.
1666      */
1667 
1668     if (sa_family == AF_UNIX) {
1669         if (len < unix_maxlen && len > 0) {
1670             char *cp = (char*)target_saddr;
1671 
1672             if ( cp[len-1] && !cp[len] )
1673                 len++;
1674         }
1675         if (len > unix_maxlen)
1676             len = unix_maxlen;
1677     }
1678 
1679     memcpy(addr, target_saddr, len);
1680     addr->sa_family = sa_family;
1681     if (sa_family == AF_NETLINK) {
1682         struct sockaddr_nl *nladdr;
1683 
1684         nladdr = (struct sockaddr_nl *)addr;
1685         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1686         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1687     } else if (sa_family == AF_PACKET) {
1688         struct target_sockaddr_ll *lladdr;
1689
1690         lladdr = (struct target_sockaddr_ll *)addr;
1691         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1692         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1693     }
1694     unlock_user(target_saddr, target_addr, 0);
1695 
1696     return 0;
1697 }
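/*
 * Illustrative example (not part of the build): a guest that fills in a
 * struct sockaddr_un with sun_path = "/tmp/sock" but passes
 *     addrlen = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock")
 * omits the terminating NUL from the length. If the byte just past that
 * length is already 0, target_to_host_sockaddr() bumps len by one so the
 * host kernel sees a properly terminated path, mirroring what Linux itself
 * tolerates.
 */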
1698 
1699 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1700                                                struct sockaddr *addr,
1701                                                socklen_t len)
1702 {
1703     struct target_sockaddr *target_saddr;
1704 
1705     if (len == 0) {
1706         return 0;
1707     }
1708     assert(addr);
1709 
1710     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1711     if (!target_saddr)
1712         return -TARGET_EFAULT;
1713     memcpy(target_saddr, addr, len);
1714     if (len >= offsetof(struct target_sockaddr, sa_family) +
1715         sizeof(target_saddr->sa_family)) {
1716         target_saddr->sa_family = tswap16(addr->sa_family);
1717     }
1718     if (addr->sa_family == AF_NETLINK &&
1719         len >= sizeof(struct target_sockaddr_nl)) {
1720         struct target_sockaddr_nl *target_nl =
1721                (struct target_sockaddr_nl *)target_saddr;
1722         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1723         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1724     } else if (addr->sa_family == AF_PACKET) {
1725         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1726         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1727         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1728     } else if (addr->sa_family == AF_INET6 &&
1729                len >= sizeof(struct target_sockaddr_in6)) {
1730         struct target_sockaddr_in6 *target_in6 =
1731                (struct target_sockaddr_in6 *)target_saddr;
1732         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1733     }
1734     unlock_user(target_saddr, target_addr, len);
1735 
1736     return 0;
1737 }
1738 
1739 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1740                                            struct target_msghdr *target_msgh)
1741 {
1742     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1743     abi_long msg_controllen;
1744     abi_ulong target_cmsg_addr;
1745     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1746     socklen_t space = 0;
1747 
1748     msg_controllen = tswapal(target_msgh->msg_controllen);
1749     if (msg_controllen < sizeof (struct target_cmsghdr))
1750         goto the_end;
1751     target_cmsg_addr = tswapal(target_msgh->msg_control);
1752     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1753     target_cmsg_start = target_cmsg;
1754     if (!target_cmsg)
1755         return -TARGET_EFAULT;
1756 
1757     while (cmsg && target_cmsg) {
1758         void *data = CMSG_DATA(cmsg);
1759         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1760 
1761         int len = tswapal(target_cmsg->cmsg_len)
1762             - sizeof(struct target_cmsghdr);
1763 
1764         space += CMSG_SPACE(len);
1765         if (space > msgh->msg_controllen) {
1766             space -= CMSG_SPACE(len);
1767             /* This is a QEMU bug, since we allocated the payload
1768              * area ourselves (unlike overflow in host-to-target
1769              * conversion, which is just the guest giving us a buffer
1770              * that's too small). It can't happen for the payload types
1771              * we currently support; if it becomes an issue in future
1772              * we would need to improve our allocation strategy to
1773              * something more intelligent than "twice the size of the
1774              * target buffer we're reading from".
1775              */
1776             qemu_log_mask(LOG_UNIMP,
1777                           ("Unsupported ancillary data %d/%d: "
1778                            "unhandled msg size\n"),
1779                           tswap32(target_cmsg->cmsg_level),
1780                           tswap32(target_cmsg->cmsg_type));
1781             break;
1782         }
1783 
1784         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1785             cmsg->cmsg_level = SOL_SOCKET;
1786         } else {
1787             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1788         }
1789         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1790         cmsg->cmsg_len = CMSG_LEN(len);
1791 
1792         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1793             int *fd = (int *)data;
1794             int *target_fd = (int *)target_data;
1795             int i, numfds = len / sizeof(int);
1796 
1797             for (i = 0; i < numfds; i++) {
1798                 __get_user(fd[i], target_fd + i);
1799             }
1800         } else if (cmsg->cmsg_level == SOL_SOCKET
1801                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1802             struct ucred *cred = (struct ucred *)data;
1803             struct target_ucred *target_cred =
1804                 (struct target_ucred *)target_data;
1805 
1806             __get_user(cred->pid, &target_cred->pid);
1807             __get_user(cred->uid, &target_cred->uid);
1808             __get_user(cred->gid, &target_cred->gid);
1809         } else {
1810             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1811                           cmsg->cmsg_level, cmsg->cmsg_type);
1812             memcpy(data, target_data, len);
1813         }
1814 
1815         cmsg = CMSG_NXTHDR(msgh, cmsg);
1816         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1817                                          target_cmsg_start);
1818     }
1819     unlock_user(target_cmsg, target_cmsg_addr, 0);
1820  the_end:
1821     msgh->msg_controllen = space;
1822     return 0;
1823 }
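/*
 * Illustrative example (not part of the build): if the guest does
 *     sendmsg(fd, &msg, 0)
 * with a control message of level SOL_SOCKET / type SCM_RIGHTS carrying two
 * file descriptors, the loop above rewrites the cmsg header into host layout
 * and copies each 32-bit fd with __get_user(), so the host sees the same two
 * descriptors in its native byte order. Credentials (SCM_CREDENTIALS) are
 * converted field by field; any other payload is passed through verbatim
 * with a LOG_UNIMP warning.
 */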
1824 
1825 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1826                                            struct msghdr *msgh)
1827 {
1828     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1829     abi_long msg_controllen;
1830     abi_ulong target_cmsg_addr;
1831     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1832     socklen_t space = 0;
1833 
1834     msg_controllen = tswapal(target_msgh->msg_controllen);
1835     if (msg_controllen < sizeof (struct target_cmsghdr))
1836         goto the_end;
1837     target_cmsg_addr = tswapal(target_msgh->msg_control);
1838     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1839     target_cmsg_start = target_cmsg;
1840     if (!target_cmsg)
1841         return -TARGET_EFAULT;
1842 
1843     while (cmsg && target_cmsg) {
1844         void *data = CMSG_DATA(cmsg);
1845         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1846 
1847         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1848         int tgt_len, tgt_space;
1849 
1850         /* We never copy a half-header but may copy half-data;
1851          * this is Linux's behaviour in put_cmsg(). Note that
1852          * truncation here is a guest problem (which we report
1853          * to the guest via the CTRUNC bit), unlike truncation
1854          * in target_to_host_cmsg, which is a QEMU bug.
1855          */
1856         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1857             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1858             break;
1859         }
1860 
1861         if (cmsg->cmsg_level == SOL_SOCKET) {
1862             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1863         } else {
1864             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1865         }
1866         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1867 
1868         /* Payload types which need a different size of payload on
1869          * the target must adjust tgt_len here.
1870          */
1871         tgt_len = len;
1872         switch (cmsg->cmsg_level) {
1873         case SOL_SOCKET:
1874             switch (cmsg->cmsg_type) {
1875             case SO_TIMESTAMP:
1876                 tgt_len = sizeof(struct target_timeval);
1877                 break;
1878             default:
1879                 break;
1880             }
1881             break;
1882         default:
1883             break;
1884         }
1885 
1886         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1887             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1888             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1889         }
1890 
1891         /* We must now copy-and-convert len bytes of payload
1892          * into tgt_len bytes of destination space. Bear in mind
1893          * that in both source and destination we may be dealing
1894          * with a truncated value!
1895          */
1896         switch (cmsg->cmsg_level) {
1897         case SOL_SOCKET:
1898             switch (cmsg->cmsg_type) {
1899             case SCM_RIGHTS:
1900             {
1901                 int *fd = (int *)data;
1902                 int *target_fd = (int *)target_data;
1903                 int i, numfds = tgt_len / sizeof(int);
1904 
1905                 for (i = 0; i < numfds; i++) {
1906                     __put_user(fd[i], target_fd + i);
1907                 }
1908                 break;
1909             }
1910             case SO_TIMESTAMP:
1911             {
1912                 struct timeval *tv = (struct timeval *)data;
1913                 struct target_timeval *target_tv =
1914                     (struct target_timeval *)target_data;
1915 
1916                 if (len != sizeof(struct timeval) ||
1917                     tgt_len != sizeof(struct target_timeval)) {
1918                     goto unimplemented;
1919                 }
1920 
1921                 /* copy struct timeval to target */
1922                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1923                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1924                 break;
1925             }
1926             case SCM_CREDENTIALS:
1927             {
1928                 struct ucred *cred = (struct ucred *)data;
1929                 struct target_ucred *target_cred =
1930                     (struct target_ucred *)target_data;
1931 
1932                 __put_user(cred->pid, &target_cred->pid);
1933                 __put_user(cred->uid, &target_cred->uid);
1934                 __put_user(cred->gid, &target_cred->gid);
1935                 break;
1936             }
1937             default:
1938                 goto unimplemented;
1939             }
1940             break;
1941 
1942         case SOL_IP:
1943             switch (cmsg->cmsg_type) {
1944             case IP_TTL:
1945             {
1946                 uint32_t *v = (uint32_t *)data;
1947                 uint32_t *t_int = (uint32_t *)target_data;
1948 
1949                 if (len != sizeof(uint32_t) ||
1950                     tgt_len != sizeof(uint32_t)) {
1951                     goto unimplemented;
1952                 }
1953                 __put_user(*v, t_int);
1954                 break;
1955             }
1956             case IP_RECVERR:
1957             {
1958                 struct errhdr_t {
1959                    struct sock_extended_err ee;
1960                    struct sockaddr_in offender;
1961                 };
1962                 struct errhdr_t *errh = (struct errhdr_t *)data;
1963                 struct errhdr_t *target_errh =
1964                     (struct errhdr_t *)target_data;
1965 
1966                 if (len != sizeof(struct errhdr_t) ||
1967                     tgt_len != sizeof(struct errhdr_t)) {
1968                     goto unimplemented;
1969                 }
1970                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1971                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1972                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1973                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1974                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1975                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1976                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1977                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1978                     (void *) &errh->offender, sizeof(errh->offender));
1979                 break;
1980             }
1981             default:
1982                 goto unimplemented;
1983             }
1984             break;
1985 
1986         case SOL_IPV6:
1987             switch (cmsg->cmsg_type) {
1988             case IPV6_HOPLIMIT:
1989             {
1990                 uint32_t *v = (uint32_t *)data;
1991                 uint32_t *t_int = (uint32_t *)target_data;
1992 
1993                 if (len != sizeof(uint32_t) ||
1994                     tgt_len != sizeof(uint32_t)) {
1995                     goto unimplemented;
1996                 }
1997                 __put_user(*v, t_int);
1998                 break;
1999             }
2000             case IPV6_RECVERR:
2001             {
2002                 struct errhdr6_t {
2003                    struct sock_extended_err ee;
2004                    struct sockaddr_in6 offender;
2005                 };
2006                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2007                 struct errhdr6_t *target_errh =
2008                     (struct errhdr6_t *)target_data;
2009 
2010                 if (len != sizeof(struct errhdr6_t) ||
2011                     tgt_len != sizeof(struct errhdr6_t)) {
2012                     goto unimplemented;
2013                 }
2014                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2015                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2016                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2017                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2018                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2019                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2020                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2021                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2022                     (void *) &errh->offender, sizeof(errh->offender));
2023                 break;
2024             }
2025             default:
2026                 goto unimplemented;
2027             }
2028             break;
2029 
2030         default:
2031         unimplemented:
2032             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2033                           cmsg->cmsg_level, cmsg->cmsg_type);
2034             memcpy(target_data, data, MIN(len, tgt_len));
2035             if (tgt_len > len) {
2036                 memset(target_data + len, 0, tgt_len - len);
2037             }
2038         }
2039 
2040         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2041         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2042         if (msg_controllen < tgt_space) {
2043             tgt_space = msg_controllen;
2044         }
2045         msg_controllen -= tgt_space;
2046         space += tgt_space;
2047         cmsg = CMSG_NXTHDR(msgh, cmsg);
2048         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2049                                          target_cmsg_start);
2050     }
2051     unlock_user(target_cmsg, target_cmsg_addr, space);
2052  the_end:
2053     target_msgh->msg_controllen = tswapal(space);
2054     return 0;
2055 }
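/*
 * Illustrative example (not part of the build): a guest recvmsg() that
 * receives SCM_RIGHTS with three descriptors but supplies room for only two
 * target ints gets tgt_len clamped, so numfds becomes 2, the first two fds
 * are stored with __put_user(), and MSG_CTRUNC is reported; this mirrors the
 * put_cmsg() behaviour of a native Linux kernel described in the comment
 * above.
 */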
2056 
2057 /* do_setsockopt() must return target values and target errnos. */
2058 static abi_long do_setsockopt(int sockfd, int level, int optname,
2059                               abi_ulong optval_addr, socklen_t optlen)
2060 {
2061     abi_long ret;
2062     int val;
2063     struct ip_mreqn *ip_mreq;
2064     struct ip_mreq_source *ip_mreq_source;
2065 
2066     switch(level) {
2067     case SOL_TCP:
2068     case SOL_UDP:
2069         /* TCP and UDP options all take an 'int' value.  */
2070         if (optlen < sizeof(uint32_t))
2071             return -TARGET_EINVAL;
2072 
2073         if (get_user_u32(val, optval_addr))
2074             return -TARGET_EFAULT;
2075         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2076         break;
2077     case SOL_IP:
2078         switch(optname) {
2079         case IP_TOS:
2080         case IP_TTL:
2081         case IP_HDRINCL:
2082         case IP_ROUTER_ALERT:
2083         case IP_RECVOPTS:
2084         case IP_RETOPTS:
2085         case IP_PKTINFO:
2086         case IP_MTU_DISCOVER:
2087         case IP_RECVERR:
2088         case IP_RECVTTL:
2089         case IP_RECVTOS:
2090 #ifdef IP_FREEBIND
2091         case IP_FREEBIND:
2092 #endif
2093         case IP_MULTICAST_TTL:
2094         case IP_MULTICAST_LOOP:
2095             val = 0;
2096             if (optlen >= sizeof(uint32_t)) {
2097                 if (get_user_u32(val, optval_addr))
2098                     return -TARGET_EFAULT;
2099             } else if (optlen >= 1) {
2100                 if (get_user_u8(val, optval_addr))
2101                     return -TARGET_EFAULT;
2102             }
2103             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2104             break;
2105         case IP_ADD_MEMBERSHIP:
2106         case IP_DROP_MEMBERSHIP:
2107             if (optlen < sizeof (struct target_ip_mreq) ||
2108                 optlen > sizeof (struct target_ip_mreqn))
2109                 return -TARGET_EINVAL;
2110 
2111             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2112             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2113             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2114             break;
2115 
2116         case IP_BLOCK_SOURCE:
2117         case IP_UNBLOCK_SOURCE:
2118         case IP_ADD_SOURCE_MEMBERSHIP:
2119         case IP_DROP_SOURCE_MEMBERSHIP:
2120             if (optlen != sizeof (struct target_ip_mreq_source))
2121                 return -TARGET_EINVAL;
2122 
2123             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2124             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2125             unlock_user (ip_mreq_source, optval_addr, 0);
2126             break;
2127 
2128         default:
2129             goto unimplemented;
2130         }
2131         break;
2132     case SOL_IPV6:
2133         switch (optname) {
2134         case IPV6_MTU_DISCOVER:
2135         case IPV6_MTU:
2136         case IPV6_V6ONLY:
2137         case IPV6_RECVPKTINFO:
2138         case IPV6_UNICAST_HOPS:
2139         case IPV6_MULTICAST_HOPS:
2140         case IPV6_MULTICAST_LOOP:
2141         case IPV6_RECVERR:
2142         case IPV6_RECVHOPLIMIT:
2143         case IPV6_2292HOPLIMIT:
2144         case IPV6_CHECKSUM:
2145         case IPV6_ADDRFORM:
2146         case IPV6_2292PKTINFO:
2147         case IPV6_RECVTCLASS:
2148         case IPV6_RECVRTHDR:
2149         case IPV6_2292RTHDR:
2150         case IPV6_RECVHOPOPTS:
2151         case IPV6_2292HOPOPTS:
2152         case IPV6_RECVDSTOPTS:
2153         case IPV6_2292DSTOPTS:
2154         case IPV6_TCLASS:
2155         case IPV6_ADDR_PREFERENCES:
2156 #ifdef IPV6_RECVPATHMTU
2157         case IPV6_RECVPATHMTU:
2158 #endif
2159 #ifdef IPV6_TRANSPARENT
2160         case IPV6_TRANSPARENT:
2161 #endif
2162 #ifdef IPV6_FREEBIND
2163         case IPV6_FREEBIND:
2164 #endif
2165 #ifdef IPV6_RECVORIGDSTADDR
2166         case IPV6_RECVORIGDSTADDR:
2167 #endif
2168             val = 0;
2169             if (optlen < sizeof(uint32_t)) {
2170                 return -TARGET_EINVAL;
2171             }
2172             if (get_user_u32(val, optval_addr)) {
2173                 return -TARGET_EFAULT;
2174             }
2175             ret = get_errno(setsockopt(sockfd, level, optname,
2176                                        &val, sizeof(val)));
2177             break;
2178         case IPV6_PKTINFO:
2179         {
2180             struct in6_pktinfo pki;
2181 
2182             if (optlen < sizeof(pki)) {
2183                 return -TARGET_EINVAL;
2184             }
2185 
2186             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2187                 return -TARGET_EFAULT;
2188             }
2189 
2190             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2191 
2192             ret = get_errno(setsockopt(sockfd, level, optname,
2193                                        &pki, sizeof(pki)));
2194             break;
2195         }
2196         case IPV6_ADD_MEMBERSHIP:
2197         case IPV6_DROP_MEMBERSHIP:
2198         {
2199             struct ipv6_mreq ipv6mreq;
2200 
2201             if (optlen < sizeof(ipv6mreq)) {
2202                 return -TARGET_EINVAL;
2203             }
2204 
2205             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2206                 return -TARGET_EFAULT;
2207             }
2208 
2209             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2210 
2211             ret = get_errno(setsockopt(sockfd, level, optname,
2212                                        &ipv6mreq, sizeof(ipv6mreq)));
2213             break;
2214         }
2215         default:
2216             goto unimplemented;
2217         }
2218         break;
2219     case SOL_ICMPV6:
2220         switch (optname) {
2221         case ICMPV6_FILTER:
2222         {
2223             struct icmp6_filter icmp6f;
2224 
2225             if (optlen > sizeof(icmp6f)) {
2226                 optlen = sizeof(icmp6f);
2227             }
2228 
2229             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2230                 return -TARGET_EFAULT;
2231             }
2232 
2233             for (val = 0; val < 8; val++) {
2234                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2235             }
2236 
2237             ret = get_errno(setsockopt(sockfd, level, optname,
2238                                        &icmp6f, optlen));
2239             break;
2240         }
2241         default:
2242             goto unimplemented;
2243         }
2244         break;
2245     case SOL_RAW:
2246         switch (optname) {
2247         case ICMP_FILTER:
2248         case IPV6_CHECKSUM:
2249             /* These take a u32 value. */
2250             if (optlen < sizeof(uint32_t)) {
2251                 return -TARGET_EINVAL;
2252             }
2253 
2254             if (get_user_u32(val, optval_addr)) {
2255                 return -TARGET_EFAULT;
2256             }
2257             ret = get_errno(setsockopt(sockfd, level, optname,
2258                                        &val, sizeof(val)));
2259             break;
2260 
2261         default:
2262             goto unimplemented;
2263         }
2264         break;
2265 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2266     case SOL_ALG:
2267         switch (optname) {
2268         case ALG_SET_KEY:
2269         {
2270             char *alg_key = g_malloc(optlen);
2271 
2272             if (!alg_key) {
2273                 return -TARGET_ENOMEM;
2274             }
2275             if (copy_from_user(alg_key, optval_addr, optlen)) {
2276                 g_free(alg_key);
2277                 return -TARGET_EFAULT;
2278             }
2279             ret = get_errno(setsockopt(sockfd, level, optname,
2280                                        alg_key, optlen));
2281             g_free(alg_key);
2282             break;
2283         }
2284         case ALG_SET_AEAD_AUTHSIZE:
2285         {
2286             ret = get_errno(setsockopt(sockfd, level, optname,
2287                                        NULL, optlen));
2288             break;
2289         }
2290         default:
2291             goto unimplemented;
2292         }
2293         break;
2294 #endif
2295     case TARGET_SOL_SOCKET:
2296         switch (optname) {
2297         case TARGET_SO_RCVTIMEO:
2298         {
2299                 struct timeval tv;
2300 
2301                 optname = SO_RCVTIMEO;
2302 
2303 set_timeout:
2304                 if (optlen != sizeof(struct target_timeval)) {
2305                     return -TARGET_EINVAL;
2306                 }
2307 
2308                 if (copy_from_user_timeval(&tv, optval_addr)) {
2309                     return -TARGET_EFAULT;
2310                 }
2311 
2312                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2313                                 &tv, sizeof(tv)));
2314                 return ret;
2315         }
2316         case TARGET_SO_SNDTIMEO:
2317                 optname = SO_SNDTIMEO;
2318                 goto set_timeout;
2319         case TARGET_SO_ATTACH_FILTER:
2320         {
2321                 struct target_sock_fprog *tfprog;
2322                 struct target_sock_filter *tfilter;
2323                 struct sock_fprog fprog;
2324                 struct sock_filter *filter;
2325                 int i;
2326 
2327                 if (optlen != sizeof(*tfprog)) {
2328                     return -TARGET_EINVAL;
2329                 }
2330                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2331                     return -TARGET_EFAULT;
2332                 }
2333                 if (!lock_user_struct(VERIFY_READ, tfilter,
2334                                       tswapal(tfprog->filter), 0)) {
2335                     unlock_user_struct(tfprog, optval_addr, 1);
2336                     return -TARGET_EFAULT;
2337                 }
2338 
2339                 fprog.len = tswap16(tfprog->len);
2340                 filter = g_try_new(struct sock_filter, fprog.len);
2341                 if (filter == NULL) {
2342                     unlock_user_struct(tfilter, tfprog->filter, 1);
2343                     unlock_user_struct(tfprog, optval_addr, 1);
2344                     return -TARGET_ENOMEM;
2345                 }
2346                 for (i = 0; i < fprog.len; i++) {
2347                     filter[i].code = tswap16(tfilter[i].code);
2348                     filter[i].jt = tfilter[i].jt;
2349                     filter[i].jf = tfilter[i].jf;
2350                     filter[i].k = tswap32(tfilter[i].k);
2351                 }
2352                 fprog.filter = filter;
2353 
2354                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2355                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2356                 g_free(filter);
2357 
2358                 unlock_user_struct(tfilter, tfprog->filter, 1);
2359                 unlock_user_struct(tfprog, optval_addr, 1);
2360                 return ret;
2361         }
2362         case TARGET_SO_BINDTODEVICE:
2363         {
2364                 char *dev_ifname, *addr_ifname;
2365
2366                 if (optlen > IFNAMSIZ - 1) {
2367                     optlen = IFNAMSIZ - 1;
2368                 }
2369                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2370                 if (!dev_ifname) {
2371                     return -TARGET_EFAULT;
2372                 }
2373                 optname = SO_BINDTODEVICE;
2374                 addr_ifname = alloca(IFNAMSIZ);
2375                 memcpy(addr_ifname, dev_ifname, optlen);
2376                 addr_ifname[optlen] = 0;
2377                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2378                                            addr_ifname, optlen));
2379                 unlock_user(dev_ifname, optval_addr, 0);
2380                 return ret;
2381         }
2382         case TARGET_SO_LINGER:
2383         {
2384                 struct linger lg;
2385                 struct target_linger *tlg;
2386 
2387                 if (optlen != sizeof(struct target_linger)) {
2388                     return -TARGET_EINVAL;
2389                 }
2390                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2391                     return -TARGET_EFAULT;
2392                 }
2393                 __get_user(lg.l_onoff, &tlg->l_onoff);
2394                 __get_user(lg.l_linger, &tlg->l_linger);
2395                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2396                                 &lg, sizeof(lg)));
2397                 unlock_user_struct(tlg, optval_addr, 0);
2398                 return ret;
2399         }
2400             /* Options with 'int' argument.  */
2401         case TARGET_SO_DEBUG:
2402                 optname = SO_DEBUG;
2403                 break;
2404         case TARGET_SO_REUSEADDR:
2405                 optname = SO_REUSEADDR;
2406                 break;
2407 #ifdef SO_REUSEPORT
2408         case TARGET_SO_REUSEPORT:
2409                 optname = SO_REUSEPORT;
2410                 break;
2411 #endif
2412         case TARGET_SO_TYPE:
2413                 optname = SO_TYPE;
2414                 break;
2415         case TARGET_SO_ERROR:
2416                 optname = SO_ERROR;
2417                 break;
2418         case TARGET_SO_DONTROUTE:
2419                 optname = SO_DONTROUTE;
2420                 break;
2421         case TARGET_SO_BROADCAST:
2422                 optname = SO_BROADCAST;
2423                 break;
2424         case TARGET_SO_SNDBUF:
2425                 optname = SO_SNDBUF;
2426                 break;
2427         case TARGET_SO_SNDBUFFORCE:
2428                 optname = SO_SNDBUFFORCE;
2429                 break;
2430         case TARGET_SO_RCVBUF:
2431                 optname = SO_RCVBUF;
2432                 break;
2433         case TARGET_SO_RCVBUFFORCE:
2434                 optname = SO_RCVBUFFORCE;
2435                 break;
2436         case TARGET_SO_KEEPALIVE:
2437                 optname = SO_KEEPALIVE;
2438                 break;
2439         case TARGET_SO_OOBINLINE:
2440                 optname = SO_OOBINLINE;
2441                 break;
2442         case TARGET_SO_NO_CHECK:
2443                 optname = SO_NO_CHECK;
2444                 break;
2445         case TARGET_SO_PRIORITY:
2446                 optname = SO_PRIORITY;
2447                 break;
2448 #ifdef SO_BSDCOMPAT
2449         case TARGET_SO_BSDCOMPAT:
2450                 optname = SO_BSDCOMPAT;
2451                 break;
2452 #endif
2453         case TARGET_SO_PASSCRED:
2454                 optname = SO_PASSCRED;
2455                 break;
2456         case TARGET_SO_PASSSEC:
2457                 optname = SO_PASSSEC;
2458                 break;
2459         case TARGET_SO_TIMESTAMP:
2460                 optname = SO_TIMESTAMP;
2461                 break;
2462         case TARGET_SO_RCVLOWAT:
2463                 optname = SO_RCVLOWAT;
2464                 break;
2465         default:
2466             goto unimplemented;
2467         }
2468         if (optlen < sizeof(uint32_t))
2469             return -TARGET_EINVAL;
2470
2471         if (get_user_u32(val, optval_addr))
2472             return -TARGET_EFAULT;
2473         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2474         break;
2475 #ifdef SOL_NETLINK
2476     case SOL_NETLINK:
2477         switch (optname) {
2478         case NETLINK_PKTINFO:
2479         case NETLINK_ADD_MEMBERSHIP:
2480         case NETLINK_DROP_MEMBERSHIP:
2481         case NETLINK_BROADCAST_ERROR:
2482         case NETLINK_NO_ENOBUFS:
2483 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2484         case NETLINK_LISTEN_ALL_NSID:
2485         case NETLINK_CAP_ACK:
2486 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2487 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2488         case NETLINK_EXT_ACK:
2489 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2490 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2491         case NETLINK_GET_STRICT_CHK:
2492 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2493             break;
2494         default:
2495             goto unimplemented;
2496         }
2497         val = 0;
2498         if (optlen < sizeof(uint32_t)) {
2499             return -TARGET_EINVAL;
2500         }
2501         if (get_user_u32(val, optval_addr)) {
2502             return -TARGET_EFAULT;
2503         }
2504         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2505                                    sizeof(val)));
2506         break;
2507 #endif /* SOL_NETLINK */
2508     default:
2509     unimplemented:
2510         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2511                       level, optname);
2512         ret = -TARGET_ENOPROTOOPT;
2513     }
2514     return ret;
2515 }
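/*
 * Illustrative example (not part of the build): a guest
 *     setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))
 * arrives here with a guest-layout struct target_timeval at optval_addr;
 * copy_from_user_timeval() converts it to a host struct timeval, and only
 * then is the host setsockopt() issued, so both the field widths and the
 * byte order match what the host kernel expects.
 */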
2516 
2517 /* do_getsockopt() must return target values and target errnos. */
2518 static abi_long do_getsockopt(int sockfd, int level, int optname,
2519                               abi_ulong optval_addr, abi_ulong optlen)
2520 {
2521     abi_long ret;
2522     int len, val;
2523     socklen_t lv;
2524 
2525     switch(level) {
2526     case TARGET_SOL_SOCKET:
2527         level = SOL_SOCKET;
2528         switch (optname) {
2529         /* These don't just return a single integer */
2530         case TARGET_SO_PEERNAME:
2531             goto unimplemented;
2532         case TARGET_SO_RCVTIMEO: {
2533             struct timeval tv;
2534             socklen_t tvlen;
2535 
2536             optname = SO_RCVTIMEO;
2537 
2538 get_timeout:
2539             if (get_user_u32(len, optlen)) {
2540                 return -TARGET_EFAULT;
2541             }
2542             if (len < 0) {
2543                 return -TARGET_EINVAL;
2544             }
2545 
2546             tvlen = sizeof(tv);
2547             ret = get_errno(getsockopt(sockfd, level, optname,
2548                                        &tv, &tvlen));
2549             if (ret < 0) {
2550                 return ret;
2551             }
2552             if (len > sizeof(struct target_timeval)) {
2553                 len = sizeof(struct target_timeval);
2554             }
2555             if (copy_to_user_timeval(optval_addr, &tv)) {
2556                 return -TARGET_EFAULT;
2557             }
2558             if (put_user_u32(len, optlen)) {
2559                 return -TARGET_EFAULT;
2560             }
2561             break;
2562         }
2563         case TARGET_SO_SNDTIMEO:
2564             optname = SO_SNDTIMEO;
2565             goto get_timeout;
2566         case TARGET_SO_PEERCRED: {
2567             struct ucred cr;
2568             socklen_t crlen;
2569             struct target_ucred *tcr;
2570 
2571             if (get_user_u32(len, optlen)) {
2572                 return -TARGET_EFAULT;
2573             }
2574             if (len < 0) {
2575                 return -TARGET_EINVAL;
2576             }
2577 
2578             crlen = sizeof(cr);
2579             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2580                                        &cr, &crlen));
2581             if (ret < 0) {
2582                 return ret;
2583             }
2584             if (len > crlen) {
2585                 len = crlen;
2586             }
2587             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2588                 return -TARGET_EFAULT;
2589             }
2590             __put_user(cr.pid, &tcr->pid);
2591             __put_user(cr.uid, &tcr->uid);
2592             __put_user(cr.gid, &tcr->gid);
2593             unlock_user_struct(tcr, optval_addr, 1);
2594             if (put_user_u32(len, optlen)) {
2595                 return -TARGET_EFAULT;
2596             }
2597             break;
2598         }
2599         case TARGET_SO_PEERSEC: {
2600             char *name;
2601 
2602             if (get_user_u32(len, optlen)) {
2603                 return -TARGET_EFAULT;
2604             }
2605             if (len < 0) {
2606                 return -TARGET_EINVAL;
2607             }
2608             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2609             if (!name) {
2610                 return -TARGET_EFAULT;
2611             }
2612             lv = len;
2613             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2614                                        name, &lv));
2615             if (put_user_u32(lv, optlen)) {
2616                 ret = -TARGET_EFAULT;
2617             }
2618             unlock_user(name, optval_addr, lv);
2619             break;
2620         }
2621         case TARGET_SO_LINGER:
2622         {
2623             struct linger lg;
2624             socklen_t lglen;
2625             struct target_linger *tlg;
2626 
2627             if (get_user_u32(len, optlen)) {
2628                 return -TARGET_EFAULT;
2629             }
2630             if (len < 0) {
2631                 return -TARGET_EINVAL;
2632             }
2633 
2634             lglen = sizeof(lg);
2635             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2636                                        &lg, &lglen));
2637             if (ret < 0) {
2638                 return ret;
2639             }
2640             if (len > lglen) {
2641                 len = lglen;
2642             }
2643             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2644                 return -TARGET_EFAULT;
2645             }
2646             __put_user(lg.l_onoff, &tlg->l_onoff);
2647             __put_user(lg.l_linger, &tlg->l_linger);
2648             unlock_user_struct(tlg, optval_addr, 1);
2649             if (put_user_u32(len, optlen)) {
2650                 return -TARGET_EFAULT;
2651             }
2652             break;
2653         }
2654         /* Options with 'int' argument.  */
2655         case TARGET_SO_DEBUG:
2656             optname = SO_DEBUG;
2657             goto int_case;
2658         case TARGET_SO_REUSEADDR:
2659             optname = SO_REUSEADDR;
2660             goto int_case;
2661 #ifdef SO_REUSEPORT
2662         case TARGET_SO_REUSEPORT:
2663             optname = SO_REUSEPORT;
2664             goto int_case;
2665 #endif
2666         case TARGET_SO_TYPE:
2667             optname = SO_TYPE;
2668             goto int_case;
2669         case TARGET_SO_ERROR:
2670             optname = SO_ERROR;
2671             goto int_case;
2672         case TARGET_SO_DONTROUTE:
2673             optname = SO_DONTROUTE;
2674             goto int_case;
2675         case TARGET_SO_BROADCAST:
2676             optname = SO_BROADCAST;
2677             goto int_case;
2678         case TARGET_SO_SNDBUF:
2679             optname = SO_SNDBUF;
2680             goto int_case;
2681         case TARGET_SO_RCVBUF:
2682             optname = SO_RCVBUF;
2683             goto int_case;
2684         case TARGET_SO_KEEPALIVE:
2685             optname = SO_KEEPALIVE;
2686             goto int_case;
2687         case TARGET_SO_OOBINLINE:
2688             optname = SO_OOBINLINE;
2689             goto int_case;
2690         case TARGET_SO_NO_CHECK:
2691             optname = SO_NO_CHECK;
2692             goto int_case;
2693         case TARGET_SO_PRIORITY:
2694             optname = SO_PRIORITY;
2695             goto int_case;
2696 #ifdef SO_BSDCOMPAT
2697         case TARGET_SO_BSDCOMPAT:
2698             optname = SO_BSDCOMPAT;
2699             goto int_case;
2700 #endif
2701         case TARGET_SO_PASSCRED:
2702             optname = SO_PASSCRED;
2703             goto int_case;
2704         case TARGET_SO_TIMESTAMP:
2705             optname = SO_TIMESTAMP;
2706             goto int_case;
2707         case TARGET_SO_RCVLOWAT:
2708             optname = SO_RCVLOWAT;
2709             goto int_case;
2710         case TARGET_SO_ACCEPTCONN:
2711             optname = SO_ACCEPTCONN;
2712             goto int_case;
2713         case TARGET_SO_PROTOCOL:
2714             optname = SO_PROTOCOL;
2715             goto int_case;
2716         case TARGET_SO_DOMAIN:
2717             optname = SO_DOMAIN;
2718             goto int_case;
2719         default:
2720             goto int_case;
2721         }
2722         break;
2723     case SOL_TCP:
2724     case SOL_UDP:
2725         /* TCP and UDP options all take an 'int' value.  */
2726     int_case:
2727         if (get_user_u32(len, optlen))
2728             return -TARGET_EFAULT;
2729         if (len < 0)
2730             return -TARGET_EINVAL;
2731         lv = sizeof(lv);
2732         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2733         if (ret < 0)
2734             return ret;
2735         if (optname == SO_TYPE) {
2736             val = host_to_target_sock_type(val);
2737         }
2738         if (len > lv)
2739             len = lv;
2740         if (len == 4) {
2741             if (put_user_u32(val, optval_addr))
2742                 return -TARGET_EFAULT;
2743         } else {
2744             if (put_user_u8(val, optval_addr))
2745                 return -TARGET_EFAULT;
2746         }
2747         if (put_user_u32(len, optlen))
2748             return -TARGET_EFAULT;
2749         break;
2750     case SOL_IP:
2751         switch(optname) {
2752         case IP_TOS:
2753         case IP_TTL:
2754         case IP_HDRINCL:
2755         case IP_ROUTER_ALERT:
2756         case IP_RECVOPTS:
2757         case IP_RETOPTS:
2758         case IP_PKTINFO:
2759         case IP_MTU_DISCOVER:
2760         case IP_RECVERR:
2761         case IP_RECVTOS:
2762 #ifdef IP_FREEBIND
2763         case IP_FREEBIND:
2764 #endif
2765         case IP_MULTICAST_TTL:
2766         case IP_MULTICAST_LOOP:
2767             if (get_user_u32(len, optlen))
2768                 return -TARGET_EFAULT;
2769             if (len < 0)
2770                 return -TARGET_EINVAL;
2771             lv = sizeof(lv);
2772             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2773             if (ret < 0)
2774                 return ret;
2775             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2776                 len = 1;
2777                 if (put_user_u32(len, optlen)
2778                     || put_user_u8(val, optval_addr))
2779                     return -TARGET_EFAULT;
2780             } else {
2781                 if (len > sizeof(int))
2782                     len = sizeof(int);
2783                 if (put_user_u32(len, optlen)
2784                     || put_user_u32(val, optval_addr))
2785                     return -TARGET_EFAULT;
2786             }
2787             break;
2788         default:
2789             ret = -TARGET_ENOPROTOOPT;
2790             break;
2791         }
2792         break;
2793     case SOL_IPV6:
2794         switch (optname) {
2795         case IPV6_MTU_DISCOVER:
2796         case IPV6_MTU:
2797         case IPV6_V6ONLY:
2798         case IPV6_RECVPKTINFO:
2799         case IPV6_UNICAST_HOPS:
2800         case IPV6_MULTICAST_HOPS:
2801         case IPV6_MULTICAST_LOOP:
2802         case IPV6_RECVERR:
2803         case IPV6_RECVHOPLIMIT:
2804         case IPV6_2292HOPLIMIT:
2805         case IPV6_CHECKSUM:
2806         case IPV6_ADDRFORM:
2807         case IPV6_2292PKTINFO:
2808         case IPV6_RECVTCLASS:
2809         case IPV6_RECVRTHDR:
2810         case IPV6_2292RTHDR:
2811         case IPV6_RECVHOPOPTS:
2812         case IPV6_2292HOPOPTS:
2813         case IPV6_RECVDSTOPTS:
2814         case IPV6_2292DSTOPTS:
2815         case IPV6_TCLASS:
2816         case IPV6_ADDR_PREFERENCES:
2817 #ifdef IPV6_RECVPATHMTU
2818         case IPV6_RECVPATHMTU:
2819 #endif
2820 #ifdef IPV6_TRANSPARENT
2821         case IPV6_TRANSPARENT:
2822 #endif
2823 #ifdef IPV6_FREEBIND
2824         case IPV6_FREEBIND:
2825 #endif
2826 #ifdef IPV6_RECVORIGDSTADDR
2827         case IPV6_RECVORIGDSTADDR:
2828 #endif
2829             if (get_user_u32(len, optlen))
2830                 return -TARGET_EFAULT;
2831             if (len < 0)
2832                 return -TARGET_EINVAL;
2833             lv = sizeof(lv);
2834             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2835             if (ret < 0)
2836                 return ret;
2837             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2838                 len = 1;
2839                 if (put_user_u32(len, optlen)
2840                     || put_user_u8(val, optval_addr))
2841                     return -TARGET_EFAULT;
2842             } else {
2843                 if (len > sizeof(int))
2844                     len = sizeof(int);
2845                 if (put_user_u32(len, optlen)
2846                     || put_user_u32(val, optval_addr))
2847                     return -TARGET_EFAULT;
2848             }
2849             break;
2850         default:
2851             ret = -TARGET_ENOPROTOOPT;
2852             break;
2853         }
2854         break;
2855 #ifdef SOL_NETLINK
2856     case SOL_NETLINK:
2857         switch (optname) {
2858         case NETLINK_PKTINFO:
2859         case NETLINK_BROADCAST_ERROR:
2860         case NETLINK_NO_ENOBUFS:
2861 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2862         case NETLINK_LISTEN_ALL_NSID:
2863         case NETLINK_CAP_ACK:
2864 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2865 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2866         case NETLINK_EXT_ACK:
2867 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2868 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2869         case NETLINK_GET_STRICT_CHK:
2870 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2871             if (get_user_u32(len, optlen)) {
2872                 return -TARGET_EFAULT;
2873             }
2874             if (len != sizeof(val)) {
2875                 return -TARGET_EINVAL;
2876             }
2877             lv = len;
2878             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2879             if (ret < 0) {
2880                 return ret;
2881             }
2882             if (put_user_u32(lv, optlen)
2883                 || put_user_u32(val, optval_addr)) {
2884                 return -TARGET_EFAULT;
2885             }
2886             break;
2887 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2888         case NETLINK_LIST_MEMBERSHIPS:
2889         {
2890             uint32_t *results;
2891             int i;
2892             if (get_user_u32(len, optlen)) {
2893                 return -TARGET_EFAULT;
2894             }
2895             if (len < 0) {
2896                 return -TARGET_EINVAL;
2897             }
2898             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2899             if (!results && len > 0) {
2900                 return -TARGET_EFAULT;
2901             }
2902             lv = len;
2903             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2904             if (ret < 0) {
2905                 unlock_user(results, optval_addr, 0);
2906                 return ret;
2907             }
2908             /* Swap host endianness to target endianness. */
2909             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2910                 results[i] = tswap32(results[i]);
2911             }
2912             if (put_user_u32(lv, optlen)) {
2913                 return -TARGET_EFAULT;
2914             }
2915             unlock_user(results, optval_addr, 0);
2916             break;
2917         }
2918 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2919         default:
2920             goto unimplemented;
2921         }
2922         break;
2923 #endif /* SOL_NETLINK */
2924     default:
2925     unimplemented:
2926         qemu_log_mask(LOG_UNIMP,
2927                       "getsockopt level=%d optname=%d not yet supported\n",
2928                       level, optname);
2929         ret = -TARGET_EOPNOTSUPP;
2930         break;
2931     }
2932     return ret;
2933 }
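/*
 * Illustrative example (not part of the build): a guest
 *     getsockopt(s, SOL_SOCKET, SO_TYPE, &val, &len)
 * goes through the int_case path above; the host integer is fetched, run
 * through host_to_target_sock_type() so that e.g. a MIPS guest gets its own
 * SOCK_STREAM encoding back, and then stored as either 4 bytes or a single
 * byte depending on the length the guest supplied.
 */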
2934 
2935 /* Convert a target low/high pair representing a file offset into the host
2936  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2937  * as the kernel doesn't handle them either.
2938  */
2939 static void target_to_host_low_high(abi_ulong tlow,
2940                                     abi_ulong thigh,
2941                                     unsigned long *hlow,
2942                                     unsigned long *hhigh)
2943 {
2944     uint64_t off = tlow |
2945         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2946         TARGET_LONG_BITS / 2;
2947 
2948     *hlow = off;
2949     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2950 }
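/*
 * Illustrative example (not part of the build): with a 32-bit target and a
 * 64-bit host, a guest offset split as tlow = 0x00001000 and
 * thigh = 0x00000002 is reassembled into off = 0x0000000200001000, so *hlow
 * receives the whole offset and *hhigh ends up 0. Shifting twice by half the
 * word size avoids an undefined full-width shift when the target (or host)
 * long is only 32 bits wide.
 */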
2951 
2952 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2953                                 abi_ulong count, int copy)
2954 {
2955     struct target_iovec *target_vec;
2956     struct iovec *vec;
2957     abi_ulong total_len, max_len;
2958     int i;
2959     int err = 0;
2960     bool bad_address = false;
2961 
2962     if (count == 0) {
2963         errno = 0;
2964         return NULL;
2965     }
2966     if (count > IOV_MAX) {
2967         errno = EINVAL;
2968         return NULL;
2969     }
2970 
2971     vec = g_try_new0(struct iovec, count);
2972     if (vec == NULL) {
2973         errno = ENOMEM;
2974         return NULL;
2975     }
2976 
2977     target_vec = lock_user(VERIFY_READ, target_addr,
2978                            count * sizeof(struct target_iovec), 1);
2979     if (target_vec == NULL) {
2980         err = EFAULT;
2981         goto fail2;
2982     }
2983 
2984     /* ??? If host page size > target page size, this will result in a
2985        value larger than what we can actually support.  */
2986     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2987     total_len = 0;
2988 
2989     for (i = 0; i < count; i++) {
2990         abi_ulong base = tswapal(target_vec[i].iov_base);
2991         abi_long len = tswapal(target_vec[i].iov_len);
2992 
2993         if (len < 0) {
2994             err = EINVAL;
2995             goto fail;
2996         } else if (len == 0) {
2997             /* Zero length pointer is ignored.  */
2998             vec[i].iov_base = 0;
2999         } else {
3000             vec[i].iov_base = lock_user(type, base, len, copy);
3001             /* If the first buffer pointer is bad, this is a fault.  But
3002              * subsequent bad buffers will result in a partial write; this
3003              * is realized by filling the vector with null pointers and
3004              * zero lengths. */
3005             if (!vec[i].iov_base) {
3006                 if (i == 0) {
3007                     err = EFAULT;
3008                     goto fail;
3009                 } else {
3010                     bad_address = true;
3011                 }
3012             }
3013             if (bad_address) {
3014                 len = 0;
3015             }
3016             if (len > max_len - total_len) {
3017                 len = max_len - total_len;
3018             }
3019         }
3020         vec[i].iov_len = len;
3021         total_len += len;
3022     }
3023 
3024     unlock_user(target_vec, target_addr, 0);
3025     return vec;
3026 
3027  fail:
3028     while (--i >= 0) {
3029         if (tswapal(target_vec[i].iov_len) > 0) {
3030             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3031         }
3032     }
3033     unlock_user(target_vec, target_addr, 0);
3034  fail2:
3035     g_free(vec);
3036     errno = err;
3037     return NULL;
3038 }
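/*
 * Illustrative example (not part of the build): for a guest
 *     writev(fd, iov, 3)
 * where iov[1].iov_base points at an unmapped page, lock_user() fails for
 * that element only; lock_iovec() then records a zero length for iov[1] and
 * every element after it, so the host writev() performs a short write
 * covering just iov[0], matching the partial-write semantics the guest would
 * see natively. A bad iov[0], by contrast, fails the whole call with EFAULT.
 */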
3039 
3040 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3041                          abi_ulong count, int copy)
3042 {
3043     struct target_iovec *target_vec;
3044     int i;
3045 
3046     target_vec = lock_user(VERIFY_READ, target_addr,
3047                            count * sizeof(struct target_iovec), 1);
3048     if (target_vec) {
3049         for (i = 0; i < count; i++) {
3050             abi_ulong base = tswapal(target_vec[i].iov_base);
3051             abi_long len = tswapal(target_vec[i].iov_len);
3052             if (len < 0) {
3053                 break;
3054             }
3055             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3056         }
3057         unlock_user(target_vec, target_addr, 0);
3058     }
3059 
3060     g_free(vec);
3061 }
3062 
3063 static inline int target_to_host_sock_type(int *type)
3064 {
3065     int host_type = 0;
3066     int target_type = *type;
3067 
3068     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3069     case TARGET_SOCK_DGRAM:
3070         host_type = SOCK_DGRAM;
3071         break;
3072     case TARGET_SOCK_STREAM:
3073         host_type = SOCK_STREAM;
3074         break;
3075     default:
3076         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3077         break;
3078     }
3079     if (target_type & TARGET_SOCK_CLOEXEC) {
3080 #if defined(SOCK_CLOEXEC)
3081         host_type |= SOCK_CLOEXEC;
3082 #else
3083         return -TARGET_EINVAL;
3084 #endif
3085     }
3086     if (target_type & TARGET_SOCK_NONBLOCK) {
3087 #if defined(SOCK_NONBLOCK)
3088         host_type |= SOCK_NONBLOCK;
3089 #elif !defined(O_NONBLOCK)
3090         return -TARGET_EINVAL;
3091 #endif
3092     }
3093     *type = host_type;
3094     return 0;
3095 }
3096 
3097 /* Try to emulate socket type flags after socket creation.  */
3098 static int sock_flags_fixup(int fd, int target_type)
3099 {
3100 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3101     if (target_type & TARGET_SOCK_NONBLOCK) {
3102         int flags = fcntl(fd, F_GETFL);
3103         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3104             close(fd);
3105             return -TARGET_EINVAL;
3106         }
3107     }
3108 #endif
3109     return fd;
3110 }
3111 
3112 /* do_socket() Must return target values and target errnos. */
3113 static abi_long do_socket(int domain, int type, int protocol)
3114 {
3115     int target_type = type;
3116     int ret;
3117 
3118     ret = target_to_host_sock_type(&type);
3119     if (ret) {
3120         return ret;
3121     }
3122 
3123     if (domain == PF_NETLINK && !(
3124 #ifdef CONFIG_RTNETLINK
3125          protocol == NETLINK_ROUTE ||
3126 #endif
3127          protocol == NETLINK_KOBJECT_UEVENT ||
3128          protocol == NETLINK_AUDIT)) {
3129         return -TARGET_EPROTONOSUPPORT;
3130     }
3131 
3132     if (domain == AF_PACKET ||
3133         (domain == AF_INET && type == SOCK_PACKET)) {
3134         protocol = tswap16(protocol);
3135     }
3136 
3137     ret = get_errno(socket(domain, type, protocol));
3138     if (ret >= 0) {
3139         ret = sock_flags_fixup(ret, target_type);
3140         if (type == SOCK_PACKET) {
3141             /* Handle an obsolete case:
3142              * if the socket type is SOCK_PACKET, bind by name.
3143              */
3144             fd_trans_register(ret, &target_packet_trans);
3145         } else if (domain == PF_NETLINK) {
3146             switch (protocol) {
3147 #ifdef CONFIG_RTNETLINK
3148             case NETLINK_ROUTE:
3149                 fd_trans_register(ret, &target_netlink_route_trans);
3150                 break;
3151 #endif
3152             case NETLINK_KOBJECT_UEVENT:
3153                 /* nothing to do: messages are strings */
3154                 break;
3155             case NETLINK_AUDIT:
3156                 fd_trans_register(ret, &target_netlink_audit_trans);
3157                 break;
3158             default:
3159                 g_assert_not_reached();
3160             }
3161         }
3162     }
3163     return ret;
3164 }
3165 
3166 /* do_bind() Must return target values and target errnos. */
3167 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3168                         socklen_t addrlen)
3169 {
3170     void *addr;
3171     abi_long ret;
3172 
3173     if ((int)addrlen < 0) {
3174         return -TARGET_EINVAL;
3175     }
3176 
3177     addr = alloca(addrlen+1);
3178 
3179     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3180     if (ret)
3181         return ret;
3182 
3183     return get_errno(bind(sockfd, addr, addrlen));
3184 }
3185 
3186 /* do_connect() Must return target values and target errnos. */
3187 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3188                            socklen_t addrlen)
3189 {
3190     void *addr;
3191     abi_long ret;
3192 
3193     if ((int)addrlen < 0) {
3194         return -TARGET_EINVAL;
3195     }
3196 
3197     addr = alloca(addrlen+1);
3198 
3199     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3200     if (ret)
3201         return ret;
3202 
3203     return get_errno(safe_connect(sockfd, addr, addrlen));
3204 }
3205 
3206 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3207 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3208                                       int flags, int send)
3209 {
3210     abi_long ret, len;
3211     struct msghdr msg;
3212     abi_ulong count;
3213     struct iovec *vec;
3214     abi_ulong target_vec;
3215 
3216     if (msgp->msg_name) {
3217         msg.msg_namelen = tswap32(msgp->msg_namelen);
3218         msg.msg_name = alloca(msg.msg_namelen+1);
3219         ret = target_to_host_sockaddr(fd, msg.msg_name,
3220                                       tswapal(msgp->msg_name),
3221                                       msg.msg_namelen);
3222         if (ret == -TARGET_EFAULT) {
3223             /* For connected sockets, msg_name and msg_namelen must
3224              * be ignored, so returning EFAULT immediately is wrong.
3225              * Instead, pass a bad msg_name to the host kernel, and
3226              * let it decide whether to return EFAULT or not.
3227              */
3228             msg.msg_name = (void *)-1;
3229         } else if (ret) {
3230             goto out2;
3231         }
3232     } else {
3233         msg.msg_name = NULL;
3234         msg.msg_namelen = 0;
3235     }
3236     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3237     msg.msg_control = alloca(msg.msg_controllen);
3238     memset(msg.msg_control, 0, msg.msg_controllen);
3239 
3240     msg.msg_flags = tswap32(msgp->msg_flags);
3241 
3242     count = tswapal(msgp->msg_iovlen);
3243     target_vec = tswapal(msgp->msg_iov);
3244 
3245     if (count > IOV_MAX) {
3246         /* sendmsg/recvmsg return a different errno for this condition than
3247          * readv/writev, so we must catch it here before lock_iovec() does.
3248          */
3249         ret = -TARGET_EMSGSIZE;
3250         goto out2;
3251     }
3252 
3253     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3254                      target_vec, count, send);
3255     if (vec == NULL) {
3256         ret = -host_to_target_errno(errno);
3257         goto out2;
3258     }
3259     msg.msg_iovlen = count;
3260     msg.msg_iov = vec;
3261 
3262     if (send) {
3263         if (fd_trans_target_to_host_data(fd)) {
3264             void *host_msg;
3265 
3266             host_msg = g_malloc(msg.msg_iov->iov_len);
3267             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3268             ret = fd_trans_target_to_host_data(fd)(host_msg,
3269                                                    msg.msg_iov->iov_len);
3270             if (ret >= 0) {
3271                 msg.msg_iov->iov_base = host_msg;
3272                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3273             }
3274             g_free(host_msg);
3275         } else {
3276             ret = target_to_host_cmsg(&msg, msgp);
3277             if (ret == 0) {
3278                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3279             }
3280         }
3281     } else {
3282         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3283         if (!is_error(ret)) {
3284             len = ret;
3285             if (fd_trans_host_to_target_data(fd)) {
3286                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3287                                                MIN(msg.msg_iov->iov_len, len));
3288             } else {
3289                 ret = host_to_target_cmsg(msgp, &msg);
3290             }
3291             if (!is_error(ret)) {
3292                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3293                 msgp->msg_flags = tswap32(msg.msg_flags);
3294                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3295                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3296                                     msg.msg_name, msg.msg_namelen);
3297                     if (ret) {
3298                         goto out;
3299                     }
3300                 }
3301 
3302                 ret = len;
3303             }
3304         }
3305     }
3306 
3307 out:
3308     unlock_iovec(vec, target_vec, count, !send);
3309 out2:
3310     return ret;
3311 }
3312 
3313 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3314                                int flags, int send)
3315 {
3316     abi_long ret;
3317     struct target_msghdr *msgp;
3318 
3319     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3320                           msgp,
3321                           target_msg,
3322                           send ? 1 : 0)) {
3323         return -TARGET_EFAULT;
3324     }
3325     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3326     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3327     return ret;
3328 }
3329 
3330 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3331  * so it might not have this *mmsg-specific flag either.
3332  */
3333 #ifndef MSG_WAITFORONE
3334 #define MSG_WAITFORONE 0x10000
3335 #endif
3336 
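/* do_sendrecvmmsg() Must return target values and target errnos. */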
3337 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3338                                 unsigned int vlen, unsigned int flags,
3339                                 int send)
3340 {
3341     struct target_mmsghdr *mmsgp;
3342     abi_long ret = 0;
3343     int i;
3344 
3345     if (vlen > UIO_MAXIOV) {
3346         vlen = UIO_MAXIOV;
3347     }
3348 
3349     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3350     if (!mmsgp) {
3351         return -TARGET_EFAULT;
3352     }
3353 
3354     for (i = 0; i < vlen; i++) {
3355         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3356         if (is_error(ret)) {
3357             break;
3358         }
3359         mmsgp[i].msg_len = tswap32(ret);
3360         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3361         if (flags & MSG_WAITFORONE) {
3362             flags |= MSG_DONTWAIT;
3363         }
3364     }
3365 
3366     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3367 
3368     /* Return the number of datagrams sent or received if we handled
3369      * any at all; otherwise return the error.
3370      */
3371     if (i) {
3372         return i;
3373     }
3374     return ret;
3375 }
3376 
3377 /* do_accept4() Must return target values and target errnos. */
3378 static abi_long do_accept4(int fd, abi_ulong target_addr,
3379                            abi_ulong target_addrlen_addr, int flags)
3380 {
3381     socklen_t addrlen, ret_addrlen;
3382     void *addr;
3383     abi_long ret;
3384     int host_flags;
3385 
3386     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3387 
3388     if (target_addr == 0) {
3389         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3390     }
3391 
3392     /* Linux returns EFAULT if the addrlen pointer is invalid */
3393     if (get_user_u32(addrlen, target_addrlen_addr))
3394         return -TARGET_EFAULT;
3395 
3396     if ((int)addrlen < 0) {
3397         return -TARGET_EINVAL;
3398     }
3399 
3400     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3401         return -TARGET_EFAULT;
3402     }
3403 
3404     addr = alloca(addrlen);
3405 
3406     ret_addrlen = addrlen;
3407     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3408     if (!is_error(ret)) {
3409         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3410         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3411             ret = -TARGET_EFAULT;
3412         }
3413     }
3414     return ret;
3415 }
3416 
3417 /* do_getpeername() Must return target values and target errnos. */
3418 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3419                                abi_ulong target_addrlen_addr)
3420 {
3421     socklen_t addrlen, ret_addrlen;
3422     void *addr;
3423     abi_long ret;
3424 
3425     if (get_user_u32(addrlen, target_addrlen_addr))
3426         return -TARGET_EFAULT;
3427 
3428     if ((int)addrlen < 0) {
3429         return -TARGET_EINVAL;
3430     }
3431 
3432     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3433         return -TARGET_EFAULT;
3434     }
3435 
3436     addr = alloca(addrlen);
3437 
3438     ret_addrlen = addrlen;
3439     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3440     if (!is_error(ret)) {
3441         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3442         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3443             ret = -TARGET_EFAULT;
3444         }
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_getsockname() Must return target values and target errnos. */
3450 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3451                                abi_ulong target_addrlen_addr)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456 
3457     if (get_user_u32(addrlen, target_addrlen_addr))
3458         return -TARGET_EFAULT;
3459 
3460     if ((int)addrlen < 0) {
3461         return -TARGET_EINVAL;
3462     }
3463 
3464     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3465         return -TARGET_EFAULT;
3466     }
3467 
3468     addr = alloca(addrlen);
3469 
3470     ret_addrlen = addrlen;
3471     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3472     if (!is_error(ret)) {
3473         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3474         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3475             ret = -TARGET_EFAULT;
3476         }
3477     }
3478     return ret;
3479 }
3480 
3481 /* do_socketpair() Must return target values and target errnos. */
3482 static abi_long do_socketpair(int domain, int type, int protocol,
3483                               abi_ulong target_tab_addr)
3484 {
3485     int tab[2];
3486     abi_long ret;
3487 
3488     target_to_host_sock_type(&type);
3489 
3490     ret = get_errno(socketpair(domain, type, protocol, tab));
3491     if (!is_error(ret)) {
3492         if (put_user_s32(tab[0], target_tab_addr)
3493             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3494             ret = -TARGET_EFAULT;
3495     }
3496     return ret;
3497 }
3498 
3499 /* do_sendto() Must return target values and target errnos. */
3500 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3501                           abi_ulong target_addr, socklen_t addrlen)
3502 {
3503     void *addr;
3504     void *host_msg;
3505     void *copy_msg = NULL;
3506     abi_long ret;
3507 
3508     if ((int)addrlen < 0) {
3509         return -TARGET_EINVAL;
3510     }
3511 
3512     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3513     if (!host_msg)
3514         return -TARGET_EFAULT;
3515     if (fd_trans_target_to_host_data(fd)) {
3516         copy_msg = host_msg;
3517         host_msg = g_malloc(len);
3518         memcpy(host_msg, copy_msg, len);
3519         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3520         if (ret < 0) {
3521             goto fail;
3522         }
3523     }
3524     if (target_addr) {
3525         addr = alloca(addrlen+1);
3526         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3527         if (ret) {
3528             goto fail;
3529         }
3530         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3531     } else {
3532         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3533     }
3534 fail:
3535     if (copy_msg) {
3536         g_free(host_msg);
3537         host_msg = copy_msg;
3538     }
3539     unlock_user(host_msg, msg, 0);
3540     return ret;
3541 }
3542 
3543 /* do_recvfrom() Must return target values and target errnos. */
3544 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3545                             abi_ulong target_addr,
3546                             abi_ulong target_addrlen)
3547 {
3548     socklen_t addrlen, ret_addrlen;
3549     void *addr;
3550     void *host_msg;
3551     abi_long ret;
3552 
3553     if (!msg) {
3554         host_msg = NULL;
3555     } else {
3556         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3557         if (!host_msg) {
3558             return -TARGET_EFAULT;
3559         }
3560     }
3561     if (target_addr) {
3562         if (get_user_u32(addrlen, target_addrlen)) {
3563             ret = -TARGET_EFAULT;
3564             goto fail;
3565         }
3566         if ((int)addrlen < 0) {
3567             ret = -TARGET_EINVAL;
3568             goto fail;
3569         }
3570         addr = alloca(addrlen);
3571         ret_addrlen = addrlen;
3572         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3573                                       addr, &ret_addrlen));
3574     } else {
3575         addr = NULL; /* To keep compiler quiet.  */
3576         addrlen = 0; /* To keep compiler quiet.  */
3577         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3578     }
3579     if (!is_error(ret)) {
3580         if (fd_trans_host_to_target_data(fd)) {
3581             abi_long trans;
3582             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3583             if (is_error(trans)) {
3584                 ret = trans;
3585                 goto fail;
3586             }
3587         }
3588         if (target_addr) {
3589             host_to_target_sockaddr(target_addr, addr,
3590                                     MIN(addrlen, ret_addrlen));
3591             if (put_user_u32(ret_addrlen, target_addrlen)) {
3592                 ret = -TARGET_EFAULT;
3593                 goto fail;
3594             }
3595         }
3596         unlock_user(host_msg, msg, len);
3597     } else {
3598 fail:
3599         unlock_user(host_msg, msg, 0);
3600     }
3601     return ret;
3602 }
3603 
3604 #ifdef TARGET_NR_socketcall
3605 /* do_socketcall() must return target values and target errnos. */
3606 static abi_long do_socketcall(int num, abi_ulong vptr)
3607 {
3608     static const unsigned nargs[] = { /* number of arguments per operation */
3609         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3610         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3611         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3612         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3613         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3614         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3615         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3616         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3617         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3618         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3619         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3620         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3621         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3622         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3623         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3624         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3625         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3626         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3627         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3628         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3629     };
3630     abi_long a[6]; /* max 6 args */
3631     unsigned i;
3632 
3633     /* check the range of the first argument num */
3634     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3635     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3636         return -TARGET_EINVAL;
3637     }
3638     /* ensure we have space for args */
3639     if (nargs[num] > ARRAY_SIZE(a)) {
3640         return -TARGET_EINVAL;
3641     }
3642     /* collect the arguments in a[] according to nargs[] */
3643     for (i = 0; i < nargs[num]; ++i) {
3644         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3645             return -TARGET_EFAULT;
3646         }
3647     }
3648     /* now when we have the args, invoke the appropriate underlying function */
3649     switch (num) {
3650     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3651         return do_socket(a[0], a[1], a[2]);
3652     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3653         return do_bind(a[0], a[1], a[2]);
3654     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3655         return do_connect(a[0], a[1], a[2]);
3656     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3657         return get_errno(listen(a[0], a[1]));
3658     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3659         return do_accept4(a[0], a[1], a[2], 0);
3660     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3661         return do_getsockname(a[0], a[1], a[2]);
3662     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3663         return do_getpeername(a[0], a[1], a[2]);
3664     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3665         return do_socketpair(a[0], a[1], a[2], a[3]);
3666     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3667         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3668     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3669         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3670     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3671         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3672     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3673         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3674     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3675         return get_errno(shutdown(a[0], a[1]));
3676     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3677         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3678     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3679         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3680     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3681         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3682     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3683         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3684     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3685         return do_accept4(a[0], a[1], a[2], a[3]);
3686     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3687         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3688     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3689         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3690     default:
3691         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3692         return -TARGET_EINVAL;
3693     }
3694 }
3695 #endif
3696 
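/* Track guest shmat() mappings so that do_shmdt() can find the segment
 * size again and clear the page flags for the detached range.
 */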
3697 #define N_SHM_REGIONS	32
3698 
3699 static struct shm_region {
3700     abi_ulong start;
3701     abi_ulong size;
3702     bool in_use;
3703 } shm_regions[N_SHM_REGIONS];
3704 
3705 #ifndef TARGET_SEMID64_DS
3706 /* asm-generic version of this struct */
3707 struct target_semid64_ds
3708 {
3709   struct target_ipc_perm sem_perm;
3710   abi_ulong sem_otime;
3711 #if TARGET_ABI_BITS == 32
3712   abi_ulong __unused1;
3713 #endif
3714   abi_ulong sem_ctime;
3715 #if TARGET_ABI_BITS == 32
3716   abi_ulong __unused2;
3717 #endif
3718   abi_ulong sem_nsems;
3719   abi_ulong __unused3;
3720   abi_ulong __unused4;
3721 };
3722 #endif
3723 
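/* Convert the ipc_perm embedded in a guest semid64_ds at target_addr
 * into the host struct ipc_perm, byteswapping each field.  The width of
 * the mode and __seq fields differs between target architectures.
 */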
3724 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3725                                                abi_ulong target_addr)
3726 {
3727     struct target_ipc_perm *target_ip;
3728     struct target_semid64_ds *target_sd;
3729 
3730     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3731         return -TARGET_EFAULT;
3732     target_ip = &(target_sd->sem_perm);
3733     host_ip->__key = tswap32(target_ip->__key);
3734     host_ip->uid = tswap32(target_ip->uid);
3735     host_ip->gid = tswap32(target_ip->gid);
3736     host_ip->cuid = tswap32(target_ip->cuid);
3737     host_ip->cgid = tswap32(target_ip->cgid);
3738 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3739     host_ip->mode = tswap32(target_ip->mode);
3740 #else
3741     host_ip->mode = tswap16(target_ip->mode);
3742 #endif
3743 #if defined(TARGET_PPC)
3744     host_ip->__seq = tswap32(target_ip->__seq);
3745 #else
3746     host_ip->__seq = tswap16(target_ip->__seq);
3747 #endif
3748     unlock_user_struct(target_sd, target_addr, 0);
3749     return 0;
3750 }
3751 
3752 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3753                                                struct ipc_perm *host_ip)
3754 {
3755     struct target_ipc_perm *target_ip;
3756     struct target_semid64_ds *target_sd;
3757 
3758     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3759         return -TARGET_EFAULT;
3760     target_ip = &(target_sd->sem_perm);
3761     target_ip->__key = tswap32(host_ip->__key);
3762     target_ip->uid = tswap32(host_ip->uid);
3763     target_ip->gid = tswap32(host_ip->gid);
3764     target_ip->cuid = tswap32(host_ip->cuid);
3765     target_ip->cgid = tswap32(host_ip->cgid);
3766 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3767     target_ip->mode = tswap32(host_ip->mode);
3768 #else
3769     target_ip->mode = tswap16(host_ip->mode);
3770 #endif
3771 #if defined(TARGET_PPC)
3772     target_ip->__seq = tswap32(host_ip->__seq);
3773 #else
3774     target_ip->__seq = tswap16(host_ip->__seq);
3775 #endif
3776     unlock_user_struct(target_sd, target_addr, 1);
3777     return 0;
3778 }
3779 
3780 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3781                                                abi_ulong target_addr)
3782 {
3783     struct target_semid64_ds *target_sd;
3784 
3785     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3786         return -TARGET_EFAULT;
3787     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3788         return -TARGET_EFAULT;
3789     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3790     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3791     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3792     unlock_user_struct(target_sd, target_addr, 0);
3793     return 0;
3794 }
3795 
3796 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3797                                                struct semid_ds *host_sd)
3798 {
3799     struct target_semid64_ds *target_sd;
3800 
3801     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3802         return -TARGET_EFAULT;
3803     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3804         return -TARGET_EFAULT;
3805     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3806     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3807     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3808     unlock_user_struct(target_sd, target_addr, 1);
3809     return 0;
3810 }
3811 
3812 struct target_seminfo {
3813     int semmap;
3814     int semmni;
3815     int semmns;
3816     int semmnu;
3817     int semmsl;
3818     int semopm;
3819     int semume;
3820     int semusz;
3821     int semvmx;
3822     int semaem;
3823 };
3824 
3825 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3826                                               struct seminfo *host_seminfo)
3827 {
3828     struct target_seminfo *target_seminfo;
3829     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3830         return -TARGET_EFAULT;
3831     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3832     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3833     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3834     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3835     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3836     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3837     __put_user(host_seminfo->semume, &target_seminfo->semume);
3838     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3839     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3840     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3841     unlock_user_struct(target_seminfo, target_addr, 1);
3842     return 0;
3843 }
3844 
3845 union semun {
3846 	int val;
3847 	struct semid_ds *buf;
3848 	unsigned short *array;
3849 	struct seminfo *__buf;
3850 };
3851 
3852 union target_semun {
3853 	int val;
3854 	abi_ulong buf;
3855 	abi_ulong array;
3856 	abi_ulong __buf;
3857 };
3858 
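/* Used for GETALL/SETALL: query the size of the semaphore set with
 * IPC_STAT, allocate a host array of that many values and fill it from
 * guest memory.
 */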
3859 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3860                                                abi_ulong target_addr)
3861 {
3862     int nsems;
3863     unsigned short *array;
3864     union semun semun;
3865     struct semid_ds semid_ds;
3866     int i, ret;
3867 
3868     semun.buf = &semid_ds;
3869 
3870     ret = semctl(semid, 0, IPC_STAT, semun);
3871     if (ret == -1)
3872         return get_errno(ret);
3873 
3874     nsems = semid_ds.sem_nsems;
3875 
3876     *host_array = g_try_new(unsigned short, nsems);
3877     if (!*host_array) {
3878         return -TARGET_ENOMEM;
3879     }
3880     array = lock_user(VERIFY_READ, target_addr,
3881                       nsems*sizeof(unsigned short), 1);
3882     if (!array) {
3883         g_free(*host_array);
3884         return -TARGET_EFAULT;
3885     }
3886 
3887     for(i=0; i<nsems; i++) {
3888         __get_user((*host_array)[i], &array[i]);
3889     }
3890     unlock_user(array, target_addr, 0);
3891 
3892     return 0;
3893 }
3894 
3895 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3896                                                unsigned short **host_array)
3897 {
3898     int nsems;
3899     unsigned short *array;
3900     union semun semun;
3901     struct semid_ds semid_ds;
3902     int i, ret;
3903 
3904     semun.buf = &semid_ds;
3905 
3906     ret = semctl(semid, 0, IPC_STAT, semun);
3907     if (ret == -1)
3908         return get_errno(ret);
3909 
3910     nsems = semid_ds.sem_nsems;
3911 
3912     array = lock_user(VERIFY_WRITE, target_addr,
3913                       nsems*sizeof(unsigned short), 0);
3914     if (!array)
3915         return -TARGET_EFAULT;
3916 
3917     for(i=0; i<nsems; i++) {
3918         __put_user((*host_array)[i], &array[i]);
3919     }
3920     g_free(*host_array);
3921     unlock_user(array, target_addr, 1);
3922 
3923     return 0;
3924 }
3925 
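/* do_semctl() Must return target values and target errnos. */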
3926 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3927                                  abi_ulong target_arg)
3928 {
3929     union target_semun target_su = { .buf = target_arg };
3930     union semun arg;
3931     struct semid_ds dsarg;
3932     unsigned short *array = NULL;
3933     struct seminfo seminfo;
3934     abi_long ret = -TARGET_EINVAL;
3935     abi_long err;
3936     cmd &= 0xff;
3937 
3938     switch( cmd ) {
3939 	case GETVAL:
3940 	case SETVAL:
3941             /* In 64 bit cross-endian situations, we will erroneously pick up
3942              * the wrong half of the union for the "val" element.  To rectify
3943              * this, the entire 8-byte structure is byteswapped, followed by
3944 	     * a swap of the 4 byte val field. In other cases, the data is
3945 	     * already in proper host byte order. */
3946 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3947 		target_su.buf = tswapal(target_su.buf);
3948 		arg.val = tswap32(target_su.val);
3949 	    } else {
3950 		arg.val = target_su.val;
3951 	    }
3952             ret = get_errno(semctl(semid, semnum, cmd, arg));
3953             break;
3954 	case GETALL:
3955 	case SETALL:
3956             err = target_to_host_semarray(semid, &array, target_su.array);
3957             if (err)
3958                 return err;
3959             arg.array = array;
3960             ret = get_errno(semctl(semid, semnum, cmd, arg));
3961             err = host_to_target_semarray(semid, target_su.array, &array);
3962             if (err)
3963                 return err;
3964             break;
3965 	case IPC_STAT:
3966 	case IPC_SET:
3967 	case SEM_STAT:
3968             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3969             if (err)
3970                 return err;
3971             arg.buf = &dsarg;
3972             ret = get_errno(semctl(semid, semnum, cmd, arg));
3973             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3974             if (err)
3975                 return err;
3976             break;
3977 	case IPC_INFO:
3978 	case SEM_INFO:
3979             arg.__buf = &seminfo;
3980             ret = get_errno(semctl(semid, semnum, cmd, arg));
3981             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3982             if (err)
3983                 return err;
3984             break;
3985 	case IPC_RMID:
3986 	case GETPID:
3987 	case GETNCNT:
3988 	case GETZCNT:
3989             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3990             break;
3991     }
3992 
3993     return ret;
3994 }
3995 
3996 struct target_sembuf {
3997     unsigned short sem_num;
3998     short sem_op;
3999     short sem_flg;
4000 };
4001 
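/* Copy an array of nsops semaphore operations from guest memory into a
 * caller-provided host sembuf array.
 */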
4002 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4003                                              abi_ulong target_addr,
4004                                              unsigned nsops)
4005 {
4006     struct target_sembuf *target_sembuf;
4007     int i;
4008 
4009     target_sembuf = lock_user(VERIFY_READ, target_addr,
4010                               nsops*sizeof(struct target_sembuf), 1);
4011     if (!target_sembuf)
4012         return -TARGET_EFAULT;
4013 
4014     for(i=0; i<nsops; i++) {
4015         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4016         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4017         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4018     }
4019 
4020     unlock_user(target_sembuf, target_addr, 0);
4021 
4022     return 0;
4023 }
4024 
4025 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4026     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4027 
4028 /*
4029  * This macro is required to handle the s390 variant, which passes the
4030  * arguments in a different order than the default.
4031  */
4032 #ifdef __s390x__
4033 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4034   (__nsops), (__timeout), (__sops)
4035 #else
4036 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4037   (__nsops), 0, (__sops), (__timeout)
4038 #endif
4039 
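/* Common implementation of semop and semtimedop: convert the optional
 * timeout (32-bit or 64-bit layout) and the operation array, then prefer
 * the native semtimedop syscall with a sys_ipc fallback.
 */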
4040 static inline abi_long do_semtimedop(int semid,
4041                                      abi_long ptr,
4042                                      unsigned nsops,
4043                                      abi_long timeout, bool time64)
4044 {
4045     struct sembuf *sops;
4046     struct timespec ts, *pts = NULL;
4047     abi_long ret;
4048 
4049     if (timeout) {
4050         pts = &ts;
4051         if (time64) {
4052             if (target_to_host_timespec64(pts, timeout)) {
4053                 return -TARGET_EFAULT;
4054             }
4055         } else {
4056             if (target_to_host_timespec(pts, timeout)) {
4057                 return -TARGET_EFAULT;
4058             }
4059         }
4060     }
4061 
4062     if (nsops > TARGET_SEMOPM) {
4063         return -TARGET_E2BIG;
4064     }
4065 
4066     sops = g_new(struct sembuf, nsops);
4067 
4068     if (target_to_host_sembuf(sops, ptr, nsops)) {
4069         g_free(sops);
4070         return -TARGET_EFAULT;
4071     }
4072 
4073     ret = -TARGET_ENOSYS;
4074 #ifdef __NR_semtimedop
4075     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4076 #endif
4077 #ifdef __NR_ipc
4078     if (ret == -TARGET_ENOSYS) {
4079         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4080                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4081     }
4082 #endif
4083     g_free(sops);
4084     return ret;
4085 }
4086 #endif
4087 
4088 struct target_msqid_ds
4089 {
4090     struct target_ipc_perm msg_perm;
4091     abi_ulong msg_stime;
4092 #if TARGET_ABI_BITS == 32
4093     abi_ulong __unused1;
4094 #endif
4095     abi_ulong msg_rtime;
4096 #if TARGET_ABI_BITS == 32
4097     abi_ulong __unused2;
4098 #endif
4099     abi_ulong msg_ctime;
4100 #if TARGET_ABI_BITS == 32
4101     abi_ulong __unused3;
4102 #endif
4103     abi_ulong __msg_cbytes;
4104     abi_ulong msg_qnum;
4105     abi_ulong msg_qbytes;
4106     abi_ulong msg_lspid;
4107     abi_ulong msg_lrpid;
4108     abi_ulong __unused4;
4109     abi_ulong __unused5;
4110 };
4111 
4112 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4113                                                abi_ulong target_addr)
4114 {
4115     struct target_msqid_ds *target_md;
4116 
4117     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4118         return -TARGET_EFAULT;
4119     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4120         return -TARGET_EFAULT;
4121     host_md->msg_stime = tswapal(target_md->msg_stime);
4122     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4123     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4124     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4125     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4126     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4127     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4128     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4129     unlock_user_struct(target_md, target_addr, 0);
4130     return 0;
4131 }
4132 
4133 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4134                                                struct msqid_ds *host_md)
4135 {
4136     struct target_msqid_ds *target_md;
4137 
4138     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4139         return -TARGET_EFAULT;
4140     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4141         return -TARGET_EFAULT;
4142     target_md->msg_stime = tswapal(host_md->msg_stime);
4143     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4144     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4145     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4146     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4147     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4148     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4149     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4150     unlock_user_struct(target_md, target_addr, 1);
4151     return 0;
4152 }
4153 
4154 struct target_msginfo {
4155     int msgpool;
4156     int msgmap;
4157     int msgmax;
4158     int msgmnb;
4159     int msgmni;
4160     int msgssz;
4161     int msgtql;
4162     unsigned short int msgseg;
4163 };
4164 
4165 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4166                                               struct msginfo *host_msginfo)
4167 {
4168     struct target_msginfo *target_msginfo;
4169     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4170         return -TARGET_EFAULT;
4171     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4172     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4173     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4174     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4175     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4176     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4177     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4178     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4179     unlock_user_struct(target_msginfo, target_addr, 1);
4180     return 0;
4181 }
4182 
4183 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4184 {
4185     struct msqid_ds dsarg;
4186     struct msginfo msginfo;
4187     abi_long ret = -TARGET_EINVAL;
4188 
4189     cmd &= 0xff;
4190 
4191     switch (cmd) {
4192     case IPC_STAT:
4193     case IPC_SET:
4194     case MSG_STAT:
4195         if (target_to_host_msqid_ds(&dsarg,ptr))
4196             return -TARGET_EFAULT;
4197         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4198         if (host_to_target_msqid_ds(ptr,&dsarg))
4199             return -TARGET_EFAULT;
4200         break;
4201     case IPC_RMID:
4202         ret = get_errno(msgctl(msgid, cmd, NULL));
4203         break;
4204     case IPC_INFO:
4205     case MSG_INFO:
4206         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4207         if (host_to_target_msginfo(ptr, &msginfo))
4208             return -TARGET_EFAULT;
4209         break;
4210     }
4211 
4212     return ret;
4213 }
4214 
4215 struct target_msgbuf {
4216     abi_long mtype;
4217     char	mtext[1];
4218 };
4219 
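/* do_msgsnd() Must return target values and target errnos. */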
4220 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4221                                  ssize_t msgsz, int msgflg)
4222 {
4223     struct target_msgbuf *target_mb;
4224     struct msgbuf *host_mb;
4225     abi_long ret = 0;
4226 
4227     if (msgsz < 0) {
4228         return -TARGET_EINVAL;
4229     }
4230 
4231     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4232         return -TARGET_EFAULT;
4233     host_mb = g_try_malloc(msgsz + sizeof(long));
4234     if (!host_mb) {
4235         unlock_user_struct(target_mb, msgp, 0);
4236         return -TARGET_ENOMEM;
4237     }
4238     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4239     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4240     ret = -TARGET_ENOSYS;
4241 #ifdef __NR_msgsnd
4242     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4243 #endif
4244 #ifdef __NR_ipc
4245     if (ret == -TARGET_ENOSYS) {
4246 #ifdef __s390x__
4247         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4248                                  host_mb));
4249 #else
4250         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4251                                  host_mb, 0));
4252 #endif
4253     }
4254 #endif
4255     g_free(host_mb);
4256     unlock_user_struct(target_mb, msgp, 0);
4257 
4258     return ret;
4259 }
4260 
4261 #ifdef __NR_ipc
4262 #if defined(__sparc__)
4263 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4264 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4265 #elif defined(__s390x__)
4266 /* The s390 sys_ipc variant has only five parameters.  */
4267 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4268     ((long int[]){(long int)__msgp, __msgtyp})
4269 #else
4270 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4271     ((long int[]){(long int)__msgp, __msgtyp}), 0
4272 #endif
4273 #endif
4274 
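/* do_msgrcv() Must return target values and target errnos. */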
4275 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4276                                  ssize_t msgsz, abi_long msgtyp,
4277                                  int msgflg)
4278 {
4279     struct target_msgbuf *target_mb;
4280     char *target_mtext;
4281     struct msgbuf *host_mb;
4282     abi_long ret = 0;
4283 
4284     if (msgsz < 0) {
4285         return -TARGET_EINVAL;
4286     }
4287 
4288     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4289         return -TARGET_EFAULT;
4290 
4291     host_mb = g_try_malloc(msgsz + sizeof(long));
4292     if (!host_mb) {
4293         ret = -TARGET_ENOMEM;
4294         goto end;
4295     }
4296     ret = -TARGET_ENOSYS;
4297 #ifdef __NR_msgrcv
4298     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4299 #endif
4300 #ifdef __NR_ipc
4301     if (ret == -TARGET_ENOSYS) {
4302         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4303                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4304     }
4305 #endif
4306 
4307     if (ret > 0) {
4308         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4309         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4310         if (!target_mtext) {
4311             ret = -TARGET_EFAULT;
4312             goto end;
4313         }
4314         memcpy(target_mb->mtext, host_mb->mtext, ret);
4315         unlock_user(target_mtext, target_mtext_addr, ret);
4316     }
4317 
4318     target_mb->mtype = tswapal(host_mb->mtype);
4319 
4320 end:
4321     if (target_mb)
4322         unlock_user_struct(target_mb, msgp, 1);
4323     g_free(host_mb);
4324     return ret;
4325 }
4326 
4327 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4328                                                abi_ulong target_addr)
4329 {
4330     struct target_shmid_ds *target_sd;
4331 
4332     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4333         return -TARGET_EFAULT;
4334     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4335         return -TARGET_EFAULT;
4336     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4337     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4338     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4339     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4340     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4341     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4342     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4343     unlock_user_struct(target_sd, target_addr, 0);
4344     return 0;
4345 }
4346 
4347 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4348                                                struct shmid_ds *host_sd)
4349 {
4350     struct target_shmid_ds *target_sd;
4351 
4352     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4353         return -TARGET_EFAULT;
4354     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4355         return -TARGET_EFAULT;
4356     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4357     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4358     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4359     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4360     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4361     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4362     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4363     unlock_user_struct(target_sd, target_addr, 1);
4364     return 0;
4365 }
4366 
4367 struct  target_shminfo {
4368     abi_ulong shmmax;
4369     abi_ulong shmmin;
4370     abi_ulong shmmni;
4371     abi_ulong shmseg;
4372     abi_ulong shmall;
4373 };
4374 
4375 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4376                                               struct shminfo *host_shminfo)
4377 {
4378     struct target_shminfo *target_shminfo;
4379     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4380         return -TARGET_EFAULT;
4381     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4382     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4383     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4384     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4385     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4386     unlock_user_struct(target_shminfo, target_addr, 1);
4387     return 0;
4388 }
4389 
4390 struct target_shm_info {
4391     int used_ids;
4392     abi_ulong shm_tot;
4393     abi_ulong shm_rss;
4394     abi_ulong shm_swp;
4395     abi_ulong swap_attempts;
4396     abi_ulong swap_successes;
4397 };
4398 
4399 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4400                                                struct shm_info *host_shm_info)
4401 {
4402     struct target_shm_info *target_shm_info;
4403     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4404         return -TARGET_EFAULT;
4405     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4406     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4407     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4408     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4409     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4410     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4411     unlock_user_struct(target_shm_info, target_addr, 1);
4412     return 0;
4413 }
4414 
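/* do_shmctl() Must return target values and target errnos. */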
4415 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4416 {
4417     struct shmid_ds dsarg;
4418     struct shminfo shminfo;
4419     struct shm_info shm_info;
4420     abi_long ret = -TARGET_EINVAL;
4421 
4422     cmd &= 0xff;
4423 
4424     switch(cmd) {
4425     case IPC_STAT:
4426     case IPC_SET:
4427     case SHM_STAT:
4428         if (target_to_host_shmid_ds(&dsarg, buf))
4429             return -TARGET_EFAULT;
4430         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4431         if (host_to_target_shmid_ds(buf, &dsarg))
4432             return -TARGET_EFAULT;
4433         break;
4434     case IPC_INFO:
4435         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4436         if (host_to_target_shminfo(buf, &shminfo))
4437             return -TARGET_EFAULT;
4438         break;
4439     case SHM_INFO:
4440         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4441         if (host_to_target_shm_info(buf, &shm_info))
4442             return -TARGET_EFAULT;
4443         break;
4444     case IPC_RMID:
4445     case SHM_LOCK:
4446     case SHM_UNLOCK:
4447         ret = get_errno(shmctl(shmid, cmd, NULL));
4448         break;
4449     }
4450 
4451     return ret;
4452 }
4453 
4454 #ifndef TARGET_FORCE_SHMLBA
4455 /* For most architectures, SHMLBA is the same as the page size;
4456  * some architectures have larger values, in which case they should
4457  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4458  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4459  * and defining its own value for SHMLBA.
4460  *
4461  * The kernel also permits SHMLBA to be set by the architecture to a
4462  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4463  * this means that addresses are rounded to the large size if
4464  * SHM_RND is set but addresses not aligned to that size are not rejected
4465  * as long as they are at least page-aligned. Since the only architecture
4466  * which uses this is ia64, this code doesn't provide for that oddity.
4467  */
4468 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4469 {
4470     return TARGET_PAGE_SIZE;
4471 }
4472 #endif
4473 
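/* Attach a SysV shared memory segment into the guest address space.
 * The address is rounded down to the target SHMLBA when SHM_RND is set;
 * if no address was given, a free guest range honouring both host and
 * target SHMLBA is picked.  The resulting mapping is recorded in
 * shm_regions[] so that do_shmdt() can undo it later.
 */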
4474 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4475                                  int shmid, abi_ulong shmaddr, int shmflg)
4476 {
4477     CPUState *cpu = env_cpu(cpu_env);
4478     abi_long raddr;
4479     void *host_raddr;
4480     struct shmid_ds shm_info;
4481     int i,ret;
4482     abi_ulong shmlba;
4483 
4484     /* shmat pointers are always untagged */
4485 
4486     /* find out the length of the shared memory segment */
4487     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4488     if (is_error(ret)) {
4489         /* can't get length, bail out */
4490         return ret;
4491     }
4492 
4493     shmlba = target_shmlba(cpu_env);
4494 
4495     if (shmaddr & (shmlba - 1)) {
4496         if (shmflg & SHM_RND) {
4497             shmaddr &= ~(shmlba - 1);
4498         } else {
4499             return -TARGET_EINVAL;
4500         }
4501     }
4502     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4503         return -TARGET_EINVAL;
4504     }
4505 
4506     mmap_lock();
4507 
4508     /*
4509      * We're mapping shared memory, so ensure we generate code for parallel
4510      * execution and flush old translations.  This will work up to the level
4511      * supported by the host -- anything that requires EXCP_ATOMIC will not
4512      * be atomic with respect to an external process.
4513      */
4514     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4515         cpu->tcg_cflags |= CF_PARALLEL;
4516         tb_flush(cpu);
4517     }
4518 
4519     if (shmaddr)
4520         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4521     else {
4522         abi_ulong mmap_start;
4523 
4524         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4525         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4526 
4527         if (mmap_start == -1) {
4528             errno = ENOMEM;
4529             host_raddr = (void *)-1;
4530         } else
4531             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4532                                shmflg | SHM_REMAP);
4533     }
4534 
4535     if (host_raddr == (void *)-1) {
4536         mmap_unlock();
4537         return get_errno((long)host_raddr);
4538     }
4539     raddr=h2g((unsigned long)host_raddr);
4540 
4541     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4542                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4543                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4544 
4545     for (i = 0; i < N_SHM_REGIONS; i++) {
4546         if (!shm_regions[i].in_use) {
4547             shm_regions[i].in_use = true;
4548             shm_regions[i].start = raddr;
4549             shm_regions[i].size = shm_info.shm_segsz;
4550             break;
4551         }
4552     }
4553 
4554     mmap_unlock();
4555     return raddr;
4556 
4557 }
4558 
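/* Detach a segment previously mapped with do_shmat() and clear the page
 * flags that were set for it.
 */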
4559 static inline abi_long do_shmdt(abi_ulong shmaddr)
4560 {
4561     int i;
4562     abi_long rv;
4563 
4564     /* shmdt pointers are always untagged */
4565 
4566     mmap_lock();
4567 
4568     for (i = 0; i < N_SHM_REGIONS; ++i) {
4569         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4570             shm_regions[i].in_use = false;
4571             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4572             break;
4573         }
4574     }
4575     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4576 
4577     mmap_unlock();
4578 
4579     return rv;
4580 }
4581 
4582 #ifdef TARGET_NR_ipc
4583 /* ??? This only works with linear mappings.  */
4584 /* do_ipc() must return target values and target errnos. */
4585 static abi_long do_ipc(CPUArchState *cpu_env,
4586                        unsigned int call, abi_long first,
4587                        abi_long second, abi_long third,
4588                        abi_long ptr, abi_long fifth)
4589 {
4590     int version;
4591     abi_long ret = 0;
4592 
4593     version = call >> 16;
4594     call &= 0xffff;
4595 
4596     switch (call) {
4597     case IPCOP_semop:
4598         ret = do_semtimedop(first, ptr, second, 0, false);
4599         break;
4600     case IPCOP_semtimedop:
4601     /*
4602      * The s390 sys_ipc variant has only five parameters instead of six
4603      * (as in the default variant); the only difference is the handling of
4604      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4605      * to a struct timespec while the generic variant uses the fifth parameter.
4606      */
4607 #if defined(TARGET_S390X)
4608         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4609 #else
4610         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4611 #endif
4612         break;
4613 
4614     case IPCOP_semget:
4615         ret = get_errno(semget(first, second, third));
4616         break;
4617 
4618     case IPCOP_semctl: {
4619         /* The semun argument to semctl is passed by value, so dereference the
4620          * ptr argument. */
4621         abi_ulong atptr;
4622         get_user_ual(atptr, ptr);
4623         ret = do_semctl(first, second, third, atptr);
4624         break;
4625     }
4626 
4627     case IPCOP_msgget:
4628         ret = get_errno(msgget(first, second));
4629         break;
4630 
4631     case IPCOP_msgsnd:
4632         ret = do_msgsnd(first, ptr, second, third);
4633         break;
4634 
4635     case IPCOP_msgctl:
4636         ret = do_msgctl(first, second, ptr);
4637         break;
4638 
4639     case IPCOP_msgrcv:
4640         switch (version) {
4641         case 0:
4642             {
4643                 struct target_ipc_kludge {
4644                     abi_long msgp;
4645                     abi_long msgtyp;
4646                 } *tmp;
4647 
4648                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4649                     ret = -TARGET_EFAULT;
4650                     break;
4651                 }
4652 
4653                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4654 
4655                 unlock_user_struct(tmp, ptr, 0);
4656                 break;
4657             }
4658         default:
4659             ret = do_msgrcv(first, ptr, second, fifth, third);
4660         }
4661         break;
4662 
4663     case IPCOP_shmat:
4664         switch (version) {
4665         default:
4666         {
4667             abi_ulong raddr;
4668             raddr = do_shmat(cpu_env, first, ptr, second);
4669             if (is_error(raddr))
4670                 return get_errno(raddr);
4671             if (put_user_ual(raddr, third))
4672                 return -TARGET_EFAULT;
4673             break;
4674         }
4675         case 1:
4676             ret = -TARGET_EINVAL;
4677             break;
4678         }
4679         break;
4680     case IPCOP_shmdt:
4681         ret = do_shmdt(ptr);
4682         break;
4683 
4684     case IPCOP_shmget:
4685         /* IPC_* flag values are the same on all Linux platforms */
4686         ret = get_errno(shmget(first, second, third));
4687         break;
4688 
4689         /* IPC_* and SHM_* command values are the same on all Linux platforms */
4690     case IPCOP_shmctl:
4691         ret = do_shmctl(first, second, ptr);
4692         break;
4693     default:
4694         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4695                       call, version);
4696         ret = -TARGET_ENOSYS;
4697         break;
4698     }
4699     return ret;
4700 }
4701 #endif
4702 
4703 /* kernel structure types definitions */
4704 
4705 #define STRUCT(name, ...) STRUCT_ ## name,
4706 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4707 enum {
4708 #include "syscall_types.h"
4709 STRUCT_MAX
4710 };
4711 #undef STRUCT
4712 #undef STRUCT_SPECIAL
4713 
4714 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4715 #define STRUCT_SPECIAL(name)
4716 #include "syscall_types.h"
4717 #undef STRUCT
4718 #undef STRUCT_SPECIAL
4719 
4720 #define MAX_STRUCT_SIZE 4096
4721 
4722 #ifdef CONFIG_FIEMAP
4723 /* So fiemap access checks don't overflow on 32 bit systems.
4724  * This is very slightly smaller than the limit imposed by
4725  * the underlying kernel.
4726  */
4727 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4728                             / sizeof(struct fiemap_extent))
4729 
4730 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4731                                        int fd, int cmd, abi_long arg)
4732 {
4733     /* The parameter for this ioctl is a struct fiemap followed
4734      * by an array of struct fiemap_extent whose size is set
4735      * in fiemap->fm_extent_count. The array is filled in by the
4736      * ioctl.
4737      */
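    /*
     * Guest buffer layout:
     *   struct fiemap                          (fm_extent_count set by caller)
     *   struct fiemap_extent[fm_extent_count]  (filled in by the kernel;
     *                                           fm_mapped_extents says how many)
     */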
4738     int target_size_in, target_size_out;
4739     struct fiemap *fm;
4740     const argtype *arg_type = ie->arg_type;
4741     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4742     void *argptr, *p;
4743     abi_long ret;
4744     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4745     uint32_t outbufsz;
4746     int free_fm = 0;
4747 
4748     assert(arg_type[0] == TYPE_PTR);
4749     assert(ie->access == IOC_RW);
4750     arg_type++;
4751     target_size_in = thunk_type_size(arg_type, 0);
4752     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4753     if (!argptr) {
4754         return -TARGET_EFAULT;
4755     }
4756     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4757     unlock_user(argptr, arg, 0);
4758     fm = (struct fiemap *)buf_temp;
4759     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4760         return -TARGET_EINVAL;
4761     }
4762 
4763     outbufsz = sizeof(*fm) +
4764         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4765 
4766     if (outbufsz > MAX_STRUCT_SIZE) {
4767         /* We can't fit all the extents into the fixed size buffer.
4768          * Allocate one that is large enough and use it instead.
4769          */
4770         fm = g_try_malloc(outbufsz);
4771         if (!fm) {
4772             return -TARGET_ENOMEM;
4773         }
4774         memcpy(fm, buf_temp, sizeof(struct fiemap));
4775         free_fm = 1;
4776     }
4777     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4778     if (!is_error(ret)) {
4779         target_size_out = target_size_in;
4780         /* An extent_count of 0 means we were only counting the extents
4781          * so there are no structs to copy
4782          */
4783         if (fm->fm_extent_count != 0) {
4784             target_size_out += fm->fm_mapped_extents * extent_size;
4785         }
4786         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4787         if (!argptr) {
4788             ret = -TARGET_EFAULT;
4789         } else {
4790             /* Convert the struct fiemap */
4791             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4792             if (fm->fm_extent_count != 0) {
4793                 p = argptr + target_size_in;
4794                 /* ...and then all the struct fiemap_extents */
4795                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4796                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4797                                   THUNK_TARGET);
4798                     p += extent_size;
4799                 }
4800             }
4801             unlock_user(argptr, arg, target_size_out);
4802         }
4803     }
4804     if (free_fm) {
4805         g_free(fm);
4806     }
4807     return ret;
4808 }
4809 #endif
4810 
4811 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4812                                 int fd, int cmd, abi_long arg)
4813 {
4814     const argtype *arg_type = ie->arg_type;
4815     int target_size;
4816     void *argptr;
4817     int ret;
4818     struct ifconf *host_ifconf;
4819     uint32_t outbufsz;
4820     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4821     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4822     int target_ifreq_size;
4823     int nb_ifreq;
4824     int free_buf = 0;
4825     int i;
4826     int target_ifc_len;
4827     abi_long target_ifc_buf;
4828     int host_ifc_len;
4829     char *host_ifc_buf;
4830 
4831     assert(arg_type[0] == TYPE_PTR);
4832     assert(ie->access == IOC_RW);
4833 
4834     arg_type++;
4835     target_size = thunk_type_size(arg_type, 0);
4836 
4837     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4838     if (!argptr)
4839         return -TARGET_EFAULT;
4840     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4841     unlock_user(argptr, arg, 0);
4842 
4843     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4844     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4845     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4846 
4847     if (target_ifc_buf != 0) {
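        /*
         * ifc_len is expressed by the guest in target-sized ifreq records;
         * derive the record count from it and size the host buffer using the
         * host struct ifreq, since the two layouts may differ.
         */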
4848         target_ifc_len = host_ifconf->ifc_len;
4849         nb_ifreq = target_ifc_len / target_ifreq_size;
4850         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4851 
4852         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4853         if (outbufsz > MAX_STRUCT_SIZE) {
4854             /*
4855              * We can't fit all the ifreq entries into the fixed size buffer.
4856              * Allocate one that is large enough and use it instead.
4857              */
4858             host_ifconf = g_try_malloc(outbufsz);
4859             if (!host_ifconf) {
4860                 return -TARGET_ENOMEM;
4861             }
4862             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4863             free_buf = 1;
4864         }
4865         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4866 
4867         host_ifconf->ifc_len = host_ifc_len;
4868     } else {
4869         host_ifc_buf = NULL;
4870     }
4871     host_ifconf->ifc_buf = host_ifc_buf;
4872 
4873     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4874     if (!is_error(ret)) {
4875         /* convert host ifc_len to target ifc_len */
4876 
4877         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4878         target_ifc_len = nb_ifreq * target_ifreq_size;
4879         host_ifconf->ifc_len = target_ifc_len;
4880 
4881         /* restore target ifc_buf */
4882 
4883         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4884 
4885         /* copy struct ifconf to target user */
4886 
4887         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4888         if (!argptr)
4889             return -TARGET_EFAULT;
4890         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4891         unlock_user(argptr, arg, target_size);
4892 
4893         if (target_ifc_buf != 0) {
4894             /* copy ifreq[] to target user */
4895             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4896             for (i = 0; i < nb_ifreq ; i++) {
4897                 thunk_convert(argptr + i * target_ifreq_size,
4898                               host_ifc_buf + i * sizeof(struct ifreq),
4899                               ifreq_arg_type, THUNK_TARGET);
4900             }
4901             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4902         }
4903     }
4904 
4905     if (free_buf) {
4906         g_free(host_ifconf);
4907     }
4908 
4909     return ret;
4910 }
4911 
4912 #if defined(CONFIG_USBFS)
4913 #if HOST_LONG_BITS > 64
4914 #error USBDEVFS thunks do not support >64 bit hosts yet.
4915 #endif
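/*
 * Book-keeping for one in-flight URB:
 *   target_urb_adr - guest address of the guest's usbdevfs_urb
 *   target_buf_adr - guest address of the URB data buffer
 *   target_buf_ptr - host pointer to the locked guest data buffer
 *   host_urb       - host-format copy actually handed to the kernel
 * target_urb_adr is the first field so that a struct live_urb * can be used
 * directly as the 64-bit key in the URB hash table below.
 */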
4916 struct live_urb {
4917     uint64_t target_urb_adr;
4918     uint64_t target_buf_adr;
4919     char *target_buf_ptr;
4920     struct usbdevfs_urb host_urb;
4921 };
4922 
4923 static GHashTable *usbdevfs_urb_hashtable(void)
4924 {
4925     static GHashTable *urb_hashtable;
4926 
4927     if (!urb_hashtable) {
4928         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4929     }
4930     return urb_hashtable;
4931 }
4932 
4933 static void urb_hashtable_insert(struct live_urb *urb)
4934 {
4935     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4936     g_hash_table_insert(urb_hashtable, urb, urb);
4937 }
4938 
4939 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4940 {
4941     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4942     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4943 }
4944 
4945 static void urb_hashtable_remove(struct live_urb *urb)
4946 {
4947     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4948     g_hash_table_remove(urb_hashtable, urb);
4949 }
4950 
4951 static abi_long
4952 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4953                           int fd, int cmd, abi_long arg)
4954 {
4955     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4956     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4957     struct live_urb *lurb;
4958     void *argptr;
4959     uint64_t hurb;
4960     int target_size;
4961     uintptr_t target_urb_adr;
4962     abi_long ret;
4963 
4964     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4965 
4966     memset(buf_temp, 0, sizeof(uint64_t));
4967     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4968     if (is_error(ret)) {
4969         return ret;
4970     }
4971 
4972     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4973     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4974     if (!lurb->target_urb_adr) {
4975         return -TARGET_EFAULT;
4976     }
4977     urb_hashtable_remove(lurb);
4978     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4979         lurb->host_urb.buffer_length);
4980     lurb->target_buf_ptr = NULL;
4981 
4982     /* restore the guest buffer pointer */
4983     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4984 
4985     /* update the guest urb struct */
4986     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4987     if (!argptr) {
4988         g_free(lurb);
4989         return -TARGET_EFAULT;
4990     }
4991     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4992     unlock_user(argptr, lurb->target_urb_adr, target_size);
4993 
4994     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4995     /* write back the urb handle */
4996     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4997     if (!argptr) {
4998         g_free(lurb);
4999         return -TARGET_EFAULT;
5000     }
5001 
5002     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5003     target_urb_adr = lurb->target_urb_adr;
5004     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5005     unlock_user(argptr, arg, target_size);
5006 
5007     g_free(lurb);
5008     return ret;
5009 }
5010 
5011 static abi_long
5012 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5013                              uint8_t *buf_temp __attribute__((unused)),
5014                              int fd, int cmd, abi_long arg)
5015 {
5016     struct live_urb *lurb;
5017 
5018     /* map target address back to host URB with metadata. */
5019     lurb = urb_hashtable_lookup(arg);
5020     if (!lurb) {
5021         return -TARGET_EFAULT;
5022     }
5023     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5024 }
5025 
5026 static abi_long
5027 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5028                             int fd, int cmd, abi_long arg)
5029 {
5030     const argtype *arg_type = ie->arg_type;
5031     int target_size;
5032     abi_long ret;
5033     void *argptr;
5034     int rw_dir;
5035     struct live_urb *lurb;
5036 
5037     /*
5038      * Each submitted URB needs to map to a unique ID for the
5039      * kernel, and that unique ID needs to be a pointer to
5040      * host memory.  Hence, we need to malloc for each URB.
5041      * Isochronous transfers have a variable length struct.
5042      */
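    /*
     * The address of lurb->host_urb is what the kernel sees; on REAPURB the
     * returned pointer is converted back to the enclosing live_urb with
     * offsetof() arithmetic, which recovers all of the guest metadata.
     */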
5043     arg_type++;
5044     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5045 
5046     /* construct host copy of urb and metadata */
5047     lurb = g_try_malloc0(sizeof(struct live_urb));
5048     if (!lurb) {
5049         return -TARGET_ENOMEM;
5050     }
5051 
5052     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5053     if (!argptr) {
5054         g_free(lurb);
5055         return -TARGET_EFAULT;
5056     }
5057     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5058     unlock_user(argptr, arg, 0);
5059 
5060     lurb->target_urb_adr = arg;
5061     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5062 
5063     /* buffer space used depends on endpoint type so lock the entire buffer */
5064     /* control type urbs should check the buffer contents for true direction */
5065     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5066     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5067         lurb->host_urb.buffer_length, 1);
5068     if (lurb->target_buf_ptr == NULL) {
5069         g_free(lurb);
5070         return -TARGET_EFAULT;
5071     }
5072 
5073     /* update buffer pointer in host copy */
5074     lurb->host_urb.buffer = lurb->target_buf_ptr;
5075 
5076     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5077     if (is_error(ret)) {
5078         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5079         g_free(lurb);
5080     } else {
5081         urb_hashtable_insert(lurb);
5082     }
5083 
5084     return ret;
5085 }
5086 #endif /* CONFIG_USBFS */
5087 
5088 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5089                             int cmd, abi_long arg)
5090 {
5091     void *argptr;
5092     struct dm_ioctl *host_dm;
5093     abi_long guest_data;
5094     uint32_t guest_data_size;
5095     int target_size;
5096     const argtype *arg_type = ie->arg_type;
5097     abi_long ret;
5098     void *big_buf = NULL;
5099     char *host_data;
5100 
5101     arg_type++;
5102     target_size = thunk_type_size(arg_type, 0);
5103     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5104     if (!argptr) {
5105         ret = -TARGET_EFAULT;
5106         goto out;
5107     }
5108     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5109     unlock_user(argptr, arg, 0);
5110 
5111     /* buf_temp is too small, so fetch things into a bigger buffer */
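    /*
     * struct dm_ioctl is followed by a variable-length payload; data_start
     * gives the offset of that payload from the start of the struct and
     * data_size gives the total size of header plus payload.
     */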
5112     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5113     memcpy(big_buf, buf_temp, target_size);
5114     buf_temp = big_buf;
5115     host_dm = big_buf;
5116 
5117     guest_data = arg + host_dm->data_start;
5118     if ((guest_data - arg) < 0) {
5119         ret = -TARGET_EINVAL;
5120         goto out;
5121     }
5122     guest_data_size = host_dm->data_size - host_dm->data_start;
5123     host_data = (char*)host_dm + host_dm->data_start;
5124 
5125     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5126     if (!argptr) {
5127         ret = -TARGET_EFAULT;
5128         goto out;
5129     }
5130 
5131     switch (ie->host_cmd) {
5132     case DM_REMOVE_ALL:
5133     case DM_LIST_DEVICES:
5134     case DM_DEV_CREATE:
5135     case DM_DEV_REMOVE:
5136     case DM_DEV_SUSPEND:
5137     case DM_DEV_STATUS:
5138     case DM_DEV_WAIT:
5139     case DM_TABLE_STATUS:
5140     case DM_TABLE_CLEAR:
5141     case DM_TABLE_DEPS:
5142     case DM_LIST_VERSIONS:
5143         /* no input data */
5144         break;
5145     case DM_DEV_RENAME:
5146     case DM_DEV_SET_GEOMETRY:
5147         /* data contains only strings */
5148         memcpy(host_data, argptr, guest_data_size);
5149         break;
5150     case DM_TARGET_MSG:
5151         memcpy(host_data, argptr, guest_data_size);
5152         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5153         break;
5154     case DM_TABLE_LOAD:
5155     {
5156         void *gspec = argptr;
5157         void *cur_data = host_data;
5158         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5159         int spec_size = thunk_type_size(arg_type, 0);
5160         int i;
5161 
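        /*
         * Each dm_target_spec is followed by its NUL-terminated parameter
         * string; convert the spec, copy the string and recompute 'next' as
         * the offset of the following spec in the host layout.
         */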
5162         for (i = 0; i < host_dm->target_count; i++) {
5163             struct dm_target_spec *spec = cur_data;
5164             uint32_t next;
5165             int slen;
5166 
5167             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5168             slen = strlen((char*)gspec + spec_size) + 1;
5169             next = spec->next;
5170             spec->next = sizeof(*spec) + slen;
5171             strcpy((char*)&spec[1], gspec + spec_size);
5172             gspec += next;
5173             cur_data += spec->next;
5174         }
5175         break;
5176     }
5177     default:
5178         ret = -TARGET_EINVAL;
5179         unlock_user(argptr, guest_data, 0);
5180         goto out;
5181     }
5182     unlock_user(argptr, guest_data, 0);
5183 
5184     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5185     if (!is_error(ret)) {
5186         guest_data = arg + host_dm->data_start;
5187         guest_data_size = host_dm->data_size - host_dm->data_start;
5188         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5189         switch (ie->host_cmd) {
5190         case DM_REMOVE_ALL:
5191         case DM_DEV_CREATE:
5192         case DM_DEV_REMOVE:
5193         case DM_DEV_RENAME:
5194         case DM_DEV_SUSPEND:
5195         case DM_DEV_STATUS:
5196         case DM_TABLE_LOAD:
5197         case DM_TABLE_CLEAR:
5198         case DM_TARGET_MSG:
5199         case DM_DEV_SET_GEOMETRY:
5200             /* no return data */
5201             break;
5202         case DM_LIST_DEVICES:
5203         {
5204             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5205             uint32_t remaining_data = guest_data_size;
5206             void *cur_data = argptr;
5207             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5208             int nl_size = 12; /* can't use thunk_size due to alignment */
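            /*
             * 12 is offsetof(struct dm_name_list, name): a __u64 dev plus a
             * __u32 next, without the trailing padding that thunk_type_size()
             * would include for alignment.
             */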
5209 
5210             while (1) {
5211                 uint32_t next = nl->next;
5212                 if (next) {
5213                     nl->next = nl_size + (strlen(nl->name) + 1);
5214                 }
5215                 if (remaining_data < nl->next) {
5216                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5217                     break;
5218                 }
5219                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5220                 strcpy(cur_data + nl_size, nl->name);
5221                 cur_data += nl->next;
5222                 remaining_data -= nl->next;
5223                 if (!next) {
5224                     break;
5225                 }
5226                 nl = (void*)nl + next;
5227             }
5228             break;
5229         }
5230         case DM_DEV_WAIT:
5231         case DM_TABLE_STATUS:
5232         {
5233             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5234             void *cur_data = argptr;
5235             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5236             int spec_size = thunk_type_size(arg_type, 0);
5237             int i;
5238 
5239             for (i = 0; i < host_dm->target_count; i++) {
5240                 uint32_t next = spec->next;
5241                 int slen = strlen((char*)&spec[1]) + 1;
5242                 spec->next = (cur_data - argptr) + spec_size + slen;
5243                 if (guest_data_size < spec->next) {
5244                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5245                     break;
5246                 }
5247                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5248                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5249                 cur_data = argptr + spec->next;
5250                 spec = (void*)host_dm + host_dm->data_start + next;
5251             }
5252             break;
5253         }
5254         case DM_TABLE_DEPS:
5255         {
5256             void *hdata = (void*)host_dm + host_dm->data_start;
5257             int count = *(uint32_t*)hdata;
5258             uint64_t *hdev = hdata + 8;
5259             uint64_t *gdev = argptr + 8;
5260             int i;
5261 
5262             *(uint32_t*)argptr = tswap32(count);
5263             for (i = 0; i < count; i++) {
5264                 *gdev = tswap64(*hdev);
5265                 gdev++;
5266                 hdev++;
5267             }
5268             break;
5269         }
5270         case DM_LIST_VERSIONS:
5271         {
5272             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5273             uint32_t remaining_data = guest_data_size;
5274             void *cur_data = argptr;
5275             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5276             int vers_size = thunk_type_size(arg_type, 0);
5277 
5278             while (1) {
5279                 uint32_t next = vers->next;
5280                 if (next) {
5281                     vers->next = vers_size + (strlen(vers->name) + 1);
5282                 }
5283                 if (remaining_data < vers->next) {
5284                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5285                     break;
5286                 }
5287                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5288                 strcpy(cur_data + vers_size, vers->name);
5289                 cur_data += vers->next;
5290                 remaining_data -= vers->next;
5291                 if (!next) {
5292                     break;
5293                 }
5294                 vers = (void*)vers + next;
5295             }
5296             break;
5297         }
5298         default:
5299             unlock_user(argptr, guest_data, 0);
5300             ret = -TARGET_EINVAL;
5301             goto out;
5302         }
5303         unlock_user(argptr, guest_data, guest_data_size);
5304 
5305         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5306         if (!argptr) {
5307             ret = -TARGET_EFAULT;
5308             goto out;
5309         }
5310         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5311         unlock_user(argptr, arg, target_size);
5312     }
5313 out:
5314     g_free(big_buf);
5315     return ret;
5316 }
5317 
5318 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5319                                int cmd, abi_long arg)
5320 {
5321     void *argptr;
5322     int target_size;
5323     const argtype *arg_type = ie->arg_type;
5324     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5325     abi_long ret;
5326 
5327     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5328     struct blkpg_partition host_part;
5329 
5330     /* Read and convert blkpg */
5331     arg_type++;
5332     target_size = thunk_type_size(arg_type, 0);
5333     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5334     if (!argptr) {
5335         ret = -TARGET_EFAULT;
5336         goto out;
5337     }
5338     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5339     unlock_user(argptr, arg, 0);
5340 
5341     switch (host_blkpg->op) {
5342     case BLKPG_ADD_PARTITION:
5343     case BLKPG_DEL_PARTITION:
5344         /* payload is struct blkpg_partition */
5345         break;
5346     default:
5347         /* Unknown opcode */
5348         ret = -TARGET_EINVAL;
5349         goto out;
5350     }
5351 
5352     /* Read and convert blkpg->data */
5353     arg = (abi_long)(uintptr_t)host_blkpg->data;
5354     target_size = thunk_type_size(part_arg_type, 0);
5355     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5356     if (!argptr) {
5357         ret = -TARGET_EFAULT;
5358         goto out;
5359     }
5360     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5361     unlock_user(argptr, arg, 0);
5362 
5363     /* Swizzle the data pointer to our local copy and call! */
5364     host_blkpg->data = &host_part;
5365     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5366 
5367 out:
5368     return ret;
5369 }
5370 
5371 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5372                                 int fd, int cmd, abi_long arg)
5373 {
5374     const argtype *arg_type = ie->arg_type;
5375     const StructEntry *se;
5376     const argtype *field_types;
5377     const int *dst_offsets, *src_offsets;
5378     int target_size;
5379     void *argptr;
5380     abi_ulong *target_rt_dev_ptr = NULL;
5381     unsigned long *host_rt_dev_ptr = NULL;
5382     abi_long ret;
5383     int i;
5384 
5385     assert(ie->access == IOC_W);
5386     assert(*arg_type == TYPE_PTR);
5387     arg_type++;
5388     assert(*arg_type == TYPE_STRUCT);
5389     target_size = thunk_type_size(arg_type, 0);
5390     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5391     if (!argptr) {
5392         return -TARGET_EFAULT;
5393     }
5394     arg_type++;
5395     assert(*arg_type == (int)STRUCT_rtentry);
5396     se = struct_entries + *arg_type++;
5397     assert(se->convert[0] == NULL);
5398     /* convert struct here to be able to catch rt_dev string */
5399     field_types = se->field_types;
5400     dst_offsets = se->field_offsets[THUNK_HOST];
5401     src_offsets = se->field_offsets[THUNK_TARGET];
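    /*
     * Walk the rtentry fields one by one: every field except rt_dev goes
     * through thunk_convert(); rt_dev is a pointer to a device name string in
     * guest memory, so it is replaced by a locked host copy instead.
     */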
5402     for (i = 0; i < se->nb_fields; i++) {
5403         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5404             assert(*field_types == TYPE_PTRVOID);
5405             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5406             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5407             if (*target_rt_dev_ptr != 0) {
5408                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5409                                                   tswapal(*target_rt_dev_ptr));
5410                 if (!*host_rt_dev_ptr) {
5411                     unlock_user(argptr, arg, 0);
5412                     return -TARGET_EFAULT;
5413                 }
5414             } else {
5415                 *host_rt_dev_ptr = 0;
5416             }
5417             field_types++;
5418             continue;
5419         }
5420         field_types = thunk_convert(buf_temp + dst_offsets[i],
5421                                     argptr + src_offsets[i],
5422                                     field_types, THUNK_HOST);
5423     }
5424     unlock_user(argptr, arg, 0);
5425 
5426     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5427 
5428     assert(host_rt_dev_ptr != NULL);
5429     assert(target_rt_dev_ptr != NULL);
5430     if (*host_rt_dev_ptr != 0) {
5431         unlock_user((void *)*host_rt_dev_ptr,
5432                     *target_rt_dev_ptr, 0);
5433     }
5434     return ret;
5435 }
5436 
5437 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5438                                      int fd, int cmd, abi_long arg)
5439 {
5440     int sig = target_to_host_signal(arg);
5441     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5442 }
5443 
5444 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5445                                     int fd, int cmd, abi_long arg)
5446 {
5447     struct timeval tv;
5448     abi_long ret;
5449 
5450     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5451     if (is_error(ret)) {
5452         return ret;
5453     }
5454 
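    /*
     * The _OLD command returns a struct timeval laid out with the target's
     * native long width; the new command always uses the 64-bit layout.
     */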
5455     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5456         if (copy_to_user_timeval(arg, &tv)) {
5457             return -TARGET_EFAULT;
5458         }
5459     } else {
5460         if (copy_to_user_timeval64(arg, &tv)) {
5461             return -TARGET_EFAULT;
5462         }
5463     }
5464 
5465     return ret;
5466 }
5467 
5468 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5469                                       int fd, int cmd, abi_long arg)
5470 {
5471     struct timespec ts;
5472     abi_long ret;
5473 
5474     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5475     if (is_error(ret)) {
5476         return ret;
5477     }
5478 
5479     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5480         if (host_to_target_timespec(arg, &ts)) {
5481             return -TARGET_EFAULT;
5482         }
5483     } else {
5484         if (host_to_target_timespec64(arg, &ts)) {
5485             return -TARGET_EFAULT;
5486         }
5487     }
5488 
5489     return ret;
5490 }
5491 
5492 #ifdef TIOCGPTPEER
5493 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5494                                      int fd, int cmd, abi_long arg)
5495 {
5496     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5497     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5498 }
5499 #endif
5500 
5501 #ifdef HAVE_DRM_H
5502 
5503 static void unlock_drm_version(struct drm_version *host_ver,
5504                                struct target_drm_version *target_ver,
5505                                bool copy)
5506 {
5507     unlock_user(host_ver->name, target_ver->name,
5508                                 copy ? host_ver->name_len : 0);
5509     unlock_user(host_ver->date, target_ver->date,
5510                                 copy ? host_ver->date_len : 0);
5511     unlock_user(host_ver->desc, target_ver->desc,
5512                                 copy ? host_ver->desc_len : 0);
5513 }
5514 
5515 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5516                                           struct target_drm_version *target_ver)
5517 {
5518     memset(host_ver, 0, sizeof(*host_ver));
5519 
5520     __get_user(host_ver->name_len, &target_ver->name_len);
5521     if (host_ver->name_len) {
5522         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5523                                    target_ver->name_len, 0);
5524         if (!host_ver->name) {
5525             return -EFAULT;
5526         }
5527     }
5528 
5529     __get_user(host_ver->date_len, &target_ver->date_len);
5530     if (host_ver->date_len) {
5531         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5532                                    target_ver->date_len, 0);
5533         if (!host_ver->date) {
5534             goto err;
5535         }
5536     }
5537 
5538     __get_user(host_ver->desc_len, &target_ver->desc_len);
5539     if (host_ver->desc_len) {
5540         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5541                                    target_ver->desc_len, 0);
5542         if (!host_ver->desc) {
5543             goto err;
5544         }
5545     }
5546 
5547     return 0;
5548 err:
5549     unlock_drm_version(host_ver, target_ver, false);
5550     return -EFAULT;
5551 }
5552 
5553 static inline void host_to_target_drmversion(
5554                                           struct target_drm_version *target_ver,
5555                                           struct drm_version *host_ver)
5556 {
5557     __put_user(host_ver->version_major, &target_ver->version_major);
5558     __put_user(host_ver->version_minor, &target_ver->version_minor);
5559     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5560     __put_user(host_ver->name_len, &target_ver->name_len);
5561     __put_user(host_ver->date_len, &target_ver->date_len);
5562     __put_user(host_ver->desc_len, &target_ver->desc_len);
5563     unlock_drm_version(host_ver, target_ver, true);
5564 }
5565 
5566 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5567                              int fd, int cmd, abi_long arg)
5568 {
5569     struct drm_version *ver;
5570     struct target_drm_version *target_ver;
5571     abi_long ret;
5572 
5573     switch (ie->host_cmd) {
5574     case DRM_IOCTL_VERSION:
5575         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5576             return -TARGET_EFAULT;
5577         }
5578         ver = (struct drm_version *)buf_temp;
5579         ret = target_to_host_drmversion(ver, target_ver);
5580         if (!is_error(ret)) {
5581             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5582             if (is_error(ret)) {
5583                 unlock_drm_version(ver, target_ver, false);
5584             } else {
5585                 host_to_target_drmversion(target_ver, ver);
5586             }
5587         }
5588         unlock_user_struct(target_ver, arg, 0);
5589         return ret;
5590     }
5591     return -TARGET_ENOSYS;
5592 }
5593 
5594 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5595                                            struct drm_i915_getparam *gparam,
5596                                            int fd, abi_long arg)
5597 {
5598     abi_long ret;
5599     int value;
5600     struct target_drm_i915_getparam *target_gparam;
5601 
5602     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5603         return -TARGET_EFAULT;
5604     }
5605 
5606     __get_user(gparam->param, &target_gparam->param);
5607     gparam->value = &value;
5608     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5609     put_user_s32(value, target_gparam->value);
5610 
5611     unlock_user_struct(target_gparam, arg, 0);
5612     return ret;
5613 }
5614 
5615 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5616                                   int fd, int cmd, abi_long arg)
5617 {
5618     switch (ie->host_cmd) {
5619     case DRM_IOCTL_I915_GETPARAM:
5620         return do_ioctl_drm_i915_getparam(ie,
5621                                           (struct drm_i915_getparam *)buf_temp,
5622                                           fd, arg);
5623     default:
5624         return -TARGET_ENOSYS;
5625     }
5626 }
5627 
5628 #endif
5629 
5630 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5631                                         int fd, int cmd, abi_long arg)
5632 {
5633     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5634     struct tun_filter *target_filter;
5635     char *target_addr;
5636 
5637     assert(ie->access == IOC_W);
5638 
5639     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5640     if (!target_filter) {
5641         return -TARGET_EFAULT;
5642     }
5643     filter->flags = tswap16(target_filter->flags);
5644     filter->count = tswap16(target_filter->count);
5645     unlock_user(target_filter, arg, 0);
5646 
5647     if (filter->count) {
5648         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5649             MAX_STRUCT_SIZE) {
5650             return -TARGET_EFAULT;
5651         }
5652 
5653         target_addr = lock_user(VERIFY_READ,
5654                                 arg + offsetof(struct tun_filter, addr),
5655                                 filter->count * ETH_ALEN, 1);
5656         if (!target_addr) {
5657             return -TARGET_EFAULT;
5658         }
5659         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5660         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5661     }
5662 
5663     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5664 }
5665 
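/*
 * The ioctl table is generated from ioctls.h: IOCTL() entries use the generic
 * thunk-based argument conversion in do_ioctl() below, IOCTL_SPECIAL() entries
 * supply their own do_fn handler, and IOCTL_IGNORE() entries have no host
 * counterpart (host_cmd stays 0), so do_ioctl() fails them with
 * -TARGET_ENOSYS.
 */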
5666 IOCTLEntry ioctl_entries[] = {
5667 #define IOCTL(cmd, access, ...) \
5668     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5669 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5670     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5671 #define IOCTL_IGNORE(cmd) \
5672     { TARGET_ ## cmd, 0, #cmd },
5673 #include "ioctls.h"
5674     { 0, 0, },
5675 };
5676 
5677 /* ??? Implement proper locking for ioctls.  */
5678 /* do_ioctl() must return target values and target errnos. */
5679 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5680 {
5681     const IOCTLEntry *ie;
5682     const argtype *arg_type;
5683     abi_long ret;
5684     uint8_t buf_temp[MAX_STRUCT_SIZE];
5685     int target_size;
5686     void *argptr;
5687 
5688     ie = ioctl_entries;
5689     for(;;) {
5690         if (ie->target_cmd == 0) {
5691             qemu_log_mask(
5692                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5693             return -TARGET_ENOSYS;
5694         }
5695         if (ie->target_cmd == cmd)
5696             break;
5697         ie++;
5698     }
5699     arg_type = ie->arg_type;
5700     if (ie->do_ioctl) {
5701         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5702     } else if (!ie->host_cmd) {
5703         /* Some architectures define BSD ioctls in their headers
5704            that are not implemented in Linux.  */
5705         return -TARGET_ENOSYS;
5706     }
5707 
5708     switch(arg_type[0]) {
5709     case TYPE_NULL:
5710         /* no argument */
5711         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5712         break;
5713     case TYPE_PTRVOID:
5714     case TYPE_INT:
5715     case TYPE_LONG:
5716     case TYPE_ULONG:
5717         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5718         break;
5719     case TYPE_PTR:
5720         arg_type++;
5721         target_size = thunk_type_size(arg_type, 0);
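        /*
         * IOC_R: the host kernel fills in the struct, so convert it back to
         * the guest afterwards.  IOC_W: the guest supplies the struct, so
         * convert it to host format first.  IOC_RW: do both.
         */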
5722         switch(ie->access) {
5723         case IOC_R:
5724             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5725             if (!is_error(ret)) {
5726                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5727                 if (!argptr)
5728                     return -TARGET_EFAULT;
5729                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5730                 unlock_user(argptr, arg, target_size);
5731             }
5732             break;
5733         case IOC_W:
5734             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5735             if (!argptr)
5736                 return -TARGET_EFAULT;
5737             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5738             unlock_user(argptr, arg, 0);
5739             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5740             break;
5741         default:
5742         case IOC_RW:
5743             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5744             if (!argptr)
5745                 return -TARGET_EFAULT;
5746             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5747             unlock_user(argptr, arg, 0);
5748             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5749             if (!is_error(ret)) {
5750                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5751                 if (!argptr)
5752                     return -TARGET_EFAULT;
5753                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5754                 unlock_user(argptr, arg, target_size);
5755             }
5756             break;
5757         }
5758         break;
5759     default:
5760         qemu_log_mask(LOG_UNIMP,
5761                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5762                       (long)cmd, arg_type[0]);
5763         ret = -TARGET_ENOSYS;
5764         break;
5765     }
5766     return ret;
5767 }
5768 
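/*
 * Each bitmask_transtbl entry is { target_mask, target_bits, host_mask,
 * host_bits }: when the masked target value equals target_bits the
 * corresponding host_bits are set, and vice versa for the reverse direction.
 */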
5769 static const bitmask_transtbl iflag_tbl[] = {
5770         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5771         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5772         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5773         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5774         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5775         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5776         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5777         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5778         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5779         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5780         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5781         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5782         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5783         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5784         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5785         { 0, 0, 0, 0 }
5786 };
5787 
5788 static const bitmask_transtbl oflag_tbl[] = {
5789 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5790 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5791 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5792 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5793 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5794 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5795 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5796 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5797 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5798 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5799 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5800 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5801 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5802 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5803 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5804 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5805 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5806 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5807 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5808 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5809 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5810 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5811 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5812 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5813 	{ 0, 0, 0, 0 }
5814 };
5815 
5816 static const bitmask_transtbl cflag_tbl[] = {
5817 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5818 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5819 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5820 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5821 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5822 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5823 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5824 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5825 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5826 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5827 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5828 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5829 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5830 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5831 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5832 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5833 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5834 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5835 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5836 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5837 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5838 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5839 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5840 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5841 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5842 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5843 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5844 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5845 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5846 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5847 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5848 	{ 0, 0, 0, 0 }
5849 };
5850 
5851 static const bitmask_transtbl lflag_tbl[] = {
5852   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5853   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5854   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5855   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5856   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5857   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5858   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5859   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5860   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5861   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5862   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5863   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5864   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5865   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5866   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5867   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5868   { 0, 0, 0, 0 }
5869 };
5870 
5871 static void target_to_host_termios (void *dst, const void *src)
5872 {
5873     struct host_termios *host = dst;
5874     const struct target_termios *target = src;
5875 
5876     host->c_iflag =
5877         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5878     host->c_oflag =
5879         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5880     host->c_cflag =
5881         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5882     host->c_lflag =
5883         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5884     host->c_line = target->c_line;
5885 
5886     memset(host->c_cc, 0, sizeof(host->c_cc));
5887     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5888     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5889     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5890     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5891     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5892     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5893     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5894     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5895     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5896     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5897     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5898     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5899     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5900     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5901     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5902     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5903     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5904 }
5905 
5906 static void host_to_target_termios (void *dst, const void *src)
5907 {
5908     struct target_termios *target = dst;
5909     const struct host_termios *host = src;
5910 
5911     target->c_iflag =
5912         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5913     target->c_oflag =
5914         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5915     target->c_cflag =
5916         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5917     target->c_lflag =
5918         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5919     target->c_line = host->c_line;
5920 
5921     memset(target->c_cc, 0, sizeof(target->c_cc));
5922     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5923     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5924     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5925     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5926     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5927     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5928     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5929     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5930     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5931     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5932     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5933     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5934     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5935     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5936     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5937     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5938     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5939 }
5940 
5941 static const StructEntry struct_termios_def = {
5942     .convert = { host_to_target_termios, target_to_host_termios },
5943     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5944     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5945     .print = print_termios,
5946 };
5947 
5948 static const bitmask_transtbl mmap_flags_tbl[] = {
5949     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5950     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5951     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5952     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5953       MAP_ANONYMOUS, MAP_ANONYMOUS },
5954     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5955       MAP_GROWSDOWN, MAP_GROWSDOWN },
5956     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5957       MAP_DENYWRITE, MAP_DENYWRITE },
5958     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5959       MAP_EXECUTABLE, MAP_EXECUTABLE },
5960     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5961     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5962       MAP_NORESERVE, MAP_NORESERVE },
5963     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5964     /* MAP_STACK had been ignored by the kernel for quite some time.
5965        Recognize it for the target insofar as we do not want to pass
5966        it through to the host.  */
5967     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5968     { 0, 0, 0, 0 }
5969 };
5970 
5971 /*
5972  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5973  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5974  */
5975 #if defined(TARGET_I386)
5976 
5977 /* NOTE: there is really one LDT for all the threads */
5978 static uint8_t *ldt_table;
5979 
5980 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5981 {
5982     int size;
5983     void *p;
5984 
5985     if (!ldt_table)
5986         return 0;
5987     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5988     if (size > bytecount)
5989         size = bytecount;
5990     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5991     if (!p)
5992         return -TARGET_EFAULT;
5993     /* ??? Should this be byteswapped?  */
5994     memcpy(p, ldt_table, size);
5995     unlock_user(p, ptr, size);
5996     return size;
5997 }
5998 
5999 /* XXX: add locking support */
6000 static abi_long write_ldt(CPUX86State *env,
6001                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6002 {
6003     struct target_modify_ldt_ldt_s ldt_info;
6004     struct target_modify_ldt_ldt_s *target_ldt_info;
6005     int seg_32bit, contents, read_exec_only, limit_in_pages;
6006     int seg_not_present, useable, lm;
6007     uint32_t *lp, entry_1, entry_2;
6008 
6009     if (bytecount != sizeof(ldt_info))
6010         return -TARGET_EINVAL;
6011     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6012         return -TARGET_EFAULT;
6013     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6014     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6015     ldt_info.limit = tswap32(target_ldt_info->limit);
6016     ldt_info.flags = tswap32(target_ldt_info->flags);
6017     unlock_user_struct(target_ldt_info, ptr, 0);
6018 
6019     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6020         return -TARGET_EINVAL;
6021     seg_32bit = ldt_info.flags & 1;
6022     contents = (ldt_info.flags >> 1) & 3;
6023     read_exec_only = (ldt_info.flags >> 3) & 1;
6024     limit_in_pages = (ldt_info.flags >> 4) & 1;
6025     seg_not_present = (ldt_info.flags >> 5) & 1;
6026     useable = (ldt_info.flags >> 6) & 1;
6027 #ifdef TARGET_ABI32
6028     lm = 0;
6029 #else
6030     lm = (ldt_info.flags >> 7) & 1;
6031 #endif
6032     if (contents == 3) {
6033         if (oldmode)
6034             return -TARGET_EINVAL;
6035         if (seg_not_present == 0)
6036             return -TARGET_EINVAL;
6037     }
6038     /* allocate the LDT */
6039     if (!ldt_table) {
6040         env->ldt.base = target_mmap(0,
6041                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6042                                     PROT_READ|PROT_WRITE,
6043                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6044         if (env->ldt.base == -1)
6045             return -TARGET_ENOMEM;
6046         memset(g2h_untagged(env->ldt.base), 0,
6047                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6048         env->ldt.limit = 0xffff;
6049         ldt_table = g2h_untagged(env->ldt.base);
6050     }
6051 
6052     /* NOTE: same code as Linux kernel */
6053     /* Allow LDTs to be cleared by the user. */
6054     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6055         if (oldmode ||
6056             (contents == 0              &&
6057              read_exec_only == 1        &&
6058              seg_32bit == 0             &&
6059              limit_in_pages == 0        &&
6060              seg_not_present == 1       &&
6061              useable == 0 )) {
6062             entry_1 = 0;
6063             entry_2 = 0;
6064             goto install;
6065         }
6066     }
6067 
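    /*
     * Pack the fields into the two 32-bit words of an x86 segment descriptor:
     * entry_1 holds base[15:0] and limit[15:0]; entry_2 holds base[31:24],
     * base[23:16], limit[19:16], the type/present bits and the AVL/L/D/G
     * flags (0x7000 sets the S bit and DPL = 3).
     */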
6068     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6069         (ldt_info.limit & 0x0ffff);
6070     entry_2 = (ldt_info.base_addr & 0xff000000) |
6071         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6072         (ldt_info.limit & 0xf0000) |
6073         ((read_exec_only ^ 1) << 9) |
6074         (contents << 10) |
6075         ((seg_not_present ^ 1) << 15) |
6076         (seg_32bit << 22) |
6077         (limit_in_pages << 23) |
6078         (lm << 21) |
6079         0x7000;
6080     if (!oldmode)
6081         entry_2 |= (useable << 20);
6082 
6083     /* Install the new entry ...  */
6084 install:
6085     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6086     lp[0] = tswap32(entry_1);
6087     lp[1] = tswap32(entry_2);
6088     return 0;
6089 }
6090 
6091 /* specific and weird i386 syscalls */
6092 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6093                               unsigned long bytecount)
6094 {
6095     abi_long ret;
6096 
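    /*
     * func follows modify_ldt(2): 0 reads the LDT, 1 writes an entry in the
     * legacy format, 0x11 writes an entry in the new format (which also
     * honours the 'useable' bit).
     */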
6097     switch (func) {
6098     case 0:
6099         ret = read_ldt(ptr, bytecount);
6100         break;
6101     case 1:
6102         ret = write_ldt(env, ptr, bytecount, 1);
6103         break;
6104     case 0x11:
6105         ret = write_ldt(env, ptr, bytecount, 0);
6106         break;
6107     default:
6108         ret = -TARGET_ENOSYS;
6109         break;
6110     }
6111     return ret;
6112 }
6113 
6114 #if defined(TARGET_ABI32)
6115 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6116 {
6117     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6118     struct target_modify_ldt_ldt_s ldt_info;
6119     struct target_modify_ldt_ldt_s *target_ldt_info;
6120     int seg_32bit, contents, read_exec_only, limit_in_pages;
6121     int seg_not_present, useable, lm;
6122     uint32_t *lp, entry_1, entry_2;
6123     int i;
6124 
6125     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6126     if (!target_ldt_info)
6127         return -TARGET_EFAULT;
6128     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6129     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6130     ldt_info.limit = tswap32(target_ldt_info->limit);
6131     ldt_info.flags = tswap32(target_ldt_info->flags);
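    /*
     * As with the kernel's set_thread_area(2), an entry_number of -1 asks us
     * to pick a free TLS slot in the GDT; the index chosen is written back
     * to the guest structure below.
     */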
6132     if (ldt_info.entry_number == -1) {
6133         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6134             if (gdt_table[i] == 0) {
6135                 ldt_info.entry_number = i;
6136                 target_ldt_info->entry_number = tswap32(i);
6137                 break;
6138             }
6139         }
6140     }
6141     unlock_user_struct(target_ldt_info, ptr, 1);
6142 
6143     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6144         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6145            return -TARGET_EINVAL;
6146     seg_32bit = ldt_info.flags & 1;
6147     contents = (ldt_info.flags >> 1) & 3;
6148     read_exec_only = (ldt_info.flags >> 3) & 1;
6149     limit_in_pages = (ldt_info.flags >> 4) & 1;
6150     seg_not_present = (ldt_info.flags >> 5) & 1;
6151     useable = (ldt_info.flags >> 6) & 1;
6152 #ifdef TARGET_ABI32
6153     lm = 0;
6154 #else
6155     lm = (ldt_info.flags >> 7) & 1;
6156 #endif
6157 
6158     if (contents == 3) {
6159         if (seg_not_present == 0)
6160             return -TARGET_EINVAL;
6161     }
6162 
6163     /* NOTE: same code as Linux kernel */
6164     /* Allow LDTs to be cleared by the user. */
6165     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6166         if ((contents == 0             &&
6167              read_exec_only == 1       &&
6168              seg_32bit == 0            &&
6169              limit_in_pages == 0       &&
6170              seg_not_present == 1      &&
6171              useable == 0 )) {
6172             entry_1 = 0;
6173             entry_2 = 0;
6174             goto install;
6175         }
6176     }
6177 
6178     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6179         (ldt_info.limit & 0x0ffff);
6180     entry_2 = (ldt_info.base_addr & 0xff000000) |
6181         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6182         (ldt_info.limit & 0xf0000) |
6183         ((read_exec_only ^ 1) << 9) |
6184         (contents << 10) |
6185         ((seg_not_present ^ 1) << 15) |
6186         (seg_32bit << 22) |
6187         (limit_in_pages << 23) |
6188         (useable << 20) |
6189         (lm << 21) |
6190         0x7000;
6191 
6192     /* Install the new entry ...  */
6193 install:
6194     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6195     lp[0] = tswap32(entry_1);
6196     lp[1] = tswap32(entry_2);
6197     return 0;
6198 }
6199 
6200 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6201 {
6202     struct target_modify_ldt_ldt_s *target_ldt_info;
6203     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6204     uint32_t base_addr, limit, flags;
6205     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6206     int seg_not_present, useable, lm;
6207     uint32_t *lp, entry_1, entry_2;
6208 
6209     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6210     if (!target_ldt_info)
6211         return -TARGET_EFAULT;
6212     idx = tswap32(target_ldt_info->entry_number);
6213     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6214         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6215         unlock_user_struct(target_ldt_info, ptr, 1);
6216         return -TARGET_EINVAL;
6217     }
6218     lp = (uint32_t *)(gdt_table + idx);
6219     entry_1 = tswap32(lp[0]);
6220     entry_2 = tswap32(lp[1]);
6221 
6222     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6223     contents = (entry_2 >> 10) & 3;
6224     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6225     seg_32bit = (entry_2 >> 22) & 1;
6226     limit_in_pages = (entry_2 >> 23) & 1;
6227     useable = (entry_2 >> 20) & 1;
6228 #ifdef TARGET_ABI32
6229     lm = 0;
6230 #else
6231     lm = (entry_2 >> 21) & 1;
6232 #endif
6233     flags = (seg_32bit << 0) | (contents << 1) |
6234         (read_exec_only << 3) | (limit_in_pages << 4) |
6235         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6236     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6237     base_addr = (entry_1 >> 16) |
6238         (entry_2 & 0xff000000) |
6239         ((entry_2 & 0xff) << 16);
6240     target_ldt_info->base_addr = tswapal(base_addr);
6241     target_ldt_info->limit = tswap32(limit);
6242     target_ldt_info->flags = tswap32(flags);
6243     unlock_user_struct(target_ldt_info, ptr, 1);
6244     return 0;
6245 }
6246 
6247 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6248 {
6249     return -TARGET_ENOSYS;
6250 }
6251 #else
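/*
 * On 64-bit targets arch_prctl(2) is how the guest (typically its libc) sets
 * up the FS/GS segment bases used for TLS; we emulate it by updating the
 * segment base in the CPU state directly instead of issuing a host syscall.
 */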
6252 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6253 {
6254     abi_long ret = 0;
6255     abi_ulong val;
6256     int idx;
6257 
6258     switch(code) {
6259     case TARGET_ARCH_SET_GS:
6260     case TARGET_ARCH_SET_FS:
6261         if (code == TARGET_ARCH_SET_GS)
6262             idx = R_GS;
6263         else
6264             idx = R_FS;
6265         cpu_x86_load_seg(env, idx, 0);
6266         env->segs[idx].base = addr;
6267         break;
6268     case TARGET_ARCH_GET_GS:
6269     case TARGET_ARCH_GET_FS:
6270         if (code == TARGET_ARCH_GET_GS)
6271             idx = R_GS;
6272         else
6273             idx = R_FS;
6274         val = env->segs[idx].base;
6275         if (put_user(val, addr, abi_ulong))
6276             ret = -TARGET_EFAULT;
6277         break;
6278     default:
6279         ret = -TARGET_EINVAL;
6280         break;
6281     }
6282     return ret;
6283 }
6284 #endif /* defined(TARGET_ABI32) */
6285 
6286 #endif /* defined(TARGET_I386) */
6287 
6288 #define NEW_STACK_SIZE 0x40000
6289 
6290 
6291 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6292 typedef struct {
6293     CPUArchState *env;
6294     pthread_mutex_t mutex;
6295     pthread_cond_t cond;
6296     pthread_t thread;
6297     uint32_t tid;
6298     abi_ulong child_tidptr;
6299     abi_ulong parent_tidptr;
6300     sigset_t sigmask;
6301 } new_thread_info;
6302 
6303 static void *clone_func(void *arg)
6304 {
6305     new_thread_info *info = arg;
6306     CPUArchState *env;
6307     CPUState *cpu;
6308     TaskState *ts;
6309 
6310     rcu_register_thread();
6311     tcg_register_thread();
6312     env = info->env;
6313     cpu = env_cpu(env);
6314     thread_cpu = cpu;
6315     ts = (TaskState *)cpu->opaque;
6316     info->tid = sys_gettid();
6317     task_settid(ts);
6318     if (info->child_tidptr)
6319         put_user_u32(info->tid, info->child_tidptr);
6320     if (info->parent_tidptr)
6321         put_user_u32(info->tid, info->parent_tidptr);
6322     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6323     /* Enable signals.  */
6324     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6325     /* Signal to the parent that we're ready.  */
6326     pthread_mutex_lock(&info->mutex);
6327     pthread_cond_broadcast(&info->cond);
6328     pthread_mutex_unlock(&info->mutex);
6329     /* Wait until the parent has finished initializing the tls state.  */
6330     pthread_mutex_lock(&clone_lock);
6331     pthread_mutex_unlock(&clone_lock);
6332     cpu_loop(env);
6333     /* never exits */
6334     return NULL;
6335 }
6336 
6337 /* do_fork() must return host values and target errnos (unlike most
6338    do_*() functions). */
6339 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6340                    abi_ulong parent_tidptr, target_ulong newtls,
6341                    abi_ulong child_tidptr)
6342 {
6343     CPUState *cpu = env_cpu(env);
6344     int ret;
6345     TaskState *ts;
6346     CPUState *new_cpu;
6347     CPUArchState *new_env;
6348     sigset_t sigmask;
6349 
6350     flags &= ~CLONE_IGNORED_FLAGS;
6351 
6352     /* Emulate vfork() with fork() */
6353     if (flags & CLONE_VFORK)
6354         flags &= ~(CLONE_VFORK | CLONE_VM);
6355 
6356     if (flags & CLONE_VM) {
6357         TaskState *parent_ts = (TaskState *)cpu->opaque;
6358         new_thread_info info;
6359         pthread_attr_t attr;
6360 
6361         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6362             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6363             return -TARGET_EINVAL;
6364         }
6365 
6366         ts = g_new0(TaskState, 1);
6367         init_task_state(ts);
6368 
6369         /* Grab a mutex so that thread setup appears atomic.  */
6370         pthread_mutex_lock(&clone_lock);
6371 
6372         /*
6373          * If this is our first additional thread, we need to ensure we
6374          * generate code for parallel execution and flush old translations.
6375          * Do this now so that the copy gets CF_PARALLEL too.
6376          */
6377         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6378             cpu->tcg_cflags |= CF_PARALLEL;
6379             tb_flush(cpu);
6380         }
6381 
6382         /* we create a new CPU instance. */
6383         new_env = cpu_copy(env);
6384         /* Init regs that differ from the parent.  */
6385         cpu_clone_regs_child(new_env, newsp, flags);
6386         cpu_clone_regs_parent(env, flags);
6387         new_cpu = env_cpu(new_env);
6388         new_cpu->opaque = ts;
6389         ts->bprm = parent_ts->bprm;
6390         ts->info = parent_ts->info;
6391         ts->signal_mask = parent_ts->signal_mask;
6392 
6393         if (flags & CLONE_CHILD_CLEARTID) {
6394             ts->child_tidptr = child_tidptr;
6395         }
6396 
6397         if (flags & CLONE_SETTLS) {
6398             cpu_set_tls (new_env, newtls);
6399         }
6400 
6401         memset(&info, 0, sizeof(info));
6402         pthread_mutex_init(&info.mutex, NULL);
6403         pthread_mutex_lock(&info.mutex);
6404         pthread_cond_init(&info.cond, NULL);
6405         info.env = new_env;
6406         if (flags & CLONE_CHILD_SETTID) {
6407             info.child_tidptr = child_tidptr;
6408         }
6409         if (flags & CLONE_PARENT_SETTID) {
6410             info.parent_tidptr = parent_tidptr;
6411         }
6412 
6413         ret = pthread_attr_init(&attr);
6414         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6415         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6416         /* It is not safe to deliver signals until the child has finished
6417            initializing, so temporarily block all signals.  */
6418         sigfillset(&sigmask);
6419         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6420         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6421 
6422         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6423         /* TODO: Free new CPU state if thread creation failed.  */
6424 
6425         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6426         pthread_attr_destroy(&attr);
6427         if (ret == 0) {
6428             /* Wait for the child to initialize.  */
6429             pthread_cond_wait(&info.cond, &info.mutex);
6430             ret = info.tid;
6431         } else {
6432             ret = -1;
6433         }
6434         pthread_mutex_unlock(&info.mutex);
6435         pthread_cond_destroy(&info.cond);
6436         pthread_mutex_destroy(&info.mutex);
6437         pthread_mutex_unlock(&clone_lock);
6438     } else {
6439         /* if there is no CLONE_VM, we consider it a fork */
6440         if (flags & CLONE_INVALID_FORK_FLAGS) {
6441             return -TARGET_EINVAL;
6442         }
6443 
6444         /* We can't support custom termination signals */
6445         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6446             return -TARGET_EINVAL;
6447         }
6448 
6449         if (block_signals()) {
6450             return -TARGET_ERESTARTSYS;
6451         }
6452 
6453         fork_start();
6454         ret = fork();
6455         if (ret == 0) {
6456             /* Child Process.  */
6457             cpu_clone_regs_child(env, newsp, flags);
6458             fork_end(1);
6459             /* There is a race condition here.  The parent process could
6460                theoretically read the TID in the child process before the child
6461                tid is set.  This would require using either ptrace
6462                (not implemented) or having *_tidptr point at a shared memory
6463                mapping.  We can't repeat the spinlock hack used above because
6464                the child process gets its own copy of the lock.  */
6465             if (flags & CLONE_CHILD_SETTID)
6466                 put_user_u32(sys_gettid(), child_tidptr);
6467             if (flags & CLONE_PARENT_SETTID)
6468                 put_user_u32(sys_gettid(), parent_tidptr);
6469             ts = (TaskState *)cpu->opaque;
6470             if (flags & CLONE_SETTLS)
6471                 cpu_set_tls (env, newtls);
6472             if (flags & CLONE_CHILD_CLEARTID)
6473                 ts->child_tidptr = child_tidptr;
6474         } else {
6475             cpu_clone_regs_parent(env, flags);
6476             fork_end(0);
6477         }
6478     }
6479     return ret;
6480 }
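
/*
 * For reference, the CLONE_VM path above is what a guest pthread_create()
 * exercises: a typical libc issues clone() with roughly
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * which we map onto a host pthread_create() running clone_func(), while a
 * plain fork()/vfork() (no CLONE_VM left after the adjustment above) takes
 * the host fork() path.  (The flag list is illustrative, not exhaustive.)
 */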
6481 
6482 /* warning: doesn't handle Linux-specific flags... */
6483 static int target_to_host_fcntl_cmd(int cmd)
6484 {
6485     int ret;
6486 
6487     switch(cmd) {
6488     case TARGET_F_DUPFD:
6489     case TARGET_F_GETFD:
6490     case TARGET_F_SETFD:
6491     case TARGET_F_GETFL:
6492     case TARGET_F_SETFL:
6493     case TARGET_F_OFD_GETLK:
6494     case TARGET_F_OFD_SETLK:
6495     case TARGET_F_OFD_SETLKW:
6496         ret = cmd;
6497         break;
6498     case TARGET_F_GETLK:
6499         ret = F_GETLK64;
6500         break;
6501     case TARGET_F_SETLK:
6502         ret = F_SETLK64;
6503         break;
6504     case TARGET_F_SETLKW:
6505         ret = F_SETLKW64;
6506         break;
6507     case TARGET_F_GETOWN:
6508         ret = F_GETOWN;
6509         break;
6510     case TARGET_F_SETOWN:
6511         ret = F_SETOWN;
6512         break;
6513     case TARGET_F_GETSIG:
6514         ret = F_GETSIG;
6515         break;
6516     case TARGET_F_SETSIG:
6517         ret = F_SETSIG;
6518         break;
6519 #if TARGET_ABI_BITS == 32
6520     case TARGET_F_GETLK64:
6521         ret = F_GETLK64;
6522         break;
6523     case TARGET_F_SETLK64:
6524         ret = F_SETLK64;
6525         break;
6526     case TARGET_F_SETLKW64:
6527         ret = F_SETLKW64;
6528         break;
6529 #endif
6530     case TARGET_F_SETLEASE:
6531         ret = F_SETLEASE;
6532         break;
6533     case TARGET_F_GETLEASE:
6534         ret = F_GETLEASE;
6535         break;
6536 #ifdef F_DUPFD_CLOEXEC
6537     case TARGET_F_DUPFD_CLOEXEC:
6538         ret = F_DUPFD_CLOEXEC;
6539         break;
6540 #endif
6541     case TARGET_F_NOTIFY:
6542         ret = F_NOTIFY;
6543         break;
6544 #ifdef F_GETOWN_EX
6545     case TARGET_F_GETOWN_EX:
6546         ret = F_GETOWN_EX;
6547         break;
6548 #endif
6549 #ifdef F_SETOWN_EX
6550     case TARGET_F_SETOWN_EX:
6551         ret = F_SETOWN_EX;
6552         break;
6553 #endif
6554 #ifdef F_SETPIPE_SZ
6555     case TARGET_F_SETPIPE_SZ:
6556         ret = F_SETPIPE_SZ;
6557         break;
6558     case TARGET_F_GETPIPE_SZ:
6559         ret = F_GETPIPE_SZ;
6560         break;
6561 #endif
6562 #ifdef F_ADD_SEALS
6563     case TARGET_F_ADD_SEALS:
6564         ret = F_ADD_SEALS;
6565         break;
6566     case TARGET_F_GET_SEALS:
6567         ret = F_GET_SEALS;
6568         break;
6569 #endif
6570     default:
6571         ret = -TARGET_EINVAL;
6572         break;
6573     }
6574 
6575 #if defined(__powerpc64__)
6576     /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13 and
6577      * 14, values which the kernel does not support. The glibc fcntl wrapper
6578      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6579      * the syscall directly, adjust to what the kernel supports.
6580      */
6581     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6582         ret -= F_GETLK64 - 5;
6583     }
6584 #endif
6585 
6586     return ret;
6587 }
6588 
6589 #define FLOCK_TRANSTBL \
6590     switch (type) { \
6591     TRANSTBL_CONVERT(F_RDLCK); \
6592     TRANSTBL_CONVERT(F_WRLCK); \
6593     TRANSTBL_CONVERT(F_UNLCK); \
6594     }
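
/*
 * TRANSTBL_CONVERT is (re)defined before each expansion of FLOCK_TRANSTBL,
 * so one table yields both conversion directions: in target_to_host_flock()
 * it expands to "case TARGET_F_RDLCK: return F_RDLCK;" and so on, while in
 * host_to_target_flock() the case labels and return values are swapped.
 */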
6595 
6596 static int target_to_host_flock(int type)
6597 {
6598 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6599     FLOCK_TRANSTBL
6600 #undef  TRANSTBL_CONVERT
6601     return -TARGET_EINVAL;
6602 }
6603 
6604 static int host_to_target_flock(int type)
6605 {
6606 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6607     FLOCK_TRANSTBL
6608 #undef  TRANSTBL_CONVERT
6609     /* If we don't know how to convert the value coming
6610      * from the host, we copy it to the target field as-is.
6611      */
6612     return type;
6613 }
6614 
6615 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6616                                             abi_ulong target_flock_addr)
6617 {
6618     struct target_flock *target_fl;
6619     int l_type;
6620 
6621     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6622         return -TARGET_EFAULT;
6623     }
6624 
6625     __get_user(l_type, &target_fl->l_type);
6626     l_type = target_to_host_flock(l_type);
6627     if (l_type < 0) {
6628         return l_type;
6629     }
6630     fl->l_type = l_type;
6631     __get_user(fl->l_whence, &target_fl->l_whence);
6632     __get_user(fl->l_start, &target_fl->l_start);
6633     __get_user(fl->l_len, &target_fl->l_len);
6634     __get_user(fl->l_pid, &target_fl->l_pid);
6635     unlock_user_struct(target_fl, target_flock_addr, 0);
6636     return 0;
6637 }
6638 
6639 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6640                                           const struct flock64 *fl)
6641 {
6642     struct target_flock *target_fl;
6643     short l_type;
6644 
6645     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6646         return -TARGET_EFAULT;
6647     }
6648 
6649     l_type = host_to_target_flock(fl->l_type);
6650     __put_user(l_type, &target_fl->l_type);
6651     __put_user(fl->l_whence, &target_fl->l_whence);
6652     __put_user(fl->l_start, &target_fl->l_start);
6653     __put_user(fl->l_len, &target_fl->l_len);
6654     __put_user(fl->l_pid, &target_fl->l_pid);
6655     unlock_user_struct(target_fl, target_flock_addr, 1);
6656     return 0;
6657 }
6658 
6659 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6660 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6661 
6662 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6663 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6664                                                    abi_ulong target_flock_addr)
6665 {
6666     struct target_oabi_flock64 *target_fl;
6667     int l_type;
6668 
6669     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6670         return -TARGET_EFAULT;
6671     }
6672 
6673     __get_user(l_type, &target_fl->l_type);
6674     l_type = target_to_host_flock(l_type);
6675     if (l_type < 0) {
6676         return l_type;
6677     }
6678     fl->l_type = l_type;
6679     __get_user(fl->l_whence, &target_fl->l_whence);
6680     __get_user(fl->l_start, &target_fl->l_start);
6681     __get_user(fl->l_len, &target_fl->l_len);
6682     __get_user(fl->l_pid, &target_fl->l_pid);
6683     unlock_user_struct(target_fl, target_flock_addr, 0);
6684     return 0;
6685 }
6686 
6687 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6688                                                  const struct flock64 *fl)
6689 {
6690     struct target_oabi_flock64 *target_fl;
6691     short l_type;
6692 
6693     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6694         return -TARGET_EFAULT;
6695     }
6696 
6697     l_type = host_to_target_flock(fl->l_type);
6698     __put_user(l_type, &target_fl->l_type);
6699     __put_user(fl->l_whence, &target_fl->l_whence);
6700     __put_user(fl->l_start, &target_fl->l_start);
6701     __put_user(fl->l_len, &target_fl->l_len);
6702     __put_user(fl->l_pid, &target_fl->l_pid);
6703     unlock_user_struct(target_fl, target_flock_addr, 1);
6704     return 0;
6705 }
6706 #endif
6707 
6708 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6709                                               abi_ulong target_flock_addr)
6710 {
6711     struct target_flock64 *target_fl;
6712     int l_type;
6713 
6714     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6715         return -TARGET_EFAULT;
6716     }
6717 
6718     __get_user(l_type, &target_fl->l_type);
6719     l_type = target_to_host_flock(l_type);
6720     if (l_type < 0) {
6721         return l_type;
6722     }
6723     fl->l_type = l_type;
6724     __get_user(fl->l_whence, &target_fl->l_whence);
6725     __get_user(fl->l_start, &target_fl->l_start);
6726     __get_user(fl->l_len, &target_fl->l_len);
6727     __get_user(fl->l_pid, &target_fl->l_pid);
6728     unlock_user_struct(target_fl, target_flock_addr, 0);
6729     return 0;
6730 }
6731 
6732 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6733                                             const struct flock64 *fl)
6734 {
6735     struct target_flock64 *target_fl;
6736     short l_type;
6737 
6738     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6739         return -TARGET_EFAULT;
6740     }
6741 
6742     l_type = host_to_target_flock(fl->l_type);
6743     __put_user(l_type, &target_fl->l_type);
6744     __put_user(fl->l_whence, &target_fl->l_whence);
6745     __put_user(fl->l_start, &target_fl->l_start);
6746     __put_user(fl->l_len, &target_fl->l_len);
6747     __put_user(fl->l_pid, &target_fl->l_pid);
6748     unlock_user_struct(target_fl, target_flock_addr, 1);
6749     return 0;
6750 }
6751 
6752 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6753 {
6754     struct flock64 fl64;
6755 #ifdef F_GETOWN_EX
6756     struct f_owner_ex fox;
6757     struct target_f_owner_ex *target_fox;
6758 #endif
6759     abi_long ret;
6760     int host_cmd = target_to_host_fcntl_cmd(cmd);
6761 
6762     if (host_cmd == -TARGET_EINVAL)
6763         return host_cmd;
6764 
6765     switch(cmd) {
6766     case TARGET_F_GETLK:
6767         ret = copy_from_user_flock(&fl64, arg);
6768         if (ret) {
6769             return ret;
6770         }
6771         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6772         if (ret == 0) {
6773             ret = copy_to_user_flock(arg, &fl64);
6774         }
6775         break;
6776 
6777     case TARGET_F_SETLK:
6778     case TARGET_F_SETLKW:
6779         ret = copy_from_user_flock(&fl64, arg);
6780         if (ret) {
6781             return ret;
6782         }
6783         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6784         break;
6785 
6786     case TARGET_F_GETLK64:
6787     case TARGET_F_OFD_GETLK:
6788         ret = copy_from_user_flock64(&fl64, arg);
6789         if (ret) {
6790             return ret;
6791         }
6792         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6793         if (ret == 0) {
6794             ret = copy_to_user_flock64(arg, &fl64);
6795         }
6796         break;
6797     case TARGET_F_SETLK64:
6798     case TARGET_F_SETLKW64:
6799     case TARGET_F_OFD_SETLK:
6800     case TARGET_F_OFD_SETLKW:
6801         ret = copy_from_user_flock64(&fl64, arg);
6802         if (ret) {
6803             return ret;
6804         }
6805         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6806         break;
6807 
6808     case TARGET_F_GETFL:
6809         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6810         if (ret >= 0) {
6811             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6812         }
6813         break;
6814 
6815     case TARGET_F_SETFL:
6816         ret = get_errno(safe_fcntl(fd, host_cmd,
6817                                    target_to_host_bitmask(arg,
6818                                                           fcntl_flags_tbl)));
6819         break;
6820 
6821 #ifdef F_GETOWN_EX
6822     case TARGET_F_GETOWN_EX:
6823         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6824         if (ret >= 0) {
6825             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6826                 return -TARGET_EFAULT;
6827             target_fox->type = tswap32(fox.type);
6828             target_fox->pid = tswap32(fox.pid);
6829             unlock_user_struct(target_fox, arg, 1);
6830         }
6831         break;
6832 #endif
6833 
6834 #ifdef F_SETOWN_EX
6835     case TARGET_F_SETOWN_EX:
6836         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6837             return -TARGET_EFAULT;
6838         fox.type = tswap32(target_fox->type);
6839         fox.pid = tswap32(target_fox->pid);
6840         unlock_user_struct(target_fox, arg, 0);
6841         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6842         break;
6843 #endif
6844 
6845     case TARGET_F_SETSIG:
6846         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6847         break;
6848 
6849     case TARGET_F_GETSIG:
6850         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6851         break;
6852 
6853     case TARGET_F_SETOWN:
6854     case TARGET_F_GETOWN:
6855     case TARGET_F_SETLEASE:
6856     case TARGET_F_GETLEASE:
6857     case TARGET_F_SETPIPE_SZ:
6858     case TARGET_F_GETPIPE_SZ:
6859     case TARGET_F_ADD_SEALS:
6860     case TARGET_F_GET_SEALS:
6861         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6862         break;
6863 
6864     default:
6865         ret = get_errno(safe_fcntl(fd, cmd, arg));
6866         break;
6867     }
6868     return ret;
6869 }
6870 
6871 #ifdef USE_UID16
6872 
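/*
 * For the 16-bit UID/GID syscalls, IDs that do not fit in 16 bits are
 * reported to the guest as 65534 (the traditional "overflow" UID/GID), and
 * a 16-bit -1 must stay -1 so the "leave this ID unchanged" semantics of
 * calls like setresuid() survive the widening.
 */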
6873 static inline int high2lowuid(int uid)
6874 {
6875     if (uid > 65535)
6876         return 65534;
6877     else
6878         return uid;
6879 }
6880 
6881 static inline int high2lowgid(int gid)
6882 {
6883     if (gid > 65535)
6884         return 65534;
6885     else
6886         return gid;
6887 }
6888 
6889 static inline int low2highuid(int uid)
6890 {
6891     if ((int16_t)uid == -1)
6892         return -1;
6893     else
6894         return uid;
6895 }
6896 
6897 static inline int low2highgid(int gid)
6898 {
6899     if ((int16_t)gid == -1)
6900         return -1;
6901     else
6902         return gid;
6903 }
6904 static inline int tswapid(int id)
6905 {
6906     return tswap16(id);
6907 }
6908 
6909 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6910 
6911 #else /* !USE_UID16 */
6912 static inline int high2lowuid(int uid)
6913 {
6914     return uid;
6915 }
6916 static inline int high2lowgid(int gid)
6917 {
6918     return gid;
6919 }
6920 static inline int low2highuid(int uid)
6921 {
6922     return uid;
6923 }
6924 static inline int low2highgid(int gid)
6925 {
6926     return gid;
6927 }
6928 static inline int tswapid(int id)
6929 {
6930     return tswap32(id);
6931 }
6932 
6933 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6934 
6935 #endif /* USE_UID16 */
6936 
6937 /* We must do direct syscalls for setting UID/GID, because we want to
6938  * implement the Linux system call semantics of "change only for this thread",
6939  * not the libc/POSIX semantics of "change for all threads in process".
6940  * (See http://ewontfix.com/17/ for more details.)
6941  * We use the 32-bit version of the syscalls if present; if it is not
6942  * then either the host architecture supports 32-bit UIDs natively with
6943  * the standard syscall, or the 16-bit UID is the best we can do.
6944  */
6945 #ifdef __NR_setuid32
6946 #define __NR_sys_setuid __NR_setuid32
6947 #else
6948 #define __NR_sys_setuid __NR_setuid
6949 #endif
6950 #ifdef __NR_setgid32
6951 #define __NR_sys_setgid __NR_setgid32
6952 #else
6953 #define __NR_sys_setgid __NR_setgid
6954 #endif
6955 #ifdef __NR_setresuid32
6956 #define __NR_sys_setresuid __NR_setresuid32
6957 #else
6958 #define __NR_sys_setresuid __NR_setresuid
6959 #endif
6960 #ifdef __NR_setresgid32
6961 #define __NR_sys_setresgid __NR_setresgid32
6962 #else
6963 #define __NR_sys_setresgid __NR_setresgid
6964 #endif
6965 
6966 _syscall1(int, sys_setuid, uid_t, uid)
6967 _syscall1(int, sys_setgid, gid_t, gid)
6968 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6969 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6970 
6971 void syscall_init(void)
6972 {
6973     IOCTLEntry *ie;
6974     const argtype *arg_type;
6975     int size;
6976 
6977     thunk_init(STRUCT_MAX);
6978 
6979 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6980 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6981 #include "syscall_types.h"
6982 #undef STRUCT
6983 #undef STRUCT_SPECIAL
6984 
6985     /* We patch the ioctl size if necessary. We rely on the fact that
6986        no ioctl has all of the bits set to '1' in the size field. */
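    /*
     * Entries declared with a size of TARGET_IOC_SIZEMASK (all ones) are
     * placeholders: the real size of the structure their argument points to
     * is computed from the thunk type description and patched into the
     * target command number, so later lookups match what the guest passes.
     */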
6987     ie = ioctl_entries;
6988     while (ie->target_cmd != 0) {
6989         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6990             TARGET_IOC_SIZEMASK) {
6991             arg_type = ie->arg_type;
6992             if (arg_type[0] != TYPE_PTR) {
6993                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6994                         ie->target_cmd);
6995                 exit(1);
6996             }
6997             arg_type++;
6998             size = thunk_type_size(arg_type, 0);
6999             ie->target_cmd = (ie->target_cmd &
7000                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7001                 (size << TARGET_IOC_SIZESHIFT);
7002         }
7003 
7004         /* automatic consistency check if same arch */
7005 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7006     (defined(__x86_64__) && defined(TARGET_X86_64))
7007         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7008             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7009                     ie->name, ie->target_cmd, ie->host_cmd);
7010         }
7011 #endif
7012         ie++;
7013     }
7014 }
7015 
7016 #ifdef TARGET_NR_truncate64
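/*
 * On 32-bit ABIs the 64-bit offset arrives split across two registers.
 * Some ABIs (e.g. ARM EABI) require such register pairs to start on an
 * even-numbered register, which inserts a padding argument;
 * regpairs_aligned() detects this and we shift the arguments down before
 * reassembling the offset with target_offset64().
 */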
7017 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7018                                          abi_long arg2,
7019                                          abi_long arg3,
7020                                          abi_long arg4)
7021 {
7022     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7023         arg2 = arg3;
7024         arg3 = arg4;
7025     }
7026     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7027 }
7028 #endif
7029 
7030 #ifdef TARGET_NR_ftruncate64
7031 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7032                                           abi_long arg2,
7033                                           abi_long arg3,
7034                                           abi_long arg4)
7035 {
7036     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7037         arg2 = arg3;
7038         arg3 = arg4;
7039     }
7040     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7041 }
7042 #endif
7043 
7044 #if defined(TARGET_NR_timer_settime) || \
7045     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7046 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7047                                                  abi_ulong target_addr)
7048 {
7049     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7050                                 offsetof(struct target_itimerspec,
7051                                          it_interval)) ||
7052         target_to_host_timespec(&host_its->it_value, target_addr +
7053                                 offsetof(struct target_itimerspec,
7054                                          it_value))) {
7055         return -TARGET_EFAULT;
7056     }
7057 
7058     return 0;
7059 }
7060 #endif
7061 
7062 #if defined(TARGET_NR_timer_settime64) || \
7063     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7064 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7065                                                    abi_ulong target_addr)
7066 {
7067     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7068                                   offsetof(struct target__kernel_itimerspec,
7069                                            it_interval)) ||
7070         target_to_host_timespec64(&host_its->it_value, target_addr +
7071                                   offsetof(struct target__kernel_itimerspec,
7072                                            it_value))) {
7073         return -TARGET_EFAULT;
7074     }
7075 
7076     return 0;
7077 }
7078 #endif
7079 
7080 #if ((defined(TARGET_NR_timerfd_gettime) || \
7081       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7082       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7083 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7084                                                  struct itimerspec *host_its)
7085 {
7086     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7087                                                        it_interval),
7088                                 &host_its->it_interval) ||
7089         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7090                                                        it_value),
7091                                 &host_its->it_value)) {
7092         return -TARGET_EFAULT;
7093     }
7094     return 0;
7095 }
7096 #endif
7097 
7098 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7099       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7100       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7101 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7102                                                    struct itimerspec *host_its)
7103 {
7104     if (host_to_target_timespec64(target_addr +
7105                                   offsetof(struct target__kernel_itimerspec,
7106                                            it_interval),
7107                                   &host_its->it_interval) ||
7108         host_to_target_timespec64(target_addr +
7109                                   offsetof(struct target__kernel_itimerspec,
7110                                            it_value),
7111                                   &host_its->it_value)) {
7112         return -TARGET_EFAULT;
7113     }
7114     return 0;
7115 }
7116 #endif
7117 
7118 #if defined(TARGET_NR_adjtimex) || \
7119     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7120 static inline abi_long target_to_host_timex(struct timex *host_tx,
7121                                             abi_long target_addr)
7122 {
7123     struct target_timex *target_tx;
7124 
7125     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7126         return -TARGET_EFAULT;
7127     }
7128 
7129     __get_user(host_tx->modes, &target_tx->modes);
7130     __get_user(host_tx->offset, &target_tx->offset);
7131     __get_user(host_tx->freq, &target_tx->freq);
7132     __get_user(host_tx->maxerror, &target_tx->maxerror);
7133     __get_user(host_tx->esterror, &target_tx->esterror);
7134     __get_user(host_tx->status, &target_tx->status);
7135     __get_user(host_tx->constant, &target_tx->constant);
7136     __get_user(host_tx->precision, &target_tx->precision);
7137     __get_user(host_tx->tolerance, &target_tx->tolerance);
7138     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7139     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7140     __get_user(host_tx->tick, &target_tx->tick);
7141     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7142     __get_user(host_tx->jitter, &target_tx->jitter);
7143     __get_user(host_tx->shift, &target_tx->shift);
7144     __get_user(host_tx->stabil, &target_tx->stabil);
7145     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7146     __get_user(host_tx->calcnt, &target_tx->calcnt);
7147     __get_user(host_tx->errcnt, &target_tx->errcnt);
7148     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7149     __get_user(host_tx->tai, &target_tx->tai);
7150 
7151     unlock_user_struct(target_tx, target_addr, 0);
7152     return 0;
7153 }
7154 
7155 static inline abi_long host_to_target_timex(abi_long target_addr,
7156                                             struct timex *host_tx)
7157 {
7158     struct target_timex *target_tx;
7159 
7160     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7161         return -TARGET_EFAULT;
7162     }
7163 
7164     __put_user(host_tx->modes, &target_tx->modes);
7165     __put_user(host_tx->offset, &target_tx->offset);
7166     __put_user(host_tx->freq, &target_tx->freq);
7167     __put_user(host_tx->maxerror, &target_tx->maxerror);
7168     __put_user(host_tx->esterror, &target_tx->esterror);
7169     __put_user(host_tx->status, &target_tx->status);
7170     __put_user(host_tx->constant, &target_tx->constant);
7171     __put_user(host_tx->precision, &target_tx->precision);
7172     __put_user(host_tx->tolerance, &target_tx->tolerance);
7173     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7174     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7175     __put_user(host_tx->tick, &target_tx->tick);
7176     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7177     __put_user(host_tx->jitter, &target_tx->jitter);
7178     __put_user(host_tx->shift, &target_tx->shift);
7179     __put_user(host_tx->stabil, &target_tx->stabil);
7180     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7181     __put_user(host_tx->calcnt, &target_tx->calcnt);
7182     __put_user(host_tx->errcnt, &target_tx->errcnt);
7183     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7184     __put_user(host_tx->tai, &target_tx->tai);
7185 
7186     unlock_user_struct(target_tx, target_addr, 1);
7187     return 0;
7188 }
7189 #endif
7190 
7191 
7192 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7193 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7194                                               abi_long target_addr)
7195 {
7196     struct target__kernel_timex *target_tx;
7197 
7198     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7199                                  offsetof(struct target__kernel_timex,
7200                                           time))) {
7201         return -TARGET_EFAULT;
7202     }
7203 
7204     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7205         return -TARGET_EFAULT;
7206     }
7207 
7208     __get_user(host_tx->modes, &target_tx->modes);
7209     __get_user(host_tx->offset, &target_tx->offset);
7210     __get_user(host_tx->freq, &target_tx->freq);
7211     __get_user(host_tx->maxerror, &target_tx->maxerror);
7212     __get_user(host_tx->esterror, &target_tx->esterror);
7213     __get_user(host_tx->status, &target_tx->status);
7214     __get_user(host_tx->constant, &target_tx->constant);
7215     __get_user(host_tx->precision, &target_tx->precision);
7216     __get_user(host_tx->tolerance, &target_tx->tolerance);
7217     __get_user(host_tx->tick, &target_tx->tick);
7218     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7219     __get_user(host_tx->jitter, &target_tx->jitter);
7220     __get_user(host_tx->shift, &target_tx->shift);
7221     __get_user(host_tx->stabil, &target_tx->stabil);
7222     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7223     __get_user(host_tx->calcnt, &target_tx->calcnt);
7224     __get_user(host_tx->errcnt, &target_tx->errcnt);
7225     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7226     __get_user(host_tx->tai, &target_tx->tai);
7227 
7228     unlock_user_struct(target_tx, target_addr, 0);
7229     return 0;
7230 }
7231 
7232 static inline abi_long host_to_target_timex64(abi_long target_addr,
7233                                               struct timex *host_tx)
7234 {
7235     struct target__kernel_timex *target_tx;
7236 
7237     if (copy_to_user_timeval64(target_addr +
7238                                offsetof(struct target__kernel_timex, time),
7239                                &host_tx->time)) {
7240         return -TARGET_EFAULT;
7241     }
7242 
7243     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7244         return -TARGET_EFAULT;
7245     }
7246 
7247     __put_user(host_tx->modes, &target_tx->modes);
7248     __put_user(host_tx->offset, &target_tx->offset);
7249     __put_user(host_tx->freq, &target_tx->freq);
7250     __put_user(host_tx->maxerror, &target_tx->maxerror);
7251     __put_user(host_tx->esterror, &target_tx->esterror);
7252     __put_user(host_tx->status, &target_tx->status);
7253     __put_user(host_tx->constant, &target_tx->constant);
7254     __put_user(host_tx->precision, &target_tx->precision);
7255     __put_user(host_tx->tolerance, &target_tx->tolerance);
7256     __put_user(host_tx->tick, &target_tx->tick);
7257     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7258     __put_user(host_tx->jitter, &target_tx->jitter);
7259     __put_user(host_tx->shift, &target_tx->shift);
7260     __put_user(host_tx->stabil, &target_tx->stabil);
7261     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7262     __put_user(host_tx->calcnt, &target_tx->calcnt);
7263     __put_user(host_tx->errcnt, &target_tx->errcnt);
7264     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7265     __put_user(host_tx->tai, &target_tx->tai);
7266 
7267     unlock_user_struct(target_tx, target_addr, 1);
7268     return 0;
7269 }
7270 #endif
7271 
7272 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7273 #define sigev_notify_thread_id _sigev_un._tid
7274 #endif
7275 
7276 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7277                                                abi_ulong target_addr)
7278 {
7279     struct target_sigevent *target_sevp;
7280 
7281     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7282         return -TARGET_EFAULT;
7283     }
7284 
7285     /* This union is awkward on 64 bit systems because it has a 32 bit
7286      * integer and a pointer in it; we follow the conversion approach
7287      * used for handling sigval types in signal.c so the guest should get
7288      * the correct value back even if we did a 64 bit byteswap and it's
7289      * using the 32 bit integer.
7290      */
7291     host_sevp->sigev_value.sival_ptr =
7292         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7293     host_sevp->sigev_signo =
7294         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7295     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7296     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7297 
7298     unlock_user_struct(target_sevp, target_addr, 1);
7299     return 0;
7300 }
7301 
7302 #if defined(TARGET_NR_mlockall)
7303 static inline int target_to_host_mlockall_arg(int arg)
7304 {
7305     int result = 0;
7306 
7307     if (arg & TARGET_MCL_CURRENT) {
7308         result |= MCL_CURRENT;
7309     }
7310     if (arg & TARGET_MCL_FUTURE) {
7311         result |= MCL_FUTURE;
7312     }
7313 #ifdef MCL_ONFAULT
7314     if (arg & TARGET_MCL_ONFAULT) {
7315         result |= MCL_ONFAULT;
7316     }
7317 #endif
7318 
7319     return result;
7320 }
7321 #endif
7322 
7323 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7324      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7325      defined(TARGET_NR_newfstatat))
7326 static inline abi_long host_to_target_stat64(void *cpu_env,
7327                                              abi_ulong target_addr,
7328                                              struct stat *host_st)
7329 {
7330 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7331     if (((CPUARMState *)cpu_env)->eabi) {
7332         struct target_eabi_stat64 *target_st;
7333 
7334         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7335             return -TARGET_EFAULT;
7336         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7337         __put_user(host_st->st_dev, &target_st->st_dev);
7338         __put_user(host_st->st_ino, &target_st->st_ino);
7339 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7340         __put_user(host_st->st_ino, &target_st->__st_ino);
7341 #endif
7342         __put_user(host_st->st_mode, &target_st->st_mode);
7343         __put_user(host_st->st_nlink, &target_st->st_nlink);
7344         __put_user(host_st->st_uid, &target_st->st_uid);
7345         __put_user(host_st->st_gid, &target_st->st_gid);
7346         __put_user(host_st->st_rdev, &target_st->st_rdev);
7347         __put_user(host_st->st_size, &target_st->st_size);
7348         __put_user(host_st->st_blksize, &target_st->st_blksize);
7349         __put_user(host_st->st_blocks, &target_st->st_blocks);
7350         __put_user(host_st->st_atime, &target_st->target_st_atime);
7351         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7352         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7353 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7354         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7355         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7356         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7357 #endif
7358         unlock_user_struct(target_st, target_addr, 1);
7359     } else
7360 #endif
7361     {
7362 #if defined(TARGET_HAS_STRUCT_STAT64)
7363         struct target_stat64 *target_st;
7364 #else
7365         struct target_stat *target_st;
7366 #endif
7367 
7368         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7369             return -TARGET_EFAULT;
7370         memset(target_st, 0, sizeof(*target_st));
7371         __put_user(host_st->st_dev, &target_st->st_dev);
7372         __put_user(host_st->st_ino, &target_st->st_ino);
7373 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7374         __put_user(host_st->st_ino, &target_st->__st_ino);
7375 #endif
7376         __put_user(host_st->st_mode, &target_st->st_mode);
7377         __put_user(host_st->st_nlink, &target_st->st_nlink);
7378         __put_user(host_st->st_uid, &target_st->st_uid);
7379         __put_user(host_st->st_gid, &target_st->st_gid);
7380         __put_user(host_st->st_rdev, &target_st->st_rdev);
7381         /* XXX: better use of kernel struct */
7382         __put_user(host_st->st_size, &target_st->st_size);
7383         __put_user(host_st->st_blksize, &target_st->st_blksize);
7384         __put_user(host_st->st_blocks, &target_st->st_blocks);
7385         __put_user(host_st->st_atime, &target_st->target_st_atime);
7386         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7387         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7388 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7389         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7390         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7391         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7392 #endif
7393         unlock_user_struct(target_st, target_addr, 1);
7394     }
7395 
7396     return 0;
7397 }
7398 #endif
7399 
7400 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7401 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7402                                             abi_ulong target_addr)
7403 {
7404     struct target_statx *target_stx;
7405 
7406     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7407         return -TARGET_EFAULT;
7408     }
7409     memset(target_stx, 0, sizeof(*target_stx));
7410 
7411     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7412     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7413     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7414     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7415     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7416     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7417     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7418     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7419     __put_user(host_stx->stx_size, &target_stx->stx_size);
7420     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7421     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7422     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7423     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7424     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7425     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7426     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7427     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7428     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7429     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7430     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7431     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7432     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7433     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7434 
7435     unlock_user_struct(target_stx, target_addr, 1);
7436 
7437     return 0;
7438 }
7439 #endif
7440 
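/*
 * Pick the host futex syscall matching the host timespec layout: a 64-bit
 * host only has __NR_futex (with a 64-bit time_t), while a 32-bit host
 * prefers __NR_futex_time64 when its timespec carries a 64-bit tv_sec and
 * otherwise falls back to the old __NR_futex.
 */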
7441 static int do_sys_futex(int *uaddr, int op, int val,
7442                          const struct timespec *timeout, int *uaddr2,
7443                          int val3)
7444 {
7445 #if HOST_LONG_BITS == 64
7446 #if defined(__NR_futex)
7447     /* The host always has a 64-bit time_t, so no _time64 variant is defined. */
7448     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7449 
7450 #endif
7451 #else /* HOST_LONG_BITS == 64 */
7452 #if defined(__NR_futex_time64)
7453     if (sizeof(timeout->tv_sec) == 8) {
7454         /* _time64 function on 32bit arch */
7455         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7456     }
7457 #endif
7458 #if defined(__NR_futex)
7459     /* old function on 32bit arch */
7460     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7461 #endif
7462 #endif /* HOST_LONG_BITS == 64 */
7463     g_assert_not_reached();
7464 }
7465 
7466 static int do_safe_futex(int *uaddr, int op, int val,
7467                          const struct timespec *timeout, int *uaddr2,
7468                          int val3)
7469 {
7470 #if HOST_LONG_BITS == 64
7471 #if defined(__NR_futex)
7472     /* The host always has a 64-bit time_t, so no _time64 variant is defined. */
7473     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7474 #endif
7475 #else /* HOST_LONG_BITS == 64 */
7476 #if defined(__NR_futex_time64)
7477     if (sizeof(timeout->tv_sec) == 8) {
7478         /* _time64 function on 32bit arch */
7479         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7480                                            val3));
7481     }
7482 #endif
7483 #if defined(__NR_futex)
7484     /* old function on 32bit arch */
7485     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7486 #endif
7487 #endif /* HOST_LONG_BITS == 64 */
7488     return -TARGET_ENOSYS;
7489 }
7490 
7491 /* ??? Using host futex calls even when target atomic operations
7492    are not really atomic probably breaks things.  However, implementing
7493    futexes locally would make futexes shared between multiple processes
7494    tricky.  They are probably useless anyway, because guest atomic
7495    operations won't work either.  */
7496 #if defined(TARGET_NR_futex)
7497 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7498                     target_ulong timeout, target_ulong uaddr2, int val3)
7499 {
7500     struct timespec ts, *pts;
7501     int base_op;
7502 
7503     /* ??? We assume FUTEX_* constants are the same on both host
7504        and target.  */
7505 #ifdef FUTEX_CMD_MASK
7506     base_op = op & FUTEX_CMD_MASK;
7507 #else
7508     base_op = op;
7509 #endif
7510     switch (base_op) {
7511     case FUTEX_WAIT:
7512     case FUTEX_WAIT_BITSET:
7513         if (timeout) {
7514             pts = &ts;
7515             target_to_host_timespec(pts, timeout);
7516         } else {
7517             pts = NULL;
7518         }
7519         return do_safe_futex(g2h(cpu, uaddr),
7520                              op, tswap32(val), pts, NULL, val3);
7521     case FUTEX_WAKE:
7522         return do_safe_futex(g2h(cpu, uaddr),
7523                              op, val, NULL, NULL, 0);
7524     case FUTEX_FD:
7525         return do_safe_futex(g2h(cpu, uaddr),
7526                              op, val, NULL, NULL, 0);
7527     case FUTEX_REQUEUE:
7528     case FUTEX_CMP_REQUEUE:
7529     case FUTEX_WAKE_OP:
7530         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7531            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7532            But the prototype takes a `struct timespec *'; insert casts
7533            to satisfy the compiler.  We do not need to tswap TIMEOUT
7534            since it's not compared to guest memory.  */
7535         pts = (struct timespec *)(uintptr_t) timeout;
7536         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7537                              (base_op == FUTEX_CMP_REQUEUE
7538                               ? tswap32(val3) : val3));
7539     default:
7540         return -TARGET_ENOSYS;
7541     }
7542 }
7543 #endif
7544 
7545 #if defined(TARGET_NR_futex_time64)
7546 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7547                            int val, target_ulong timeout,
7548                            target_ulong uaddr2, int val3)
7549 {
7550     struct timespec ts, *pts;
7551     int base_op;
7552 
7553     /* ??? We assume FUTEX_* constants are the same on both host
7554        and target.  */
7555 #ifdef FUTEX_CMD_MASK
7556     base_op = op & FUTEX_CMD_MASK;
7557 #else
7558     base_op = op;
7559 #endif
7560     switch (base_op) {
7561     case FUTEX_WAIT:
7562     case FUTEX_WAIT_BITSET:
7563         if (timeout) {
7564             pts = &ts;
7565             if (target_to_host_timespec64(pts, timeout)) {
7566                 return -TARGET_EFAULT;
7567             }
7568         } else {
7569             pts = NULL;
7570         }
7571         return do_safe_futex(g2h(cpu, uaddr), op,
7572                              tswap32(val), pts, NULL, val3);
7573     case FUTEX_WAKE:
7574         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7575     case FUTEX_FD:
7576         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7577     case FUTEX_REQUEUE:
7578     case FUTEX_CMP_REQUEUE:
7579     case FUTEX_WAKE_OP:
7580         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7581            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7582            But the prototype takes a `struct timespec *'; insert casts
7583            to satisfy the compiler.  We do not need to tswap TIMEOUT
7584            since it's not compared to guest memory.  */
7585         pts = (struct timespec *)(uintptr_t) timeout;
7586         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7587                              (base_op == FUTEX_CMP_REQUEUE
7588                               ? tswap32(val3) : val3));
7589     default:
7590         return -TARGET_ENOSYS;
7591     }
7592 }
7593 #endif
7594 
7595 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7596 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7597                                      abi_long handle, abi_long mount_id,
7598                                      abi_long flags)
7599 {
7600     struct file_handle *target_fh;
7601     struct file_handle *fh;
7602     int mid = 0;
7603     abi_long ret;
7604     char *name;
7605     unsigned int size, total_size;
7606 
7607     if (get_user_s32(size, handle)) {
7608         return -TARGET_EFAULT;
7609     }
7610 
7611     name = lock_user_string(pathname);
7612     if (!name) {
7613         return -TARGET_EFAULT;
7614     }
7615 
7616     total_size = sizeof(struct file_handle) + size;
7617     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7618     if (!target_fh) {
7619         unlock_user(name, pathname, 0);
7620         return -TARGET_EFAULT;
7621     }
7622 
7623     fh = g_malloc0(total_size);
7624     fh->handle_bytes = size;
7625 
7626     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7627     unlock_user(name, pathname, 0);
7628 
7629     /* man name_to_handle_at(2):
7630      * Other than the use of the handle_bytes field, the caller should treat
7631      * the file_handle structure as an opaque data type
7632      */
7633 
7634     memcpy(target_fh, fh, total_size);
7635     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7636     target_fh->handle_type = tswap32(fh->handle_type);
7637     g_free(fh);
7638     unlock_user(target_fh, handle, total_size);
7639 
7640     if (put_user_s32(mid, mount_id)) {
7641         return -TARGET_EFAULT;
7642     }
7643 
7644     return ret;
7645 
7646 }
7647 #endif
7648 
7649 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
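/*
 * Emulate open_by_handle_at(): duplicate the guest's file_handle, fix up
 * handle_bytes/handle_type for the host, and translate the open flags.
 */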
7650 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7651                                      abi_long flags)
7652 {
7653     struct file_handle *target_fh;
7654     struct file_handle *fh;
7655     unsigned int size, total_size;
7656     abi_long ret;
7657 
7658     if (get_user_s32(size, handle)) {
7659         return -TARGET_EFAULT;
7660     }
7661 
7662     total_size = sizeof(struct file_handle) + size;
7663     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7664     if (!target_fh) {
7665         return -TARGET_EFAULT;
7666     }
7667 
7668     fh = g_memdup(target_fh, total_size);
7669     fh->handle_bytes = size;
7670     fh->handle_type = tswap32(target_fh->handle_type);
7671 
7672     ret = get_errno(open_by_handle_at(mount_fd, fh,
7673                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7674 
7675     g_free(fh);
7676 
7677     unlock_user(target_fh, handle, total_size);
7678 
7679     return ret;
7680 }
7681 #endif
7682 
7683 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7684 
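/*
 * Common helper for signalfd and signalfd4: convert the guest signal mask
 * and flags to host values, create the signalfd, and register an fd
 * translator so that data read from it is converted to the target's
 * signalfd_siginfo layout.
 */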
7685 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7686 {
7687     int host_flags;
7688     target_sigset_t *target_mask;
7689     sigset_t host_mask;
7690     abi_long ret;
7691 
7692     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7693         return -TARGET_EINVAL;
7694     }
7695     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7696         return -TARGET_EFAULT;
7697     }
7698 
7699     target_to_host_sigset(&host_mask, target_mask);
7700 
7701     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7702 
7703     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7704     if (ret >= 0) {
7705         fd_trans_register(ret, &target_signalfd_trans);
7706     }
7707 
7708     unlock_user_struct(target_mask, mask, 0);
7709 
7710     return ret;
7711 }
7712 #endif
7713 
7714 /* Map host to target signal numbers for the wait family of syscalls.
7715    Assume all other status bits are the same.  */
7716 int host_to_target_waitstatus(int status)
7717 {
7718     if (WIFSIGNALED(status)) {
7719         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7720     }
7721     if (WIFSTOPPED(status)) {
7722         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7723                | (status & 0xff);
7724     }
7725     return status;
7726 }
7727 
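/*
 * Emulate /proc/self/cmdline: write out the guest's saved argv[] strings,
 * including the terminating NUL of each argument.
 */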
7728 static int open_self_cmdline(void *cpu_env, int fd)
7729 {
7730     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7731     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7732     int i;
7733 
7734     for (i = 0; i < bprm->argc; i++) {
7735         size_t len = strlen(bprm->argv[i]) + 1;
7736 
7737         if (write(fd, bprm->argv[i], len) != len) {
7738             return -1;
7739         }
7740     }
7741 
7742     return 0;
7743 }
7744 
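/*
 * Emulate /proc/self/maps: walk the host's own mappings and report only the
 * ranges that correspond to valid guest addresses, translated with h2g()
 * and using the guest page protection flags.
 */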
7745 static int open_self_maps(void *cpu_env, int fd)
7746 {
7747     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7748     TaskState *ts = cpu->opaque;
7749     GSList *map_info = read_self_maps();
7750     GSList *s;
7751     int count;
7752 
7753     for (s = map_info; s; s = g_slist_next(s)) {
7754         MapInfo *e = (MapInfo *) s->data;
7755 
7756         if (h2g_valid(e->start)) {
7757             unsigned long min = e->start;
7758             unsigned long max = e->end;
7759             int flags = page_get_flags(h2g(min));
7760             const char *path;
7761 
7762             max = h2g_valid(max - 1) ?
7763                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7764 
7765             if (page_check_range(h2g(min), max - min, flags) == -1) {
7766                 continue;
7767             }
7768 
7769             if (h2g(min) == ts->info->stack_limit) {
7770                 path = "[stack]";
7771             } else {
7772                 path = e->path;
7773             }
7774 
7775             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7776                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7777                             h2g(min), h2g(max - 1) + 1,
7778                             (flags & PAGE_READ) ? 'r' : '-',
7779                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7780                             (flags & PAGE_EXEC) ? 'x' : '-',
7781                             e->is_priv ? 'p' : '-',
7782                             (uint64_t) e->offset, e->dev, e->inode);
7783             if (path) {
7784                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7785             } else {
7786                 dprintf(fd, "\n");
7787             }
7788         }
7789     }
7790 
7791     free_self_maps(map_info);
7792 
7793 #ifdef TARGET_VSYSCALL_PAGE
7794     /*
7795      * We only support execution from the vsyscall page.
7796      * This behaves as if CONFIG_LEGACY_VSYSCALL_XONLY=y (added in Linux v5.3) were set.
7797      */
7798     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7799                     " --xp 00000000 00:00 0",
7800                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7801     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7802 #endif
7803 
7804     return 0;
7805 }
7806 
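/*
 * Emulate /proc/self/stat: only the pid, comm, ppid and start-of-stack
 * fields are filled in; all remaining fields are reported as 0.
 */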
7807 static int open_self_stat(void *cpu_env, int fd)
7808 {
7809     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7810     TaskState *ts = cpu->opaque;
7811     g_autoptr(GString) buf = g_string_new(NULL);
7812     int i;
7813 
7814     for (i = 0; i < 44; i++) {
7815         if (i == 0) {
7816             /* pid */
7817             g_string_printf(buf, FMT_pid " ", getpid());
7818         } else if (i == 1) {
7819             /* app name */
7820             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7821             bin = bin ? bin + 1 : ts->bprm->argv[0];
7822             g_string_printf(buf, "(%.15s) ", bin);
7823         } else if (i == 3) {
7824             /* ppid */
7825             g_string_printf(buf, FMT_pid " ", getppid());
7826         } else if (i == 27) {
7827             /* stack bottom */
7828             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7829         } else {
7830             /* all other stat fields are reported as 0 */
7831             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7832         }
7833 
7834         if (write(fd, buf->str, buf->len) != buf->len) {
7835             return -1;
7836         }
7837     }
7838 
7839     return 0;
7840 }
7841 
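/*
 * Emulate /proc/self/auxv by copying out the auxv image that was saved on
 * the guest stack at load time.
 */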
7842 static int open_self_auxv(void *cpu_env, int fd)
7843 {
7844     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7845     TaskState *ts = cpu->opaque;
7846     abi_ulong auxv = ts->info->saved_auxv;
7847     abi_ulong len = ts->info->auxv_len;
7848     char *ptr;
7849 
7850     /*
7851      * The auxiliary vector is stored on the target process stack;
7852      * read the whole auxv vector and copy it to the file.
7853      */
7854     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7855     if (ptr != NULL) {
7856         while (len > 0) {
7857             ssize_t r;
7858             r = write(fd, ptr, len);
7859             if (r <= 0) {
7860                 break;
7861             }
7862             len -= r;
7863             ptr += r;
7864         }
7865         lseek(fd, 0, SEEK_SET);
7866         unlock_user(ptr, auxv, len);
7867     }
7868 
7869     return 0;
7870 }
7871 
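/*
 * Return 1 if filename refers to /proc/self/<entry> or /proc/<pid>/<entry>
 * for our own pid, 0 otherwise.
 */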
7872 static int is_proc_myself(const char *filename, const char *entry)
7873 {
7874     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7875         filename += strlen("/proc/");
7876         if (!strncmp(filename, "self/", strlen("self/"))) {
7877             filename += strlen("self/");
7878         } else if (*filename >= '1' && *filename <= '9') {
7879             char myself[80];
7880             snprintf(myself, sizeof(myself), "%d/", getpid());
7881             if (!strncmp(filename, myself, strlen(myself))) {
7882                 filename += strlen(myself);
7883             } else {
7884                 return 0;
7885             }
7886         } else {
7887             return 0;
7888         }
7889         if (!strcmp(filename, entry)) {
7890             return 1;
7891         }
7892     }
7893     return 0;
7894 }
7895 
7896 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7897     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7898 static int is_proc(const char *filename, const char *entry)
7899 {
7900     return strcmp(filename, entry) == 0;
7901 }
7902 #endif
7903 
7904 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
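/*
 * Emulate /proc/net/route when host and guest endianness differ: copy the
 * host file but byteswap the destination, gateway and netmask fields.
 */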
7905 static int open_net_route(void *cpu_env, int fd)
7906 {
7907     FILE *fp;
7908     char *line = NULL;
7909     size_t len = 0;
7910     ssize_t read;
7911 
7912     fp = fopen("/proc/net/route", "r");
7913     if (fp == NULL) {
7914         return -1;
7915     }
7916 
7917     /* read header */
7918 
7919     read = getline(&line, &len, fp);
7920     dprintf(fd, "%s", line);
7921 
7922     /* read routes */
7923 
7924     while ((read = getline(&line, &len, fp)) != -1) {
7925         char iface[16];
7926         uint32_t dest, gw, mask;
7927         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7928         int fields;
7929 
7930         fields = sscanf(line,
7931                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7932                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7933                         &mask, &mtu, &window, &irtt);
7934         if (fields != 11) {
7935             continue;
7936         }
7937         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7938                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7939                 metric, tswap32(mask), mtu, window, irtt);
7940     }
7941 
7942     free(line);
7943     fclose(fp);
7944 
7945     return 0;
7946 }
7947 #endif
7948 
7949 #if defined(TARGET_SPARC)
7950 static int open_cpuinfo(void *cpu_env, int fd)
7951 {
7952     dprintf(fd, "type\t\t: sun4u\n");
7953     return 0;
7954 }
7955 #endif
7956 
7957 #if defined(TARGET_HPPA)
7958 static int open_cpuinfo(void *cpu_env, int fd)
7959 {
7960     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7961     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7962     dprintf(fd, "capabilities\t: os32\n");
7963     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7964     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7965     return 0;
7966 }
7967 #endif
7968 
7969 #if defined(TARGET_M68K)
7970 static int open_hardware(void *cpu_env, int fd)
7971 {
7972     dprintf(fd, "Model:\t\tqemu-m68k\n");
7973     return 0;
7974 }
7975 #endif
7976 
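/*
 * Handle open/openat for the guest: selected /proc entries are intercepted
 * and their contents synthesized into an unlinked temporary file; anything
 * else is passed through to the host openat().
 */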
7977 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7978 {
7979     struct fake_open {
7980         const char *filename;
7981         int (*fill)(void *cpu_env, int fd);
7982         int (*cmp)(const char *s1, const char *s2);
7983     };
7984     const struct fake_open *fake_open;
7985     static const struct fake_open fakes[] = {
7986         { "maps", open_self_maps, is_proc_myself },
7987         { "stat", open_self_stat, is_proc_myself },
7988         { "auxv", open_self_auxv, is_proc_myself },
7989         { "cmdline", open_self_cmdline, is_proc_myself },
7990 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7991         { "/proc/net/route", open_net_route, is_proc },
7992 #endif
7993 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7994         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7995 #endif
7996 #if defined(TARGET_M68K)
7997         { "/proc/hardware", open_hardware, is_proc },
7998 #endif
7999         { NULL, NULL, NULL }
8000     };
8001 
8002     if (is_proc_myself(pathname, "exe")) {
8003         int execfd = qemu_getauxval(AT_EXECFD);
8004         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8005     }
8006 
8007     for (fake_open = fakes; fake_open->filename; fake_open++) {
8008         if (fake_open->cmp(pathname, fake_open->filename)) {
8009             break;
8010         }
8011     }
8012 
8013     if (fake_open->filename) {
8014         const char *tmpdir;
8015         char filename[PATH_MAX];
8016         int fd, r;
8017 
8018         /* create a temporary file to hold the synthesized /proc contents */
8019         tmpdir = getenv("TMPDIR");
8020         if (!tmpdir)
8021             tmpdir = "/tmp";
8022         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8023         fd = mkstemp(filename);
8024         if (fd < 0) {
8025             return fd;
8026         }
8027         unlink(filename);
8028 
8029         if ((r = fake_open->fill(cpu_env, fd))) {
8030             int e = errno;
8031             close(fd);
8032             errno = e;
8033             return r;
8034         }
8035         lseek(fd, 0, SEEK_SET);
8036 
8037         return fd;
8038     }
8039 
8040     return safe_openat(dirfd, path(pathname), flags, mode);
8041 }
8042 
8043 #define TIMER_MAGIC 0x0caf0000
8044 #define TIMER_MAGIC_MASK 0xffff0000
8045 
8046 /* Convert the QEMU-provided timer ID back to the internal 16-bit index format */
8047 static target_timer_t get_timer_id(abi_long arg)
8048 {
8049     target_timer_t timerid = arg;
8050 
8051     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8052         return -TARGET_EINVAL;
8053     }
8054 
8055     timerid &= 0xffff;
8056 
8057     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8058         return -TARGET_EINVAL;
8059     }
8060 
8061     return timerid;
8062 }
8063 
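/*
 * Copy a guest CPU affinity mask into a host cpu mask, translating bit by
 * bit between the (possibly different) abi_ulong and host long word sizes.
 */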
8064 static int target_to_host_cpu_mask(unsigned long *host_mask,
8065                                    size_t host_size,
8066                                    abi_ulong target_addr,
8067                                    size_t target_size)
8068 {
8069     unsigned target_bits = sizeof(abi_ulong) * 8;
8070     unsigned host_bits = sizeof(*host_mask) * 8;
8071     abi_ulong *target_mask;
8072     unsigned i, j;
8073 
8074     assert(host_size >= target_size);
8075 
8076     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8077     if (!target_mask) {
8078         return -TARGET_EFAULT;
8079     }
8080     memset(host_mask, 0, host_size);
8081 
8082     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8083         unsigned bit = i * target_bits;
8084         abi_ulong val;
8085 
8086         __get_user(val, &target_mask[i]);
8087         for (j = 0; j < target_bits; j++, bit++) {
8088             if (val & (1UL << j)) {
8089                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8090             }
8091         }
8092     }
8093 
8094     unlock_user(target_mask, target_addr, 0);
8095     return 0;
8096 }
8097 
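/* The inverse of target_to_host_cpu_mask(): copy a host cpu mask back to the guest. */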
8098 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8099                                    size_t host_size,
8100                                    abi_ulong target_addr,
8101                                    size_t target_size)
8102 {
8103     unsigned target_bits = sizeof(abi_ulong) * 8;
8104     unsigned host_bits = sizeof(*host_mask) * 8;
8105     abi_ulong *target_mask;
8106     unsigned i, j;
8107 
8108     assert(host_size >= target_size);
8109 
8110     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8111     if (!target_mask) {
8112         return -TARGET_EFAULT;
8113     }
8114 
8115     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8116         unsigned bit = i * target_bits;
8117         abi_ulong val = 0;
8118 
8119         for (j = 0; j < target_bits; j++, bit++) {
8120             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8121                 val |= 1UL << j;
8122             }
8123         }
8124         __put_user(val, &target_mask[i]);
8125     }
8126 
8127     unlock_user(target_mask, target_addr, target_size);
8128     return 0;
8129 }
8130 
8131 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8132 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8133 #endif
8134 
8135 /* This is an internal helper for do_syscall so that it is easier
8136  * to have a single return point, allowing actions such as logging
8137  * of syscall results to be performed.
8138  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8139  */
8140 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8141                             abi_long arg2, abi_long arg3, abi_long arg4,
8142                             abi_long arg5, abi_long arg6, abi_long arg7,
8143                             abi_long arg8)
8144 {
8145     CPUState *cpu = env_cpu(cpu_env);
8146     abi_long ret;
8147 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8148     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8149     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8150     || defined(TARGET_NR_statx)
8151     struct stat st;
8152 #endif
8153 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8154     || defined(TARGET_NR_fstatfs)
8155     struct statfs stfs;
8156 #endif
8157     void *p;
8158 
8159     switch(num) {
8160     case TARGET_NR_exit:
8161         /* In old applications this may be used to implement _exit(2).
8162            However in threaded applications it is used for thread termination,
8163            and _exit_group is used for application termination.
8164            Do thread termination if we have more than one thread.  */
8165 
8166         if (block_signals()) {
8167             return -TARGET_ERESTARTSYS;
8168         }
8169 
8170         pthread_mutex_lock(&clone_lock);
8171 
8172         if (CPU_NEXT(first_cpu)) {
8173             TaskState *ts = cpu->opaque;
8174 
8175             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8176             object_unref(OBJECT(cpu));
8177             /*
8178              * At this point the CPU should be unrealized and removed
8179              * from cpu lists. We can clean up the rest of the thread
8180              * data without the lock held.
8181              */
8182 
8183             pthread_mutex_unlock(&clone_lock);
8184 
8185             if (ts->child_tidptr) {
8186                 put_user_u32(0, ts->child_tidptr);
8187                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8188                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8189             }
8190             thread_cpu = NULL;
8191             g_free(ts);
8192             rcu_unregister_thread();
8193             pthread_exit(NULL);
8194         }
8195 
8196         pthread_mutex_unlock(&clone_lock);
8197         preexit_cleanup(cpu_env, arg1);
8198         _exit(arg1);
8199         return 0; /* avoid warning */
8200     case TARGET_NR_read:
8201         if (arg2 == 0 && arg3 == 0) {
8202             return get_errno(safe_read(arg1, 0, 0));
8203         } else {
8204             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8205                 return -TARGET_EFAULT;
8206             ret = get_errno(safe_read(arg1, p, arg3));
8207             if (ret >= 0 &&
8208                 fd_trans_host_to_target_data(arg1)) {
8209                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8210             }
8211             unlock_user(p, arg2, ret);
8212         }
8213         return ret;
8214     case TARGET_NR_write:
8215         if (arg2 == 0 && arg3 == 0) {
8216             return get_errno(safe_write(arg1, 0, 0));
8217         }
8218         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8219             return -TARGET_EFAULT;
8220         if (fd_trans_target_to_host_data(arg1)) {
8221             void *copy = g_malloc(arg3);
8222             memcpy(copy, p, arg3);
8223             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8224             if (ret >= 0) {
8225                 ret = get_errno(safe_write(arg1, copy, ret));
8226             }
8227             g_free(copy);
8228         } else {
8229             ret = get_errno(safe_write(arg1, p, arg3));
8230         }
8231         unlock_user(p, arg2, 0);
8232         return ret;
8233 
8234 #ifdef TARGET_NR_open
8235     case TARGET_NR_open:
8236         if (!(p = lock_user_string(arg1)))
8237             return -TARGET_EFAULT;
8238         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8239                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8240                                   arg3));
8241         fd_trans_unregister(ret);
8242         unlock_user(p, arg1, 0);
8243         return ret;
8244 #endif
8245     case TARGET_NR_openat:
8246         if (!(p = lock_user_string(arg2)))
8247             return -TARGET_EFAULT;
8248         ret = get_errno(do_openat(cpu_env, arg1, p,
8249                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8250                                   arg4));
8251         fd_trans_unregister(ret);
8252         unlock_user(p, arg2, 0);
8253         return ret;
8254 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8255     case TARGET_NR_name_to_handle_at:
8256         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8257         return ret;
8258 #endif
8259 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8260     case TARGET_NR_open_by_handle_at:
8261         ret = do_open_by_handle_at(arg1, arg2, arg3);
8262         fd_trans_unregister(ret);
8263         return ret;
8264 #endif
8265     case TARGET_NR_close:
8266         fd_trans_unregister(arg1);
8267         return get_errno(close(arg1));
8268 
8269     case TARGET_NR_brk:
8270         return do_brk(arg1);
8271 #ifdef TARGET_NR_fork
8272     case TARGET_NR_fork:
8273         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8274 #endif
8275 #ifdef TARGET_NR_waitpid
8276     case TARGET_NR_waitpid:
8277         {
8278             int status;
8279             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8280             if (!is_error(ret) && arg2 && ret
8281                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8282                 return -TARGET_EFAULT;
8283         }
8284         return ret;
8285 #endif
8286 #ifdef TARGET_NR_waitid
8287     case TARGET_NR_waitid:
8288         {
8289             siginfo_t info;
8290             info.si_pid = 0;
8291             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8292             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8293                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8294                     return -TARGET_EFAULT;
8295                 host_to_target_siginfo(p, &info);
8296                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8297             }
8298         }
8299         return ret;
8300 #endif
8301 #ifdef TARGET_NR_creat /* not on alpha */
8302     case TARGET_NR_creat:
8303         if (!(p = lock_user_string(arg1)))
8304             return -TARGET_EFAULT;
8305         ret = get_errno(creat(p, arg2));
8306         fd_trans_unregister(ret);
8307         unlock_user(p, arg1, 0);
8308         return ret;
8309 #endif
8310 #ifdef TARGET_NR_link
8311     case TARGET_NR_link:
8312         {
8313             void * p2;
8314             p = lock_user_string(arg1);
8315             p2 = lock_user_string(arg2);
8316             if (!p || !p2)
8317                 ret = -TARGET_EFAULT;
8318             else
8319                 ret = get_errno(link(p, p2));
8320             unlock_user(p2, arg2, 0);
8321             unlock_user(p, arg1, 0);
8322         }
8323         return ret;
8324 #endif
8325 #if defined(TARGET_NR_linkat)
8326     case TARGET_NR_linkat:
8327         {
8328             void * p2 = NULL;
8329             if (!arg2 || !arg4)
8330                 return -TARGET_EFAULT;
8331             p  = lock_user_string(arg2);
8332             p2 = lock_user_string(arg4);
8333             if (!p || !p2)
8334                 ret = -TARGET_EFAULT;
8335             else
8336                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8337             unlock_user(p, arg2, 0);
8338             unlock_user(p2, arg4, 0);
8339         }
8340         return ret;
8341 #endif
8342 #ifdef TARGET_NR_unlink
8343     case TARGET_NR_unlink:
8344         if (!(p = lock_user_string(arg1)))
8345             return -TARGET_EFAULT;
8346         ret = get_errno(unlink(p));
8347         unlock_user(p, arg1, 0);
8348         return ret;
8349 #endif
8350 #if defined(TARGET_NR_unlinkat)
8351     case TARGET_NR_unlinkat:
8352         if (!(p = lock_user_string(arg2)))
8353             return -TARGET_EFAULT;
8354         ret = get_errno(unlinkat(arg1, p, arg3));
8355         unlock_user(p, arg2, 0);
8356         return ret;
8357 #endif
8358     case TARGET_NR_execve:
8359         {
8360             char **argp, **envp;
8361             int argc, envc;
8362             abi_ulong gp;
8363             abi_ulong guest_argp;
8364             abi_ulong guest_envp;
8365             abi_ulong addr;
8366             char **q;
8367             int total_size = 0;
8368 
8369             argc = 0;
8370             guest_argp = arg2;
8371             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8372                 if (get_user_ual(addr, gp))
8373                     return -TARGET_EFAULT;
8374                 if (!addr)
8375                     break;
8376                 argc++;
8377             }
8378             envc = 0;
8379             guest_envp = arg3;
8380             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8381                 if (get_user_ual(addr, gp))
8382                     return -TARGET_EFAULT;
8383                 if (!addr)
8384                     break;
8385                 envc++;
8386             }
8387 
8388             argp = g_new0(char *, argc + 1);
8389             envp = g_new0(char *, envc + 1);
8390 
8391             for (gp = guest_argp, q = argp; gp;
8392                   gp += sizeof(abi_ulong), q++) {
8393                 if (get_user_ual(addr, gp))
8394                     goto execve_efault;
8395                 if (!addr)
8396                     break;
8397                 if (!(*q = lock_user_string(addr)))
8398                     goto execve_efault;
8399                 total_size += strlen(*q) + 1;
8400             }
8401             *q = NULL;
8402 
8403             for (gp = guest_envp, q = envp; gp;
8404                   gp += sizeof(abi_ulong), q++) {
8405                 if (get_user_ual(addr, gp))
8406                     goto execve_efault;
8407                 if (!addr)
8408                     break;
8409                 if (!(*q = lock_user_string(addr)))
8410                     goto execve_efault;
8411                 total_size += strlen(*q) + 1;
8412             }
8413             *q = NULL;
8414 
8415             if (!(p = lock_user_string(arg1)))
8416                 goto execve_efault;
8417             /* Although execve() is not an interruptible syscall it is
8418              * a special case where we must use the safe_syscall wrapper:
8419              * if we allow a signal to happen before we make the host
8420              * syscall then we will 'lose' it, because at the point of
8421              * execve the process leaves QEMU's control. So we use the
8422              * safe syscall wrapper to ensure that we either take the
8423              * signal as a guest signal, or else it does not happen
8424              * before the execve completes and makes it the other
8425              * program's problem.
8426              */
8427             ret = get_errno(safe_execve(p, argp, envp));
8428             unlock_user(p, arg1, 0);
8429 
8430             goto execve_end;
8431 
8432         execve_efault:
8433             ret = -TARGET_EFAULT;
8434 
8435         execve_end:
8436             for (gp = guest_argp, q = argp; *q;
8437                   gp += sizeof(abi_ulong), q++) {
8438                 if (get_user_ual(addr, gp)
8439                     || !addr)
8440                     break;
8441                 unlock_user(*q, addr, 0);
8442             }
8443             for (gp = guest_envp, q = envp; *q;
8444                   gp += sizeof(abi_ulong), q++) {
8445                 if (get_user_ual(addr, gp)
8446                     || !addr)
8447                     break;
8448                 unlock_user(*q, addr, 0);
8449             }
8450 
8451             g_free(argp);
8452             g_free(envp);
8453         }
8454         return ret;
8455     case TARGET_NR_chdir:
8456         if (!(p = lock_user_string(arg1)))
8457             return -TARGET_EFAULT;
8458         ret = get_errno(chdir(p));
8459         unlock_user(p, arg1, 0);
8460         return ret;
8461 #ifdef TARGET_NR_time
8462     case TARGET_NR_time:
8463         {
8464             time_t host_time;
8465             ret = get_errno(time(&host_time));
8466             if (!is_error(ret)
8467                 && arg1
8468                 && put_user_sal(host_time, arg1))
8469                 return -TARGET_EFAULT;
8470         }
8471         return ret;
8472 #endif
8473 #ifdef TARGET_NR_mknod
8474     case TARGET_NR_mknod:
8475         if (!(p = lock_user_string(arg1)))
8476             return -TARGET_EFAULT;
8477         ret = get_errno(mknod(p, arg2, arg3));
8478         unlock_user(p, arg1, 0);
8479         return ret;
8480 #endif
8481 #if defined(TARGET_NR_mknodat)
8482     case TARGET_NR_mknodat:
8483         if (!(p = lock_user_string(arg2)))
8484             return -TARGET_EFAULT;
8485         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8486         unlock_user(p, arg2, 0);
8487         return ret;
8488 #endif
8489 #ifdef TARGET_NR_chmod
8490     case TARGET_NR_chmod:
8491         if (!(p = lock_user_string(arg1)))
8492             return -TARGET_EFAULT;
8493         ret = get_errno(chmod(p, arg2));
8494         unlock_user(p, arg1, 0);
8495         return ret;
8496 #endif
8497 #ifdef TARGET_NR_lseek
8498     case TARGET_NR_lseek:
8499         return get_errno(lseek(arg1, arg2, arg3));
8500 #endif
8501 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8502     /* Alpha specific */
8503     case TARGET_NR_getxpid:
8504         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8505         return get_errno(getpid());
8506 #endif
8507 #ifdef TARGET_NR_getpid
8508     case TARGET_NR_getpid:
8509         return get_errno(getpid());
8510 #endif
8511     case TARGET_NR_mount:
8512         {
8513             /* need to look at the data field */
8514             void *p2, *p3;
8515 
8516             if (arg1) {
8517                 p = lock_user_string(arg1);
8518                 if (!p) {
8519                     return -TARGET_EFAULT;
8520                 }
8521             } else {
8522                 p = NULL;
8523             }
8524 
8525             p2 = lock_user_string(arg2);
8526             if (!p2) {
8527                 if (arg1) {
8528                     unlock_user(p, arg1, 0);
8529                 }
8530                 return -TARGET_EFAULT;
8531             }
8532 
8533             if (arg3) {
8534                 p3 = lock_user_string(arg3);
8535                 if (!p3) {
8536                     if (arg1) {
8537                         unlock_user(p, arg1, 0);
8538                     }
8539                     unlock_user(p2, arg2, 0);
8540                     return -TARGET_EFAULT;
8541                 }
8542             } else {
8543                 p3 = NULL;
8544             }
8545 
8546             /* FIXME - arg5 should be locked, but it isn't clear how to
8547              * do that since it's not guaranteed to be a NULL-terminated
8548              * string.
8549              */
8550             if (!arg5) {
8551                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8552             } else {
8553                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8554             }
8555             ret = get_errno(ret);
8556 
8557             if (arg1) {
8558                 unlock_user(p, arg1, 0);
8559             }
8560             unlock_user(p2, arg2, 0);
8561             if (arg3) {
8562                 unlock_user(p3, arg3, 0);
8563             }
8564         }
8565         return ret;
8566 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8567 #if defined(TARGET_NR_umount)
8568     case TARGET_NR_umount:
8569 #endif
8570 #if defined(TARGET_NR_oldumount)
8571     case TARGET_NR_oldumount:
8572 #endif
8573         if (!(p = lock_user_string(arg1)))
8574             return -TARGET_EFAULT;
8575         ret = get_errno(umount(p));
8576         unlock_user(p, arg1, 0);
8577         return ret;
8578 #endif
8579 #ifdef TARGET_NR_stime /* not on alpha */
8580     case TARGET_NR_stime:
8581         {
8582             struct timespec ts;
8583             ts.tv_nsec = 0;
8584             if (get_user_sal(ts.tv_sec, arg1)) {
8585                 return -TARGET_EFAULT;
8586             }
8587             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8588         }
8589 #endif
8590 #ifdef TARGET_NR_alarm /* not on alpha */
8591     case TARGET_NR_alarm:
8592         return alarm(arg1);
8593 #endif
8594 #ifdef TARGET_NR_pause /* not on alpha */
8595     case TARGET_NR_pause:
8596         if (!block_signals()) {
8597             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8598         }
8599         return -TARGET_EINTR;
8600 #endif
8601 #ifdef TARGET_NR_utime
8602     case TARGET_NR_utime:
8603         {
8604             struct utimbuf tbuf, *host_tbuf;
8605             struct target_utimbuf *target_tbuf;
8606             if (arg2) {
8607                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8608                     return -TARGET_EFAULT;
8609                 tbuf.actime = tswapal(target_tbuf->actime);
8610                 tbuf.modtime = tswapal(target_tbuf->modtime);
8611                 unlock_user_struct(target_tbuf, arg2, 0);
8612                 host_tbuf = &tbuf;
8613             } else {
8614                 host_tbuf = NULL;
8615             }
8616             if (!(p = lock_user_string(arg1)))
8617                 return -TARGET_EFAULT;
8618             ret = get_errno(utime(p, host_tbuf));
8619             unlock_user(p, arg1, 0);
8620         }
8621         return ret;
8622 #endif
8623 #ifdef TARGET_NR_utimes
8624     case TARGET_NR_utimes:
8625         {
8626             struct timeval *tvp, tv[2];
8627             if (arg2) {
8628                 if (copy_from_user_timeval(&tv[0], arg2)
8629                     || copy_from_user_timeval(&tv[1],
8630                                               arg2 + sizeof(struct target_timeval)))
8631                     return -TARGET_EFAULT;
8632                 tvp = tv;
8633             } else {
8634                 tvp = NULL;
8635             }
8636             if (!(p = lock_user_string(arg1)))
8637                 return -TARGET_EFAULT;
8638             ret = get_errno(utimes(p, tvp));
8639             unlock_user(p, arg1, 0);
8640         }
8641         return ret;
8642 #endif
8643 #if defined(TARGET_NR_futimesat)
8644     case TARGET_NR_futimesat:
8645         {
8646             struct timeval *tvp, tv[2];
8647             if (arg3) {
8648                 if (copy_from_user_timeval(&tv[0], arg3)
8649                     || copy_from_user_timeval(&tv[1],
8650                                               arg3 + sizeof(struct target_timeval)))
8651                     return -TARGET_EFAULT;
8652                 tvp = tv;
8653             } else {
8654                 tvp = NULL;
8655             }
8656             if (!(p = lock_user_string(arg2))) {
8657                 return -TARGET_EFAULT;
8658             }
8659             ret = get_errno(futimesat(arg1, path(p), tvp));
8660             unlock_user(p, arg2, 0);
8661         }
8662         return ret;
8663 #endif
8664 #ifdef TARGET_NR_access
8665     case TARGET_NR_access:
8666         if (!(p = lock_user_string(arg1))) {
8667             return -TARGET_EFAULT;
8668         }
8669         ret = get_errno(access(path(p), arg2));
8670         unlock_user(p, arg1, 0);
8671         return ret;
8672 #endif
8673 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8674     case TARGET_NR_faccessat:
8675         if (!(p = lock_user_string(arg2))) {
8676             return -TARGET_EFAULT;
8677         }
8678         ret = get_errno(faccessat(arg1, p, arg3, 0));
8679         unlock_user(p, arg2, 0);
8680         return ret;
8681 #endif
8682 #ifdef TARGET_NR_nice /* not on alpha */
8683     case TARGET_NR_nice:
8684         return get_errno(nice(arg1));
8685 #endif
8686     case TARGET_NR_sync:
8687         sync();
8688         return 0;
8689 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8690     case TARGET_NR_syncfs:
8691         return get_errno(syncfs(arg1));
8692 #endif
8693     case TARGET_NR_kill:
8694         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8695 #ifdef TARGET_NR_rename
8696     case TARGET_NR_rename:
8697         {
8698             void *p2;
8699             p = lock_user_string(arg1);
8700             p2 = lock_user_string(arg2);
8701             if (!p || !p2)
8702                 ret = -TARGET_EFAULT;
8703             else
8704                 ret = get_errno(rename(p, p2));
8705             unlock_user(p2, arg2, 0);
8706             unlock_user(p, arg1, 0);
8707         }
8708         return ret;
8709 #endif
8710 #if defined(TARGET_NR_renameat)
8711     case TARGET_NR_renameat:
8712         {
8713             void *p2;
8714             p  = lock_user_string(arg2);
8715             p2 = lock_user_string(arg4);
8716             if (!p || !p2)
8717                 ret = -TARGET_EFAULT;
8718             else
8719                 ret = get_errno(renameat(arg1, p, arg3, p2));
8720             unlock_user(p2, arg4, 0);
8721             unlock_user(p, arg2, 0);
8722         }
8723         return ret;
8724 #endif
8725 #if defined(TARGET_NR_renameat2)
8726     case TARGET_NR_renameat2:
8727         {
8728             void *p2;
8729             p  = lock_user_string(arg2);
8730             p2 = lock_user_string(arg4);
8731             if (!p || !p2) {
8732                 ret = -TARGET_EFAULT;
8733             } else {
8734                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8735             }
8736             unlock_user(p2, arg4, 0);
8737             unlock_user(p, arg2, 0);
8738         }
8739         return ret;
8740 #endif
8741 #ifdef TARGET_NR_mkdir
8742     case TARGET_NR_mkdir:
8743         if (!(p = lock_user_string(arg1)))
8744             return -TARGET_EFAULT;
8745         ret = get_errno(mkdir(p, arg2));
8746         unlock_user(p, arg1, 0);
8747         return ret;
8748 #endif
8749 #if defined(TARGET_NR_mkdirat)
8750     case TARGET_NR_mkdirat:
8751         if (!(p = lock_user_string(arg2)))
8752             return -TARGET_EFAULT;
8753         ret = get_errno(mkdirat(arg1, p, arg3));
8754         unlock_user(p, arg2, 0);
8755         return ret;
8756 #endif
8757 #ifdef TARGET_NR_rmdir
8758     case TARGET_NR_rmdir:
8759         if (!(p = lock_user_string(arg1)))
8760             return -TARGET_EFAULT;
8761         ret = get_errno(rmdir(p));
8762         unlock_user(p, arg1, 0);
8763         return ret;
8764 #endif
8765     case TARGET_NR_dup:
8766         ret = get_errno(dup(arg1));
8767         if (ret >= 0) {
8768             fd_trans_dup(arg1, ret);
8769         }
8770         return ret;
8771 #ifdef TARGET_NR_pipe
8772     case TARGET_NR_pipe:
8773         return do_pipe(cpu_env, arg1, 0, 0);
8774 #endif
8775 #ifdef TARGET_NR_pipe2
8776     case TARGET_NR_pipe2:
8777         return do_pipe(cpu_env, arg1,
8778                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8779 #endif
8780     case TARGET_NR_times:
8781         {
8782             struct target_tms *tmsp;
8783             struct tms tms;
8784             ret = get_errno(times(&tms));
8785             if (arg1) {
8786                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8787                 if (!tmsp)
8788                     return -TARGET_EFAULT;
8789                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8790                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8791                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8792                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8793             }
8794             if (!is_error(ret))
8795                 ret = host_to_target_clock_t(ret);
8796         }
8797         return ret;
8798     case TARGET_NR_acct:
8799         if (arg1 == 0) {
8800             ret = get_errno(acct(NULL));
8801         } else {
8802             if (!(p = lock_user_string(arg1))) {
8803                 return -TARGET_EFAULT;
8804             }
8805             ret = get_errno(acct(path(p)));
8806             unlock_user(p, arg1, 0);
8807         }
8808         return ret;
8809 #ifdef TARGET_NR_umount2
8810     case TARGET_NR_umount2:
8811         if (!(p = lock_user_string(arg1)))
8812             return -TARGET_EFAULT;
8813         ret = get_errno(umount2(p, arg2));
8814         unlock_user(p, arg1, 0);
8815         return ret;
8816 #endif
8817     case TARGET_NR_ioctl:
8818         return do_ioctl(arg1, arg2, arg3);
8819 #ifdef TARGET_NR_fcntl
8820     case TARGET_NR_fcntl:
8821         return do_fcntl(arg1, arg2, arg3);
8822 #endif
8823     case TARGET_NR_setpgid:
8824         return get_errno(setpgid(arg1, arg2));
8825     case TARGET_NR_umask:
8826         return get_errno(umask(arg1));
8827     case TARGET_NR_chroot:
8828         if (!(p = lock_user_string(arg1)))
8829             return -TARGET_EFAULT;
8830         ret = get_errno(chroot(p));
8831         unlock_user(p, arg1, 0);
8832         return ret;
8833 #ifdef TARGET_NR_dup2
8834     case TARGET_NR_dup2:
8835         ret = get_errno(dup2(arg1, arg2));
8836         if (ret >= 0) {
8837             fd_trans_dup(arg1, arg2);
8838         }
8839         return ret;
8840 #endif
8841 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8842     case TARGET_NR_dup3:
8843     {
8844         int host_flags;
8845 
8846         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8847             return -TARGET_EINVAL;
8848         }
8849         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8850         ret = get_errno(dup3(arg1, arg2, host_flags));
8851         if (ret >= 0) {
8852             fd_trans_dup(arg1, arg2);
8853         }
8854         return ret;
8855     }
8856 #endif
8857 #ifdef TARGET_NR_getppid /* not on alpha */
8858     case TARGET_NR_getppid:
8859         return get_errno(getppid());
8860 #endif
8861 #ifdef TARGET_NR_getpgrp
8862     case TARGET_NR_getpgrp:
8863         return get_errno(getpgrp());
8864 #endif
8865     case TARGET_NR_setsid:
8866         return get_errno(setsid());
8867 #ifdef TARGET_NR_sigaction
8868     case TARGET_NR_sigaction:
8869         {
8870 #if defined(TARGET_MIPS)
8871             struct target_sigaction act, oact, *pact, *old_act;
8872 
8873             if (arg2) {
8874                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8875                     return -TARGET_EFAULT;
8876                 act._sa_handler = old_act->_sa_handler;
8877                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8878                 act.sa_flags = old_act->sa_flags;
8879                 unlock_user_struct(old_act, arg2, 0);
8880                 pact = &act;
8881             } else {
8882                 pact = NULL;
8883             }
8884 
8885             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8886 
8887             if (!is_error(ret) && arg3) {
8888                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8889                     return -TARGET_EFAULT;
8890                 old_act->_sa_handler = oact._sa_handler;
8891                 old_act->sa_flags = oact.sa_flags;
8892                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8893                 old_act->sa_mask.sig[1] = 0;
8894                 old_act->sa_mask.sig[2] = 0;
8895                 old_act->sa_mask.sig[3] = 0;
8896                 unlock_user_struct(old_act, arg3, 1);
8897             }
8898 #else
8899             struct target_old_sigaction *old_act;
8900             struct target_sigaction act, oact, *pact;
8901             if (arg2) {
8902                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8903                     return -TARGET_EFAULT;
8904                 act._sa_handler = old_act->_sa_handler;
8905                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8906                 act.sa_flags = old_act->sa_flags;
8907 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8908                 act.sa_restorer = old_act->sa_restorer;
8909 #endif
8910                 unlock_user_struct(old_act, arg2, 0);
8911                 pact = &act;
8912             } else {
8913                 pact = NULL;
8914             }
8915             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8916             if (!is_error(ret) && arg3) {
8917                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8918                     return -TARGET_EFAULT;
8919                 old_act->_sa_handler = oact._sa_handler;
8920                 old_act->sa_mask = oact.sa_mask.sig[0];
8921                 old_act->sa_flags = oact.sa_flags;
8922 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8923                 old_act->sa_restorer = oact.sa_restorer;
8924 #endif
8925                 unlock_user_struct(old_act, arg3, 1);
8926             }
8927 #endif
8928         }
8929         return ret;
8930 #endif
8931     case TARGET_NR_rt_sigaction:
8932         {
8933             /*
8934              * For Alpha and SPARC this is a 5 argument syscall, with
8935              * a 'restorer' parameter which must be copied into the
8936              * sa_restorer field of the sigaction struct.
8937              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8938              * and arg5 is the sigsetsize.
8939              */
8940 #if defined(TARGET_ALPHA)
8941             target_ulong sigsetsize = arg4;
8942             target_ulong restorer = arg5;
8943 #elif defined(TARGET_SPARC)
8944             target_ulong restorer = arg4;
8945             target_ulong sigsetsize = arg5;
8946 #else
8947             target_ulong sigsetsize = arg4;
8948             target_ulong restorer = 0;
8949 #endif
8950             struct target_sigaction *act = NULL;
8951             struct target_sigaction *oact = NULL;
8952 
8953             if (sigsetsize != sizeof(target_sigset_t)) {
8954                 return -TARGET_EINVAL;
8955             }
8956             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8957                 return -TARGET_EFAULT;
8958             }
8959             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8960                 ret = -TARGET_EFAULT;
8961             } else {
8962                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
8963                 if (oact) {
8964                     unlock_user_struct(oact, arg3, 1);
8965                 }
8966             }
8967             if (act) {
8968                 unlock_user_struct(act, arg2, 0);
8969             }
8970         }
8971         return ret;
8972 #ifdef TARGET_NR_sgetmask /* not on alpha */
8973     case TARGET_NR_sgetmask:
8974         {
8975             sigset_t cur_set;
8976             abi_ulong target_set;
8977             ret = do_sigprocmask(0, NULL, &cur_set);
8978             if (!ret) {
8979                 host_to_target_old_sigset(&target_set, &cur_set);
8980                 ret = target_set;
8981             }
8982         }
8983         return ret;
8984 #endif
8985 #ifdef TARGET_NR_ssetmask /* not on alpha */
8986     case TARGET_NR_ssetmask:
8987         {
8988             sigset_t set, oset;
8989             abi_ulong target_set = arg1;
8990             target_to_host_old_sigset(&set, &target_set);
8991             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8992             if (!ret) {
8993                 host_to_target_old_sigset(&target_set, &oset);
8994                 ret = target_set;
8995             }
8996         }
8997         return ret;
8998 #endif
8999 #ifdef TARGET_NR_sigprocmask
9000     case TARGET_NR_sigprocmask:
9001         {
9002 #if defined(TARGET_ALPHA)
9003             sigset_t set, oldset;
9004             abi_ulong mask;
9005             int how;
9006 
9007             switch (arg1) {
9008             case TARGET_SIG_BLOCK:
9009                 how = SIG_BLOCK;
9010                 break;
9011             case TARGET_SIG_UNBLOCK:
9012                 how = SIG_UNBLOCK;
9013                 break;
9014             case TARGET_SIG_SETMASK:
9015                 how = SIG_SETMASK;
9016                 break;
9017             default:
9018                 return -TARGET_EINVAL;
9019             }
9020             mask = arg2;
9021             target_to_host_old_sigset(&set, &mask);
9022 
9023             ret = do_sigprocmask(how, &set, &oldset);
9024             if (!is_error(ret)) {
9025                 host_to_target_old_sigset(&mask, &oldset);
9026                 ret = mask;
9027                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9028             }
9029 #else
9030             sigset_t set, oldset, *set_ptr;
9031             int how;
9032 
9033             if (arg2) {
9034                 switch (arg1) {
9035                 case TARGET_SIG_BLOCK:
9036                     how = SIG_BLOCK;
9037                     break;
9038                 case TARGET_SIG_UNBLOCK:
9039                     how = SIG_UNBLOCK;
9040                     break;
9041                 case TARGET_SIG_SETMASK:
9042                     how = SIG_SETMASK;
9043                     break;
9044                 default:
9045                     return -TARGET_EINVAL;
9046                 }
9047                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9048                     return -TARGET_EFAULT;
9049                 target_to_host_old_sigset(&set, p);
9050                 unlock_user(p, arg2, 0);
9051                 set_ptr = &set;
9052             } else {
9053                 how = 0;
9054                 set_ptr = NULL;
9055             }
9056             ret = do_sigprocmask(how, set_ptr, &oldset);
9057             if (!is_error(ret) && arg3) {
9058                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9059                     return -TARGET_EFAULT;
9060                 host_to_target_old_sigset(p, &oldset);
9061                 unlock_user(p, arg3, sizeof(target_sigset_t));
9062             }
9063 #endif
9064         }
9065         return ret;
9066 #endif
9067     case TARGET_NR_rt_sigprocmask:
9068         {
9069             int how = arg1;
9070             sigset_t set, oldset, *set_ptr;
9071 
9072             if (arg4 != sizeof(target_sigset_t)) {
9073                 return -TARGET_EINVAL;
9074             }
9075 
9076             if (arg2) {
9077                 switch(how) {
9078                 case TARGET_SIG_BLOCK:
9079                     how = SIG_BLOCK;
9080                     break;
9081                 case TARGET_SIG_UNBLOCK:
9082                     how = SIG_UNBLOCK;
9083                     break;
9084                 case TARGET_SIG_SETMASK:
9085                     how = SIG_SETMASK;
9086                     break;
9087                 default:
9088                     return -TARGET_EINVAL;
9089                 }
9090                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9091                     return -TARGET_EFAULT;
9092                 target_to_host_sigset(&set, p);
9093                 unlock_user(p, arg2, 0);
9094                 set_ptr = &set;
9095             } else {
9096                 how = 0;
9097                 set_ptr = NULL;
9098             }
9099             ret = do_sigprocmask(how, set_ptr, &oldset);
9100             if (!is_error(ret) && arg3) {
9101                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9102                     return -TARGET_EFAULT;
9103                 host_to_target_sigset(p, &oldset);
9104                 unlock_user(p, arg3, sizeof(target_sigset_t));
9105             }
9106         }
9107         return ret;
9108 #ifdef TARGET_NR_sigpending
9109     case TARGET_NR_sigpending:
9110         {
9111             sigset_t set;
9112             ret = get_errno(sigpending(&set));
9113             if (!is_error(ret)) {
9114                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9115                     return -TARGET_EFAULT;
9116                 host_to_target_old_sigset(p, &set);
9117                 unlock_user(p, arg1, sizeof(target_sigset_t));
9118             }
9119         }
9120         return ret;
9121 #endif
9122     case TARGET_NR_rt_sigpending:
9123         {
9124             sigset_t set;
9125 
9126             /* Yes, this check is >, not != like most. We follow the kernel's
9127              * logic and it does it like this because it implements
9128              * NR_sigpending through the same code path, and in that case
9129              * the old_sigset_t is smaller in size.
9130              */
9131             if (arg2 > sizeof(target_sigset_t)) {
9132                 return -TARGET_EINVAL;
9133             }
9134 
9135             ret = get_errno(sigpending(&set));
9136             if (!is_error(ret)) {
9137                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9138                     return -TARGET_EFAULT;
9139                 host_to_target_sigset(p, &set);
9140                 unlock_user(p, arg1, sizeof(target_sigset_t));
9141             }
9142         }
9143         return ret;
9144 #ifdef TARGET_NR_sigsuspend
9145     case TARGET_NR_sigsuspend:
9146         {
9147             TaskState *ts = cpu->opaque;
9148 #if defined(TARGET_ALPHA)
9149             abi_ulong mask = arg1;
9150             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9151 #else
9152             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9153                 return -TARGET_EFAULT;
9154             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9155             unlock_user(p, arg1, 0);
9156 #endif
9157             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9158                                                SIGSET_T_SIZE));
9159             if (ret != -TARGET_ERESTARTSYS) {
9160                 ts->in_sigsuspend = 1;
9161             }
9162         }
9163         return ret;
9164 #endif
9165     case TARGET_NR_rt_sigsuspend:
9166         {
9167             TaskState *ts = cpu->opaque;
9168 
9169             if (arg2 != sizeof(target_sigset_t)) {
9170                 return -TARGET_EINVAL;
9171             }
9172             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9173                 return -TARGET_EFAULT;
9174             target_to_host_sigset(&ts->sigsuspend_mask, p);
9175             unlock_user(p, arg1, 0);
9176             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9177                                                SIGSET_T_SIZE));
9178             if (ret != -TARGET_ERESTARTSYS) {
9179                 ts->in_sigsuspend = 1;
9180             }
9181         }
9182         return ret;
9183 #ifdef TARGET_NR_rt_sigtimedwait
9184     case TARGET_NR_rt_sigtimedwait:
9185         {
9186             sigset_t set;
9187             struct timespec uts, *puts;
9188             siginfo_t uinfo;
9189 
9190             if (arg4 != sizeof(target_sigset_t)) {
9191                 return -TARGET_EINVAL;
9192             }
9193 
9194             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9195                 return -TARGET_EFAULT;
9196             target_to_host_sigset(&set, p);
9197             unlock_user(p, arg1, 0);
9198             if (arg3) {
9199                 puts = &uts;
9200                 if (target_to_host_timespec(puts, arg3)) {
9201                     return -TARGET_EFAULT;
9202                 }
9203             } else {
9204                 puts = NULL;
9205             }
9206             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9207                                                  SIGSET_T_SIZE));
9208             if (!is_error(ret)) {
9209                 if (arg2) {
9210                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9211                                   0);
9212                     if (!p) {
9213                         return -TARGET_EFAULT;
9214                     }
9215                     host_to_target_siginfo(p, &uinfo);
9216                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9217                 }
9218                 ret = host_to_target_signal(ret);
9219             }
9220         }
9221         return ret;
9222 #endif
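    /* Same as rt_sigtimedwait, but the timeout uses the 64-bit timespec
     * layout used by 32-bit guests with 64-bit time_t. */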
9223 #ifdef TARGET_NR_rt_sigtimedwait_time64
9224     case TARGET_NR_rt_sigtimedwait_time64:
9225         {
9226             sigset_t set;
9227             struct timespec uts, *puts;
9228             siginfo_t uinfo;
9229 
9230             if (arg4 != sizeof(target_sigset_t)) {
9231                 return -TARGET_EINVAL;
9232             }
9233 
9234             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9235             if (!p) {
9236                 return -TARGET_EFAULT;
9237             }
9238             target_to_host_sigset(&set, p);
9239             unlock_user(p, arg1, 0);
9240             if (arg3) {
9241                 puts = &uts;
9242                 if (target_to_host_timespec64(puts, arg3)) {
9243                     return -TARGET_EFAULT;
9244                 }
9245             } else {
9246                 puts = NULL;
9247             }
9248             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9249                                                  SIGSET_T_SIZE));
9250             if (!is_error(ret)) {
9251                 if (arg2) {
9252                     p = lock_user(VERIFY_WRITE, arg2,
9253                                   sizeof(target_siginfo_t), 0);
9254                     if (!p) {
9255                         return -TARGET_EFAULT;
9256                     }
9257                     host_to_target_siginfo(p, &uinfo);
9258                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9259                 }
9260                 ret = host_to_target_signal(ret);
9261             }
9262         }
9263         return ret;
9264 #endif
9265     case TARGET_NR_rt_sigqueueinfo:
9266         {
9267             siginfo_t uinfo;
9268 
9269             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9270             if (!p) {
9271                 return -TARGET_EFAULT;
9272             }
9273             target_to_host_siginfo(&uinfo, p);
9274             unlock_user(p, arg3, 0);
9275             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9276         }
9277         return ret;
9278     case TARGET_NR_rt_tgsigqueueinfo:
9279         {
9280             siginfo_t uinfo;
9281 
9282             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9283             if (!p) {
9284                 return -TARGET_EFAULT;
9285             }
9286             target_to_host_siginfo(&uinfo, p);
9287             unlock_user(p, arg4, 0);
9288             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9289         }
9290         return ret;
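    /*
     * sigreturn/rt_sigreturn rebuild the CPU state from the signal frame;
     * this must not race with delivery of another guest signal, so if
     * block_signals() reports one already pending we restart the syscall
     * and let that signal be handled first.
     */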
9291 #ifdef TARGET_NR_sigreturn
9292     case TARGET_NR_sigreturn:
9293         if (block_signals()) {
9294             return -TARGET_ERESTARTSYS;
9295         }
9296         return do_sigreturn(cpu_env);
9297 #endif
9298     case TARGET_NR_rt_sigreturn:
9299         if (block_signals()) {
9300             return -TARGET_ERESTARTSYS;
9301         }
9302         return do_rt_sigreturn(cpu_env);
9303     case TARGET_NR_sethostname:
9304         if (!(p = lock_user_string(arg1)))
9305             return -TARGET_EFAULT;
9306         ret = get_errno(sethostname(p, arg2));
9307         unlock_user(p, arg1, 0);
9308         return ret;
9309 #ifdef TARGET_NR_setrlimit
9310     case TARGET_NR_setrlimit:
9311         {
9312             int resource = target_to_host_resource(arg1);
9313             struct target_rlimit *target_rlim;
9314             struct rlimit rlim;
9315             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9316                 return -TARGET_EFAULT;
9317             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9318             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9319             unlock_user_struct(target_rlim, arg2, 0);
9320             /*
9321              * If we just passed through resource limit settings for memory then
9322              * they would also apply to QEMU's own allocations, and QEMU will
9323              * crash or hang or die if its allocations fail. Ideally we would
9324              * track the guest allocations in QEMU and apply the limits ourselves.
9325              * For now, just tell the guest the call succeeded but don't actually
9326              * limit anything.
9327              */
9328             if (resource != RLIMIT_AS &&
9329                 resource != RLIMIT_DATA &&
9330                 resource != RLIMIT_STACK) {
9331                 return get_errno(setrlimit(resource, &rlim));
9332             } else {
9333                 return 0;
9334             }
9335         }
9336 #endif
9337 #ifdef TARGET_NR_getrlimit
9338     case TARGET_NR_getrlimit:
9339         {
9340             int resource = target_to_host_resource(arg1);
9341             struct target_rlimit *target_rlim;
9342             struct rlimit rlim;
9343 
9344             ret = get_errno(getrlimit(resource, &rlim));
9345             if (!is_error(ret)) {
9346                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9347                     return -TARGET_EFAULT;
9348                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9349                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9350                 unlock_user_struct(target_rlim, arg2, 1);
9351             }
9352         }
9353         return ret;
9354 #endif
9355     case TARGET_NR_getrusage:
9356         {
9357             struct rusage rusage;
9358             ret = get_errno(getrusage(arg1, &rusage));
9359             if (!is_error(ret)) {
9360                 ret = host_to_target_rusage(arg2, &rusage);
9361             }
9362         }
9363         return ret;
9364 #if defined(TARGET_NR_gettimeofday)
9365     case TARGET_NR_gettimeofday:
9366         {
9367             struct timeval tv;
9368             struct timezone tz;
9369 
9370             ret = get_errno(gettimeofday(&tv, &tz));
9371             if (!is_error(ret)) {
9372                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9373                     return -TARGET_EFAULT;
9374                 }
9375                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9376                     return -TARGET_EFAULT;
9377                 }
9378             }
9379         }
9380         return ret;
9381 #endif
9382 #if defined(TARGET_NR_settimeofday)
9383     case TARGET_NR_settimeofday:
9384         {
9385             struct timeval tv, *ptv = NULL;
9386             struct timezone tz, *ptz = NULL;
9387 
9388             if (arg1) {
9389                 if (copy_from_user_timeval(&tv, arg1)) {
9390                     return -TARGET_EFAULT;
9391                 }
9392                 ptv = &tv;
9393             }
9394 
9395             if (arg2) {
9396                 if (copy_from_user_timezone(&tz, arg2)) {
9397                     return -TARGET_EFAULT;
9398                 }
9399                 ptz = &tz;
9400             }
9401 
9402             return get_errno(settimeofday(ptv, ptz));
9403         }
9404 #endif
9405 #if defined(TARGET_NR_select)
9406     case TARGET_NR_select:
9407 #if defined(TARGET_WANT_NI_OLD_SELECT)
9408         /* some architectures used to have old_select here
9409          * but now return ENOSYS for it.
9410          */
9411         ret = -TARGET_ENOSYS;
9412 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9413         ret = do_old_select(arg1);
9414 #else
9415         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9416 #endif
9417         return ret;
9418 #endif
9419 #ifdef TARGET_NR_pselect6
9420     case TARGET_NR_pselect6:
9421         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9422 #endif
9423 #ifdef TARGET_NR_pselect6_time64
9424     case TARGET_NR_pselect6_time64:
9425         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9426 #endif
9427 #ifdef TARGET_NR_symlink
9428     case TARGET_NR_symlink:
9429         {
9430             void *p2;
9431             p = lock_user_string(arg1);
9432             p2 = lock_user_string(arg2);
9433             if (!p || !p2)
9434                 ret = -TARGET_EFAULT;
9435             else
9436                 ret = get_errno(symlink(p, p2));
9437             unlock_user(p2, arg2, 0);
9438             unlock_user(p, arg1, 0);
9439         }
9440         return ret;
9441 #endif
9442 #if defined(TARGET_NR_symlinkat)
9443     case TARGET_NR_symlinkat:
9444         {
9445             void *p2;
9446             p  = lock_user_string(arg1);
9447             p2 = lock_user_string(arg3);
9448             if (!p || !p2)
9449                 ret = -TARGET_EFAULT;
9450             else
9451                 ret = get_errno(symlinkat(p, arg2, p2));
9452             unlock_user(p2, arg3, 0);
9453             unlock_user(p, arg1, 0);
9454         }
9455         return ret;
9456 #endif
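    /*
     * readlink needs special casing: a guest reading /proc/self/exe must
     * see the path of the emulated binary, not of QEMU itself, hence the
     * is_proc_myself() check below.
     */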
9457 #ifdef TARGET_NR_readlink
9458     case TARGET_NR_readlink:
9459         {
9460             void *p2;
9461             p = lock_user_string(arg1);
9462             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9463             if (!p || !p2) {
9464                 ret = -TARGET_EFAULT;
9465             } else if (!arg3) {
9466                 /* Short circuit this for the magic exe check. */
9467                 ret = -TARGET_EINVAL;
9468             } else if (is_proc_myself((const char *)p, "exe")) {
9469                 char real[PATH_MAX], *temp;
9470                 temp = realpath(exec_path, real);
9471                 /* Return value is # of bytes that we wrote to the buffer. */
9472                 if (temp == NULL) {
9473                     ret = get_errno(-1);
9474                 } else {
9475                     /* Don't worry about sign mismatch as earlier mapping
9476                      * logic would have thrown a bad address error. */
9477                     ret = MIN(strlen(real), arg3);
9478                     /* We cannot NUL terminate the string. */
9479                     memcpy(p2, real, ret);
9480                 }
9481             } else {
9482                 ret = get_errno(readlink(path(p), p2, arg3));
9483             }
9484             unlock_user(p2, arg2, ret);
9485             unlock_user(p, arg1, 0);
9486         }
9487         return ret;
9488 #endif
9489 #if defined(TARGET_NR_readlinkat)
9490     case TARGET_NR_readlinkat:
9491         {
9492             void *p2;
9493             p  = lock_user_string(arg2);
9494             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9495             if (!p || !p2) {
9496                 ret = -TARGET_EFAULT;
9497             } else if (is_proc_myself((const char *)p, "exe")) {
9498                 char real[PATH_MAX], *temp;
9499                 temp = realpath(exec_path, real);
9500             ret = temp == NULL ? get_errno(-1) : strlen(real);
9501                 snprintf((char *)p2, arg4, "%s", real);
9502             } else {
9503                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9504             }
9505             unlock_user(p2, arg3, ret);
9506             unlock_user(p, arg2, 0);
9507         }
9508         return ret;
9509 #endif
9510 #ifdef TARGET_NR_swapon
9511     case TARGET_NR_swapon:
9512         if (!(p = lock_user_string(arg1)))
9513             return -TARGET_EFAULT;
9514         ret = get_errno(swapon(p, arg2));
9515         unlock_user(p, arg1, 0);
9516         return ret;
9517 #endif
9518     case TARGET_NR_reboot:
9519         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9520            /* arg4 is only used here; it must be ignored in all other cases */
9521            p = lock_user_string(arg4);
9522            if (!p) {
9523                return -TARGET_EFAULT;
9524            }
9525            ret = get_errno(reboot(arg1, arg2, arg3, p));
9526            unlock_user(p, arg4, 0);
9527         } else {
9528            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9529         }
9530         return ret;
9531 #ifdef TARGET_NR_mmap
9532     case TARGET_NR_mmap:
9533 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9534     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9535     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9536     || defined(TARGET_S390X)
9537         {
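            /*
             * These targets use the old mmap calling convention: arg1
             * points at a block of six abi_ulongs holding the real
             * arguments, which are read out and byteswapped here.
             */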
9538             abi_ulong *v;
9539             abi_ulong v1, v2, v3, v4, v5, v6;
9540             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9541                 return -TARGET_EFAULT;
9542             v1 = tswapal(v[0]);
9543             v2 = tswapal(v[1]);
9544             v3 = tswapal(v[2]);
9545             v4 = tswapal(v[3]);
9546             v5 = tswapal(v[4]);
9547             v6 = tswapal(v[5]);
9548             unlock_user(v, arg1, 0);
9549             ret = get_errno(target_mmap(v1, v2, v3,
9550                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9551                                         v5, v6));
9552         }
9553 #else
9554         /* mmap pointers are always untagged */
9555         ret = get_errno(target_mmap(arg1, arg2, arg3,
9556                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9557                                     arg5,
9558                                     arg6));
9559 #endif
9560         return ret;
9561 #endif
9562 #ifdef TARGET_NR_mmap2
9563     case TARGET_NR_mmap2:
9564 #ifndef MMAP_SHIFT
9565 #define MMAP_SHIFT 12
9566 #endif
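        /* mmap2 passes the file offset in units of (1 << MMAP_SHIFT)
         * bytes (normally 4 KiB pages), so scale it back up here. */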
9567         ret = target_mmap(arg1, arg2, arg3,
9568                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9569                           arg5, arg6 << MMAP_SHIFT);
9570         return get_errno(ret);
9571 #endif
9572     case TARGET_NR_munmap:
9573         arg1 = cpu_untagged_addr(cpu, arg1);
9574         return get_errno(target_munmap(arg1, arg2));
9575     case TARGET_NR_mprotect:
9576         arg1 = cpu_untagged_addr(cpu, arg1);
9577         {
9578             TaskState *ts = cpu->opaque;
9579             /* Special hack to detect libc making the stack executable.  */
9580             if ((arg3 & PROT_GROWSDOWN)
9581                 && arg1 >= ts->info->stack_limit
9582                 && arg1 <= ts->info->start_stack) {
9583                 arg3 &= ~PROT_GROWSDOWN;
9584                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9585                 arg1 = ts->info->stack_limit;
9586             }
9587         }
9588         return get_errno(target_mprotect(arg1, arg2, arg3));
9589 #ifdef TARGET_NR_mremap
9590     case TARGET_NR_mremap:
9591         arg1 = cpu_untagged_addr(cpu, arg1);
9592         /* mremap new_addr (arg5) is always untagged */
9593         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9594 #endif
9595         /* ??? msync/mlock/munlock are broken for softmmu.  */
9596 #ifdef TARGET_NR_msync
9597     case TARGET_NR_msync:
9598         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9599 #endif
9600 #ifdef TARGET_NR_mlock
9601     case TARGET_NR_mlock:
9602         return get_errno(mlock(g2h(cpu, arg1), arg2));
9603 #endif
9604 #ifdef TARGET_NR_munlock
9605     case TARGET_NR_munlock:
9606         return get_errno(munlock(g2h(cpu, arg1), arg2));
9607 #endif
9608 #ifdef TARGET_NR_mlockall
9609     case TARGET_NR_mlockall:
9610         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9611 #endif
9612 #ifdef TARGET_NR_munlockall
9613     case TARGET_NR_munlockall:
9614         return get_errno(munlockall());
9615 #endif
9616 #ifdef TARGET_NR_truncate
9617     case TARGET_NR_truncate:
9618         if (!(p = lock_user_string(arg1)))
9619             return -TARGET_EFAULT;
9620         ret = get_errno(truncate(p, arg2));
9621         unlock_user(p, arg1, 0);
9622         return ret;
9623 #endif
9624 #ifdef TARGET_NR_ftruncate
9625     case TARGET_NR_ftruncate:
9626         return get_errno(ftruncate(arg1, arg2));
9627 #endif
9628     case TARGET_NR_fchmod:
9629         return get_errno(fchmod(arg1, arg2));
9630 #if defined(TARGET_NR_fchmodat)
9631     case TARGET_NR_fchmodat:
9632         if (!(p = lock_user_string(arg2)))
9633             return -TARGET_EFAULT;
9634         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9635         unlock_user(p, arg2, 0);
9636         return ret;
9637 #endif
9638     case TARGET_NR_getpriority:
9639         /* Note that negative values are valid for getpriority, so we must
9640            differentiate based on errno settings.  */
9641         errno = 0;
9642         ret = getpriority(arg1, arg2);
9643         if (ret == -1 && errno != 0) {
9644             return -host_to_target_errno(errno);
9645         }
9646 #ifdef TARGET_ALPHA
9647         /* Return value is the unbiased priority.  Signal no error.  */
9648         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9649 #else
9650         /* Return value is a biased priority to avoid negative numbers.  */
9651         ret = 20 - ret;
9652 #endif
9653         return ret;
9654     case TARGET_NR_setpriority:
9655         return get_errno(setpriority(arg1, arg2, arg3));
9656 #ifdef TARGET_NR_statfs
9657     case TARGET_NR_statfs:
9658         if (!(p = lock_user_string(arg1))) {
9659             return -TARGET_EFAULT;
9660         }
9661         ret = get_errno(statfs(path(p), &stfs));
9662         unlock_user(p, arg1, 0);
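    /* fstatfs re-enters here via the convert_statfs label so that both
     * syscalls share the host-to-target statfs conversion below. */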
9663     convert_statfs:
9664         if (!is_error(ret)) {
9665             struct target_statfs *target_stfs;
9666 
9667             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9668                 return -TARGET_EFAULT;
9669             __put_user(stfs.f_type, &target_stfs->f_type);
9670             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9671             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9672             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9673             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9674             __put_user(stfs.f_files, &target_stfs->f_files);
9675             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9676             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9677             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9678             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9679             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9680 #ifdef _STATFS_F_FLAGS
9681             __put_user(stfs.f_flags, &target_stfs->f_flags);
9682 #else
9683             __put_user(0, &target_stfs->f_flags);
9684 #endif
9685             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9686             unlock_user_struct(target_stfs, arg2, 1);
9687         }
9688         return ret;
9689 #endif
9690 #ifdef TARGET_NR_fstatfs
9691     case TARGET_NR_fstatfs:
9692         ret = get_errno(fstatfs(arg1, &stfs));
9693         goto convert_statfs;
9694 #endif
9695 #ifdef TARGET_NR_statfs64
9696     case TARGET_NR_statfs64:
9697         if (!(p = lock_user_string(arg1))) {
9698             return -TARGET_EFAULT;
9699         }
9700         ret = get_errno(statfs(path(p), &stfs));
9701         unlock_user(p, arg1, 0);
9702     convert_statfs64:
9703         if (!is_error(ret)) {
9704             struct target_statfs64 *target_stfs;
9705 
9706             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9707                 return -TARGET_EFAULT;
9708             __put_user(stfs.f_type, &target_stfs->f_type);
9709             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9710             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9711             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9712             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9713             __put_user(stfs.f_files, &target_stfs->f_files);
9714             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9715             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9716             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9717             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9718             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9719 #ifdef _STATFS_F_FLAGS
9720             __put_user(stfs.f_flags, &target_stfs->f_flags);
9721 #else
9722             __put_user(0, &target_stfs->f_flags);
9723 #endif
9724             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9725             unlock_user_struct(target_stfs, arg3, 1);
9726         }
9727         return ret;
9728     case TARGET_NR_fstatfs64:
9729         ret = get_errno(fstatfs(arg1, &stfs));
9730         goto convert_statfs64;
9731 #endif
9732 #ifdef TARGET_NR_socketcall
9733     case TARGET_NR_socketcall:
9734         return do_socketcall(arg1, arg2);
9735 #endif
9736 #ifdef TARGET_NR_accept
9737     case TARGET_NR_accept:
9738         return do_accept4(arg1, arg2, arg3, 0);
9739 #endif
9740 #ifdef TARGET_NR_accept4
9741     case TARGET_NR_accept4:
9742         return do_accept4(arg1, arg2, arg3, arg4);
9743 #endif
9744 #ifdef TARGET_NR_bind
9745     case TARGET_NR_bind:
9746         return do_bind(arg1, arg2, arg3);
9747 #endif
9748 #ifdef TARGET_NR_connect
9749     case TARGET_NR_connect:
9750         return do_connect(arg1, arg2, arg3);
9751 #endif
9752 #ifdef TARGET_NR_getpeername
9753     case TARGET_NR_getpeername:
9754         return do_getpeername(arg1, arg2, arg3);
9755 #endif
9756 #ifdef TARGET_NR_getsockname
9757     case TARGET_NR_getsockname:
9758         return do_getsockname(arg1, arg2, arg3);
9759 #endif
9760 #ifdef TARGET_NR_getsockopt
9761     case TARGET_NR_getsockopt:
9762         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9763 #endif
9764 #ifdef TARGET_NR_listen
9765     case TARGET_NR_listen:
9766         return get_errno(listen(arg1, arg2));
9767 #endif
9768 #ifdef TARGET_NR_recv
9769     case TARGET_NR_recv:
9770         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9771 #endif
9772 #ifdef TARGET_NR_recvfrom
9773     case TARGET_NR_recvfrom:
9774         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9775 #endif
9776 #ifdef TARGET_NR_recvmsg
9777     case TARGET_NR_recvmsg:
9778         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9779 #endif
9780 #ifdef TARGET_NR_send
9781     case TARGET_NR_send:
9782         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9783 #endif
9784 #ifdef TARGET_NR_sendmsg
9785     case TARGET_NR_sendmsg:
9786         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9787 #endif
9788 #ifdef TARGET_NR_sendmmsg
9789     case TARGET_NR_sendmmsg:
9790         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9791 #endif
9792 #ifdef TARGET_NR_recvmmsg
9793     case TARGET_NR_recvmmsg:
9794         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9795 #endif
9796 #ifdef TARGET_NR_sendto
9797     case TARGET_NR_sendto:
9798         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9799 #endif
9800 #ifdef TARGET_NR_shutdown
9801     case TARGET_NR_shutdown:
9802         return get_errno(shutdown(arg1, arg2));
9803 #endif
9804 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9805     case TARGET_NR_getrandom:
9806         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9807         if (!p) {
9808             return -TARGET_EFAULT;
9809         }
9810         ret = get_errno(getrandom(p, arg2, arg3));
9811         unlock_user(p, arg1, ret);
9812         return ret;
9813 #endif
9814 #ifdef TARGET_NR_socket
9815     case TARGET_NR_socket:
9816         return do_socket(arg1, arg2, arg3);
9817 #endif
9818 #ifdef TARGET_NR_socketpair
9819     case TARGET_NR_socketpair:
9820         return do_socketpair(arg1, arg2, arg3, arg4);
9821 #endif
9822 #ifdef TARGET_NR_setsockopt
9823     case TARGET_NR_setsockopt:
9824         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9825 #endif
9826 #if defined(TARGET_NR_syslog)
9827     case TARGET_NR_syslog:
9828         {
9829             int len = arg2;
9830 
9831             switch (arg1) {
9832             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9833             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9834             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9835             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9836             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9837             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9838             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9839             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9840                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9841             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9842             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9843             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9844                 {
9845                     if (len < 0) {
9846                         return -TARGET_EINVAL;
9847                     }
9848                     if (len == 0) {
9849                         return 0;
9850                     }
9851                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9852                     if (!p) {
9853                         return -TARGET_EFAULT;
9854                     }
9855                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9856                     unlock_user(p, arg2, arg3);
9857                 }
9858                 return ret;
9859             default:
9860                 return -TARGET_EINVAL;
9861             }
9862         }
9863         break;
9864 #endif
9865     case TARGET_NR_setitimer:
9866         {
9867             struct itimerval value, ovalue, *pvalue;
9868 
9869             if (arg2) {
9870                 pvalue = &value;
9871                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9872                     || copy_from_user_timeval(&pvalue->it_value,
9873                                               arg2 + sizeof(struct target_timeval)))
9874                     return -TARGET_EFAULT;
9875             } else {
9876                 pvalue = NULL;
9877             }
9878             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9879             if (!is_error(ret) && arg3) {
9880                 if (copy_to_user_timeval(arg3,
9881                                          &ovalue.it_interval)
9882                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9883                                             &ovalue.it_value))
9884                     return -TARGET_EFAULT;
9885             }
9886         }
9887         return ret;
9888     case TARGET_NR_getitimer:
9889         {
9890             struct itimerval value;
9891 
9892             ret = get_errno(getitimer(arg1, &value));
9893             if (!is_error(ret) && arg2) {
9894                 if (copy_to_user_timeval(arg2,
9895                                          &value.it_interval)
9896                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9897                                             &value.it_value))
9898                     return -TARGET_EFAULT;
9899             }
9900         }
9901         return ret;
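    /*
     * stat, lstat and fstat all funnel into the do_stat label inside the
     * fstat case below, which copies the host struct stat into the
     * target_stat layout field by field.
     */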
9902 #ifdef TARGET_NR_stat
9903     case TARGET_NR_stat:
9904         if (!(p = lock_user_string(arg1))) {
9905             return -TARGET_EFAULT;
9906         }
9907         ret = get_errno(stat(path(p), &st));
9908         unlock_user(p, arg1, 0);
9909         goto do_stat;
9910 #endif
9911 #ifdef TARGET_NR_lstat
9912     case TARGET_NR_lstat:
9913         if (!(p = lock_user_string(arg1))) {
9914             return -TARGET_EFAULT;
9915         }
9916         ret = get_errno(lstat(path(p), &st));
9917         unlock_user(p, arg1, 0);
9918         goto do_stat;
9919 #endif
9920 #ifdef TARGET_NR_fstat
9921     case TARGET_NR_fstat:
9922         {
9923             ret = get_errno(fstat(arg1, &st));
9924 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9925         do_stat:
9926 #endif
9927             if (!is_error(ret)) {
9928                 struct target_stat *target_st;
9929 
9930                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9931                     return -TARGET_EFAULT;
9932                 memset(target_st, 0, sizeof(*target_st));
9933                 __put_user(st.st_dev, &target_st->st_dev);
9934                 __put_user(st.st_ino, &target_st->st_ino);
9935                 __put_user(st.st_mode, &target_st->st_mode);
9936                 __put_user(st.st_uid, &target_st->st_uid);
9937                 __put_user(st.st_gid, &target_st->st_gid);
9938                 __put_user(st.st_nlink, &target_st->st_nlink);
9939                 __put_user(st.st_rdev, &target_st->st_rdev);
9940                 __put_user(st.st_size, &target_st->st_size);
9941                 __put_user(st.st_blksize, &target_st->st_blksize);
9942                 __put_user(st.st_blocks, &target_st->st_blocks);
9943                 __put_user(st.st_atime, &target_st->target_st_atime);
9944                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9945                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9946 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
9947                 __put_user(st.st_atim.tv_nsec,
9948                            &target_st->target_st_atime_nsec);
9949                 __put_user(st.st_mtim.tv_nsec,
9950                            &target_st->target_st_mtime_nsec);
9951                 __put_user(st.st_ctim.tv_nsec,
9952                            &target_st->target_st_ctime_nsec);
9953 #endif
9954                 unlock_user_struct(target_st, arg2, 1);
9955             }
9956         }
9957         return ret;
9958 #endif
9959     case TARGET_NR_vhangup:
9960         return get_errno(vhangup());
9961 #ifdef TARGET_NR_syscall
9962     case TARGET_NR_syscall:
9963         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9964                           arg6, arg7, arg8, 0);
9965 #endif
9966 #if defined(TARGET_NR_wait4)
9967     case TARGET_NR_wait4:
9968         {
9969             int status;
9970             abi_long status_ptr = arg2;
9971             struct rusage rusage, *rusage_ptr;
9972             abi_ulong target_rusage = arg4;
9973             abi_long rusage_err;
9974             if (target_rusage)
9975                 rusage_ptr = &rusage;
9976             else
9977                 rusage_ptr = NULL;
9978             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9979             if (!is_error(ret)) {
9980                 if (status_ptr && ret) {
9981                     status = host_to_target_waitstatus(status);
9982                     if (put_user_s32(status, status_ptr))
9983                         return -TARGET_EFAULT;
9984                 }
9985                 if (target_rusage) {
9986                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9987                     if (rusage_err) {
9988                         ret = rusage_err;
9989                     }
9990                 }
9991             }
9992         }
9993         return ret;
9994 #endif
9995 #ifdef TARGET_NR_swapoff
9996     case TARGET_NR_swapoff:
9997         if (!(p = lock_user_string(arg1)))
9998             return -TARGET_EFAULT;
9999         ret = get_errno(swapoff(p));
10000         unlock_user(p, arg1, 0);
10001         return ret;
10002 #endif
10003     case TARGET_NR_sysinfo:
10004         {
10005             struct target_sysinfo *target_value;
10006             struct sysinfo value;
10007             ret = get_errno(sysinfo(&value));
10008             if (!is_error(ret) && arg1)
10009             {
10010                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10011                     return -TARGET_EFAULT;
10012                 __put_user(value.uptime, &target_value->uptime);
10013                 __put_user(value.loads[0], &target_value->loads[0]);
10014                 __put_user(value.loads[1], &target_value->loads[1]);
10015                 __put_user(value.loads[2], &target_value->loads[2]);
10016                 __put_user(value.totalram, &target_value->totalram);
10017                 __put_user(value.freeram, &target_value->freeram);
10018                 __put_user(value.sharedram, &target_value->sharedram);
10019                 __put_user(value.bufferram, &target_value->bufferram);
10020                 __put_user(value.totalswap, &target_value->totalswap);
10021                 __put_user(value.freeswap, &target_value->freeswap);
10022                 __put_user(value.procs, &target_value->procs);
10023                 __put_user(value.totalhigh, &target_value->totalhigh);
10024                 __put_user(value.freehigh, &target_value->freehigh);
10025                 __put_user(value.mem_unit, &target_value->mem_unit);
10026                 unlock_user_struct(target_value, arg1, 1);
10027             }
10028         }
10029         return ret;
10030 #ifdef TARGET_NR_ipc
10031     case TARGET_NR_ipc:
10032         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10033 #endif
10034 #ifdef TARGET_NR_semget
10035     case TARGET_NR_semget:
10036         return get_errno(semget(arg1, arg2, arg3));
10037 #endif
10038 #ifdef TARGET_NR_semop
10039     case TARGET_NR_semop:
10040         return do_semtimedop(arg1, arg2, arg3, 0, false);
10041 #endif
10042 #ifdef TARGET_NR_semtimedop
10043     case TARGET_NR_semtimedop:
10044         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10045 #endif
10046 #ifdef TARGET_NR_semtimedop_time64
10047     case TARGET_NR_semtimedop_time64:
10048         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10049 #endif
10050 #ifdef TARGET_NR_semctl
10051     case TARGET_NR_semctl:
10052         return do_semctl(arg1, arg2, arg3, arg4);
10053 #endif
10054 #ifdef TARGET_NR_msgctl
10055     case TARGET_NR_msgctl:
10056         return do_msgctl(arg1, arg2, arg3);
10057 #endif
10058 #ifdef TARGET_NR_msgget
10059     case TARGET_NR_msgget:
10060         return get_errno(msgget(arg1, arg2));
10061 #endif
10062 #ifdef TARGET_NR_msgrcv
10063     case TARGET_NR_msgrcv:
10064         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10065 #endif
10066 #ifdef TARGET_NR_msgsnd
10067     case TARGET_NR_msgsnd:
10068         return do_msgsnd(arg1, arg2, arg3, arg4);
10069 #endif
10070 #ifdef TARGET_NR_shmget
10071     case TARGET_NR_shmget:
10072         return get_errno(shmget(arg1, arg2, arg3));
10073 #endif
10074 #ifdef TARGET_NR_shmctl
10075     case TARGET_NR_shmctl:
10076         return do_shmctl(arg1, arg2, arg3);
10077 #endif
10078 #ifdef TARGET_NR_shmat
10079     case TARGET_NR_shmat:
10080         return do_shmat(cpu_env, arg1, arg2, arg3);
10081 #endif
10082 #ifdef TARGET_NR_shmdt
10083     case TARGET_NR_shmdt:
10084         return do_shmdt(arg1);
10085 #endif
10086     case TARGET_NR_fsync:
10087         return get_errno(fsync(arg1));
10088     case TARGET_NR_clone:
10089         /* Linux manages to have three different orderings for its
10090          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10091          * match the kernel's CONFIG_CLONE_* settings.
10092          * Microblaze is further special in that it uses a sixth
10093          * implicit argument to clone for the TLS pointer.
10094          */
10095 #if defined(TARGET_MICROBLAZE)
10096         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10097 #elif defined(TARGET_CLONE_BACKWARDS)
10098         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10099 #elif defined(TARGET_CLONE_BACKWARDS2)
10100         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10101 #else
10102         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10103 #endif
10104         return ret;
10105 #ifdef __NR_exit_group
10106         /* new thread calls */
10107     case TARGET_NR_exit_group:
10108         preexit_cleanup(cpu_env, arg1);
10109         return get_errno(exit_group(arg1));
10110 #endif
10111     case TARGET_NR_setdomainname:
10112         if (!(p = lock_user_string(arg1)))
10113             return -TARGET_EFAULT;
10114         ret = get_errno(setdomainname(p, arg2));
10115         unlock_user(p, arg1, 0);
10116         return ret;
10117     case TARGET_NR_uname:
10118         /* no need to transcode because we use the linux syscall */
10119         {
10120             struct new_utsname * buf;
10121 
10122             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10123                 return -TARGET_EFAULT;
10124             ret = get_errno(sys_uname(buf));
10125             if (!is_error(ret)) {
10126                 /* Overwrite the native machine name with whatever is being
10127                    emulated. */
10128                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10129                           sizeof(buf->machine));
10130                 /* Allow the user to override the reported release.  */
10131                 if (qemu_uname_release && *qemu_uname_release) {
10132                     g_strlcpy(buf->release, qemu_uname_release,
10133                               sizeof(buf->release));
10134                 }
10135             }
10136             unlock_user_struct(buf, arg1, 1);
10137         }
10138         return ret;
10139 #ifdef TARGET_I386
10140     case TARGET_NR_modify_ldt:
10141         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10142 #if !defined(TARGET_X86_64)
10143     case TARGET_NR_vm86:
10144         return do_vm86(cpu_env, arg1, arg2);
10145 #endif
10146 #endif
10147 #if defined(TARGET_NR_adjtimex)
10148     case TARGET_NR_adjtimex:
10149         {
10150             struct timex host_buf;
10151 
10152             if (target_to_host_timex(&host_buf, arg1) != 0) {
10153                 return -TARGET_EFAULT;
10154             }
10155             ret = get_errno(adjtimex(&host_buf));
10156             if (!is_error(ret)) {
10157                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10158                     return -TARGET_EFAULT;
10159                 }
10160             }
10161         }
10162         return ret;
10163 #endif
10164 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10165     case TARGET_NR_clock_adjtime:
10166         {
10167             struct timex htx, *phtx = &htx;
10168 
10169             if (target_to_host_timex(phtx, arg2) != 0) {
10170                 return -TARGET_EFAULT;
10171             }
10172             ret = get_errno(clock_adjtime(arg1, phtx));
10173             if (!is_error(ret) && phtx) {
10174                 if (host_to_target_timex(arg2, phtx) != 0) {
10175                     return -TARGET_EFAULT;
10176                 }
10177             }
10178         }
10179         return ret;
10180 #endif
10181 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10182     case TARGET_NR_clock_adjtime64:
10183         {
10184             struct timex htx;
10185 
10186             if (target_to_host_timex64(&htx, arg2) != 0) {
10187                 return -TARGET_EFAULT;
10188             }
10189             ret = get_errno(clock_adjtime(arg1, &htx));
10190             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10191                     return -TARGET_EFAULT;
10192             }
10193         }
10194         return ret;
10195 #endif
10196     case TARGET_NR_getpgid:
10197         return get_errno(getpgid(arg1));
10198     case TARGET_NR_fchdir:
10199         return get_errno(fchdir(arg1));
10200     case TARGET_NR_personality:
10201         return get_errno(personality(arg1));
10202 #ifdef TARGET_NR__llseek /* Not on alpha */
10203     case TARGET_NR__llseek:
10204         {
10205             int64_t res;
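            /* Hosts without __NR_llseek (typically 64-bit) emulate it
             * with lseek() on the combined offset; the 64-bit result is
             * written back to the guest through arg4 below. */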
10206 #if !defined(__NR_llseek)
10207             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10208             if (res == -1) {
10209                 ret = get_errno(res);
10210             } else {
10211                 ret = 0;
10212             }
10213 #else
10214             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10215 #endif
10216             if ((ret == 0) && put_user_s64(res, arg4)) {
10217                 return -TARGET_EFAULT;
10218             }
10219         }
10220         return ret;
10221 #endif
10222 #ifdef TARGET_NR_getdents
10223     case TARGET_NR_getdents:
10224 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10225 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
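        /*
         * 32-bit target on a 64-bit host: the host and target dirent
         * layouts differ, so read into a bounce buffer and repack each
         * record into the target_dirent format.
         */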
10226         {
10227             struct target_dirent *target_dirp;
10228             struct linux_dirent *dirp;
10229             abi_long count = arg3;
10230 
10231             dirp = g_try_malloc(count);
10232             if (!dirp) {
10233                 return -TARGET_ENOMEM;
10234             }
10235 
10236             ret = get_errno(sys_getdents(arg1, dirp, count));
10237             if (!is_error(ret)) {
10238                 struct linux_dirent *de;
10239                 struct target_dirent *tde;
10240                 int len = ret;
10241                 int reclen, treclen;
10242                 int count1, tnamelen;
10243 
10244                 count1 = 0;
10245                 de = dirp;
10246                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10247                     return -TARGET_EFAULT;
10248                 tde = target_dirp;
10249                 while (len > 0) {
10250                     reclen = de->d_reclen;
10251                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10252                     assert(tnamelen >= 0);
10253                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10254                     assert(count1 + treclen <= count);
10255                     tde->d_reclen = tswap16(treclen);
10256                     tde->d_ino = tswapal(de->d_ino);
10257                     tde->d_off = tswapal(de->d_off);
10258                     memcpy(tde->d_name, de->d_name, tnamelen);
10259                     de = (struct linux_dirent *)((char *)de + reclen);
10260                     len -= reclen;
10261                     tde = (struct target_dirent *)((char *)tde + treclen);
10262                     count1 += treclen;
10263                 }
10264                 ret = count1;
10265                 unlock_user(target_dirp, arg2, ret);
10266             }
10267             g_free(dirp);
10268         }
10269 #else
10270         {
10271             struct linux_dirent *dirp;
10272             abi_long count = arg3;
10273 
10274             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10275                 return -TARGET_EFAULT;
10276             ret = get_errno(sys_getdents(arg1, dirp, count));
10277             if (!is_error(ret)) {
10278                 struct linux_dirent *de;
10279                 int len = ret;
10280                 int reclen;
10281                 de = dirp;
10282                 while (len > 0) {
10283                     reclen = de->d_reclen;
10284                     if (reclen > len)
10285                         break;
10286                     de->d_reclen = tswap16(reclen);
10287                     tswapls(&de->d_ino);
10288                     tswapls(&de->d_off);
10289                     de = (struct linux_dirent *)((char *)de + reclen);
10290                     len -= reclen;
10291                 }
10292             }
10293             unlock_user(dirp, arg2, ret);
10294         }
10295 #endif
10296 #else
10297         /* Implement getdents in terms of getdents64 */
10298         {
10299             struct linux_dirent64 *dirp;
10300             abi_long count = arg3;
10301 
10302             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10303             if (!dirp) {
10304                 return -TARGET_EFAULT;
10305             }
10306             ret = get_errno(sys_getdents64(arg1, dirp, count));
10307             if (!is_error(ret)) {
10308                 /* Convert the dirent64 structs to target dirent.  We do this
10309                  * in-place, since we can guarantee that a target_dirent is no
10310                  * larger than a dirent64; however this means we have to be
10311                  * careful to read everything before writing in the new format.
10312                  */
10313                 struct linux_dirent64 *de;
10314                 struct target_dirent *tde;
10315                 int len = ret;
10316                 int tlen = 0;
10317 
10318                 de = dirp;
10319                 tde = (struct target_dirent *)dirp;
10320                 while (len > 0) {
10321                     int namelen, treclen;
10322                     int reclen = de->d_reclen;
10323                     uint64_t ino = de->d_ino;
10324                     int64_t off = de->d_off;
10325                     uint8_t type = de->d_type;
10326 
10327                     namelen = strlen(de->d_name);
10328                     treclen = offsetof(struct target_dirent, d_name)
10329                         + namelen + 2;
10330                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10331 
10332                     memmove(tde->d_name, de->d_name, namelen + 1);
10333                     tde->d_ino = tswapal(ino);
10334                     tde->d_off = tswapal(off);
10335                     tde->d_reclen = tswap16(treclen);
10336                     /* The target_dirent type is in what was formerly a padding
10337                      * byte at the end of the structure:
10338                      */
10339                     *(((char *)tde) + treclen - 1) = type;
10340 
10341                     de = (struct linux_dirent64 *)((char *)de + reclen);
10342                     tde = (struct target_dirent *)((char *)tde + treclen);
10343                     len -= reclen;
10344                     tlen += treclen;
10345                 }
10346                 ret = tlen;
10347             }
10348             unlock_user(dirp, arg2, ret);
10349         }
10350 #endif
10351         return ret;
10352 #endif /* TARGET_NR_getdents */
10353 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
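    /* For getdents64 the record layout matches between host and target,
     * so the entries only need their fields byteswapped in place. */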
10354     case TARGET_NR_getdents64:
10355         {
10356             struct linux_dirent64 *dirp;
10357             abi_long count = arg3;
10358             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10359                 return -TARGET_EFAULT;
10360             ret = get_errno(sys_getdents64(arg1, dirp, count));
10361             if (!is_error(ret)) {
10362                 struct linux_dirent64 *de;
10363                 int len = ret;
10364                 int reclen;
10365                 de = dirp;
10366                 while (len > 0) {
10367                     reclen = de->d_reclen;
10368                     if (reclen > len)
10369                         break;
10370                     de->d_reclen = tswap16(reclen);
10371                     tswap64s((uint64_t *)&de->d_ino);
10372                     tswap64s((uint64_t *)&de->d_off);
10373                     de = (struct linux_dirent64 *)((char *)de + reclen);
10374                     len -= reclen;
10375                 }
10376             }
10377             unlock_user(dirp, arg2, ret);
10378         }
10379         return ret;
10380 #endif /* TARGET_NR_getdents64 */
10381 #if defined(TARGET_NR__newselect)
10382     case TARGET_NR__newselect:
10383         return do_select(arg1, arg2, arg3, arg4, arg5);
10384 #endif
10385 #ifdef TARGET_NR_poll
10386     case TARGET_NR_poll:
10387         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10388 #endif
10389 #ifdef TARGET_NR_ppoll
10390     case TARGET_NR_ppoll:
10391         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10392 #endif
10393 #ifdef TARGET_NR_ppoll_time64
10394     case TARGET_NR_ppoll_time64:
10395         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10396 #endif
10397     case TARGET_NR_flock:
10398         /* NOTE: the flock constant seems to be the same for every
10399            Linux platform */
10400         return get_errno(safe_flock(arg1, arg2));
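    /* lock_iovec() validates and maps the whole guest iovec in one go;
     * on failure it leaves errno set, which is translated for the guest. */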
10401     case TARGET_NR_readv:
10402         {
10403             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10404             if (vec != NULL) {
10405                 ret = get_errno(safe_readv(arg1, vec, arg3));
10406                 unlock_iovec(vec, arg2, arg3, 1);
10407             } else {
10408                 ret = -host_to_target_errno(errno);
10409             }
10410         }
10411         return ret;
10412     case TARGET_NR_writev:
10413         {
10414             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10415             if (vec != NULL) {
10416                 ret = get_errno(safe_writev(arg1, vec, arg3));
10417                 unlock_iovec(vec, arg2, arg3, 0);
10418             } else {
10419                 ret = -host_to_target_errno(errno);
10420             }
10421         }
10422         return ret;
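    /* preadv/pwritev receive the 64-bit file offset split across two
     * arguments; target_to_host_low_high() reassembles it in whichever
     * order the target ABI uses. */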
10423 #if defined(TARGET_NR_preadv)
10424     case TARGET_NR_preadv:
10425         {
10426             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10427             if (vec != NULL) {
10428                 unsigned long low, high;
10429 
10430                 target_to_host_low_high(arg4, arg5, &low, &high);
10431                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10432                 unlock_iovec(vec, arg2, arg3, 1);
10433             } else {
10434                 ret = -host_to_target_errno(errno);
10435             }
10436         }
10437         return ret;
10438 #endif
10439 #if defined(TARGET_NR_pwritev)
10440     case TARGET_NR_pwritev:
10441         {
10442             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10443             if (vec != NULL) {
10444                 unsigned long low, high;
10445 
10446                 target_to_host_low_high(arg4, arg5, &low, &high);
10447                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10448                 unlock_iovec(vec, arg2, arg3, 0);
10449             } else {
10450                 ret = -host_to_target_errno(errno);
10451             }
10452         }
10453         return ret;
10454 #endif
10455     case TARGET_NR_getsid:
10456         return get_errno(getsid(arg1));
10457 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10458     case TARGET_NR_fdatasync:
10459         return get_errno(fdatasync(arg1));
10460 #endif
10461     case TARGET_NR_sched_getaffinity:
10462         {
10463             unsigned int mask_size;
10464             unsigned long *mask;
10465 
10466             /*
10467              * sched_getaffinity needs multiples of ulong, so we need to take
10468              * care of mismatches between target ulong and host ulong sizes.
10469              */
10470             if (arg2 & (sizeof(abi_ulong) - 1)) {
10471                 return -TARGET_EINVAL;
10472             }
10473             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10474 
10475             mask = alloca(mask_size);
10476             memset(mask, 0, mask_size);
10477             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10478 
10479             if (!is_error(ret)) {
10480                 if (ret > arg2) {
10481                     /* More data was returned than fits in the caller's buffer.
10482                      * This only happens if sizeof(abi_long) < sizeof(long)
10483                      * and the caller passed us a buffer holding an odd number
10484                      * of abi_longs. If the host kernel is actually using the
10485                      * extra 4 bytes then fail EINVAL; otherwise we can just
10486                      * ignore them and only copy the interesting part.
10487                      */
10488                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10489                     if (numcpus > arg2 * 8) {
10490                         return -TARGET_EINVAL;
10491                     }
10492                     ret = arg2;
10493                 }
10494 
10495                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10496                     return -TARGET_EFAULT;
10497                 }
10498             }
10499         }
10500         return ret;
10501     case TARGET_NR_sched_setaffinity:
10502         {
10503             unsigned int mask_size;
10504             unsigned long *mask;
10505 
10506             /*
10507              * sched_setaffinity needs multiples of ulong, so we need to take
10508              * care of mismatches between target ulong and host ulong sizes.
10509              */
10510             if (arg2 & (sizeof(abi_ulong) - 1)) {
10511                 return -TARGET_EINVAL;
10512             }
10513             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10514             mask = alloca(mask_size);
10515 
10516             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10517             if (ret) {
10518                 return ret;
10519             }
10520 
10521             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10522         }
10523     case TARGET_NR_getcpu:
10524         {
10525             unsigned cpu, node;
10526             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10527                                        arg2 ? &node : NULL,
10528                                        NULL));
10529             if (is_error(ret)) {
10530                 return ret;
10531             }
10532             if (arg1 && put_user_u32(cpu, arg1)) {
10533                 return -TARGET_EFAULT;
10534             }
10535             if (arg2 && put_user_u32(node, arg2)) {
10536                 return -TARGET_EFAULT;
10537             }
10538         }
10539         return ret;
10540     case TARGET_NR_sched_setparam:
10541         {
10542             struct sched_param *target_schp;
10543             struct sched_param schp;
10544 
10545             if (arg2 == 0) {
10546                 return -TARGET_EINVAL;
10547             }
10548             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10549                 return -TARGET_EFAULT;
10550             schp.sched_priority = tswap32(target_schp->sched_priority);
10551             unlock_user_struct(target_schp, arg2, 0);
10552             return get_errno(sched_setparam(arg1, &schp));
10553         }
10554     case TARGET_NR_sched_getparam:
10555         {
10556             struct sched_param *target_schp;
10557             struct sched_param schp;
10558 
10559             if (arg2 == 0) {
10560                 return -TARGET_EINVAL;
10561             }
10562             ret = get_errno(sched_getparam(arg1, &schp));
10563             if (!is_error(ret)) {
10564                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10565                     return -TARGET_EFAULT;
10566                 target_schp->sched_priority = tswap32(schp.sched_priority);
10567                 unlock_user_struct(target_schp, arg2, 1);
10568             }
10569         }
10570         return ret;
10571     case TARGET_NR_sched_setscheduler:
10572         {
10573             struct sched_param *target_schp;
10574             struct sched_param schp;
10575             if (arg3 == 0) {
10576                 return -TARGET_EINVAL;
10577             }
10578             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10579                 return -TARGET_EFAULT;
10580             schp.sched_priority = tswap32(target_schp->sched_priority);
10581             unlock_user_struct(target_schp, arg3, 0);
10582             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10583         }
10584     case TARGET_NR_sched_getscheduler:
10585         return get_errno(sched_getscheduler(arg1));
10586     case TARGET_NR_sched_yield:
10587         return get_errno(sched_yield());
10588     case TARGET_NR_sched_get_priority_max:
10589         return get_errno(sched_get_priority_max(arg1));
10590     case TARGET_NR_sched_get_priority_min:
10591         return get_errno(sched_get_priority_min(arg1));
10592 #ifdef TARGET_NR_sched_rr_get_interval
10593     case TARGET_NR_sched_rr_get_interval:
10594         {
10595             struct timespec ts;
10596             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10597             if (!is_error(ret)) {
10598                 ret = host_to_target_timespec(arg2, &ts);
10599             }
10600         }
10601         return ret;
10602 #endif
10603 #ifdef TARGET_NR_sched_rr_get_interval_time64
10604     case TARGET_NR_sched_rr_get_interval_time64:
10605         {
10606             struct timespec ts;
10607             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10608             if (!is_error(ret)) {
10609                 ret = host_to_target_timespec64(arg2, &ts);
10610             }
10611         }
10612         return ret;
10613 #endif
10614 #if defined(TARGET_NR_nanosleep)
10615     case TARGET_NR_nanosleep:
10616         {
10617             struct timespec req, rem;
10618             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10619             ret = get_errno(safe_nanosleep(&req, &rem));
10620             if (is_error(ret) && arg2) {
10621                 host_to_target_timespec(arg2, &rem);
10622             }
10623         }
10624         return ret;
10625 #endif
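          /*
           * prctl options whose arguments are pointers, or which touch
           * emulated CPU state, are handled individually below; everything
           * else is passed straight through to the host prctl() by the
           * default case.
           */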
10626     case TARGET_NR_prctl:
10627         switch (arg1) {
10628         case PR_GET_PDEATHSIG:
10629         {
10630             int deathsig;
10631             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10632             if (!is_error(ret) && arg2
10633                 && put_user_s32(deathsig, arg2)) {
10634                 return -TARGET_EFAULT;
10635             }
10636             return ret;
10637         }
10638 #ifdef PR_GET_NAME
10639         case PR_GET_NAME:
10640         {
10641             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10642             if (!name) {
10643                 return -TARGET_EFAULT;
10644             }
10645             ret = get_errno(prctl(arg1, (unsigned long)name,
10646                                   arg3, arg4, arg5));
10647             unlock_user(name, arg2, 16);
10648             return ret;
10649         }
10650         case PR_SET_NAME:
10651         {
10652             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10653             if (!name) {
10654                 return -TARGET_EFAULT;
10655             }
10656             ret = get_errno(prctl(arg1, (unsigned long)name,
10657                                   arg3, arg4, arg5));
10658             unlock_user(name, arg2, 0);
10659             return ret;
10660         }
10661 #endif
10662 #ifdef TARGET_MIPS
10663         case TARGET_PR_GET_FP_MODE:
10664         {
10665             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10666             ret = 0;
10667             if (env->CP0_Status & (1 << CP0St_FR)) {
10668                 ret |= TARGET_PR_FP_MODE_FR;
10669             }
10670             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10671                 ret |= TARGET_PR_FP_MODE_FRE;
10672             }
10673             return ret;
10674         }
10675         case TARGET_PR_SET_FP_MODE:
10676         {
10677             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10678             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10679             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10680             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10681             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10682 
10683             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10684                                             TARGET_PR_FP_MODE_FRE;
10685 
10686             /* If nothing to change, return right away, successfully.  */
10687             if (old_fr == new_fr && old_fre == new_fre) {
10688                 return 0;
10689             }
10690             /* Check the value is valid */
10691             if (arg2 & ~known_bits) {
10692                 return -TARGET_EOPNOTSUPP;
10693             }
10694             /* Setting FRE without FR is not supported.  */
10695             if (new_fre && !new_fr) {
10696                 return -TARGET_EOPNOTSUPP;
10697             }
10698             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10699                 /* FR1 is not supported */
10700                 return -TARGET_EOPNOTSUPP;
10701             }
10702             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10703                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10704                 /* cannot set FR=0 */
10705                 return -TARGET_EOPNOTSUPP;
10706             }
10707             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10708                 /* Cannot set FRE=1 */
10709                 return -TARGET_EOPNOTSUPP;
10710             }
10711 
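                  /*
                   * The FR bit changes how the single-precision registers
                   * map onto the fpr[] array, so shuffle the affected words
                   * so that register contents are preserved across the
                   * layout change.
                   */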
10712             int i;
10713             fpr_t *fpr = env->active_fpu.fpr;
10714             for (i = 0; i < 32 ; i += 2) {
10715                 if (!old_fr && new_fr) {
10716                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10717                 } else if (old_fr && !new_fr) {
10718                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10719                 }
10720             }
10721 
10722             if (new_fr) {
10723                 env->CP0_Status |= (1 << CP0St_FR);
10724                 env->hflags |= MIPS_HFLAG_F64;
10725             } else {
10726                 env->CP0_Status &= ~(1 << CP0St_FR);
10727                 env->hflags &= ~MIPS_HFLAG_F64;
10728             }
10729             if (new_fre) {
10730                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10731                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10732                     env->hflags |= MIPS_HFLAG_FRE;
10733                 }
10734             } else {
10735                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10736                 env->hflags &= ~MIPS_HFLAG_FRE;
10737             }
10738 
10739             return 0;
10740         }
10741 #endif /* MIPS */
10742 #ifdef TARGET_AARCH64
10743         case TARGET_PR_SVE_SET_VL:
10744             /*
10745              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10746              * PR_SVE_VL_INHERIT.  Note the kernel definition
10747              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10748              * even though the current architectural maximum is VQ=16.
10749              */
10750             ret = -TARGET_EINVAL;
10751             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10752                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10753                 CPUARMState *env = cpu_env;
10754                 ARMCPU *cpu = env_archcpu(env);
10755                 uint32_t vq, old_vq;
10756 
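                      /*
                       * arg2 is the requested vector length in bytes;
                       * convert it to a quadword count and clamp it to the
                       * maximum the CPU model supports.
                       */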
10757                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10758                 vq = MAX(arg2 / 16, 1);
10759                 vq = MIN(vq, cpu->sve_max_vq);
10760 
10761                 if (vq < old_vq) {
10762                     aarch64_sve_narrow_vq(env, vq);
10763                 }
10764                 env->vfp.zcr_el[1] = vq - 1;
10765                 arm_rebuild_hflags(env);
10766                 ret = vq * 16;
10767             }
10768             return ret;
10769         case TARGET_PR_SVE_GET_VL:
10770             ret = -TARGET_EINVAL;
10771             {
10772                 ARMCPU *cpu = env_archcpu(cpu_env);
10773                 if (cpu_isar_feature(aa64_sve, cpu)) {
10774                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10775                 }
10776             }
10777             return ret;
10778         case TARGET_PR_PAC_RESET_KEYS:
10779             {
10780                 CPUARMState *env = cpu_env;
10781                 ARMCPU *cpu = env_archcpu(env);
10782 
10783                 if (arg3 || arg4 || arg5) {
10784                     return -TARGET_EINVAL;
10785                 }
10786                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10787                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10788                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10789                                TARGET_PR_PAC_APGAKEY);
10790                     int ret = 0;
10791                     Error *err = NULL;
10792 
10793                     if (arg2 == 0) {
10794                         arg2 = all;
10795                     } else if (arg2 & ~all) {
10796                         return -TARGET_EINVAL;
10797                     }
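                            /*
                             * Generate fresh random data for each selected
                             * key; any failure is accumulated in ret and
                             * reported below.
                             */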
10798                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10799                         ret |= qemu_guest_getrandom(&env->keys.apia,
10800                                                     sizeof(ARMPACKey), &err);
10801                     }
10802                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10803                         ret |= qemu_guest_getrandom(&env->keys.apib,
10804                                                     sizeof(ARMPACKey), &err);
10805                     }
10806                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10807                         ret |= qemu_guest_getrandom(&env->keys.apda,
10808                                                     sizeof(ARMPACKey), &err);
10809                     }
10810                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10811                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10812                                                     sizeof(ARMPACKey), &err);
10813                     }
10814                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10815                         ret |= qemu_guest_getrandom(&env->keys.apga,
10816                                                     sizeof(ARMPACKey), &err);
10817                     }
10818                     if (ret != 0) {
10819                         /*
10820                          * Some unknown failure in the crypto.  The best
10821                          * we can do is log it and fail the syscall.
10822                          * The real syscall cannot fail this way.
10823                          */
10824                         qemu_log_mask(LOG_UNIMP,
10825                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10826                                       error_get_pretty(err));
10827                         error_free(err);
10828                         return -TARGET_EIO;
10829                     }
10830                     return 0;
10831                 }
10832             }
10833             return -TARGET_EINVAL;
10834         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10835             {
10836                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10837                 CPUARMState *env = cpu_env;
10838                 ARMCPU *cpu = env_archcpu(env);
10839 
10840                 if (cpu_isar_feature(aa64_mte, cpu)) {
10841                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10842                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10843                 }
10844 
10845                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10846                     return -TARGET_EINVAL;
10847                 }
10848                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10849 
10850                 if (cpu_isar_feature(aa64_mte, cpu)) {
10851                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10852                     case TARGET_PR_MTE_TCF_NONE:
10853                     case TARGET_PR_MTE_TCF_SYNC:
10854                     case TARGET_PR_MTE_TCF_ASYNC:
10855                         break;
10856                     default:
10857                         return -TARGET_EINVAL;
10858                     }
10859 
10860                     /*
10861                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10862                      * Note that the syscall values are consistent with hw.
10863                      */
10864                     env->cp15.sctlr_el[1] =
10865                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10866                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10867 
10868                     /*
10869                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10870                      * Note that the syscall uses an include mask,
10871                      * and hardware uses an exclude mask -- invert.
10872                      */
10873                     env->cp15.gcr_el1 =
10874                         deposit64(env->cp15.gcr_el1, 0, 16,
10875                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10876                     arm_rebuild_hflags(env);
10877                 }
10878                 return 0;
10879             }
10880         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10881             {
10882                 abi_long ret = 0;
10883                 CPUARMState *env = cpu_env;
10884                 ARMCPU *cpu = env_archcpu(env);
10885 
10886                 if (arg2 || arg3 || arg4 || arg5) {
10887                     return -TARGET_EINVAL;
10888                 }
10889                 if (env->tagged_addr_enable) {
10890                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10891                 }
10892                 if (cpu_isar_feature(aa64_mte, cpu)) {
10893                     /* See above. */
10894                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10895                             << TARGET_PR_MTE_TCF_SHIFT);
10896                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10897                                     ~env->cp15.gcr_el1);
10898                 }
10899                 return ret;
10900             }
10901 #endif /* AARCH64 */
10902         case PR_GET_SECCOMP:
10903         case PR_SET_SECCOMP:
10904             /* Disable seccomp to prevent the target from disabling
10905              * syscalls that we need. */
10906             return -TARGET_EINVAL;
10907         default:
10908             /* Most prctl options have no pointer arguments */
10909             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10910         }
10911         break;
10912 #ifdef TARGET_NR_arch_prctl
10913     case TARGET_NR_arch_prctl:
10914         return do_arch_prctl(cpu_env, arg1, arg2);
10915 #endif
10916 #ifdef TARGET_NR_pread64
10917     case TARGET_NR_pread64:
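              /*
               * Some 32-bit ABIs pass the 64-bit offset in an aligned
               * register pair, which inserts a padding argument; shift the
               * arguments so that arg4/arg5 always hold the two halves of
               * the offset.  The same applies to pwrite64 below.
               */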
10918         if (regpairs_aligned(cpu_env, num)) {
10919             arg4 = arg5;
10920             arg5 = arg6;
10921         }
10922         if (arg2 == 0 && arg3 == 0) {
10923             /* Special-case NULL buffer and zero length, which should succeed */
10924             p = 0;
10925         } else {
10926             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10927             if (!p) {
10928                 return -TARGET_EFAULT;
10929             }
10930         }
10931         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10932         unlock_user(p, arg2, ret);
10933         return ret;
10934     case TARGET_NR_pwrite64:
10935         if (regpairs_aligned(cpu_env, num)) {
10936             arg4 = arg5;
10937             arg5 = arg6;
10938         }
10939         if (arg2 == 0 && arg3 == 0) {
10940             /* Special-case NULL buffer and zero length, which should succeed */
10941             p = 0;
10942         } else {
10943             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10944             if (!p) {
10945                 return -TARGET_EFAULT;
10946             }
10947         }
10948         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10949         unlock_user(p, arg2, 0);
10950         return ret;
10951 #endif
10952     case TARGET_NR_getcwd:
10953         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10954             return -TARGET_EFAULT;
10955         ret = get_errno(sys_getcwd1(p, arg2));
10956         unlock_user(p, arg1, ret);
10957         return ret;
10958     case TARGET_NR_capget:
10959     case TARGET_NR_capset:
10960     {
10961         struct target_user_cap_header *target_header;
10962         struct target_user_cap_data *target_data = NULL;
10963         struct __user_cap_header_struct header;
10964         struct __user_cap_data_struct data[2];
10965         struct __user_cap_data_struct *dataptr = NULL;
10966         int i, target_datalen;
10967         int data_items = 1;
10968 
10969         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10970             return -TARGET_EFAULT;
10971         }
10972         header.version = tswap32(target_header->version);
10973         header.pid = tswap32(target_header->pid);
10974 
10975         if (header.version != _LINUX_CAPABILITY_VERSION) {
10976             /* Versions 2 and up take a pointer to two user_data structs */
10977             data_items = 2;
10978         }
10979 
10980         target_datalen = sizeof(*target_data) * data_items;
10981 
10982         if (arg2) {
10983             if (num == TARGET_NR_capget) {
10984                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10985             } else {
10986                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10987             }
10988             if (!target_data) {
10989                 unlock_user_struct(target_header, arg1, 0);
10990                 return -TARGET_EFAULT;
10991             }
10992 
10993             if (num == TARGET_NR_capset) {
10994                 for (i = 0; i < data_items; i++) {
10995                     data[i].effective = tswap32(target_data[i].effective);
10996                     data[i].permitted = tswap32(target_data[i].permitted);
10997                     data[i].inheritable = tswap32(target_data[i].inheritable);
10998                 }
10999             }
11000 
11001             dataptr = data;
11002         }
11003 
11004         if (num == TARGET_NR_capget) {
11005             ret = get_errno(capget(&header, dataptr));
11006         } else {
11007             ret = get_errno(capset(&header, dataptr));
11008         }
11009 
11010         /* The kernel always updates version for both capget and capset */
11011         target_header->version = tswap32(header.version);
11012         unlock_user_struct(target_header, arg1, 1);
11013 
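              /* Data is only copied back to the guest for capget. */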
11014         if (arg2) {
11015             if (num == TARGET_NR_capget) {
11016                 for (i = 0; i < data_items; i++) {
11017                     target_data[i].effective = tswap32(data[i].effective);
11018                     target_data[i].permitted = tswap32(data[i].permitted);
11019                     target_data[i].inheritable = tswap32(data[i].inheritable);
11020                 }
11021                 unlock_user(target_data, arg2, target_datalen);
11022             } else {
11023                 unlock_user(target_data, arg2, 0);
11024             }
11025         }
11026         return ret;
11027     }
11028     case TARGET_NR_sigaltstack:
11029         return do_sigaltstack(arg1, arg2, cpu_env);
11030 
11031 #ifdef CONFIG_SENDFILE
11032 #ifdef TARGET_NR_sendfile
11033     case TARGET_NR_sendfile:
11034     {
11035         off_t *offp = NULL;
11036         off_t off;
11037         if (arg3) {
11038             ret = get_user_sal(off, arg3);
11039             if (is_error(ret)) {
11040                 return ret;
11041             }
11042             offp = &off;
11043         }
11044         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11045         if (!is_error(ret) && arg3) {
11046             abi_long ret2 = put_user_sal(off, arg3);
11047             if (is_error(ret2)) {
11048                 ret = ret2;
11049             }
11050         }
11051         return ret;
11052     }
11053 #endif
11054 #ifdef TARGET_NR_sendfile64
11055     case TARGET_NR_sendfile64:
11056     {
11057         off_t *offp = NULL;
11058         off_t off;
11059         if (arg3) {
11060             ret = get_user_s64(off, arg3);
11061             if (is_error(ret)) {
11062                 return ret;
11063             }
11064             offp = &off;
11065         }
11066         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11067         if (!is_error(ret) && arg3) {
11068             abi_long ret2 = put_user_s64(off, arg3);
11069             if (is_error(ret2)) {
11070                 ret = ret2;
11071             }
11072         }
11073         return ret;
11074     }
11075 #endif
11076 #endif
11077 #ifdef TARGET_NR_vfork
11078     case TARGET_NR_vfork:
11079         return get_errno(do_fork(cpu_env,
11080                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11081                          0, 0, 0, 0));
11082 #endif
11083 #ifdef TARGET_NR_ugetrlimit
11084     case TARGET_NR_ugetrlimit:
11085     {
11086 	struct rlimit rlim;
11087         struct rlimit rlim;
11088         int resource = target_to_host_resource(arg1);
11089         ret = get_errno(getrlimit(resource, &rlim));
11090         if (!is_error(ret)) {
11091             struct target_rlimit *target_rlim;
11092             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11093                 return -TARGET_EFAULT;
11094             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11095             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11096             unlock_user_struct(target_rlim, arg2, 1);
11097         }
11098     }
11099 #endif
11100 #ifdef TARGET_NR_truncate64
11101     case TARGET_NR_truncate64:
11102         if (!(p = lock_user_string(arg1)))
11103             return -TARGET_EFAULT;
11104         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11105         unlock_user(p, arg1, 0);
11106         return ret;
11107 #endif
11108 #ifdef TARGET_NR_ftruncate64
11109     case TARGET_NR_ftruncate64:
11110         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11111 #endif
11112 #ifdef TARGET_NR_stat64
11113     case TARGET_NR_stat64:
11114         if (!(p = lock_user_string(arg1))) {
11115             return -TARGET_EFAULT;
11116         }
11117         ret = get_errno(stat(path(p), &st));
11118         unlock_user(p, arg1, 0);
11119         if (!is_error(ret))
11120             ret = host_to_target_stat64(cpu_env, arg2, &st);
11121         return ret;
11122 #endif
11123 #ifdef TARGET_NR_lstat64
11124     case TARGET_NR_lstat64:
11125         if (!(p = lock_user_string(arg1))) {
11126             return -TARGET_EFAULT;
11127         }
11128         ret = get_errno(lstat(path(p), &st));
11129         unlock_user(p, arg1, 0);
11130         if (!is_error(ret))
11131             ret = host_to_target_stat64(cpu_env, arg2, &st);
11132         return ret;
11133 #endif
11134 #ifdef TARGET_NR_fstat64
11135     case TARGET_NR_fstat64:
11136         ret = get_errno(fstat(arg1, &st));
11137         if (!is_error(ret))
11138             ret = host_to_target_stat64(cpu_env, arg2, &st);
11139         return ret;
11140 #endif
11141 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11142 #ifdef TARGET_NR_fstatat64
11143     case TARGET_NR_fstatat64:
11144 #endif
11145 #ifdef TARGET_NR_newfstatat
11146     case TARGET_NR_newfstatat:
11147 #endif
11148         if (!(p = lock_user_string(arg2))) {
11149             return -TARGET_EFAULT;
11150         }
11151         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11152         unlock_user(p, arg2, 0);
11153         if (!is_error(ret))
11154             ret = host_to_target_stat64(cpu_env, arg3, &st);
11155         return ret;
11156 #endif
11157 #if defined(TARGET_NR_statx)
11158     case TARGET_NR_statx:
11159         {
11160             struct target_statx *target_stx;
11161             int dirfd = arg1;
11162             int flags = arg3;
11163 
11164             p = lock_user_string(arg2);
11165             if (p == NULL) {
11166                 return -TARGET_EFAULT;
11167             }
11168 #if defined(__NR_statx)
11169             {
11170                 /*
11171                  * It is assumed that struct statx is architecture independent.
11172                  */
11173                 struct target_statx host_stx;
11174                 int mask = arg4;
11175 
11176                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11177                 if (!is_error(ret)) {
11178                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11179                         unlock_user(p, arg2, 0);
11180                         return -TARGET_EFAULT;
11181                     }
11182                 }
11183 
11184                 if (ret != -TARGET_ENOSYS) {
11185                     unlock_user(p, arg2, 0);
11186                     return ret;
11187                 }
11188             }
11189 #endif
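                  /*
                   * Either the host has no statx() syscall or it returned
                   * ENOSYS; fall back to fstatat() and fill in the statx
                   * fields that a plain struct stat can provide.
                   */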
11190             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11191             unlock_user(p, arg2, 0);
11192 
11193             if (!is_error(ret)) {
11194                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11195                     return -TARGET_EFAULT;
11196                 }
11197                 memset(target_stx, 0, sizeof(*target_stx));
11198                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11199                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11200                 __put_user(st.st_ino, &target_stx->stx_ino);
11201                 __put_user(st.st_mode, &target_stx->stx_mode);
11202                 __put_user(st.st_uid, &target_stx->stx_uid);
11203                 __put_user(st.st_gid, &target_stx->stx_gid);
11204                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11205                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11206                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11207                 __put_user(st.st_size, &target_stx->stx_size);
11208                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11209                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11210                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11211                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11212                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11213                 unlock_user_struct(target_stx, arg5, 1);
11214             }
11215         }
11216         return ret;
11217 #endif
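          /*
           * The chown/get*id/set*id syscalls below are the legacy
           * (possibly 16-bit) uid/gid variants; low2high*() and
           * high2low*() convert between the target's narrow IDs and the
           * host's.  The *32 variants further down pass 32-bit IDs through
           * unchanged.
           */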
11218 #ifdef TARGET_NR_lchown
11219     case TARGET_NR_lchown:
11220         if (!(p = lock_user_string(arg1)))
11221             return -TARGET_EFAULT;
11222         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11223         unlock_user(p, arg1, 0);
11224         return ret;
11225 #endif
11226 #ifdef TARGET_NR_getuid
11227     case TARGET_NR_getuid:
11228         return get_errno(high2lowuid(getuid()));
11229 #endif
11230 #ifdef TARGET_NR_getgid
11231     case TARGET_NR_getgid:
11232         return get_errno(high2lowgid(getgid()));
11233 #endif
11234 #ifdef TARGET_NR_geteuid
11235     case TARGET_NR_geteuid:
11236         return get_errno(high2lowuid(geteuid()));
11237 #endif
11238 #ifdef TARGET_NR_getegid
11239     case TARGET_NR_getegid:
11240         return get_errno(high2lowgid(getegid()));
11241 #endif
11242     case TARGET_NR_setreuid:
11243         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11244     case TARGET_NR_setregid:
11245         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
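          /*
           * getgroups/setgroups use the legacy target_id type, so each
           * entry is byte-swapped and width-converted individually.
           */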
11246     case TARGET_NR_getgroups:
11247         {
11248             int gidsetsize = arg1;
11249             target_id *target_grouplist;
11250             gid_t *grouplist;
11251             int i;
11252 
11253             grouplist = alloca(gidsetsize * sizeof(gid_t));
11254             ret = get_errno(getgroups(gidsetsize, grouplist));
11255             if (gidsetsize == 0)
11256                 return ret;
11257             if (!is_error(ret)) {
11258                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11259                 if (!target_grouplist)
11260                     return -TARGET_EFAULT;
11261                 for (i = 0; i < ret; i++)
11262                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11263                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11264             }
11265         }
11266         return ret;
11267     case TARGET_NR_setgroups:
11268         {
11269             int gidsetsize = arg1;
11270             target_id *target_grouplist;
11271             gid_t *grouplist = NULL;
11272             int i;
11273             if (gidsetsize) {
11274                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11275                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11276                 if (!target_grouplist) {
11277                     return -TARGET_EFAULT;
11278                 }
11279                 for (i = 0; i < gidsetsize; i++) {
11280                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11281                 }
11282                 unlock_user(target_grouplist, arg2, 0);
11283             }
11284             return get_errno(setgroups(gidsetsize, grouplist));
11285         }
11286     case TARGET_NR_fchown:
11287         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11288 #if defined(TARGET_NR_fchownat)
11289     case TARGET_NR_fchownat:
11290         if (!(p = lock_user_string(arg2)))
11291             return -TARGET_EFAULT;
11292         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11293                                  low2highgid(arg4), arg5));
11294         unlock_user(p, arg2, 0);
11295         return ret;
11296 #endif
11297 #ifdef TARGET_NR_setresuid
11298     case TARGET_NR_setresuid:
11299         return get_errno(sys_setresuid(low2highuid(arg1),
11300                                        low2highuid(arg2),
11301                                        low2highuid(arg3)));
11302 #endif
11303 #ifdef TARGET_NR_getresuid
11304     case TARGET_NR_getresuid:
11305         {
11306             uid_t ruid, euid, suid;
11307             ret = get_errno(getresuid(&ruid, &euid, &suid));
11308             if (!is_error(ret)) {
11309                 if (put_user_id(high2lowuid(ruid), arg1)
11310                     || put_user_id(high2lowuid(euid), arg2)
11311                     || put_user_id(high2lowuid(suid), arg3))
11312                     return -TARGET_EFAULT;
11313             }
11314         }
11315         return ret;
11316 #endif
11317 #ifdef TARGET_NR_getresgid
11318     case TARGET_NR_setresgid:
11319         return get_errno(sys_setresgid(low2highgid(arg1),
11320                                        low2highgid(arg2),
11321                                        low2highgid(arg3)));
11322 #endif
11323 #ifdef TARGET_NR_getresgid
11324     case TARGET_NR_getresgid:
11325         {
11326             gid_t rgid, egid, sgid;
11327             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11328             if (!is_error(ret)) {
11329                 if (put_user_id(high2lowgid(rgid), arg1)
11330                     || put_user_id(high2lowgid(egid), arg2)
11331                     || put_user_id(high2lowgid(sgid), arg3))
11332                     return -TARGET_EFAULT;
11333             }
11334         }
11335         return ret;
11336 #endif
11337 #ifdef TARGET_NR_chown
11338     case TARGET_NR_chown:
11339         if (!(p = lock_user_string(arg1)))
11340             return -TARGET_EFAULT;
11341         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11342         unlock_user(p, arg1, 0);
11343         return ret;
11344 #endif
11345     case TARGET_NR_setuid:
11346         return get_errno(sys_setuid(low2highuid(arg1)));
11347     case TARGET_NR_setgid:
11348         return get_errno(sys_setgid(low2highgid(arg1)));
11349     case TARGET_NR_setfsuid:
11350         return get_errno(setfsuid(arg1));
11351     case TARGET_NR_setfsgid:
11352         return get_errno(setfsgid(arg1));
11353 
11354 #ifdef TARGET_NR_lchown32
11355     case TARGET_NR_lchown32:
11356         if (!(p = lock_user_string(arg1)))
11357             return -TARGET_EFAULT;
11358         ret = get_errno(lchown(p, arg2, arg3));
11359         unlock_user(p, arg1, 0);
11360         return ret;
11361 #endif
11362 #ifdef TARGET_NR_getuid32
11363     case TARGET_NR_getuid32:
11364         return get_errno(getuid());
11365 #endif
11366 
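          /*
           * Alpha getxuid/getxgid return the real ID as the syscall result
           * and the effective ID in register a4.
           */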
11367 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11368     /* Alpha specific */
11369     case TARGET_NR_getxuid:
11370         {
11371             uid_t euid;
11372             euid = geteuid();
11373             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11374         }
11375         return get_errno(getuid());
11376 #endif
11377 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11378     /* Alpha specific */
11379     case TARGET_NR_getxgid:
11380         {
11381             gid_t egid;
11382             egid = getegid();
11383             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11384         }
11385         return get_errno(getgid());
11386 #endif
11387 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11388     /* Alpha specific */
11389     case TARGET_NR_osf_getsysinfo:
11390         ret = -TARGET_EOPNOTSUPP;
11391         switch (arg1) {
11392           case TARGET_GSI_IEEE_FP_CONTROL:
11393             {
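                      /*
                       * Combine the software control word kept in env->swcr
                       * with the live exception status bits held in the
                       * hardware FPCR.
                       */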
11394                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11395                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11396 
11397                 swcr &= ~SWCR_STATUS_MASK;
11398                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11399 
11400                 if (put_user_u64(swcr, arg2))
11401                     return -TARGET_EFAULT;
11402                 ret = 0;
11403             }
11404             break;
11405 
11406           /* case GSI_IEEE_STATE_AT_SIGNAL:
11407              -- Not implemented in linux kernel.
11408              case GSI_UACPROC:
11409              -- Retrieves current unaligned access state; not much used.
11410              case GSI_PROC_TYPE:
11411              -- Retrieves implver information; surely not used.
11412              case GSI_GET_HWRPB:
11413              -- Grabs a copy of the HWRPB; surely not used.
11414           */
11415         }
11416         return ret;
11417 #endif
11418 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11419     /* Alpha specific */
11420     case TARGET_NR_osf_setsysinfo:
11421         ret = -TARGET_EOPNOTSUPP;
11422         switch (arg1) {
11423           case TARGET_SSI_IEEE_FP_CONTROL:
11424             {
11425                 uint64_t swcr, fpcr;
11426 
11427                 if (get_user_u64 (swcr, arg2)) {
11428                     return -TARGET_EFAULT;
11429                 }
11430 
11431                 /*
11432                  * The kernel calls swcr_update_status to update the
11433                  * status bits from the fpcr at every point that it
11434                  * could be queried.  Therefore, we store the status
11435                  * bits only in FPCR.
11436                  */
11437                 ((CPUAlphaState *)cpu_env)->swcr
11438                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11439 
11440                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11441                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11442                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11443                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11444                 ret = 0;
11445             }
11446             break;
11447 
11448           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11449             {
11450                 uint64_t exc, fpcr, fex;
11451 
11452                 if (get_user_u64(exc, arg2)) {
11453                     return -TARGET_EFAULT;
11454                 }
11455                 exc &= SWCR_STATUS_MASK;
11456                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11457 
11458                 /* Old exceptions are not signaled.  */
11459                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11460                 fex = exc & ~fex;
11461                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11462                 fex &= ((CPUArchState *)cpu_env)->swcr;
11463 
11464                 /* Update the hardware fpcr.  */
11465                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11466                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11467 
11468                 if (fex) {
11469                     int si_code = TARGET_FPE_FLTUNK;
11470                     target_siginfo_t info;
11471 
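                            /*
                             * Choose an si_code for the raised exceptions;
                             * later tests take precedence, so INV wins when
                             * several bits are set.
                             */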
11472                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11473                         si_code = TARGET_FPE_FLTUND;
11474                     }
11475                     if (fex & SWCR_TRAP_ENABLE_INE) {
11476                         si_code = TARGET_FPE_FLTRES;
11477                     }
11478                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11479                         si_code = TARGET_FPE_FLTUND;
11480                     }
11481                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11482                         si_code = TARGET_FPE_FLTOVF;
11483                     }
11484                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11485                         si_code = TARGET_FPE_FLTDIV;
11486                     }
11487                     if (fex & SWCR_TRAP_ENABLE_INV) {
11488                         si_code = TARGET_FPE_FLTINV;
11489                     }
11490 
11491                     info.si_signo = SIGFPE;
11492                     info.si_errno = 0;
11493                     info.si_code = si_code;
11494                     info._sifields._sigfault._addr
11495                         = ((CPUArchState *)cpu_env)->pc;
11496                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11497                                  QEMU_SI_FAULT, &info);
11498                 }
11499                 ret = 0;
11500             }
11501             break;
11502 
11503           /* case SSI_NVPAIRS:
11504              -- Used with SSIN_UACPROC to enable unaligned accesses.
11505              case SSI_IEEE_STATE_AT_SIGNAL:
11506              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11507              -- Not implemented in linux kernel
11508           */
11509         }
11510         return ret;
11511 #endif
11512 #ifdef TARGET_NR_osf_sigprocmask
11513     /* Alpha specific.  */
11514     case TARGET_NR_osf_sigprocmask:
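              /*
               * OSF/1 sigprocmask returns the previous mask as the syscall
               * result instead of writing it through a pointer argument.
               */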
11515         {
11516             abi_ulong mask;
11517             int how;
11518             sigset_t set, oldset;
11519 
11520             switch(arg1) {
11521             case TARGET_SIG_BLOCK:
11522                 how = SIG_BLOCK;
11523                 break;
11524             case TARGET_SIG_UNBLOCK:
11525                 how = SIG_UNBLOCK;
11526                 break;
11527             case TARGET_SIG_SETMASK:
11528                 how = SIG_SETMASK;
11529                 break;
11530             default:
11531                 return -TARGET_EINVAL;
11532             }
11533             mask = arg2;
11534             target_to_host_old_sigset(&set, &mask);
11535             ret = do_sigprocmask(how, &set, &oldset);
11536             if (!ret) {
11537                 host_to_target_old_sigset(&mask, &oldset);
11538                 ret = mask;
11539             }
11540         }
11541         return ret;
11542 #endif
11543 
11544 #ifdef TARGET_NR_getgid32
11545     case TARGET_NR_getgid32:
11546         return get_errno(getgid());
11547 #endif
11548 #ifdef TARGET_NR_geteuid32
11549     case TARGET_NR_geteuid32:
11550         return get_errno(geteuid());
11551 #endif
11552 #ifdef TARGET_NR_getegid32
11553     case TARGET_NR_getegid32:
11554         return get_errno(getegid());
11555 #endif
11556 #ifdef TARGET_NR_setreuid32
11557     case TARGET_NR_setreuid32:
11558         return get_errno(setreuid(arg1, arg2));
11559 #endif
11560 #ifdef TARGET_NR_setregid32
11561     case TARGET_NR_setregid32:
11562         return get_errno(setregid(arg1, arg2));
11563 #endif
11564 #ifdef TARGET_NR_getgroups32
11565     case TARGET_NR_getgroups32:
11566         {
11567             int gidsetsize = arg1;
11568             uint32_t *target_grouplist;
11569             gid_t *grouplist;
11570             int i;
11571 
11572             grouplist = alloca(gidsetsize * sizeof(gid_t));
11573             ret = get_errno(getgroups(gidsetsize, grouplist));
11574             if (gidsetsize == 0)
11575                 return ret;
11576             if (!is_error(ret)) {
11577                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11578                 if (!target_grouplist) {
11579                     return -TARGET_EFAULT;
11580                 }
11581                 for (i = 0; i < ret; i++)
11582                     target_grouplist[i] = tswap32(grouplist[i]);
11583                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11584             }
11585         }
11586         return ret;
11587 #endif
11588 #ifdef TARGET_NR_setgroups32
11589     case TARGET_NR_setgroups32:
11590         {
11591             int gidsetsize = arg1;
11592             uint32_t *target_grouplist;
11593             gid_t *grouplist;
11594             int i;
11595 
11596             grouplist = alloca(gidsetsize * sizeof(gid_t));
11597             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11598             if (!target_grouplist) {
11599                 return -TARGET_EFAULT;
11600             }
11601             for (i = 0; i < gidsetsize; i++)
11602                 grouplist[i] = tswap32(target_grouplist[i]);
11603             unlock_user(target_grouplist, arg2, 0);
11604             return get_errno(setgroups(gidsetsize, grouplist));
11605         }
11606 #endif
11607 #ifdef TARGET_NR_fchown32
11608     case TARGET_NR_fchown32:
11609         return get_errno(fchown(arg1, arg2, arg3));
11610 #endif
11611 #ifdef TARGET_NR_setresuid32
11612     case TARGET_NR_setresuid32:
11613         return get_errno(sys_setresuid(arg1, arg2, arg3));
11614 #endif
11615 #ifdef TARGET_NR_getresuid32
11616     case TARGET_NR_getresuid32:
11617         {
11618             uid_t ruid, euid, suid;
11619             ret = get_errno(getresuid(&ruid, &euid, &suid));
11620             if (!is_error(ret)) {
11621                 if (put_user_u32(ruid, arg1)
11622                     || put_user_u32(euid, arg2)
11623                     || put_user_u32(suid, arg3))
11624                     return -TARGET_EFAULT;
11625             }
11626         }
11627         return ret;
11628 #endif
11629 #ifdef TARGET_NR_setresgid32
11630     case TARGET_NR_setresgid32:
11631         return get_errno(sys_setresgid(arg1, arg2, arg3));
11632 #endif
11633 #ifdef TARGET_NR_getresgid32
11634     case TARGET_NR_getresgid32:
11635         {
11636             gid_t rgid, egid, sgid;
11637             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11638             if (!is_error(ret)) {
11639                 if (put_user_u32(rgid, arg1)
11640                     || put_user_u32(egid, arg2)
11641                     || put_user_u32(sgid, arg3))
11642                     return -TARGET_EFAULT;
11643             }
11644         }
11645         return ret;
11646 #endif
11647 #ifdef TARGET_NR_chown32
11648     case TARGET_NR_chown32:
11649         if (!(p = lock_user_string(arg1)))
11650             return -TARGET_EFAULT;
11651         ret = get_errno(chown(p, arg2, arg3));
11652         unlock_user(p, arg1, 0);
11653         return ret;
11654 #endif
11655 #ifdef TARGET_NR_setuid32
11656     case TARGET_NR_setuid32:
11657         return get_errno(sys_setuid(arg1));
11658 #endif
11659 #ifdef TARGET_NR_setgid32
11660     case TARGET_NR_setgid32:
11661         return get_errno(sys_setgid(arg1));
11662 #endif
11663 #ifdef TARGET_NR_setfsuid32
11664     case TARGET_NR_setfsuid32:
11665         return get_errno(setfsuid(arg1));
11666 #endif
11667 #ifdef TARGET_NR_setfsgid32
11668     case TARGET_NR_setfsgid32:
11669         return get_errno(setfsgid(arg1));
11670 #endif
11671 #ifdef TARGET_NR_mincore
11672     case TARGET_NR_mincore:
11673         {
11674             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11675             if (!a) {
11676                 return -TARGET_ENOMEM;
11677             }
11678             p = lock_user_string(arg3);
11679             if (!p) {
11680                 ret = -TARGET_EFAULT;
11681             } else {
11682                 ret = get_errno(mincore(a, arg2, p));
11683                 unlock_user(p, arg3, ret);
11684             }
11685             unlock_user(a, arg1, 0);
11686         }
11687         return ret;
11688 #endif
11689 #ifdef TARGET_NR_arm_fadvise64_64
11690     case TARGET_NR_arm_fadvise64_64:
11691         /* arm_fadvise64_64 looks like fadvise64_64 but
11692          * with different argument order: fd, advice, offset, len
11693          * rather than the usual fd, offset, len, advice.
11694          * Note that offset and len are both 64-bit so appear as
11695          * pairs of 32-bit registers.
11696          */
11697         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11698                             target_offset64(arg5, arg6), arg2);
11699         return -host_to_target_errno(ret);
11700 #endif
11701 
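          /*
           * The fadvise64 variants differ in how the offset/len/advice
           * arguments are laid out and, on 32-bit ABIs, how the 64-bit
           * values are split across register pairs; each case rearranges
           * the arguments before calling posix_fadvise().
           */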
11702 #if TARGET_ABI_BITS == 32
11703 
11704 #ifdef TARGET_NR_fadvise64_64
11705     case TARGET_NR_fadvise64_64:
11706 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11707         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11708         ret = arg2;
11709         arg2 = arg3;
11710         arg3 = arg4;
11711         arg4 = arg5;
11712         arg5 = arg6;
11713         arg6 = ret;
11714 #else
11715         /* 6 args: fd, offset (high, low), len (high, low), advice */
11716         if (regpairs_aligned(cpu_env, num)) {
11717             /* offset is in (3,4), len in (5,6) and advice in 7 */
11718             arg2 = arg3;
11719             arg3 = arg4;
11720             arg4 = arg5;
11721             arg5 = arg6;
11722             arg6 = arg7;
11723         }
11724 #endif
11725         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11726                             target_offset64(arg4, arg5), arg6);
11727         return -host_to_target_errno(ret);
11728 #endif
11729 
11730 #ifdef TARGET_NR_fadvise64
11731     case TARGET_NR_fadvise64:
11732         /* 5 args: fd, offset (high, low), len, advice */
11733         if (regpairs_aligned(cpu_env, num)) {
11734             /* offset is in (3,4), len in 5 and advice in 6 */
11735             arg2 = arg3;
11736             arg3 = arg4;
11737             arg4 = arg5;
11738             arg5 = arg6;
11739         }
11740         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11741         return -host_to_target_errno(ret);
11742 #endif
11743 
11744 #else /* not a 32-bit ABI */
11745 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11746 #ifdef TARGET_NR_fadvise64_64
11747     case TARGET_NR_fadvise64_64:
11748 #endif
11749 #ifdef TARGET_NR_fadvise64
11750     case TARGET_NR_fadvise64:
11751 #endif
11752 #ifdef TARGET_S390X
11753         switch (arg4) {
11754         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11755         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11756         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11757         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11758         default: break;
11759         }
11760 #endif
11761         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11762 #endif
11763 #endif /* end of 64-bit ABI fadvise handling */
11764 
11765 #ifdef TARGET_NR_madvise
11766     case TARGET_NR_madvise:
11767         /* A straight passthrough may not be safe because qemu sometimes
11768            turns private file-backed mappings into anonymous mappings.
11769            This will break MADV_DONTNEED.
11770            This is a hint, so ignoring and returning success is ok.  */
11771         return 0;
11772 #endif
11773 #ifdef TARGET_NR_fcntl64
11774     case TARGET_NR_fcntl64:
11775     {
11776         int cmd;
11777         struct flock64 fl;
11778         from_flock64_fn *copyfrom = copy_from_user_flock64;
11779         to_flock64_fn *copyto = copy_to_user_flock64;
11780 
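              /*
               * Old-ABI ARM uses a differently padded struct flock64, so
               * non-EABI guests need the OABI copy helpers.
               */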
11781 #ifdef TARGET_ARM
11782         if (!((CPUARMState *)cpu_env)->eabi) {
11783             copyfrom = copy_from_user_oabi_flock64;
11784             copyto = copy_to_user_oabi_flock64;
11785         }
11786 #endif
11787 
11788         cmd = target_to_host_fcntl_cmd(arg2);
11789         if (cmd == -TARGET_EINVAL) {
11790             return cmd;
11791         }
11792 
11793         switch(arg2) {
11794         case TARGET_F_GETLK64:
11795             ret = copyfrom(&fl, arg3);
11796             if (ret) {
11797                 break;
11798             }
11799             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11800             if (ret == 0) {
11801                 ret = copyto(arg3, &fl);
11802             }
11803             break;
11804 
11805         case TARGET_F_SETLK64:
11806         case TARGET_F_SETLKW64:
11807             ret = copyfrom(&fl, arg3);
11808             if (ret) {
11809                 break;
11810             }
11811             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11812             break;
11813         default:
11814             ret = do_fcntl(arg1, arg2, arg3);
11815             break;
11816         }
11817         return ret;
11818     }
11819 #endif
11820 #ifdef TARGET_NR_cacheflush
11821     case TARGET_NR_cacheflush:
11822         /* self-modifying code is handled automatically, so nothing needed */
11823         return 0;
11824 #endif
11825 #ifdef TARGET_NR_getpagesize
11826     case TARGET_NR_getpagesize:
11827         return TARGET_PAGE_SIZE;
11828 #endif
11829     case TARGET_NR_gettid:
11830         return get_errno(sys_gettid());
11831 #ifdef TARGET_NR_readahead
11832     case TARGET_NR_readahead:
11833 #if TARGET_ABI_BITS == 32
11834         if (regpairs_aligned(cpu_env, num)) {
11835             arg2 = arg3;
11836             arg3 = arg4;
11837             arg4 = arg5;
11838         }
11839         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11840 #else
11841         ret = get_errno(readahead(arg1, arg2, arg3));
11842 #endif
11843         return ret;
11844 #endif
11845 #ifdef CONFIG_ATTR
11846 #ifdef TARGET_NR_setxattr
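          /*
           * The *xattr syscalls only move opaque value buffers and
           * NUL-terminated name strings, so each case locks the guest
           * buffers and calls the corresponding host function directly.
           */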
11847     case TARGET_NR_listxattr:
11848     case TARGET_NR_llistxattr:
11849     {
11850         void *p, *b = 0;
11851         if (arg2) {
11852             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11853             if (!b) {
11854                 return -TARGET_EFAULT;
11855             }
11856         }
11857         p = lock_user_string(arg1);
11858         if (p) {
11859             if (num == TARGET_NR_listxattr) {
11860                 ret = get_errno(listxattr(p, b, arg3));
11861             } else {
11862                 ret = get_errno(llistxattr(p, b, arg3));
11863             }
11864         } else {
11865             ret = -TARGET_EFAULT;
11866         }
11867         unlock_user(p, arg1, 0);
11868         unlock_user(b, arg2, arg3);
11869         return ret;
11870     }
11871     case TARGET_NR_flistxattr:
11872     {
11873         void *b = 0;
11874         if (arg2) {
11875             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11876             if (!b) {
11877                 return -TARGET_EFAULT;
11878             }
11879         }
11880         ret = get_errno(flistxattr(arg1, b, arg3));
11881         unlock_user(b, arg2, arg3);
11882         return ret;
11883     }
11884     case TARGET_NR_setxattr:
11885     case TARGET_NR_lsetxattr:
11886         {
11887             void *p, *n, *v = 0;
11888             if (arg3) {
11889                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11890                 if (!v) {
11891                     return -TARGET_EFAULT;
11892                 }
11893             }
11894             p = lock_user_string(arg1);
11895             n = lock_user_string(arg2);
11896             if (p && n) {
11897                 if (num == TARGET_NR_setxattr) {
11898                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11899                 } else {
11900                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11901                 }
11902             } else {
11903                 ret = -TARGET_EFAULT;
11904             }
11905             unlock_user(p, arg1, 0);
11906             unlock_user(n, arg2, 0);
11907             unlock_user(v, arg3, 0);
11908         }
11909         return ret;
11910     case TARGET_NR_fsetxattr:
11911         {
11912             void *n, *v = 0;
11913             if (arg3) {
11914                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11915                 if (!v) {
11916                     return -TARGET_EFAULT;
11917                 }
11918             }
11919             n = lock_user_string(arg2);
11920             if (n) {
11921                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11922             } else {
11923                 ret = -TARGET_EFAULT;
11924             }
11925             unlock_user(n, arg2, 0);
11926             unlock_user(v, arg3, 0);
11927         }
11928         return ret;
11929     case TARGET_NR_getxattr:
11930     case TARGET_NR_lgetxattr:
11931         {
11932             void *p, *n, *v = 0;
11933             if (arg3) {
11934                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11935                 if (!v) {
11936                     return -TARGET_EFAULT;
11937                 }
11938             }
11939             p = lock_user_string(arg1);
11940             n = lock_user_string(arg2);
11941             if (p && n) {
11942                 if (num == TARGET_NR_getxattr) {
11943                     ret = get_errno(getxattr(p, n, v, arg4));
11944                 } else {
11945                     ret = get_errno(lgetxattr(p, n, v, arg4));
11946                 }
11947             } else {
11948                 ret = -TARGET_EFAULT;
11949             }
11950             unlock_user(p, arg1, 0);
11951             unlock_user(n, arg2, 0);
11952             unlock_user(v, arg3, arg4);
11953         }
11954         return ret;
11955     case TARGET_NR_fgetxattr:
11956         {
11957             void *n, *v = 0;
11958             if (arg3) {
11959                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11960                 if (!v) {
11961                     return -TARGET_EFAULT;
11962                 }
11963             }
11964             n = lock_user_string(arg2);
11965             if (n) {
11966                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11967             } else {
11968                 ret = -TARGET_EFAULT;
11969             }
11970             unlock_user(n, arg2, 0);
11971             unlock_user(v, arg3, arg4);
11972         }
11973         return ret;
11974     case TARGET_NR_removexattr:
11975     case TARGET_NR_lremovexattr:
11976         {
11977             void *p, *n;
11978             p = lock_user_string(arg1);
11979             n = lock_user_string(arg2);
11980             if (p && n) {
11981                 if (num == TARGET_NR_removexattr) {
11982                     ret = get_errno(removexattr(p, n));
11983                 } else {
11984                     ret = get_errno(lremovexattr(p, n));
11985                 }
11986             } else {
11987                 ret = -TARGET_EFAULT;
11988             }
11989             unlock_user(p, arg1, 0);
11990             unlock_user(n, arg2, 0);
11991         }
11992         return ret;
11993     case TARGET_NR_fremovexattr:
11994         {
11995             void *n;
11996             n = lock_user_string(arg2);
11997             if (n) {
11998                 ret = get_errno(fremovexattr(arg1, n));
11999             } else {
12000                 ret = -TARGET_EFAULT;
12001             }
12002             unlock_user(n, arg2, 0);
12003         }
12004         return ret;
12005 #endif
12006 #endif /* CONFIG_ATTR */
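          /*
           * set/get_thread_area store the TLS pointer wherever the target
           * architecture keeps it: CP0 UserLocal on MIPS, PR_PID on CRIS,
           * a GDT entry on 32-bit x86 and the TaskState on m68k.
           */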
12007 #ifdef TARGET_NR_set_thread_area
12008     case TARGET_NR_set_thread_area:
12009 #if defined(TARGET_MIPS)
12010       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12011       return 0;
12012 #elif defined(TARGET_CRIS)
12013       if (arg1 & 0xff)
12014           ret = -TARGET_EINVAL;
12015       else {
12016           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12017           ret = 0;
12018       }
12019       return ret;
12020 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12021       return do_set_thread_area(cpu_env, arg1);
12022 #elif defined(TARGET_M68K)
12023       {
12024           TaskState *ts = cpu->opaque;
12025           ts->tp_value = arg1;
12026           return 0;
12027       }
12028 #else
12029       return -TARGET_ENOSYS;
12030 #endif
12031 #endif
12032 #ifdef TARGET_NR_get_thread_area
12033     case TARGET_NR_get_thread_area:
12034 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12035         return do_get_thread_area(cpu_env, arg1);
12036 #elif defined(TARGET_M68K)
12037         {
12038             TaskState *ts = cpu->opaque;
12039             return ts->tp_value;
12040         }
12041 #else
12042         return -TARGET_ENOSYS;
12043 #endif
12044 #endif
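    /*
     * The value stored by set_thread_area above is what the guest later
     * reads back as its TLS pointer, e.g. via "rdhwr $reg, $29" on MIPS,
     * which the translator services from CP0_UserLocal.
     */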
12045 #ifdef TARGET_NR_getdomainname
12046     case TARGET_NR_getdomainname:
12047         return -TARGET_ENOSYS;
12048 #endif
12049 
12050 #ifdef TARGET_NR_clock_settime
12051     case TARGET_NR_clock_settime:
12052     {
12053         struct timespec ts;
12054 
12055         ret = target_to_host_timespec(&ts, arg2);
12056         if (!is_error(ret)) {
12057             ret = get_errno(clock_settime(arg1, &ts));
12058         }
12059         return ret;
12060     }
12061 #endif
12062 #ifdef TARGET_NR_clock_settime64
12063     case TARGET_NR_clock_settime64:
12064     {
12065         struct timespec ts;
12066 
12067         ret = target_to_host_timespec64(&ts, arg2);
12068         if (!is_error(ret)) {
12069             ret = get_errno(clock_settime(arg1, &ts));
12070         }
12071         return ret;
12072     }
12073 #endif
12074 #ifdef TARGET_NR_clock_gettime
12075     case TARGET_NR_clock_gettime:
12076     {
12077         struct timespec ts;
12078         ret = get_errno(clock_gettime(arg1, &ts));
12079         if (!is_error(ret)) {
12080             ret = host_to_target_timespec(arg2, &ts);
12081         }
12082         return ret;
12083     }
12084 #endif
12085 #ifdef TARGET_NR_clock_gettime64
12086     case TARGET_NR_clock_gettime64:
12087     {
12088         struct timespec ts;
12089         ret = get_errno(clock_gettime(arg1, &ts));
12090         if (!is_error(ret)) {
12091             ret = host_to_target_timespec64(arg2, &ts);
12092         }
12093         return ret;
12094     }
12095 #endif
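    /*
     * The *_time64 clock variants exist for 32-bit guests: their guest-side
     * argument is a struct target__kernel_timespec with a 64-bit tv_sec,
     * converted by target_to_host_timespec64()/host_to_target_timespec64(),
     * so these calls remain usable past 2038.
     */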
12096 #ifdef TARGET_NR_clock_getres
12097     case TARGET_NR_clock_getres:
12098     {
12099         struct timespec ts;
12100         ret = get_errno(clock_getres(arg1, &ts));
12101         if (!is_error(ret) && arg2 && host_to_target_timespec(arg2, &ts)) {
12102             return -TARGET_EFAULT;
12103         }
12104         return ret;
12105     }
12106 #endif
12107 #ifdef TARGET_NR_clock_getres_time64
12108     case TARGET_NR_clock_getres_time64:
12109     {
12110         struct timespec ts;
12111         ret = get_errno(clock_getres(arg1, &ts));
12112         if (!is_error(ret) && arg2 && host_to_target_timespec64(arg2, &ts)) {
12113             return -TARGET_EFAULT;
12114         }
12115         return ret;
12116     }
12117 #endif
12118 #ifdef TARGET_NR_clock_nanosleep
12119     case TARGET_NR_clock_nanosleep:
12120     {
12121         struct timespec ts;
12122         if (target_to_host_timespec(&ts, arg3)) {
12123             return -TARGET_EFAULT;
12124         }
12125         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12126                                              &ts, arg4 ? &ts : NULL));
12127         /*
12128          * If the call is interrupted by a signal handler, it fails with
12129          * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12130          * the remaining unslept time is written back to arg4.
12131          */
12132         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12133             host_to_target_timespec(arg4, &ts)) {
12134               return -TARGET_EFAULT;
12135         }
12136 
12137         return ret;
12138     }
12139 #endif
12140 #ifdef TARGET_NR_clock_nanosleep_time64
12141     case TARGET_NR_clock_nanosleep_time64:
12142     {
12143         struct timespec ts;
12144 
12145         if (target_to_host_timespec64(&ts, arg3)) {
12146             return -TARGET_EFAULT;
12147         }
12148 
12149         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12150                                              &ts, arg4 ? &ts : NULL));
12151 
12152         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12153             host_to_target_timespec64(arg4, &ts)) {
12154             return -TARGET_EFAULT;
12155         }
12156         return ret;
12157     }
12158 #endif
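    /*
     * Guest-side sketch (illustrative only) of why the EINTR paths above
     * write the remainder back: a guest doing a relative sleep typically
     * loops on it, e.g.
     *
     *     struct timespec req = { .tv_sec = 1 }, rem;
     *     while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) == EINTR) {
     *         req = rem;
     *     }
     */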
12159 
12160 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12161     case TARGET_NR_set_tid_address:
12162         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12163 #endif
12164 
12165     case TARGET_NR_tkill:
12166         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12167 
12168     case TARGET_NR_tgkill:
12169         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12170                          target_to_host_signal(arg3)));
12171 
12172 #ifdef TARGET_NR_set_robust_list
12173     case TARGET_NR_set_robust_list:
12174     case TARGET_NR_get_robust_list:
12175         /* The ABI for supporting robust futexes has userspace pass
12176          * the kernel a pointer to a linked list which is updated by
12177          * userspace after the syscall; the list is walked by the kernel
12178          * when the thread exits. Since the linked list in QEMU guest
12179          * memory isn't a valid linked list for the host and we have
12180          * no way to reliably intercept the thread-death event, we can't
12181          * support these. Silently return ENOSYS so that guest userspace
12182          * falls back to a non-robust futex implementation (which should
12183          * be OK except in the corner case of the guest crashing while
12184          * holding a mutex that is shared with another process via
12185          * shared memory).
12186          */
12187         return -TARGET_ENOSYS;
12188 #endif
12189 
12190 #if defined(TARGET_NR_utimensat)
12191     case TARGET_NR_utimensat:
12192         {
12193             struct timespec *tsp, ts[2];
12194             if (!arg3) {
12195                 tsp = NULL;
12196             } else {
12197                 if (target_to_host_timespec(ts, arg3)) {
12198                     return -TARGET_EFAULT;
12199                 }
12200                 if (target_to_host_timespec(ts + 1, arg3 +
12201                                             sizeof(struct target_timespec))) {
12202                     return -TARGET_EFAULT;
12203                 }
12204                 tsp = ts;
12205             }
12206             if (!arg2) {
12207                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12208             } else {
12209                 if (!(p = lock_user_string(arg2))) {
12210                     return -TARGET_EFAULT;
12211                 }
12212                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12213                 unlock_user(p, arg2, 0);
12214             }
12215         }
12216         return ret;
12217 #endif
12218 #ifdef TARGET_NR_utimensat_time64
12219     case TARGET_NR_utimensat_time64:
12220         {
12221             struct timespec *tsp, ts[2];
12222             if (!arg3) {
12223                 tsp = NULL;
12224             } else {
12225                 if (target_to_host_timespec64(ts, arg3)) {
12226                     return -TARGET_EFAULT;
12227                 }
12228                 if (target_to_host_timespec64(ts + 1, arg3 +
12229                                      sizeof(struct target__kernel_timespec))) {
12230                     return -TARGET_EFAULT;
12231                 }
12232                 tsp = ts;
12233             }
12234             if (!arg2) {
12235                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12236             } else {
12237                 p = lock_user_string(arg2);
12238                 if (!p) {
12239                     return -TARGET_EFAULT;
12240                 }
12241                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12242                 unlock_user(p, arg2, 0);
12243             }
12244         }
12245         return ret;
12246 #endif
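    /*
     * For both utimensat variants a NULL tsp (guest arg3 == 0) asks the
     * kernel to set both timestamps to the current time, and special
     * tv_nsec values pass through unchanged, so a guest can do e.g.
     * (illustrative only):
     *
     *     struct timespec ts[2] = { { .tv_nsec = UTIME_NOW },
     *                               { .tv_nsec = UTIME_OMIT } };
     *     utimensat(dirfd, "file", ts, 0);   // touch atime, keep mtime
     */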
12247 #ifdef TARGET_NR_futex
12248     case TARGET_NR_futex:
12249         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12250 #endif
12251 #ifdef TARGET_NR_futex_time64
12252     case TARGET_NR_futex_time64:
12253         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12254 #endif
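    /*
     * do_futex() and do_futex_time64() differ only in how the optional
     * timeout is read from guest memory; the time64 layout is roughly the
     * kernel UAPI one:
     *
     *     struct __kernel_timespec { long long tv_sec; long long tv_nsec; };
     */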
12255 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12256     case TARGET_NR_inotify_init:
12257         ret = get_errno(sys_inotify_init());
12258         if (ret >= 0) {
12259             fd_trans_register(ret, &target_inotify_trans);
12260         }
12261         return ret;
12262 #endif
12263 #ifdef CONFIG_INOTIFY1
12264 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12265     case TARGET_NR_inotify_init1:
12266         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12267                                           fcntl_flags_tbl)));
12268         if (ret >= 0) {
12269             fd_trans_register(ret, &target_inotify_trans);
12270         }
12271         return ret;
12272 #endif
12273 #endif
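    /*
     * fd_trans_register() on the inotify descriptors above installs a
     * read-side translator so that the struct inotify_event records a guest
     * reads come back in target byte order.
     */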
12274 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12275     case TARGET_NR_inotify_add_watch:
12276         p = lock_user_string(arg2);
12277         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12278         unlock_user(p, arg2, 0);
12279         return ret;
12280 #endif
12281 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12282     case TARGET_NR_inotify_rm_watch:
12283         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12284 #endif
12285 
12286 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12287     case TARGET_NR_mq_open:
12288         {
12289             struct mq_attr posix_mq_attr;
12290             struct mq_attr *pposix_mq_attr;
12291             int host_flags;
12292 
12293             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12294             pposix_mq_attr = NULL;
12295             if (arg4) {
12296                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12297                     return -TARGET_EFAULT;
12298                 }
12299                 pposix_mq_attr = &posix_mq_attr;
12300             }
12301             p = lock_user_string(arg1 - 1);
12302             if (!p) {
12303                 return -TARGET_EFAULT;
12304             }
12305             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12306             unlock_user(p, arg1, 0);
12307         }
12308         return ret;
12309 
12310     case TARGET_NR_mq_unlink:
12311         p = lock_user_string(arg1 - 1);
12312         if (!p) {
12313             return -TARGET_EFAULT;
12314         }
12315         ret = get_errno(mq_unlink(p));
12316         unlock_user(p, arg1, 0);
12317         return ret;
12318 
12319 #ifdef TARGET_NR_mq_timedsend
12320     case TARGET_NR_mq_timedsend:
12321         {
12322             struct timespec ts;
12323 
12324             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12325             if (arg5 != 0) {
12326                 if (target_to_host_timespec(&ts, arg5)) {
12327                     return -TARGET_EFAULT;
12328                 }
12329                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12330                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12331                     return -TARGET_EFAULT;
12332                 }
12333             } else {
12334                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12335             }
12336             unlock_user(p, arg2, arg3);
12337         }
12338         return ret;
12339 #endif
12340 #ifdef TARGET_NR_mq_timedsend_time64
12341     case TARGET_NR_mq_timedsend_time64:
12342         {
12343             struct timespec ts;
12344 
12345             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12346             if (arg5 != 0) {
12347                 if (target_to_host_timespec64(&ts, arg5)) {
12348                     return -TARGET_EFAULT;
12349                 }
12350                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12351                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12352                     return -TARGET_EFAULT;
12353                 }
12354             } else {
12355                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12356             }
12357             unlock_user(p, arg2, arg3);
12358         }
12359         return ret;
12360 #endif
12361 
12362 #ifdef TARGET_NR_mq_timedreceive
12363     case TARGET_NR_mq_timedreceive:
12364         {
12365             struct timespec ts;
12366             unsigned int prio;
12367 
12368             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12369             if (arg5 != 0) {
12370                 if (target_to_host_timespec(&ts, arg5)) {
12371                     return -TARGET_EFAULT;
12372                 }
12373                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12374                                                      &prio, &ts));
12375                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12376                     return -TARGET_EFAULT;
12377                 }
12378             } else {
12379                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12380                                                      &prio, NULL));
12381             }
12382             unlock_user(p, arg2, arg3);
12383             if (arg4 != 0)
12384                 put_user_u32(prio, arg4);
12385         }
12386         return ret;
12387 #endif
12388 #ifdef TARGET_NR_mq_timedreceive_time64
12389     case TARGET_NR_mq_timedreceive_time64:
12390         {
12391             struct timespec ts;
12392             unsigned int prio;
12393 
12394             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12395             if (arg5 != 0) {
12396                 if (target_to_host_timespec64(&ts, arg5)) {
12397                     return -TARGET_EFAULT;
12398                 }
12399                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12400                                                      &prio, &ts));
12401                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12402                     return -TARGET_EFAULT;
12403                 }
12404             } else {
12405                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12406                                                      &prio, NULL));
12407             }
12408             unlock_user(p, arg2, arg3);
12409             if (arg4 != 0) {
12410                 put_user_u32(prio, arg4);
12411             }
12412         }
12413         return ret;
12414 #endif
12415 
12416     /* Not implemented for now... */
12417 /*     case TARGET_NR_mq_notify: */
12418 /*         break; */
12419 
12420     case TARGET_NR_mq_getsetattr:
12421         {
12422             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12423             ret = 0;
12424             if (arg2 != 0) {
12425                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12426                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12427                                            &posix_mq_attr_out));
12428             } else if (arg3 != 0) {
12429                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12430             }
12431             if (ret == 0 && arg3 != 0) {
12432                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12433             }
12434         }
12435         return ret;
12436 #endif
12437 
12438 #ifdef CONFIG_SPLICE
12439 #ifdef TARGET_NR_tee
12440     case TARGET_NR_tee:
12441         {
12442             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12443         }
12444         return ret;
12445 #endif
12446 #ifdef TARGET_NR_splice
12447     case TARGET_NR_splice:
12448         {
12449             loff_t loff_in, loff_out;
12450             loff_t *ploff_in = NULL, *ploff_out = NULL;
12451             if (arg2) {
12452                 if (get_user_u64(loff_in, arg2)) {
12453                     return -TARGET_EFAULT;
12454                 }
12455                 ploff_in = &loff_in;
12456             }
12457             if (arg4) {
12458                 if (get_user_u64(loff_out, arg4)) {
12459                     return -TARGET_EFAULT;
12460                 }
12461                 ploff_out = &loff_out;
12462             }
12463             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12464             if (arg2) {
12465                 if (put_user_u64(loff_in, arg2)) {
12466                     return -TARGET_EFAULT;
12467                 }
12468             }
12469             if (arg4) {
12470                 if (put_user_u64(loff_out, arg4)) {
12471                     return -TARGET_EFAULT;
12472                 }
12473             }
12474         }
12475         return ret;
12476 #endif
12477 #ifdef TARGET_NR_vmsplice
12478     case TARGET_NR_vmsplice:
12479         {
12480             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12481             if (vec != NULL) {
12482                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12483                 unlock_iovec(vec, arg2, arg3, 0);
12484             } else {
12485                 ret = -host_to_target_errno(errno);
12486             }
12487         }
12488         return ret;
12489 #endif
12490 #endif /* CONFIG_SPLICE */
12491 #ifdef CONFIG_EVENTFD
12492 #if defined(TARGET_NR_eventfd)
12493     case TARGET_NR_eventfd:
12494         ret = get_errno(eventfd(arg1, 0));
12495         if (ret >= 0) {
12496             fd_trans_register(ret, &target_eventfd_trans);
12497         }
12498         return ret;
12499 #endif
12500 #if defined(TARGET_NR_eventfd2)
12501     case TARGET_NR_eventfd2:
12502     {
12503         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12504         if (arg2 & TARGET_O_NONBLOCK) {
12505             host_flags |= O_NONBLOCK;
12506         }
12507         if (arg2 & TARGET_O_CLOEXEC) {
12508             host_flags |= O_CLOEXEC;
12509         }
12510         ret = get_errno(eventfd(arg1, host_flags));
12511         if (ret >= 0) {
12512             fd_trans_register(ret, &target_eventfd_trans);
12513         }
12514         return ret;
12515     }
12516 #endif
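    /*
     * As with inotify above, the registered eventfd translator converts the
     * 8-byte counter value a guest reads to target byte order.
     */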
12517 #endif /* CONFIG_EVENTFD */
12518 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12519     case TARGET_NR_fallocate:
12520 #if TARGET_ABI_BITS == 32
12521         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12522                                   target_offset64(arg5, arg6)));
12523 #else
12524         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12525 #endif
12526         return ret;
12527 #endif
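    /*
     * On 32-bit ABIs the 64-bit offset and length arrive split across two
     * registers; target_offset64() stitches them back together, roughly
     * (word order depends on the target endianness/ABI):
     *
     *     uint64_t off = ((uint64_t)high_word << 32) | low_word;
     */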
12528 #if defined(CONFIG_SYNC_FILE_RANGE)
12529 #if defined(TARGET_NR_sync_file_range)
12530     case TARGET_NR_sync_file_range:
12531 #if TARGET_ABI_BITS == 32
12532 #if defined(TARGET_MIPS)
12533         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12534                                         target_offset64(arg5, arg6), arg7));
12535 #else
12536         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12537                                         target_offset64(arg4, arg5), arg6));
12538 #endif /* !TARGET_MIPS */
12539 #else
12540         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12541 #endif
12542         return ret;
12543 #endif
12544 #if defined(TARGET_NR_sync_file_range2) || \
12545     defined(TARGET_NR_arm_sync_file_range)
12546 #if defined(TARGET_NR_sync_file_range2)
12547     case TARGET_NR_sync_file_range2:
12548 #endif
12549 #if defined(TARGET_NR_arm_sync_file_range)
12550     case TARGET_NR_arm_sync_file_range:
12551 #endif
12552         /* This is like sync_file_range but the arguments are reordered */
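        /*
         * The reordering (flags in arg2, offsets last) keeps the 64-bit
         * offsets in aligned register pairs on 32-bit EABI targets such as
         * Arm, which is why these separate syscall numbers exist.
         */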
12553 #if TARGET_ABI_BITS == 32
12554         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12555                                         target_offset64(arg5, arg6), arg2));
12556 #else
12557         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12558 #endif
12559         return ret;
12560 #endif
12561 #endif
12562 #if defined(TARGET_NR_signalfd4)
12563     case TARGET_NR_signalfd4:
12564         return do_signalfd4(arg1, arg2, arg4);
12565 #endif
12566 #if defined(TARGET_NR_signalfd)
12567     case TARGET_NR_signalfd:
12568         return do_signalfd4(arg1, arg2, 0);
12569 #endif
12570 #if defined(CONFIG_EPOLL)
12571 #if defined(TARGET_NR_epoll_create)
12572     case TARGET_NR_epoll_create:
12573         return get_errno(epoll_create(arg1));
12574 #endif
12575 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12576     case TARGET_NR_epoll_create1:
12577         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12578 #endif
12579 #if defined(TARGET_NR_epoll_ctl)
12580     case TARGET_NR_epoll_ctl:
12581     {
12582         struct epoll_event ep;
12583         struct epoll_event *epp = 0;
12584         if (arg4) {
12585             if (arg2 != EPOLL_CTL_DEL) {
12586                 struct target_epoll_event *target_ep;
12587                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12588                     return -TARGET_EFAULT;
12589                 }
12590                 ep.events = tswap32(target_ep->events);
12591                 /*
12592                  * The epoll_data_t union is just opaque data to the kernel,
12593                  * so we transfer all 64 bits across and need not worry what
12594                  * actual data type it is.
12595                  */
12596                 ep.data.u64 = tswap64(target_ep->data.u64);
12597                 unlock_user_struct(target_ep, arg4, 0);
12598             }
12599             /*
12600              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required
12601              * a non-null pointer even though the event argument is
12602              * ignored, so always pass &ep when arg4 is non-zero.
12603              */
12604             epp = &ep;
12605         }
12606         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12607     }
12608 #endif
12609 
12610 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12611 #if defined(TARGET_NR_epoll_wait)
12612     case TARGET_NR_epoll_wait:
12613 #endif
12614 #if defined(TARGET_NR_epoll_pwait)
12615     case TARGET_NR_epoll_pwait:
12616 #endif
12617     {
12618         struct target_epoll_event *target_ep;
12619         struct epoll_event *ep;
12620         int epfd = arg1;
12621         int maxevents = arg3;
12622         int timeout = arg4;
12623 
12624         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12625             return -TARGET_EINVAL;
12626         }
12627 
12628         target_ep = lock_user(VERIFY_WRITE, arg2,
12629                               maxevents * sizeof(struct target_epoll_event), 1);
12630         if (!target_ep) {
12631             return -TARGET_EFAULT;
12632         }
12633 
12634         ep = g_try_new(struct epoll_event, maxevents);
12635         if (!ep) {
12636             unlock_user(target_ep, arg2, 0);
12637             return -TARGET_ENOMEM;
12638         }
12639 
12640         switch (num) {
12641 #if defined(TARGET_NR_epoll_pwait)
12642         case TARGET_NR_epoll_pwait:
12643         {
12644             target_sigset_t *target_set;
12645             sigset_t _set, *set = &_set;
12646 
12647             if (arg5) {
12648                 if (arg6 != sizeof(target_sigset_t)) {
12649                     ret = -TARGET_EINVAL;
12650                     break;
12651                 }
12652 
12653                 target_set = lock_user(VERIFY_READ, arg5,
12654                                        sizeof(target_sigset_t), 1);
12655                 if (!target_set) {
12656                     ret = -TARGET_EFAULT;
12657                     break;
12658                 }
12659                 target_to_host_sigset(set, target_set);
12660                 unlock_user(target_set, arg5, 0);
12661             } else {
12662                 set = NULL;
12663             }
12664 
12665             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12666                                              set, SIGSET_T_SIZE));
12667             break;
12668         }
12669 #endif
12670 #if defined(TARGET_NR_epoll_wait)
12671         case TARGET_NR_epoll_wait:
12672             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12673                                              NULL, 0));
12674             break;
12675 #endif
12676         default:
12677             ret = -TARGET_ENOSYS;
12678         }
12679         if (!is_error(ret)) {
12680             int i;
12681             for (i = 0; i < ret; i++) {
12682                 target_ep[i].events = tswap32(ep[i].events);
12683                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12684             }
12685             unlock_user(target_ep, arg2,
12686                         ret * sizeof(struct target_epoll_event));
12687         } else {
12688             unlock_user(target_ep, arg2, 0);
12689         }
12690         g_free(ep);
12691         return ret;
12692     }
12693 #endif
12694 #endif
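    /*
     * The returned events are copied out element by element because
     * struct target_epoll_event and the host struct epoll_event can differ
     * in packing and byte order, so a plain memcpy would be wrong for some
     * guest/host combinations.
     */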
12695 #ifdef TARGET_NR_prlimit64
12696     case TARGET_NR_prlimit64:
12697     {
12698         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12699         struct target_rlimit64 *target_rnew, *target_rold;
12700         struct host_rlimit64 rnew, rold, *rnewp = 0;
12701         int resource = target_to_host_resource(arg2);
12702 
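        /*
         * The new limit is not forwarded for RLIMIT_AS, RLIMIT_DATA and
         * RLIMIT_STACK (see the condition below), presumably because such a
         * limit would also constrain QEMU's own memory use and break the
         * emulation; the old limit is still reported back via arg4.
         */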
12703         if (arg3 && (resource != RLIMIT_AS &&
12704                      resource != RLIMIT_DATA &&
12705                      resource != RLIMIT_STACK)) {
12706             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12707                 return -TARGET_EFAULT;
12708             }
12709             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12710             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12711             unlock_user_struct(target_rnew, arg3, 0);
12712             rnewp = &rnew;
12713         }
12714 
12715         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12716         if (!is_error(ret) && arg4) {
12717             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12718                 return -TARGET_EFAULT;
12719             }
12720             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12721             target_rold->rlim_max = tswap64(rold.rlim_max);
12722             unlock_user_struct(target_rold, arg4, 1);
12723         }
12724         return ret;
12725     }
12726 #endif
12727 #ifdef TARGET_NR_gethostname
12728     case TARGET_NR_gethostname:
12729     {
12730         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12731         if (name) {
12732             ret = get_errno(gethostname(name, arg2));
12733             unlock_user(name, arg1, arg2);
12734         } else {
12735             ret = -TARGET_EFAULT;
12736         }
12737         return ret;
12738     }
12739 #endif
12740 #ifdef TARGET_NR_atomic_cmpxchg_32
12741     case TARGET_NR_atomic_cmpxchg_32:
12742     {
12743         /* should use start_exclusive from main.c */
12744         abi_ulong mem_value;
12745         if (get_user_u32(mem_value, arg6)) {
12746             target_siginfo_t info;
12747             info.si_signo = SIGSEGV;
12748             info.si_errno = 0;
12749             info.si_code = TARGET_SEGV_MAPERR;
12750             info._sifields._sigfault._addr = arg6;
12751             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12752                          QEMU_SI_FAULT, &info);
12753             ret = 0xdeadbeef;
12754             return ret;
12755         }
12756         if (mem_value == arg2)
12757             put_user_u32(arg1, arg6);
12758         return mem_value;
12759     }
12760 #endif
12761 #ifdef TARGET_NR_atomic_barrier
12762     case TARGET_NR_atomic_barrier:
12763         /* Like the kernel implementation and the QEMU Arm barrier,
12764            treat this as a no-op. */
12765         return 0;
12766 #endif
12767 
12768 #ifdef TARGET_NR_timer_create
12769     case TARGET_NR_timer_create:
12770     {
12771         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12772 
12773         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12774 
12775         int clkid = arg1;
12776         int timer_index = next_free_host_timer();
12777 
12778         if (timer_index < 0) {
12779             ret = -TARGET_EAGAIN;
12780         } else {
12781             timer_t *phtimer = g_posix_timers + timer_index;
12782 
12783             if (arg2) {
12784                 phost_sevp = &host_sevp;
12785                 ret = target_to_host_sigevent(phost_sevp, arg2);
12786                 if (ret != 0) {
12787                     return ret;
12788                 }
12789             }
12790 
12791             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12792             if (ret) {
12793                 phtimer = NULL;
12794             } else {
12795                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12796                     return -TARGET_EFAULT;
12797                 }
12798             }
12799         }
12800         return ret;
12801     }
12802 #endif
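    /*
     * The timer id handed back to the guest above is TIMER_MAGIC | index;
     * the timer_* cases below recover it with get_timer_id(), which
     * essentially does (sketch):
     *
     *     if ((id & TIMER_MAGIC_MASK) != TIMER_MAGIC) return -TARGET_EINVAL;
     *     return id & 0xffff;   // index into g_posix_timers[]
     */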
12803 
12804 #ifdef TARGET_NR_timer_settime
12805     case TARGET_NR_timer_settime:
12806     {
12807         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12808          * struct itimerspec *old_value */
12809         target_timer_t timerid = get_timer_id(arg1);
12810 
12811         if (timerid < 0) {
12812             ret = timerid;
12813         } else if (arg3 == 0) {
12814             ret = -TARGET_EINVAL;
12815         } else {
12816             timer_t htimer = g_posix_timers[timerid];
12817             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12818 
12819             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12820                 return -TARGET_EFAULT;
12821             }
12822             ret = get_errno(
12823                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12824             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12825                 return -TARGET_EFAULT;
12826             }
12827         }
12828         return ret;
12829     }
12830 #endif
12831 
12832 #ifdef TARGET_NR_timer_settime64
12833     case TARGET_NR_timer_settime64:
12834     {
12835         target_timer_t timerid = get_timer_id(arg1);
12836 
12837         if (timerid < 0) {
12838             ret = timerid;
12839         } else if (arg3 == 0) {
12840             ret = -TARGET_EINVAL;
12841         } else {
12842             timer_t htimer = g_posix_timers[timerid];
12843             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12844 
12845             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12846                 return -TARGET_EFAULT;
12847             }
12848             ret = get_errno(
12849                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12850             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12851                 return -TARGET_EFAULT;
12852             }
12853         }
12854         return ret;
12855     }
12856 #endif
12857 
12858 #ifdef TARGET_NR_timer_gettime
12859     case TARGET_NR_timer_gettime:
12860     {
12861         /* args: timer_t timerid, struct itimerspec *curr_value */
12862         target_timer_t timerid = get_timer_id(arg1);
12863 
12864         if (timerid < 0) {
12865             ret = timerid;
12866         } else if (!arg2) {
12867             ret = -TARGET_EFAULT;
12868         } else {
12869             timer_t htimer = g_posix_timers[timerid];
12870             struct itimerspec hspec;
12871             ret = get_errno(timer_gettime(htimer, &hspec));
12872 
12873             if (host_to_target_itimerspec(arg2, &hspec)) {
12874                 ret = -TARGET_EFAULT;
12875             }
12876         }
12877         return ret;
12878     }
12879 #endif
12880 
12881 #ifdef TARGET_NR_timer_gettime64
12882     case TARGET_NR_timer_gettime64:
12883     {
12884         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12885         target_timer_t timerid = get_timer_id(arg1);
12886 
12887         if (timerid < 0) {
12888             ret = timerid;
12889         } else if (!arg2) {
12890             ret = -TARGET_EFAULT;
12891         } else {
12892             timer_t htimer = g_posix_timers[timerid];
12893             struct itimerspec hspec;
12894             ret = get_errno(timer_gettime(htimer, &hspec));
12895 
12896             if (host_to_target_itimerspec64(arg2, &hspec)) {
12897                 ret = -TARGET_EFAULT;
12898             }
12899         }
12900         return ret;
12901     }
12902 #endif
12903 
12904 #ifdef TARGET_NR_timer_getoverrun
12905     case TARGET_NR_timer_getoverrun:
12906     {
12907         /* args: timer_t timerid */
12908         target_timer_t timerid = get_timer_id(arg1);
12909 
12910         if (timerid < 0) {
12911             ret = timerid;
12912         } else {
12913             timer_t htimer = g_posix_timers[timerid];
12914             ret = get_errno(timer_getoverrun(htimer));
12915         }
12916         return ret;
12917     }
12918 #endif
12919 
12920 #ifdef TARGET_NR_timer_delete
12921     case TARGET_NR_timer_delete:
12922     {
12923         /* args: timer_t timerid */
12924         target_timer_t timerid = get_timer_id(arg1);
12925 
12926         if (timerid < 0) {
12927             ret = timerid;
12928         } else {
12929             timer_t htimer = g_posix_timers[timerid];
12930             ret = get_errno(timer_delete(htimer));
12931             g_posix_timers[timerid] = 0;
12932         }
12933         return ret;
12934     }
12935 #endif
12936 
12937 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12938     case TARGET_NR_timerfd_create:
12939         return get_errno(timerfd_create(arg1,
12940                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12941 #endif
12942 
12943 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12944     case TARGET_NR_timerfd_gettime:
12945         {
12946             struct itimerspec its_curr;
12947 
12948             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12949 
12950             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12951                 return -TARGET_EFAULT;
12952             }
12953         }
12954         return ret;
12955 #endif
12956 
12957 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12958     case TARGET_NR_timerfd_gettime64:
12959         {
12960             struct itimerspec its_curr;
12961 
12962             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12963 
12964             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12965                 return -TARGET_EFAULT;
12966             }
12967         }
12968         return ret;
12969 #endif
12970 
12971 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12972     case TARGET_NR_timerfd_settime:
12973         {
12974             struct itimerspec its_new, its_old, *p_new;
12975 
12976             if (arg3) {
12977                 if (target_to_host_itimerspec(&its_new, arg3)) {
12978                     return -TARGET_EFAULT;
12979                 }
12980                 p_new = &its_new;
12981             } else {
12982                 p_new = NULL;
12983             }
12984 
12985             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12986 
12987             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12988                 return -TARGET_EFAULT;
12989             }
12990         }
12991         return ret;
12992 #endif
12993 
12994 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12995     case TARGET_NR_timerfd_settime64:
12996         {
12997             struct itimerspec its_new, its_old, *p_new;
12998 
12999             if (arg3) {
13000                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13001                     return -TARGET_EFAULT;
13002                 }
13003                 p_new = &its_new;
13004             } else {
13005                 p_new = NULL;
13006             }
13007 
13008             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13009 
13010             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13011                 return -TARGET_EFAULT;
13012             }
13013         }
13014         return ret;
13015 #endif
13016 
13017 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13018     case TARGET_NR_ioprio_get:
13019         return get_errno(ioprio_get(arg1, arg2));
13020 #endif
13021 
13022 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13023     case TARGET_NR_ioprio_set:
13024         return get_errno(ioprio_set(arg1, arg2, arg3));
13025 #endif
13026 
13027 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13028     case TARGET_NR_setns:
13029         return get_errno(setns(arg1, arg2));
13030 #endif
13031 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13032     case TARGET_NR_unshare:
13033         return get_errno(unshare(arg1));
13034 #endif
13035 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13036     case TARGET_NR_kcmp:
13037         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13038 #endif
13039 #ifdef TARGET_NR_swapcontext
13040     case TARGET_NR_swapcontext:
13041         /* PowerPC specific.  */
13042         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13043 #endif
13044 #ifdef TARGET_NR_memfd_create
13045     case TARGET_NR_memfd_create:
13046         p = lock_user_string(arg1);
13047         if (!p) {
13048             return -TARGET_EFAULT;
13049         }
13050         ret = get_errno(memfd_create(p, arg2));
13051         fd_trans_unregister(ret);
13052         unlock_user(p, arg1, 0);
13053         return ret;
13054 #endif
13055 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13056     case TARGET_NR_membarrier:
13057         return get_errno(membarrier(arg1, arg2));
13058 #endif
13059 
13060 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13061     case TARGET_NR_copy_file_range:
13062         {
13063             loff_t inoff, outoff;
13064             loff_t *pinoff = NULL, *poutoff = NULL;
13065 
13066             if (arg2) {
13067                 if (get_user_u64(inoff, arg2)) {
13068                     return -TARGET_EFAULT;
13069                 }
13070                 pinoff = &inoff;
13071             }
13072             if (arg4) {
13073                 if (get_user_u64(outoff, arg4)) {
13074                     return -TARGET_EFAULT;
13075                 }
13076                 poutoff = &outoff;
13077             }
13078             /* Do not sign-extend the count parameter. */
13079             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13080                                                  (abi_ulong)arg5, arg6));
13081             if (!is_error(ret) && ret > 0) {
13082                 if (arg2) {
13083                     if (put_user_u64(inoff, arg2)) {
13084                         return -TARGET_EFAULT;
13085                     }
13086                 }
13087                 if (arg4) {
13088                     if (put_user_u64(outoff, arg4)) {
13089                         return -TARGET_EFAULT;
13090                     }
13091                 }
13092             }
13093         }
13094         return ret;
13095 #endif
13096 
13097 #if defined(TARGET_NR_pivot_root)
13098     case TARGET_NR_pivot_root:
13099         {
13100             void *p2;
13101             p = lock_user_string(arg1); /* new_root */
13102             p2 = lock_user_string(arg2); /* put_old */
13103             if (!p || !p2) {
13104                 ret = -TARGET_EFAULT;
13105             } else {
13106                 ret = get_errno(pivot_root(p, p2));
13107             }
13108             unlock_user(p2, arg2, 0);
13109             unlock_user(p, arg1, 0);
13110         }
13111         return ret;
13112 #endif
13113 
13114     default:
13115         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13116         return -TARGET_ENOSYS;
13117     }
13118     return ret;
13119 }
13120 
13121 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13122                     abi_long arg2, abi_long arg3, abi_long arg4,
13123                     abi_long arg5, abi_long arg6, abi_long arg7,
13124                     abi_long arg8)
13125 {
13126     CPUState *cpu = env_cpu(cpu_env);
13127     abi_long ret;
13128 
13129 #ifdef DEBUG_ERESTARTSYS
13130     /* Debug-only code for exercising the syscall-restart code paths
13131      * in the per-architecture cpu main loops: restart every syscall
13132      * the guest makes once before letting it through.
13133      */
13134     {
13135         static bool flag;
13136         flag = !flag;
13137         if (flag) {
13138             return -TARGET_ERESTARTSYS;
13139         }
13140     }
13141 #endif
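    /*
     * -TARGET_ERESTARTSYS is handled in each target's cpu_loop(): the guest
     * PC is wound back to the syscall instruction so the call is reissued,
     * which is the path this debug knob exercises.
     */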
13142 
13143     record_syscall_start(cpu, num, arg1,
13144                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13145 
13146     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13147         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13148     }
13149 
13150     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13151                       arg5, arg6, arg7, arg8);
13152 
13153     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13154         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13155                           arg3, arg4, arg5, arg6);
13156     }
13157 
13158     record_syscall_return(cpu, num, ret);
13159     return ret;
13160 }
13161