xref: /qemu/linux-user/syscall.c (revision dc293f60)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
187 
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * Otherwise we emulate getdents with getdents if the host has it.
293  */
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
/*
 * Translation table for open()/fcntl() file status flags between target
 * and host.  Each entry is { target_mask, target_bits, host_mask,
 * host_bits }: when (flags & target_mask) == target_bits the host_bits
 * are set in the converted value (and symmetrically for the
 * host-to-target direction).  The list is terminated by an all-zero
 * entry, hence the care below not to end early on O_LARGEFILE == 0.
 */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
/*
 * Fallback for hosts whose libc lacks utimensat(): always fail with
 * ENOSYS so the guest sees the syscall as unimplemented.  The
 * 'times' and 'flags' arguments are intentionally ignored.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
/*
 * Fallback for hosts whose libc lacks renameat2(): a zero flags value
 * degenerates to plain renameat(); any flag bits require real kernel
 * support we do not have, so report ENOSYS for those.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall table can call inotify_init() directly. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper forwarding to the host inotify_add_watch(). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper forwarding to the host inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper forwarding to the host inotify_init1(). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
467 #endif
468 #else
469 /* Userspace can usually survive runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be that used by the underlying syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/*
 * Scan g_posix_timers[] for an unused slot (value 0), reserve it by
 * storing the sentinel (timer_t)1, and return its index; return -1 if
 * every slot is in use.  The sentinel keeps the slot from looking free
 * until the real host timer id is stored by the caller.
 *
 * FIXME: Does finding the next free slot require a lock?  Two guest
 * threads calling timer_create() concurrently could both observe the
 * same slot as zero before either writes the sentinel.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
509 
/* Exclusive upper bound on errno values translated via the tables below. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(); it starts out all
 * zero, and a zero entry means "no translation, pass through". */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
516 
517 /*
518  * This list is the union of errno values overridden in asm-<arch>/errno.h
519  * minus the errnos that are not actually generic to all archs.
520  */
521 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522     [EAGAIN]		= TARGET_EAGAIN,
523     [EIDRM]		= TARGET_EIDRM,
524     [ECHRNG]		= TARGET_ECHRNG,
525     [EL2NSYNC]		= TARGET_EL2NSYNC,
526     [EL3HLT]		= TARGET_EL3HLT,
527     [EL3RST]		= TARGET_EL3RST,
528     [ELNRNG]		= TARGET_ELNRNG,
529     [EUNATCH]		= TARGET_EUNATCH,
530     [ENOCSI]		= TARGET_ENOCSI,
531     [EL2HLT]		= TARGET_EL2HLT,
532     [EDEADLK]		= TARGET_EDEADLK,
533     [ENOLCK]		= TARGET_ENOLCK,
534     [EBADE]		= TARGET_EBADE,
535     [EBADR]		= TARGET_EBADR,
536     [EXFULL]		= TARGET_EXFULL,
537     [ENOANO]		= TARGET_ENOANO,
538     [EBADRQC]		= TARGET_EBADRQC,
539     [EBADSLT]		= TARGET_EBADSLT,
540     [EBFONT]		= TARGET_EBFONT,
541     [ENOSTR]		= TARGET_ENOSTR,
542     [ENODATA]		= TARGET_ENODATA,
543     [ETIME]		= TARGET_ETIME,
544     [ENOSR]		= TARGET_ENOSR,
545     [ENONET]		= TARGET_ENONET,
546     [ENOPKG]		= TARGET_ENOPKG,
547     [EREMOTE]		= TARGET_EREMOTE,
548     [ENOLINK]		= TARGET_ENOLINK,
549     [EADV]		= TARGET_EADV,
550     [ESRMNT]		= TARGET_ESRMNT,
551     [ECOMM]		= TARGET_ECOMM,
552     [EPROTO]		= TARGET_EPROTO,
553     [EDOTDOT]		= TARGET_EDOTDOT,
554     [EMULTIHOP]		= TARGET_EMULTIHOP,
555     [EBADMSG]		= TARGET_EBADMSG,
556     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
557     [EOVERFLOW]		= TARGET_EOVERFLOW,
558     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
559     [EBADFD]		= TARGET_EBADFD,
560     [EREMCHG]		= TARGET_EREMCHG,
561     [ELIBACC]		= TARGET_ELIBACC,
562     [ELIBBAD]		= TARGET_ELIBBAD,
563     [ELIBSCN]		= TARGET_ELIBSCN,
564     [ELIBMAX]		= TARGET_ELIBMAX,
565     [ELIBEXEC]		= TARGET_ELIBEXEC,
566     [EILSEQ]		= TARGET_EILSEQ,
567     [ENOSYS]		= TARGET_ENOSYS,
568     [ELOOP]		= TARGET_ELOOP,
569     [ERESTART]		= TARGET_ERESTART,
570     [ESTRPIPE]		= TARGET_ESTRPIPE,
571     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
572     [EUSERS]		= TARGET_EUSERS,
573     [ENOTSOCK]		= TARGET_ENOTSOCK,
574     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
575     [EMSGSIZE]		= TARGET_EMSGSIZE,
576     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
577     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
578     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
579     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
580     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
581     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
582     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
583     [EADDRINUSE]	= TARGET_EADDRINUSE,
584     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
585     [ENETDOWN]		= TARGET_ENETDOWN,
586     [ENETUNREACH]	= TARGET_ENETUNREACH,
587     [ENETRESET]		= TARGET_ENETRESET,
588     [ECONNABORTED]	= TARGET_ECONNABORTED,
589     [ECONNRESET]	= TARGET_ECONNRESET,
590     [ENOBUFS]		= TARGET_ENOBUFS,
591     [EISCONN]		= TARGET_EISCONN,
592     [ENOTCONN]		= TARGET_ENOTCONN,
593     [EUCLEAN]		= TARGET_EUCLEAN,
594     [ENOTNAM]		= TARGET_ENOTNAM,
595     [ENAVAIL]		= TARGET_ENAVAIL,
596     [EISNAM]		= TARGET_EISNAM,
597     [EREMOTEIO]		= TARGET_EREMOTEIO,
598     [EDQUOT]            = TARGET_EDQUOT,
599     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
600     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
601     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
602     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
603     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
604     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
605     [EALREADY]		= TARGET_EALREADY,
606     [EINPROGRESS]	= TARGET_EINPROGRESS,
607     [ESTALE]		= TARGET_ESTALE,
608     [ECANCELED]		= TARGET_ECANCELED,
609     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
610     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
611 #ifdef ENOKEY
612     [ENOKEY]		= TARGET_ENOKEY,
613 #endif
614 #ifdef EKEYEXPIRED
615     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
616 #endif
617 #ifdef EKEYREVOKED
618     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
619 #endif
620 #ifdef EKEYREJECTED
621     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
622 #endif
623 #ifdef EOWNERDEAD
624     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
625 #endif
626 #ifdef ENOTRECOVERABLE
627     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
628 #endif
629 #ifdef ENOMSG
630     [ENOMSG]            = TARGET_ENOMSG,
631 #endif
632 #ifdef ERKFILL
633     [ERFKILL]           = TARGET_ERFKILL,
634 #endif
635 #ifdef EHWPOISON
636     [EHWPOISON]         = TARGET_EHWPOISON,
637 #endif
638 };
639 
640 static inline int host_to_target_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         host_to_target_errno_table[err]) {
644         return host_to_target_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline int target_to_host_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         target_to_host_errno_table[err]) {
653         return target_to_host_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline abi_long get_errno(abi_long ret)
659 {
660     if (ret == -1)
661         return -host_to_target_errno(errno);
662     else
663         return ret;
664 }
665 
666 const char *target_strerror(int err)
667 {
668     if (err == TARGET_ERESTARTSYS) {
669         return "To be restarted";
670     }
671     if (err == TARGET_QEMU_ESIGRETURN) {
672         return "Successful exit from sigreturn";
673     }
674 
675     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676         return NULL;
677     }
678     return strerror(target_to_host_errno(err));
679 }
680 
681 #define safe_syscall0(type, name) \
682 static type safe_##name(void) \
683 { \
684     return safe_syscall(__NR_##name); \
685 }
686 
687 #define safe_syscall1(type, name, type1, arg1) \
688 static type safe_##name(type1 arg1) \
689 { \
690     return safe_syscall(__NR_##name, arg1); \
691 }
692 
693 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694 static type safe_##name(type1 arg1, type2 arg2) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2); \
697 }
698 
699 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701 { \
702     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 }
704 
705 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706     type4, arg4) \
707 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 }
711 
712 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 }
719 
720 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721     type4, arg4, type5, arg5, type6, arg6) \
722 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723     type5 arg5, type6 arg6) \
724 { \
725     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726 }
727 
728 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731               int, flags, mode_t, mode)
732 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734               struct rusage *, rusage)
735 #endif
736 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737               int, options, struct rusage *, rusage)
738 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 #endif
744 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
746               struct timespec *, tsp, const sigset_t *, sigmask,
747               size_t, sigsetsize)
748 #endif
749 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
750               int, maxevents, int, timeout, const sigset_t *, sigmask,
751               size_t, sigsetsize)
752 #if defined(__NR_futex)
753 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
754               const struct timespec *,timeout,int *,uaddr2,int,val3)
755 #endif
756 #if defined(__NR_futex_time64)
757 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
758               const struct timespec *,timeout,int *,uaddr2,int,val3)
759 #endif
760 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
761 safe_syscall2(int, kill, pid_t, pid, int, sig)
762 safe_syscall2(int, tkill, int, tid, int, sig)
763 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
764 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
765 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
766 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
767               unsigned long, pos_l, unsigned long, pos_h)
768 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
769               unsigned long, pos_l, unsigned long, pos_h)
770 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
771               socklen_t, addrlen)
772 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
773               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
774 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
775               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
776 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
777 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
778 safe_syscall2(int, flock, int, fd, int, operation)
779 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
781               const struct timespec *, uts, size_t, sigsetsize)
782 #endif
783 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
784               int, flags)
785 #if defined(TARGET_NR_nanosleep)
786 safe_syscall2(int, nanosleep, const struct timespec *, req,
787               struct timespec *, rem)
788 #endif
789 #if defined(TARGET_NR_clock_nanosleep) || \
790     defined(TARGET_NR_clock_nanosleep_time64)
791 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
792               const struct timespec *, req, struct timespec *, rem)
793 #endif
794 #ifdef __NR_ipc
795 #ifdef __s390x__
796 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
797               void *, ptr)
798 #else
799 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
800               void *, ptr, long, fifth)
801 #endif
802 #endif
803 #ifdef __NR_msgsnd
804 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
805               int, flags)
806 #endif
807 #ifdef __NR_msgrcv
808 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
809               long, msgtype, int, flags)
810 #endif
811 #ifdef __NR_semtimedop
812 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
813               unsigned, nsops, const struct timespec *, timeout)
814 #endif
815 #if defined(TARGET_NR_mq_timedsend) || \
816     defined(TARGET_NR_mq_timedsend_time64)
817 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
818               size_t, len, unsigned, prio, const struct timespec *, timeout)
819 #endif
820 #if defined(TARGET_NR_mq_timedreceive) || \
821     defined(TARGET_NR_mq_timedreceive_time64)
822 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
823               size_t, len, unsigned *, prio, const struct timespec *, timeout)
824 #endif
825 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
827               int, outfd, loff_t *, poutoff, size_t, length,
828               unsigned int, flags)
829 #endif
830 
831 /* We do ioctl like this rather than via safe_syscall3 to preserve the
832  * "third argument might be integer or pointer or not present" behaviour of
833  * the libc function.
834  */
835 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836 /* Similarly for fcntl. Note that callers must always:
837  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838  *  use the flock64 struct rather than unsuffixed flock
839  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
840  */
841 #ifdef __NR_fcntl64
842 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
843 #else
844 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
845 #endif
846 
847 static inline int host_to_target_sock_type(int host_type)
848 {
849     int target_type;
850 
851     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
852     case SOCK_DGRAM:
853         target_type = TARGET_SOCK_DGRAM;
854         break;
855     case SOCK_STREAM:
856         target_type = TARGET_SOCK_STREAM;
857         break;
858     default:
859         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
860         break;
861     }
862 
863 #if defined(SOCK_CLOEXEC)
864     if (host_type & SOCK_CLOEXEC) {
865         target_type |= TARGET_SOCK_CLOEXEC;
866     }
867 #endif
868 
869 #if defined(SOCK_NONBLOCK)
870     if (host_type & SOCK_NONBLOCK) {
871         target_type |= TARGET_SOCK_NONBLOCK;
872     }
873 #endif
874 
875     return target_type;
876 }
877 
/* Current guest program break, maintained by do_brk(). */
static abi_ulong target_brk;
/* Initial break value; do_brk() never moves the break below this. */
static abi_ulong target_original_brk;
/* End of the highest host page currently reserved for the guest heap. */
static abi_ulong brk_page;
881 
882 void target_set_brk(abi_ulong new_brk)
883 {
884     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
885     brk_page = HOST_PAGE_ALIGN(target_brk);
886 }
887 
888 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889 #define DEBUGF_BRK(message, args...)
890 
891 /* do_brk() must return target values and target errnos. */
892 abi_long do_brk(abi_ulong new_brk)
893 {
894     abi_long mapped_addr;
895     abi_ulong new_alloc_size;
896 
897     /* brk pointers are always untagged */
898 
899     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
900 
901     if (!new_brk) {
902         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
903         return target_brk;
904     }
905     if (new_brk < target_original_brk) {
906         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
907                    target_brk);
908         return target_brk;
909     }
910 
911     /* If the new brk is less than the highest page reserved to the
912      * target heap allocation, set it and we're almost done...  */
913     if (new_brk <= brk_page) {
914         /* Heap contents are initialized to zero, as for anonymous
915          * mapped pages.  */
916         if (new_brk > target_brk) {
917             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
918         }
919 	target_brk = new_brk;
920         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
921 	return target_brk;
922     }
923 
924     /* We need to allocate more memory after the brk... Note that
925      * we don't use MAP_FIXED because that will map over the top of
926      * any existing mapping (like the one with the host libc or qemu
927      * itself); instead we treat "mapped but at wrong address" as
928      * a failure and unmap again.
929      */
930     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
931     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
932                                         PROT_READ|PROT_WRITE,
933                                         MAP_ANON|MAP_PRIVATE, 0, 0));
934 
935     if (mapped_addr == brk_page) {
936         /* Heap contents are initialized to zero, as for anonymous
937          * mapped pages.  Technically the new pages are already
938          * initialized to zero since they *are* anonymous mapped
939          * pages, however we have to take care with the contents that
940          * come from the remaining part of the previous page: it may
941          * contains garbage data due to a previous heap usage (grown
942          * then shrunken).  */
943         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
944 
945         target_brk = new_brk;
946         brk_page = HOST_PAGE_ALIGN(target_brk);
947         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
948             target_brk);
949         return target_brk;
950     } else if (mapped_addr != -1) {
951         /* Mapped but at wrong address, meaning there wasn't actually
952          * enough space for this brk.
953          */
954         target_munmap(mapped_addr, new_alloc_size);
955         mapped_addr = -1;
956         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
957     }
958     else {
959         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
960     }
961 
962 #if defined(TARGET_ALPHA)
963     /* We (partially) emulate OSF/1 on Alpha, which requires we
964        return a proper errno, not an unchanged brk value.  */
965     return -TARGET_ENOMEM;
966 #endif
967     /* For everything else, return the previous break. */
968     return target_brk;
969 }
970 
971 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
972     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
973 static inline abi_long copy_from_user_fdset(fd_set *fds,
974                                             abi_ulong target_fds_addr,
975                                             int n)
976 {
977     int i, nw, j, k;
978     abi_ulong b, *target_fds;
979 
980     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
981     if (!(target_fds = lock_user(VERIFY_READ,
982                                  target_fds_addr,
983                                  sizeof(abi_ulong) * nw,
984                                  1)))
985         return -TARGET_EFAULT;
986 
987     FD_ZERO(fds);
988     k = 0;
989     for (i = 0; i < nw; i++) {
990         /* grab the abi_ulong */
991         __get_user(b, &target_fds[i]);
992         for (j = 0; j < TARGET_ABI_BITS; j++) {
993             /* check the bit inside the abi_ulong */
994             if ((b >> j) & 1)
995                 FD_SET(k, fds);
996             k++;
997         }
998     }
999 
1000     unlock_user(target_fds, target_fds_addr, 0);
1001 
1002     return 0;
1003 }
1004 
1005 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1006                                                  abi_ulong target_fds_addr,
1007                                                  int n)
1008 {
1009     if (target_fds_addr) {
1010         if (copy_from_user_fdset(fds, target_fds_addr, n))
1011             return -TARGET_EFAULT;
1012         *fds_ptr = fds;
1013     } else {
1014         *fds_ptr = NULL;
1015     }
1016     return 0;
1017 }
1018 
1019 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1020                                           const fd_set *fds,
1021                                           int n)
1022 {
1023     int i, nw, j, k;
1024     abi_long v;
1025     abi_ulong *target_fds;
1026 
1027     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1028     if (!(target_fds = lock_user(VERIFY_WRITE,
1029                                  target_fds_addr,
1030                                  sizeof(abi_ulong) * nw,
1031                                  0)))
1032         return -TARGET_EFAULT;
1033 
1034     k = 0;
1035     for (i = 0; i < nw; i++) {
1036         v = 0;
1037         for (j = 0; j < TARGET_ABI_BITS; j++) {
1038             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1039             k++;
1040         }
1041         __put_user(v, &target_fds[i]);
1042     }
1043 
1044     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1045 
1046     return 0;
1047 }
1048 #endif
1049 
1050 #if defined(__alpha__)
1051 #define HOST_HZ 1024
1052 #else
1053 #define HOST_HZ 100
1054 #endif
1055 
/*
 * Scale a clock tick count from the host's HZ to the target's HZ.
 * When the two rates match this is the identity; otherwise scale in
 * 64-bit arithmetic so the intermediate product cannot overflow.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1064 
1065 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1066                                              const struct rusage *rusage)
1067 {
1068     struct target_rusage *target_rusage;
1069 
1070     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1071         return -TARGET_EFAULT;
1072     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1073     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1074     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1075     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1076     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1077     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1078     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1079     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1080     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1081     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1082     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1083     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1084     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1085     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1086     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1087     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1088     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1089     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1090     unlock_user_struct(target_rusage, target_addr, 1);
1091 
1092     return 0;
1093 }
1094 
#ifdef TARGET_NR_setrlimit
/*
 * Convert a target rlimit value to the host rlim_t.  The target's
 * RLIM_INFINITY encoding maps to the host's, and any value that does
 * not round-trip through rlim_t saturates to RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY) {
        return RLIM_INFINITY;
    }

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result) {
        return RLIM_INFINITY;
    }

    return result;
}
#endif
1112 
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to the target's byte-swapped abi_ulong
 * representation.  Host RLIM_INFINITY, and any value that does not fit
 * in abi_long, become the target's RLIM_INFINITY encoding.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        target_rlim_swap = TARGET_RLIM_INFINITY;
    } else {
        target_rlim_swap = rlim;
    }
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
1128 
1129 static inline int target_to_host_resource(int code)
1130 {
1131     switch (code) {
1132     case TARGET_RLIMIT_AS:
1133         return RLIMIT_AS;
1134     case TARGET_RLIMIT_CORE:
1135         return RLIMIT_CORE;
1136     case TARGET_RLIMIT_CPU:
1137         return RLIMIT_CPU;
1138     case TARGET_RLIMIT_DATA:
1139         return RLIMIT_DATA;
1140     case TARGET_RLIMIT_FSIZE:
1141         return RLIMIT_FSIZE;
1142     case TARGET_RLIMIT_LOCKS:
1143         return RLIMIT_LOCKS;
1144     case TARGET_RLIMIT_MEMLOCK:
1145         return RLIMIT_MEMLOCK;
1146     case TARGET_RLIMIT_MSGQUEUE:
1147         return RLIMIT_MSGQUEUE;
1148     case TARGET_RLIMIT_NICE:
1149         return RLIMIT_NICE;
1150     case TARGET_RLIMIT_NOFILE:
1151         return RLIMIT_NOFILE;
1152     case TARGET_RLIMIT_NPROC:
1153         return RLIMIT_NPROC;
1154     case TARGET_RLIMIT_RSS:
1155         return RLIMIT_RSS;
1156     case TARGET_RLIMIT_RTPRIO:
1157         return RLIMIT_RTPRIO;
1158     case TARGET_RLIMIT_SIGPENDING:
1159         return RLIMIT_SIGPENDING;
1160     case TARGET_RLIMIT_STACK:
1161         return RLIMIT_STACK;
1162     default:
1163         return code;
1164     }
1165 }
1166 
1167 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1168                                               abi_ulong target_tv_addr)
1169 {
1170     struct target_timeval *target_tv;
1171 
1172     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1173         return -TARGET_EFAULT;
1174     }
1175 
1176     __get_user(tv->tv_sec, &target_tv->tv_sec);
1177     __get_user(tv->tv_usec, &target_tv->tv_usec);
1178 
1179     unlock_user_struct(target_tv, target_tv_addr, 0);
1180 
1181     return 0;
1182 }
1183 
1184 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1185                                             const struct timeval *tv)
1186 {
1187     struct target_timeval *target_tv;
1188 
1189     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1190         return -TARGET_EFAULT;
1191     }
1192 
1193     __put_user(tv->tv_sec, &target_tv->tv_sec);
1194     __put_user(tv->tv_usec, &target_tv->tv_usec);
1195 
1196     unlock_user_struct(target_tv, target_tv_addr, 1);
1197 
1198     return 0;
1199 }
1200 
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a struct target__kernel_sock_timeval (the 64-bit time_t layout)
 * from guest memory at target_tv_addr into the host *tv.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *ttv;

    if (!lock_user_struct(VERIFY_READ, ttv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tv->tv_sec, &ttv->tv_sec);
    __get_user(tv->tv_usec, &ttv->tv_usec);
    unlock_user_struct(ttv, target_tv_addr, 0);
    return 0;
}
#endif
1219 
1220 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1221                                               const struct timeval *tv)
1222 {
1223     struct target__kernel_sock_timeval *target_tv;
1224 
1225     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1226         return -TARGET_EFAULT;
1227     }
1228 
1229     __put_user(tv->tv_sec, &target_tv->tv_sec);
1230     __put_user(tv->tv_usec, &target_tv->tv_usec);
1231 
1232     unlock_user_struct(target_tv, target_tv_addr, 1);
1233 
1234     return 0;
1235 }
1236 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a struct target_timespec from guest memory at target_addr into
 * the host *host_ts.  Returns 0 or -TARGET_EFAULT.
 *
 * The guard previously tested TARGET_NR_pselect6 twice; the second term
 * is TARGET_NR_pselect6_time64 so that do_pselect6() (compiled for
 * either NR) always has this helper available.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1260 
1261 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1262     defined(TARGET_NR_timer_settime64) || \
1263     defined(TARGET_NR_mq_timedsend_time64) || \
1264     defined(TARGET_NR_mq_timedreceive_time64) || \
1265     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1266     defined(TARGET_NR_clock_nanosleep_time64) || \
1267     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1268     defined(TARGET_NR_utimensat) || \
1269     defined(TARGET_NR_utimensat_time64) || \
1270     defined(TARGET_NR_semtimedop_time64) || \
1271     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a struct target__kernel_timespec (the 64-bit time_t layout) from
 * guest memory at target_addr into the host *host_ts.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    /* (tv_nsec is sign-extended through abi_long so the upper bits of
     * the 64-bit guest field never leak into the host value) */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
1287 #endif
1288 
1289 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1290                                                struct timespec *host_ts)
1291 {
1292     struct target_timespec *target_ts;
1293 
1294     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1295         return -TARGET_EFAULT;
1296     }
1297     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1298     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1299     unlock_user_struct(target_ts, target_addr, 1);
1300     return 0;
1301 }
1302 
1303 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1304                                                  struct timespec *host_ts)
1305 {
1306     struct target__kernel_timespec *target_ts;
1307 
1308     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1309         return -TARGET_EFAULT;
1310     }
1311     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1312     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1313     unlock_user_struct(target_ts, target_addr, 1);
1314     return 0;
1315 }
1316 
#if defined(TARGET_NR_gettimeofday)
/*
 * Write a host struct timezone out to a struct target_timezone in
 * guest memory at target_tz_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    /* NOTE(review): the copy-on-lock flag is 1 here even though both
     * fields are overwritten below; sibling write helpers such as
     * copy_to_user_timeval() pass 0 — presumably harmless, but worth
     * confirming. */
    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
1335 
#if defined(TARGET_NR_settimeofday)
/*
 * Read a struct target_timezone from guest memory at target_tz_addr
 * into the host *tz.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *ttz;

    if (!lock_user_struct(VERIFY_READ, ttz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tz->tz_minuteswest, &ttz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &ttz->tz_dsttime);
    unlock_user_struct(ttz, target_tz_addr, 0);
    return 0;
}
#endif
1354 
1355 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1356 #include <mqueue.h>
1357 
1358 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1359                                               abi_ulong target_mq_attr_addr)
1360 {
1361     struct target_mq_attr *target_mq_attr;
1362 
1363     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1364                           target_mq_attr_addr, 1))
1365         return -TARGET_EFAULT;
1366 
1367     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1368     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1369     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1370     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1371 
1372     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1373 
1374     return 0;
1375 }
1376 
1377 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1378                                             const struct mq_attr *attr)
1379 {
1380     struct target_mq_attr *target_mq_attr;
1381 
1382     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1383                           target_mq_attr_addr, 0))
1384         return -TARGET_EFAULT;
1385 
1386     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1387     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1388     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1389     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1390 
1391     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1392 
1393     return 0;
1394 }
1395 #endif
1396 
1397 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1398 /* do_select() must return target values and target errnos. */
1399 static abi_long do_select(int n,
1400                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1401                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1402 {
1403     fd_set rfds, wfds, efds;
1404     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1405     struct timeval tv;
1406     struct timespec ts, *ts_ptr;
1407     abi_long ret;
1408 
1409     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1410     if (ret) {
1411         return ret;
1412     }
1413     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1414     if (ret) {
1415         return ret;
1416     }
1417     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1418     if (ret) {
1419         return ret;
1420     }
1421 
1422     if (target_tv_addr) {
1423         if (copy_from_user_timeval(&tv, target_tv_addr))
1424             return -TARGET_EFAULT;
1425         ts.tv_sec = tv.tv_sec;
1426         ts.tv_nsec = tv.tv_usec * 1000;
1427         ts_ptr = &ts;
1428     } else {
1429         ts_ptr = NULL;
1430     }
1431 
1432     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1433                                   ts_ptr, NULL));
1434 
1435     if (!is_error(ret)) {
1436         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1437             return -TARGET_EFAULT;
1438         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1439             return -TARGET_EFAULT;
1440         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1441             return -TARGET_EFAULT;
1442 
1443         if (target_tv_addr) {
1444             tv.tv_sec = ts.tv_sec;
1445             tv.tv_usec = ts.tv_nsec / 1000;
1446             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1447                 return -TARGET_EFAULT;
1448             }
1449         }
1450     }
1451 
1452     return ret;
1453 }
1454 
1455 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1456 static abi_long do_old_select(abi_ulong arg1)
1457 {
1458     struct target_sel_arg_struct *sel;
1459     abi_ulong inp, outp, exp, tvp;
1460     long nsel;
1461 
1462     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1463         return -TARGET_EFAULT;
1464     }
1465 
1466     nsel = tswapal(sel->n);
1467     inp = tswapal(sel->inp);
1468     outp = tswapal(sel->outp);
1469     exp = tswapal(sel->exp);
1470     tvp = tswapal(sel->tvp);
1471 
1472     unlock_user_struct(sel, arg1, 0);
1473 
1474     return do_select(nsel, inp, outp, exp, tvp);
1475 }
1476 #endif
1477 #endif
1478 
1479 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1480 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1481                             abi_long arg4, abi_long arg5, abi_long arg6,
1482                             bool time64)
1483 {
1484     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1485     fd_set rfds, wfds, efds;
1486     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1487     struct timespec ts, *ts_ptr;
1488     abi_long ret;
1489 
1490     /*
1491      * The 6th arg is actually two args smashed together,
1492      * so we cannot use the C library.
1493      */
1494     sigset_t set;
1495     struct {
1496         sigset_t *set;
1497         size_t size;
1498     } sig, *sig_ptr;
1499 
1500     abi_ulong arg_sigset, arg_sigsize, *arg7;
1501     target_sigset_t *target_sigset;
1502 
1503     n = arg1;
1504     rfd_addr = arg2;
1505     wfd_addr = arg3;
1506     efd_addr = arg4;
1507     ts_addr = arg5;
1508 
1509     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1510     if (ret) {
1511         return ret;
1512     }
1513     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1514     if (ret) {
1515         return ret;
1516     }
1517     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1518     if (ret) {
1519         return ret;
1520     }
1521 
1522     /*
1523      * This takes a timespec, and not a timeval, so we cannot
1524      * use the do_select() helper ...
1525      */
1526     if (ts_addr) {
1527         if (time64) {
1528             if (target_to_host_timespec64(&ts, ts_addr)) {
1529                 return -TARGET_EFAULT;
1530             }
1531         } else {
1532             if (target_to_host_timespec(&ts, ts_addr)) {
1533                 return -TARGET_EFAULT;
1534             }
1535         }
1536             ts_ptr = &ts;
1537     } else {
1538         ts_ptr = NULL;
1539     }
1540 
1541     /* Extract the two packed args for the sigset */
1542     if (arg6) {
1543         sig_ptr = &sig;
1544         sig.size = SIGSET_T_SIZE;
1545 
1546         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1547         if (!arg7) {
1548             return -TARGET_EFAULT;
1549         }
1550         arg_sigset = tswapal(arg7[0]);
1551         arg_sigsize = tswapal(arg7[1]);
1552         unlock_user(arg7, arg6, 0);
1553 
1554         if (arg_sigset) {
1555             sig.set = &set;
1556             if (arg_sigsize != sizeof(*target_sigset)) {
1557                 /* Like the kernel, we enforce correct size sigsets */
1558                 return -TARGET_EINVAL;
1559             }
1560             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1561                                       sizeof(*target_sigset), 1);
1562             if (!target_sigset) {
1563                 return -TARGET_EFAULT;
1564             }
1565             target_to_host_sigset(&set, target_sigset);
1566             unlock_user(target_sigset, arg_sigset, 0);
1567         } else {
1568             sig.set = NULL;
1569         }
1570     } else {
1571         sig_ptr = NULL;
1572     }
1573 
1574     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1575                                   ts_ptr, sig_ptr));
1576 
1577     if (!is_error(ret)) {
1578         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1579             return -TARGET_EFAULT;
1580         }
1581         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1582             return -TARGET_EFAULT;
1583         }
1584         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1585             return -TARGET_EFAULT;
1586         }
1587         if (time64) {
1588             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1589                 return -TARGET_EFAULT;
1590             }
1591         } else {
1592             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1593                 return -TARGET_EFAULT;
1594             }
1595         }
1596     }
1597     return ret;
1598 }
1599 #endif
1600 
1601 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1602     defined(TARGET_NR_ppoll_time64)
1603 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1604                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1605 {
1606     struct target_pollfd *target_pfd;
1607     unsigned int nfds = arg2;
1608     struct pollfd *pfd;
1609     unsigned int i;
1610     abi_long ret;
1611 
1612     pfd = NULL;
1613     target_pfd = NULL;
1614     if (nfds) {
1615         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1616             return -TARGET_EINVAL;
1617         }
1618         target_pfd = lock_user(VERIFY_WRITE, arg1,
1619                                sizeof(struct target_pollfd) * nfds, 1);
1620         if (!target_pfd) {
1621             return -TARGET_EFAULT;
1622         }
1623 
1624         pfd = alloca(sizeof(struct pollfd) * nfds);
1625         for (i = 0; i < nfds; i++) {
1626             pfd[i].fd = tswap32(target_pfd[i].fd);
1627             pfd[i].events = tswap16(target_pfd[i].events);
1628         }
1629     }
1630     if (ppoll) {
1631         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1632         target_sigset_t *target_set;
1633         sigset_t _set, *set = &_set;
1634 
1635         if (arg3) {
1636             if (time64) {
1637                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1638                     unlock_user(target_pfd, arg1, 0);
1639                     return -TARGET_EFAULT;
1640                 }
1641             } else {
1642                 if (target_to_host_timespec(timeout_ts, arg3)) {
1643                     unlock_user(target_pfd, arg1, 0);
1644                     return -TARGET_EFAULT;
1645                 }
1646             }
1647         } else {
1648             timeout_ts = NULL;
1649         }
1650 
1651         if (arg4) {
1652             if (arg5 != sizeof(target_sigset_t)) {
1653                 unlock_user(target_pfd, arg1, 0);
1654                 return -TARGET_EINVAL;
1655             }
1656 
1657             target_set = lock_user(VERIFY_READ, arg4,
1658                                    sizeof(target_sigset_t), 1);
1659             if (!target_set) {
1660                 unlock_user(target_pfd, arg1, 0);
1661                 return -TARGET_EFAULT;
1662             }
1663             target_to_host_sigset(set, target_set);
1664         } else {
1665             set = NULL;
1666         }
1667 
1668         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1669                                    set, SIGSET_T_SIZE));
1670 
1671         if (!is_error(ret) && arg3) {
1672             if (time64) {
1673                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1674                     return -TARGET_EFAULT;
1675                 }
1676             } else {
1677                 if (host_to_target_timespec(arg3, timeout_ts)) {
1678                     return -TARGET_EFAULT;
1679                 }
1680             }
1681         }
1682         if (arg4) {
1683             unlock_user(target_set, arg4, 0);
1684         }
1685     } else {
1686           struct timespec ts, *pts;
1687 
1688           if (arg3 >= 0) {
1689               /* Convert ms to secs, ns */
1690               ts.tv_sec = arg3 / 1000;
1691               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1692               pts = &ts;
1693           } else {
1694               /* -ve poll() timeout means "infinite" */
1695               pts = NULL;
1696           }
1697           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1698     }
1699 
1700     if (!is_error(ret)) {
1701         for (i = 0; i < nfds; i++) {
1702             target_pfd[i].revents = tswap16(pfd[i].revents);
1703         }
1704     }
1705     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1706     return ret;
1707 }
1708 #endif
1709 
/*
 * Create a pipe with flags via the host's pipe2(), returning its raw
 * result; returns -ENOSYS when the host has no pipe2() support.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1718 
/*
 * Common implementation of the pipe and pipe2 syscalls.  pipedes is the
 * guest address of the two-int fd array; is_pipe2 distinguishes the two
 * entry points because several targets return the fds in registers for
 * the original pipe syscall but not for pipe2.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    /* Only go through pipe2() when flags were requested. */
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd is returned in a register; first is the return value. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Otherwise store both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1752 
1753 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1754                                               abi_ulong target_addr,
1755                                               socklen_t len)
1756 {
1757     struct target_ip_mreqn *target_smreqn;
1758 
1759     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1760     if (!target_smreqn)
1761         return -TARGET_EFAULT;
1762     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1763     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1764     if (len == sizeof(struct target_ip_mreqn))
1765         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1766     unlock_user(target_smreqn, target_addr, 0);
1767 
1768     return 0;
1769 }
1770 
/*
 * Copy and convert a guest sockaddr at @target_addr (length @len) into
 * the host buffer @addr.  Handles AF_UNIX path termination quirks and
 * byteswaps the AF_NETLINK/AF_PACKET fields.  An fd-specific translator
 * (if registered for @fd) takes precedence over the generic conversion.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte non-NUL but the byte after it is NUL: grow len
             * by one so the path copied to the host is terminated.
             * NOTE(review): cp[len] reads one byte past the locked
             * region of size len — relies on the guest page mapping
             * extending at least one byte further.
             */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        /* Netlink pid/groups are stored in guest byte order: swap them. */
        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
	struct target_sockaddr_ll *lladdr;

	lladdr = (struct target_sockaddr_ll *)addr;
	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1827 
1828 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1829                                                struct sockaddr *addr,
1830                                                socklen_t len)
1831 {
1832     struct target_sockaddr *target_saddr;
1833 
1834     if (len == 0) {
1835         return 0;
1836     }
1837     assert(addr);
1838 
1839     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1840     if (!target_saddr)
1841         return -TARGET_EFAULT;
1842     memcpy(target_saddr, addr, len);
1843     if (len >= offsetof(struct target_sockaddr, sa_family) +
1844         sizeof(target_saddr->sa_family)) {
1845         target_saddr->sa_family = tswap16(addr->sa_family);
1846     }
1847     if (addr->sa_family == AF_NETLINK &&
1848         len >= sizeof(struct target_sockaddr_nl)) {
1849         struct target_sockaddr_nl *target_nl =
1850                (struct target_sockaddr_nl *)target_saddr;
1851         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1852         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1853     } else if (addr->sa_family == AF_PACKET) {
1854         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1855         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1856         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1857     } else if (addr->sa_family == AF_INET6 &&
1858                len >= sizeof(struct target_sockaddr_in6)) {
1859         struct target_sockaddr_in6 *target_in6 =
1860                (struct target_sockaddr_in6 *)target_saddr;
1861         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1862     }
1863     unlock_user(target_saddr, target_addr, len);
1864 
1865     return 0;
1866 }
1867 
/*
 * Convert the ancillary-data (control message) chain of a guest msghdr
 * into host format, writing into the control buffer already attached to
 * @msgh.  Walks the guest and host cmsg chains in lockstep, converting
 * SCM_RIGHTS fd arrays and SCM_CREDENTIALS; other payload types are
 * copied verbatim with a warning.  On return msgh->msg_controllen is
 * the amount of host control space actually used.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* No room for even one guest header means no control data at all. */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length as declared by the guest header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* SOL_SOCKET may differ numerically between guest and host. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* SCM_RIGHTS payload is an array of ints: swap each fd. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Convert the pid/uid/gid triple field by field. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass the bytes through unconverted. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1953 
/*
 * Convert the ancillary-data chain of a host msghdr back into guest
 * format, writing to the guest control buffer of @target_msgh.
 * Mirrors the kernel's put_cmsg(): payloads may be truncated (reported
 * to the guest via MSG_CTRUNC) but headers never are.  Handles
 * SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS and the IP/IPv6 TTL and
 * RECVERR messages; anything else is byte-copied with a warning.  On
 * return target_msgh->msg_controllen holds the guest space consumed.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* Guest buffer too small for even one header: deliver nothing. */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length for this message. */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* SOL_SOCKET may differ numerically between host and guest. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* Guest timeval layout may differ from the host's. */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Truncate the payload if the guest buffer cannot hold it. */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* fd array: only as many fds as fit in tgt_len. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* A truncated timeval cannot be converted sensibly. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error: sock_extended_err plus the offending
                 * peer address, converted field by field.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 variant of IP_RECVERR above. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any extra space. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the guest space consumed by this message. */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2185 
2186 /* do_setsockopt() Must return target values and target errnos. */
2187 static abi_long do_setsockopt(int sockfd, int level, int optname,
2188                               abi_ulong optval_addr, socklen_t optlen)
2189 {
2190     abi_long ret;
2191     int val;
2192     struct ip_mreqn *ip_mreq;
2193     struct ip_mreq_source *ip_mreq_source;
2194 
2195     switch(level) {
2196     case SOL_TCP:
2197     case SOL_UDP:
2198         /* TCP and UDP options all take an 'int' value.  */
2199         if (optlen < sizeof(uint32_t))
2200             return -TARGET_EINVAL;
2201 
2202         if (get_user_u32(val, optval_addr))
2203             return -TARGET_EFAULT;
2204         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2205         break;
2206     case SOL_IP:
2207         switch(optname) {
2208         case IP_TOS:
2209         case IP_TTL:
2210         case IP_HDRINCL:
2211         case IP_ROUTER_ALERT:
2212         case IP_RECVOPTS:
2213         case IP_RETOPTS:
2214         case IP_PKTINFO:
2215         case IP_MTU_DISCOVER:
2216         case IP_RECVERR:
2217         case IP_RECVTTL:
2218         case IP_RECVTOS:
2219 #ifdef IP_FREEBIND
2220         case IP_FREEBIND:
2221 #endif
2222         case IP_MULTICAST_TTL:
2223         case IP_MULTICAST_LOOP:
2224             val = 0;
2225             if (optlen >= sizeof(uint32_t)) {
2226                 if (get_user_u32(val, optval_addr))
2227                     return -TARGET_EFAULT;
2228             } else if (optlen >= 1) {
2229                 if (get_user_u8(val, optval_addr))
2230                     return -TARGET_EFAULT;
2231             }
2232             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2233             break;
2234         case IP_ADD_MEMBERSHIP:
2235         case IP_DROP_MEMBERSHIP:
2236             if (optlen < sizeof (struct target_ip_mreq) ||
2237                 optlen > sizeof (struct target_ip_mreqn))
2238                 return -TARGET_EINVAL;
2239 
2240             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2241             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2242             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2243             break;
2244 
2245         case IP_BLOCK_SOURCE:
2246         case IP_UNBLOCK_SOURCE:
2247         case IP_ADD_SOURCE_MEMBERSHIP:
2248         case IP_DROP_SOURCE_MEMBERSHIP:
2249             if (optlen != sizeof (struct target_ip_mreq_source))
2250                 return -TARGET_EINVAL;
2251 
2252             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2253             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2254             unlock_user (ip_mreq_source, optval_addr, 0);
2255             break;
2256 
2257         default:
2258             goto unimplemented;
2259         }
2260         break;
2261     case SOL_IPV6:
2262         switch (optname) {
2263         case IPV6_MTU_DISCOVER:
2264         case IPV6_MTU:
2265         case IPV6_V6ONLY:
2266         case IPV6_RECVPKTINFO:
2267         case IPV6_UNICAST_HOPS:
2268         case IPV6_MULTICAST_HOPS:
2269         case IPV6_MULTICAST_LOOP:
2270         case IPV6_RECVERR:
2271         case IPV6_RECVHOPLIMIT:
2272         case IPV6_2292HOPLIMIT:
2273         case IPV6_CHECKSUM:
2274         case IPV6_ADDRFORM:
2275         case IPV6_2292PKTINFO:
2276         case IPV6_RECVTCLASS:
2277         case IPV6_RECVRTHDR:
2278         case IPV6_2292RTHDR:
2279         case IPV6_RECVHOPOPTS:
2280         case IPV6_2292HOPOPTS:
2281         case IPV6_RECVDSTOPTS:
2282         case IPV6_2292DSTOPTS:
2283         case IPV6_TCLASS:
2284         case IPV6_ADDR_PREFERENCES:
2285 #ifdef IPV6_RECVPATHMTU
2286         case IPV6_RECVPATHMTU:
2287 #endif
2288 #ifdef IPV6_TRANSPARENT
2289         case IPV6_TRANSPARENT:
2290 #endif
2291 #ifdef IPV6_FREEBIND
2292         case IPV6_FREEBIND:
2293 #endif
2294 #ifdef IPV6_RECVORIGDSTADDR
2295         case IPV6_RECVORIGDSTADDR:
2296 #endif
2297             val = 0;
2298             if (optlen < sizeof(uint32_t)) {
2299                 return -TARGET_EINVAL;
2300             }
2301             if (get_user_u32(val, optval_addr)) {
2302                 return -TARGET_EFAULT;
2303             }
2304             ret = get_errno(setsockopt(sockfd, level, optname,
2305                                        &val, sizeof(val)));
2306             break;
2307         case IPV6_PKTINFO:
2308         {
2309             struct in6_pktinfo pki;
2310 
2311             if (optlen < sizeof(pki)) {
2312                 return -TARGET_EINVAL;
2313             }
2314 
2315             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2316                 return -TARGET_EFAULT;
2317             }
2318 
2319             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2320 
2321             ret = get_errno(setsockopt(sockfd, level, optname,
2322                                        &pki, sizeof(pki)));
2323             break;
2324         }
2325         case IPV6_ADD_MEMBERSHIP:
2326         case IPV6_DROP_MEMBERSHIP:
2327         {
2328             struct ipv6_mreq ipv6mreq;
2329 
2330             if (optlen < sizeof(ipv6mreq)) {
2331                 return -TARGET_EINVAL;
2332             }
2333 
2334             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2335                 return -TARGET_EFAULT;
2336             }
2337 
2338             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2339 
2340             ret = get_errno(setsockopt(sockfd, level, optname,
2341                                        &ipv6mreq, sizeof(ipv6mreq)));
2342             break;
2343         }
2344         default:
2345             goto unimplemented;
2346         }
2347         break;
2348     case SOL_ICMPV6:
2349         switch (optname) {
2350         case ICMPV6_FILTER:
2351         {
2352             struct icmp6_filter icmp6f;
2353 
2354             if (optlen > sizeof(icmp6f)) {
2355                 optlen = sizeof(icmp6f);
2356             }
2357 
2358             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2359                 return -TARGET_EFAULT;
2360             }
2361 
2362             for (val = 0; val < 8; val++) {
2363                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2364             }
2365 
2366             ret = get_errno(setsockopt(sockfd, level, optname,
2367                                        &icmp6f, optlen));
2368             break;
2369         }
2370         default:
2371             goto unimplemented;
2372         }
2373         break;
2374     case SOL_RAW:
2375         switch (optname) {
2376         case ICMP_FILTER:
2377         case IPV6_CHECKSUM:
2378             /* those take an u32 value */
2379             if (optlen < sizeof(uint32_t)) {
2380                 return -TARGET_EINVAL;
2381             }
2382 
2383             if (get_user_u32(val, optval_addr)) {
2384                 return -TARGET_EFAULT;
2385             }
2386             ret = get_errno(setsockopt(sockfd, level, optname,
2387                                        &val, sizeof(val)));
2388             break;
2389 
2390         default:
2391             goto unimplemented;
2392         }
2393         break;
2394 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2395     case SOL_ALG:
2396         switch (optname) {
2397         case ALG_SET_KEY:
2398         {
2399             char *alg_key = g_malloc(optlen);
2400 
2401             if (!alg_key) {
2402                 return -TARGET_ENOMEM;
2403             }
2404             if (copy_from_user(alg_key, optval_addr, optlen)) {
2405                 g_free(alg_key);
2406                 return -TARGET_EFAULT;
2407             }
2408             ret = get_errno(setsockopt(sockfd, level, optname,
2409                                        alg_key, optlen));
2410             g_free(alg_key);
2411             break;
2412         }
2413         case ALG_SET_AEAD_AUTHSIZE:
2414         {
2415             ret = get_errno(setsockopt(sockfd, level, optname,
2416                                        NULL, optlen));
2417             break;
2418         }
2419         default:
2420             goto unimplemented;
2421         }
2422         break;
2423 #endif
2424     case TARGET_SOL_SOCKET:
2425         switch (optname) {
2426         case TARGET_SO_RCVTIMEO:
2427         {
2428                 struct timeval tv;
2429 
2430                 optname = SO_RCVTIMEO;
2431 
2432 set_timeout:
2433                 if (optlen != sizeof(struct target_timeval)) {
2434                     return -TARGET_EINVAL;
2435                 }
2436 
2437                 if (copy_from_user_timeval(&tv, optval_addr)) {
2438                     return -TARGET_EFAULT;
2439                 }
2440 
2441                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2442                                 &tv, sizeof(tv)));
2443                 return ret;
2444         }
2445         case TARGET_SO_SNDTIMEO:
2446                 optname = SO_SNDTIMEO;
2447                 goto set_timeout;
2448         case TARGET_SO_ATTACH_FILTER:
2449         {
2450                 struct target_sock_fprog *tfprog;
2451                 struct target_sock_filter *tfilter;
2452                 struct sock_fprog fprog;
2453                 struct sock_filter *filter;
2454                 int i;
2455 
2456                 if (optlen != sizeof(*tfprog)) {
2457                     return -TARGET_EINVAL;
2458                 }
2459                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2460                     return -TARGET_EFAULT;
2461                 }
2462                 if (!lock_user_struct(VERIFY_READ, tfilter,
2463                                       tswapal(tfprog->filter), 0)) {
2464                     unlock_user_struct(tfprog, optval_addr, 1);
2465                     return -TARGET_EFAULT;
2466                 }
2467 
2468                 fprog.len = tswap16(tfprog->len);
2469                 filter = g_try_new(struct sock_filter, fprog.len);
2470                 if (filter == NULL) {
2471                     unlock_user_struct(tfilter, tfprog->filter, 1);
2472                     unlock_user_struct(tfprog, optval_addr, 1);
2473                     return -TARGET_ENOMEM;
2474                 }
2475                 for (i = 0; i < fprog.len; i++) {
2476                     filter[i].code = tswap16(tfilter[i].code);
2477                     filter[i].jt = tfilter[i].jt;
2478                     filter[i].jf = tfilter[i].jf;
2479                     filter[i].k = tswap32(tfilter[i].k);
2480                 }
2481                 fprog.filter = filter;
2482 
2483                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2484                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2485                 g_free(filter);
2486 
2487                 unlock_user_struct(tfilter, tfprog->filter, 1);
2488                 unlock_user_struct(tfprog, optval_addr, 1);
2489                 return ret;
2490         }
2491 	case TARGET_SO_BINDTODEVICE:
2492 	{
2493 		char *dev_ifname, *addr_ifname;
2494 
2495 		if (optlen > IFNAMSIZ - 1) {
2496 		    optlen = IFNAMSIZ - 1;
2497 		}
2498 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2499 		if (!dev_ifname) {
2500 		    return -TARGET_EFAULT;
2501 		}
2502 		optname = SO_BINDTODEVICE;
2503 		addr_ifname = alloca(IFNAMSIZ);
2504 		memcpy(addr_ifname, dev_ifname, optlen);
2505 		addr_ifname[optlen] = 0;
2506 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2507                                            addr_ifname, optlen));
2508 		unlock_user (dev_ifname, optval_addr, 0);
2509 		return ret;
2510 	}
2511         case TARGET_SO_LINGER:
2512         {
2513                 struct linger lg;
2514                 struct target_linger *tlg;
2515 
2516                 if (optlen != sizeof(struct target_linger)) {
2517                     return -TARGET_EINVAL;
2518                 }
2519                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2520                     return -TARGET_EFAULT;
2521                 }
2522                 __get_user(lg.l_onoff, &tlg->l_onoff);
2523                 __get_user(lg.l_linger, &tlg->l_linger);
2524                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2525                                 &lg, sizeof(lg)));
2526                 unlock_user_struct(tlg, optval_addr, 0);
2527                 return ret;
2528         }
2529             /* Options with 'int' argument.  */
2530         case TARGET_SO_DEBUG:
2531 		optname = SO_DEBUG;
2532 		break;
2533         case TARGET_SO_REUSEADDR:
2534 		optname = SO_REUSEADDR;
2535 		break;
2536 #ifdef SO_REUSEPORT
2537         case TARGET_SO_REUSEPORT:
2538                 optname = SO_REUSEPORT;
2539                 break;
2540 #endif
2541         case TARGET_SO_TYPE:
2542 		optname = SO_TYPE;
2543 		break;
2544         case TARGET_SO_ERROR:
2545 		optname = SO_ERROR;
2546 		break;
2547         case TARGET_SO_DONTROUTE:
2548 		optname = SO_DONTROUTE;
2549 		break;
2550         case TARGET_SO_BROADCAST:
2551 		optname = SO_BROADCAST;
2552 		break;
2553         case TARGET_SO_SNDBUF:
2554 		optname = SO_SNDBUF;
2555 		break;
2556         case TARGET_SO_SNDBUFFORCE:
2557                 optname = SO_SNDBUFFORCE;
2558                 break;
2559         case TARGET_SO_RCVBUF:
2560 		optname = SO_RCVBUF;
2561 		break;
2562         case TARGET_SO_RCVBUFFORCE:
2563                 optname = SO_RCVBUFFORCE;
2564                 break;
2565         case TARGET_SO_KEEPALIVE:
2566 		optname = SO_KEEPALIVE;
2567 		break;
2568         case TARGET_SO_OOBINLINE:
2569 		optname = SO_OOBINLINE;
2570 		break;
2571         case TARGET_SO_NO_CHECK:
2572 		optname = SO_NO_CHECK;
2573 		break;
2574         case TARGET_SO_PRIORITY:
2575 		optname = SO_PRIORITY;
2576 		break;
2577 #ifdef SO_BSDCOMPAT
2578         case TARGET_SO_BSDCOMPAT:
2579 		optname = SO_BSDCOMPAT;
2580 		break;
2581 #endif
2582         case TARGET_SO_PASSCRED:
2583 		optname = SO_PASSCRED;
2584 		break;
2585         case TARGET_SO_PASSSEC:
2586                 optname = SO_PASSSEC;
2587                 break;
2588         case TARGET_SO_TIMESTAMP:
2589 		optname = SO_TIMESTAMP;
2590 		break;
2591         case TARGET_SO_RCVLOWAT:
2592 		optname = SO_RCVLOWAT;
2593 		break;
2594         default:
2595             goto unimplemented;
2596         }
2597 	if (optlen < sizeof(uint32_t))
2598             return -TARGET_EINVAL;
2599 
2600 	if (get_user_u32(val, optval_addr))
2601             return -TARGET_EFAULT;
2602 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2603         break;
2604 #ifdef SOL_NETLINK
2605     case SOL_NETLINK:
2606         switch (optname) {
2607         case NETLINK_PKTINFO:
2608         case NETLINK_ADD_MEMBERSHIP:
2609         case NETLINK_DROP_MEMBERSHIP:
2610         case NETLINK_BROADCAST_ERROR:
2611         case NETLINK_NO_ENOBUFS:
2612 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2613         case NETLINK_LISTEN_ALL_NSID:
2614         case NETLINK_CAP_ACK:
2615 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2616 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2617         case NETLINK_EXT_ACK:
2618 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2619 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2620         case NETLINK_GET_STRICT_CHK:
2621 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2622             break;
2623         default:
2624             goto unimplemented;
2625         }
2626         val = 0;
2627         if (optlen < sizeof(uint32_t)) {
2628             return -TARGET_EINVAL;
2629         }
2630         if (get_user_u32(val, optval_addr)) {
2631             return -TARGET_EFAULT;
2632         }
2633         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2634                                    sizeof(val)));
2635         break;
2636 #endif /* SOL_NETLINK */
2637     default:
2638     unimplemented:
2639         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2640                       level, optname);
2641         ret = -TARGET_ENOPROTOOPT;
2642     }
2643     return ret;
2644 }
2645 
2646 /* do_getsockopt() Must return target values and target errnos. */
2647 static abi_long do_getsockopt(int sockfd, int level, int optname,
2648                               abi_ulong optval_addr, abi_ulong optlen)
2649 {
2650     abi_long ret;
2651     int len, val;
2652     socklen_t lv;
2653 
2654     switch(level) {
2655     case TARGET_SOL_SOCKET:
2656         level = SOL_SOCKET;
2657         switch (optname) {
2658         /* These don't just return a single integer */
2659         case TARGET_SO_PEERNAME:
2660             goto unimplemented;
2661         case TARGET_SO_RCVTIMEO: {
2662             struct timeval tv;
2663             socklen_t tvlen;
2664 
2665             optname = SO_RCVTIMEO;
2666 
2667 get_timeout:
2668             if (get_user_u32(len, optlen)) {
2669                 return -TARGET_EFAULT;
2670             }
2671             if (len < 0) {
2672                 return -TARGET_EINVAL;
2673             }
2674 
2675             tvlen = sizeof(tv);
2676             ret = get_errno(getsockopt(sockfd, level, optname,
2677                                        &tv, &tvlen));
2678             if (ret < 0) {
2679                 return ret;
2680             }
2681             if (len > sizeof(struct target_timeval)) {
2682                 len = sizeof(struct target_timeval);
2683             }
2684             if (copy_to_user_timeval(optval_addr, &tv)) {
2685                 return -TARGET_EFAULT;
2686             }
2687             if (put_user_u32(len, optlen)) {
2688                 return -TARGET_EFAULT;
2689             }
2690             break;
2691         }
2692         case TARGET_SO_SNDTIMEO:
2693             optname = SO_SNDTIMEO;
2694             goto get_timeout;
2695         case TARGET_SO_PEERCRED: {
2696             struct ucred cr;
2697             socklen_t crlen;
2698             struct target_ucred *tcr;
2699 
2700             if (get_user_u32(len, optlen)) {
2701                 return -TARGET_EFAULT;
2702             }
2703             if (len < 0) {
2704                 return -TARGET_EINVAL;
2705             }
2706 
2707             crlen = sizeof(cr);
2708             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2709                                        &cr, &crlen));
2710             if (ret < 0) {
2711                 return ret;
2712             }
2713             if (len > crlen) {
2714                 len = crlen;
2715             }
2716             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2717                 return -TARGET_EFAULT;
2718             }
2719             __put_user(cr.pid, &tcr->pid);
2720             __put_user(cr.uid, &tcr->uid);
2721             __put_user(cr.gid, &tcr->gid);
2722             unlock_user_struct(tcr, optval_addr, 1);
2723             if (put_user_u32(len, optlen)) {
2724                 return -TARGET_EFAULT;
2725             }
2726             break;
2727         }
2728         case TARGET_SO_PEERSEC: {
2729             char *name;
2730 
2731             if (get_user_u32(len, optlen)) {
2732                 return -TARGET_EFAULT;
2733             }
2734             if (len < 0) {
2735                 return -TARGET_EINVAL;
2736             }
2737             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2738             if (!name) {
2739                 return -TARGET_EFAULT;
2740             }
2741             lv = len;
2742             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2743                                        name, &lv));
2744             if (put_user_u32(lv, optlen)) {
2745                 ret = -TARGET_EFAULT;
2746             }
2747             unlock_user(name, optval_addr, lv);
2748             break;
2749         }
2750         case TARGET_SO_LINGER:
2751         {
2752             struct linger lg;
2753             socklen_t lglen;
2754             struct target_linger *tlg;
2755 
2756             if (get_user_u32(len, optlen)) {
2757                 return -TARGET_EFAULT;
2758             }
2759             if (len < 0) {
2760                 return -TARGET_EINVAL;
2761             }
2762 
2763             lglen = sizeof(lg);
2764             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2765                                        &lg, &lglen));
2766             if (ret < 0) {
2767                 return ret;
2768             }
2769             if (len > lglen) {
2770                 len = lglen;
2771             }
2772             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2773                 return -TARGET_EFAULT;
2774             }
2775             __put_user(lg.l_onoff, &tlg->l_onoff);
2776             __put_user(lg.l_linger, &tlg->l_linger);
2777             unlock_user_struct(tlg, optval_addr, 1);
2778             if (put_user_u32(len, optlen)) {
2779                 return -TARGET_EFAULT;
2780             }
2781             break;
2782         }
2783         /* Options with 'int' argument.  */
2784         case TARGET_SO_DEBUG:
2785             optname = SO_DEBUG;
2786             goto int_case;
2787         case TARGET_SO_REUSEADDR:
2788             optname = SO_REUSEADDR;
2789             goto int_case;
2790 #ifdef SO_REUSEPORT
2791         case TARGET_SO_REUSEPORT:
2792             optname = SO_REUSEPORT;
2793             goto int_case;
2794 #endif
2795         case TARGET_SO_TYPE:
2796             optname = SO_TYPE;
2797             goto int_case;
2798         case TARGET_SO_ERROR:
2799             optname = SO_ERROR;
2800             goto int_case;
2801         case TARGET_SO_DONTROUTE:
2802             optname = SO_DONTROUTE;
2803             goto int_case;
2804         case TARGET_SO_BROADCAST:
2805             optname = SO_BROADCAST;
2806             goto int_case;
2807         case TARGET_SO_SNDBUF:
2808             optname = SO_SNDBUF;
2809             goto int_case;
2810         case TARGET_SO_RCVBUF:
2811             optname = SO_RCVBUF;
2812             goto int_case;
2813         case TARGET_SO_KEEPALIVE:
2814             optname = SO_KEEPALIVE;
2815             goto int_case;
2816         case TARGET_SO_OOBINLINE:
2817             optname = SO_OOBINLINE;
2818             goto int_case;
2819         case TARGET_SO_NO_CHECK:
2820             optname = SO_NO_CHECK;
2821             goto int_case;
2822         case TARGET_SO_PRIORITY:
2823             optname = SO_PRIORITY;
2824             goto int_case;
2825 #ifdef SO_BSDCOMPAT
2826         case TARGET_SO_BSDCOMPAT:
2827             optname = SO_BSDCOMPAT;
2828             goto int_case;
2829 #endif
2830         case TARGET_SO_PASSCRED:
2831             optname = SO_PASSCRED;
2832             goto int_case;
2833         case TARGET_SO_TIMESTAMP:
2834             optname = SO_TIMESTAMP;
2835             goto int_case;
2836         case TARGET_SO_RCVLOWAT:
2837             optname = SO_RCVLOWAT;
2838             goto int_case;
2839         case TARGET_SO_ACCEPTCONN:
2840             optname = SO_ACCEPTCONN;
2841             goto int_case;
2842         case TARGET_SO_PROTOCOL:
2843             optname = SO_PROTOCOL;
2844             goto int_case;
2845         case TARGET_SO_DOMAIN:
2846             optname = SO_DOMAIN;
2847             goto int_case;
2848         default:
2849             goto int_case;
2850         }
2851         break;
2852     case SOL_TCP:
2853     case SOL_UDP:
2854         /* TCP and UDP options all take an 'int' value.  */
2855     int_case:
2856         if (get_user_u32(len, optlen))
2857             return -TARGET_EFAULT;
2858         if (len < 0)
2859             return -TARGET_EINVAL;
2860         lv = sizeof(lv);
2861         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2862         if (ret < 0)
2863             return ret;
2864         if (optname == SO_TYPE) {
2865             val = host_to_target_sock_type(val);
2866         }
2867         if (len > lv)
2868             len = lv;
2869         if (len == 4) {
2870             if (put_user_u32(val, optval_addr))
2871                 return -TARGET_EFAULT;
2872         } else {
2873             if (put_user_u8(val, optval_addr))
2874                 return -TARGET_EFAULT;
2875         }
2876         if (put_user_u32(len, optlen))
2877             return -TARGET_EFAULT;
2878         break;
2879     case SOL_IP:
2880         switch(optname) {
2881         case IP_TOS:
2882         case IP_TTL:
2883         case IP_HDRINCL:
2884         case IP_ROUTER_ALERT:
2885         case IP_RECVOPTS:
2886         case IP_RETOPTS:
2887         case IP_PKTINFO:
2888         case IP_MTU_DISCOVER:
2889         case IP_RECVERR:
2890         case IP_RECVTOS:
2891 #ifdef IP_FREEBIND
2892         case IP_FREEBIND:
2893 #endif
2894         case IP_MULTICAST_TTL:
2895         case IP_MULTICAST_LOOP:
2896             if (get_user_u32(len, optlen))
2897                 return -TARGET_EFAULT;
2898             if (len < 0)
2899                 return -TARGET_EINVAL;
2900             lv = sizeof(lv);
2901             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2902             if (ret < 0)
2903                 return ret;
2904             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2905                 len = 1;
2906                 if (put_user_u32(len, optlen)
2907                     || put_user_u8(val, optval_addr))
2908                     return -TARGET_EFAULT;
2909             } else {
2910                 if (len > sizeof(int))
2911                     len = sizeof(int);
2912                 if (put_user_u32(len, optlen)
2913                     || put_user_u32(val, optval_addr))
2914                     return -TARGET_EFAULT;
2915             }
2916             break;
2917         default:
2918             ret = -TARGET_ENOPROTOOPT;
2919             break;
2920         }
2921         break;
2922     case SOL_IPV6:
2923         switch (optname) {
2924         case IPV6_MTU_DISCOVER:
2925         case IPV6_MTU:
2926         case IPV6_V6ONLY:
2927         case IPV6_RECVPKTINFO:
2928         case IPV6_UNICAST_HOPS:
2929         case IPV6_MULTICAST_HOPS:
2930         case IPV6_MULTICAST_LOOP:
2931         case IPV6_RECVERR:
2932         case IPV6_RECVHOPLIMIT:
2933         case IPV6_2292HOPLIMIT:
2934         case IPV6_CHECKSUM:
2935         case IPV6_ADDRFORM:
2936         case IPV6_2292PKTINFO:
2937         case IPV6_RECVTCLASS:
2938         case IPV6_RECVRTHDR:
2939         case IPV6_2292RTHDR:
2940         case IPV6_RECVHOPOPTS:
2941         case IPV6_2292HOPOPTS:
2942         case IPV6_RECVDSTOPTS:
2943         case IPV6_2292DSTOPTS:
2944         case IPV6_TCLASS:
2945         case IPV6_ADDR_PREFERENCES:
2946 #ifdef IPV6_RECVPATHMTU
2947         case IPV6_RECVPATHMTU:
2948 #endif
2949 #ifdef IPV6_TRANSPARENT
2950         case IPV6_TRANSPARENT:
2951 #endif
2952 #ifdef IPV6_FREEBIND
2953         case IPV6_FREEBIND:
2954 #endif
2955 #ifdef IPV6_RECVORIGDSTADDR
2956         case IPV6_RECVORIGDSTADDR:
2957 #endif
2958             if (get_user_u32(len, optlen))
2959                 return -TARGET_EFAULT;
2960             if (len < 0)
2961                 return -TARGET_EINVAL;
2962             lv = sizeof(lv);
2963             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2964             if (ret < 0)
2965                 return ret;
2966             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2967                 len = 1;
2968                 if (put_user_u32(len, optlen)
2969                     || put_user_u8(val, optval_addr))
2970                     return -TARGET_EFAULT;
2971             } else {
2972                 if (len > sizeof(int))
2973                     len = sizeof(int);
2974                 if (put_user_u32(len, optlen)
2975                     || put_user_u32(val, optval_addr))
2976                     return -TARGET_EFAULT;
2977             }
2978             break;
2979         default:
2980             ret = -TARGET_ENOPROTOOPT;
2981             break;
2982         }
2983         break;
2984 #ifdef SOL_NETLINK
2985     case SOL_NETLINK:
2986         switch (optname) {
2987         case NETLINK_PKTINFO:
2988         case NETLINK_BROADCAST_ERROR:
2989         case NETLINK_NO_ENOBUFS:
2990 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2991         case NETLINK_LISTEN_ALL_NSID:
2992         case NETLINK_CAP_ACK:
2993 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2994 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2995         case NETLINK_EXT_ACK:
2996 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2997 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2998         case NETLINK_GET_STRICT_CHK:
2999 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
3000             if (get_user_u32(len, optlen)) {
3001                 return -TARGET_EFAULT;
3002             }
3003             if (len != sizeof(val)) {
3004                 return -TARGET_EINVAL;
3005             }
3006             lv = len;
3007             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3008             if (ret < 0) {
3009                 return ret;
3010             }
3011             if (put_user_u32(lv, optlen)
3012                 || put_user_u32(val, optval_addr)) {
3013                 return -TARGET_EFAULT;
3014             }
3015             break;
3016 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3017         case NETLINK_LIST_MEMBERSHIPS:
3018         {
3019             uint32_t *results;
3020             int i;
3021             if (get_user_u32(len, optlen)) {
3022                 return -TARGET_EFAULT;
3023             }
3024             if (len < 0) {
3025                 return -TARGET_EINVAL;
3026             }
3027             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3028             if (!results) {
3029                 return -TARGET_EFAULT;
3030             }
3031             lv = len;
3032             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3033             if (ret < 0) {
3034                 unlock_user(results, optval_addr, 0);
3035                 return ret;
3036             }
3037             /* swap host endianess to target endianess. */
3038             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3039                 results[i] = tswap32(results[i]);
3040             }
3041             if (put_user_u32(lv, optlen)) {
3042                 return -TARGET_EFAULT;
3043             }
3044             unlock_user(results, optval_addr, 0);
3045             break;
3046         }
3047 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3048         default:
3049             goto unimplemented;
3050         }
3051         break;
3052 #endif /* SOL_NETLINK */
3053     default:
3054     unimplemented:
3055         qemu_log_mask(LOG_UNIMP,
3056                       "getsockopt level=%d optname=%d not yet supported\n",
3057                       level, optname);
3058         ret = -TARGET_EOPNOTSUPP;
3059         break;
3060     }
3061     return ret;
3062 }
3063 
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* Shift 'thigh' up in two half-width steps: shifting a value by the
     * full width of its type (e.g. by 64 when TARGET_LONG_BITS == 64) is
     * undefined behaviour in C, so the shift is split in halves which
     * together move thigh above the tlow bits for any target word size. */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    /* Low host word is a plain truncation of the 64-bit offset. */
    *hlow = off;
    /* Same two-step trick for the downshift: on a 64-bit host this
     * yields 0 without ever shifting by the full 64-bit width. */
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
3080 
/* Build a host struct iovec array from a guest iovec array at
 * target_addr, locking each guest buffer into host memory.
 *
 * 'type' is VERIFY_READ or VERIFY_WRITE (direction of the coming I/O),
 * 'copy' requests copy-in of buffer contents for write-style operations.
 * Returns the host vector (to be released with unlock_iovec()), or NULL
 * with errno set on failure.  A count of 0 returns NULL with errno == 0,
 * which callers must distinguish from the error case.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    /* Lock the guest-side iovec array itself so we can read the
     * base/len pairs out of it. */
    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one address is bad, all following entries get zero
             * length so the host syscall stops at the partial region. */
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total so it never exceeds max_len, the
             * largest transfer the kernel will perform. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every guest buffer locked so far (entry i itself
     * was never successfully locked). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3168 
3169 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3170                          abi_ulong count, int copy)
3171 {
3172     struct target_iovec *target_vec;
3173     int i;
3174 
3175     target_vec = lock_user(VERIFY_READ, target_addr,
3176                            count * sizeof(struct target_iovec), 1);
3177     if (target_vec) {
3178         for (i = 0; i < count; i++) {
3179             abi_ulong base = tswapal(target_vec[i].iov_base);
3180             abi_long len = tswapal(target_vec[i].iov_len);
3181             if (len < 0) {
3182                 break;
3183             }
3184             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3185         }
3186         unlock_user(target_vec, target_addr, 0);
3187     }
3188 
3189     g_free(vec);
3190 }
3191 
3192 static inline int target_to_host_sock_type(int *type)
3193 {
3194     int host_type = 0;
3195     int target_type = *type;
3196 
3197     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3198     case TARGET_SOCK_DGRAM:
3199         host_type = SOCK_DGRAM;
3200         break;
3201     case TARGET_SOCK_STREAM:
3202         host_type = SOCK_STREAM;
3203         break;
3204     default:
3205         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3206         break;
3207     }
3208     if (target_type & TARGET_SOCK_CLOEXEC) {
3209 #if defined(SOCK_CLOEXEC)
3210         host_type |= SOCK_CLOEXEC;
3211 #else
3212         return -TARGET_EINVAL;
3213 #endif
3214     }
3215     if (target_type & TARGET_SOCK_NONBLOCK) {
3216 #if defined(SOCK_NONBLOCK)
3217         host_type |= SOCK_NONBLOCK;
3218 #elif !defined(O_NONBLOCK)
3219         return -TARGET_EINVAL;
3220 #endif
3221     }
3222     *type = host_type;
3223     return 0;
3224 }
3225 
3226 /* Try to emulate socket type flags after socket creation.  */
3227 static int sock_flags_fixup(int fd, int target_type)
3228 {
3229 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3230     if (target_type & TARGET_SOCK_NONBLOCK) {
3231         int flags = fcntl(fd, F_GETFL);
3232         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3233             close(fd);
3234             return -TARGET_EINVAL;
3235         }
3236     }
3237 #endif
3238     return fd;
3239 }
3240 
/* do_socket() Must return target values and target errnos.
 *
 * Emulates socket(2): converts the guest socket type to host form,
 * restricts netlink sockets to the protocols QEMU can translate, and
 * registers a data translator (fd_trans) for socket families whose
 * message payloads need byte-order or layout conversion.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    /* Keep the original guest type: sock_flags_fixup() needs the guest
     * flag bits even after 'type' is rewritten to host values. */
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with a translator (or none needed) are
     * allowed through; anything else would pass untranslated messages. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* AF_PACKET (and legacy SOCK_PACKET) protocols are ethertypes in
     * network byte order; swap from guest to host representation. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* unreachable: filtered by the protocol check above */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3294 
3295 /* do_bind() Must return target values and target errnos. */
3296 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3297                         socklen_t addrlen)
3298 {
3299     void *addr;
3300     abi_long ret;
3301 
3302     if ((int)addrlen < 0) {
3303         return -TARGET_EINVAL;
3304     }
3305 
3306     addr = alloca(addrlen+1);
3307 
3308     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3309     if (ret)
3310         return ret;
3311 
3312     return get_errno(bind(sockfd, addr, addrlen));
3313 }
3314 
3315 /* do_connect() Must return target values and target errnos. */
3316 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3317                            socklen_t addrlen)
3318 {
3319     void *addr;
3320     abi_long ret;
3321 
3322     if ((int)addrlen < 0) {
3323         return -TARGET_EINVAL;
3324     }
3325 
3326     addr = alloca(addrlen+1);
3327 
3328     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3329     if (ret)
3330         return ret;
3331 
3332     return get_errno(safe_connect(sockfd, addr, addrlen));
3333 }
3334 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * guest msghdr.  Builds a host struct msghdr (address, iovec, control
 * data), runs the host syscall, and for receives converts the results
 * back into the guest msghdr.  'send' selects direction.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        /* One spare byte for the sockaddr conversion, as in do_bind(). */
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the guest control buffer size: host cmsg headers may be
     * larger than the target's, so converted data can grow. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate the payload in a scratch copy so the guest's
             * buffer is left untouched.  Only the first iovec entry is
             * translated here. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            /* No fd-specific translator: convert control messages only. */
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 marks the deliberately-bad address from the
                 * EFAULT case above; never convert that back. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* Success: report the byte count from the host syscall. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3441 
3442 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3443                                int flags, int send)
3444 {
3445     abi_long ret;
3446     struct target_msghdr *msgp;
3447 
3448     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3449                           msgp,
3450                           target_msg,
3451                           send ? 1 : 0)) {
3452         return -TARGET_EFAULT;
3453     }
3454     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3455     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3456     return ret;
3457 }
3458 
3459 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3460  * so it might not have this *mmsg-specific flag either.
3461  */
3462 #ifndef MSG_WAITFORONE
3463 #define MSG_WAITFORONE 0x10000
3464 #endif
3465 
/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * guest mmsghdr array.  Like the kernel, reports partial success: if at
 * least one datagram was transferred, the count is returned and any
 * later error is dropped (the guest sees it on the next call). */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* The kernel silently clamps the vector length the same way. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Per-message byte count goes into the guest's msg_len field. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed (i of them). */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3505 
/*
 * do_accept4(): emulate accept4(2) (and accept(2) with flags == 0).
 * Must return target values and target errnos.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* SOCK_CLOEXEC/SOCK_NONBLOCK values may differ between targets. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL addr: the caller doesn't want the peer address back. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): addrlen is guest-controlled (only checked >= 0), so
     * a huge value lands on the stack here — consider an upper bound. */
    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy back at most the space the guest provided. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
3545 
3546 /* do_getpeername() Must return target values and target errnos. */
3547 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3548                                abi_ulong target_addrlen_addr)
3549 {
3550     socklen_t addrlen, ret_addrlen;
3551     void *addr;
3552     abi_long ret;
3553 
3554     if (get_user_u32(addrlen, target_addrlen_addr))
3555         return -TARGET_EFAULT;
3556 
3557     if ((int)addrlen < 0) {
3558         return -TARGET_EINVAL;
3559     }
3560 
3561     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3562         return -TARGET_EFAULT;
3563     }
3564 
3565     addr = alloca(addrlen);
3566 
3567     ret_addrlen = addrlen;
3568     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3569     if (!is_error(ret)) {
3570         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3571         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3572             ret = -TARGET_EFAULT;
3573         }
3574     }
3575     return ret;
3576 }
3577 
3578 /* do_getsockname() Must return target values and target errnos. */
3579 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3580                                abi_ulong target_addrlen_addr)
3581 {
3582     socklen_t addrlen, ret_addrlen;
3583     void *addr;
3584     abi_long ret;
3585 
3586     if (get_user_u32(addrlen, target_addrlen_addr))
3587         return -TARGET_EFAULT;
3588 
3589     if ((int)addrlen < 0) {
3590         return -TARGET_EINVAL;
3591     }
3592 
3593     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3594         return -TARGET_EFAULT;
3595     }
3596 
3597     addr = alloca(addrlen);
3598 
3599     ret_addrlen = addrlen;
3600     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3601     if (!is_error(ret)) {
3602         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3603         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3604             ret = -TARGET_EFAULT;
3605         }
3606     }
3607     return ret;
3608 }
3609 
3610 /* do_socketpair() Must return target values and target errnos. */
3611 static abi_long do_socketpair(int domain, int type, int protocol,
3612                               abi_ulong target_tab_addr)
3613 {
3614     int tab[2];
3615     abi_long ret;
3616 
3617     target_to_host_sock_type(&type);
3618 
3619     ret = get_errno(socketpair(domain, type, protocol, tab));
3620     if (!is_error(ret)) {
3621         if (put_user_s32(tab[0], target_tab_addr)
3622             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3623             ret = -TARGET_EFAULT;
3624     }
3625     return ret;
3626 }
3627 
/*
 * do_sendto(): emulate sendto(2) (and send(2) when target_addr == 0).
 * Must return target values and target errnos.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* The fd translator rewrites the payload in place; work on a
         * heap copy so the guest's buffer is left untouched. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1: extra byte for target_to_host_sockaddr() — presumably for
         * NUL-terminating AF_UNIX paths; TODO confirm. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* If we substituted the heap copy above, free it and restore the
     * original locked pointer so unlock_user() gets what lock_user()
     * returned. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3671 
/*
 * do_recvfrom(): emulate recvfrom(2) (and recv(2) when target_addr == 0).
 * Must return target values and target errnos.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Let any registered fd translator rewrite the received
             * bytes for the guest before they are copied back. */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received data back to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
/* Error paths jump here: unlock with length 0 so nothing (possibly
 * stale) is copied back to the guest buffer. */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3727 
#ifdef TARGET_NR_socketcall
/*
 * do_socketcall(): demultiplex the legacy socketcall(2) syscall.
 * Reads nargs[num] abi_long arguments from guest memory at @vptr and
 * dispatches to the matching do_* helper.
 * Must return target values and target errnos.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args (defensive: all entries are <= 6) */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3820 
#define N_SHM_REGIONS	32

/* Fixed-size table of guest shared-memory mappings; an entry records
 * the guest start address and size of one attachment while in_use is
 * set. */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];
3828 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct; targets with a different layout
 * define TARGET_SEMID64_DS and provide their own. */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  /* On 32-bit ABIs the time fields are padded to 64 bits. */
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3847 
/*
 * Convert a guest ipc_perm to host format.  @target_addr points at the
 * enclosing guest semid64_ds (the ipc_perm sits at its head); only the
 * sem_perm member is read.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3875 
/*
 * Convert a host ipc_perm back to guest format.  @target_addr points at
 * the enclosing guest semid64_ds; only the sem_perm member is written.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3903 
3904 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3905                                                abi_ulong target_addr)
3906 {
3907     struct target_semid64_ds *target_sd;
3908 
3909     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3910         return -TARGET_EFAULT;
3911     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3912         return -TARGET_EFAULT;
3913     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3914     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3915     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3916     unlock_user_struct(target_sd, target_addr, 0);
3917     return 0;
3918 }
3919 
3920 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3921                                                struct semid_ds *host_sd)
3922 {
3923     struct target_semid64_ds *target_sd;
3924 
3925     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3926         return -TARGET_EFAULT;
3927     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3928         return -TARGET_EFAULT;
3929     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3930     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3931     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3932     unlock_user_struct(target_sd, target_addr, 1);
3933     return 0;
3934 }
3935 
/* Guest layout of struct seminfo (returned by semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3948 
/*
 * Copy a host seminfo into guest memory, field by field with byteswap.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
3968 
/* Host-side semctl argument union (not provided by glibc). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-side semctl argument union; pointers are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3982 
/*
 * Read the guest's semaphore-value array (semctl SETALL) into a freshly
 * allocated host array.  The semaphore count is obtained via IPC_STAT.
 *
 * On success, *host_array is owned by the caller and is later freed by
 * host_to_target_semarray().  On failure it has already been freed (or
 * was never allocated).
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
4018 
4019 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4020                                                unsigned short **host_array)
4021 {
4022     int nsems;
4023     unsigned short *array;
4024     union semun semun;
4025     struct semid_ds semid_ds;
4026     int i, ret;
4027 
4028     semun.buf = &semid_ds;
4029 
4030     ret = semctl(semid, 0, IPC_STAT, semun);
4031     if (ret == -1)
4032         return get_errno(ret);
4033 
4034     nsems = semid_ds.sem_nsems;
4035 
4036     array = lock_user(VERIFY_WRITE, target_addr,
4037                       nsems*sizeof(unsigned short), 0);
4038     if (!array)
4039         return -TARGET_EFAULT;
4040 
4041     for(i=0; i<nsems; i++) {
4042         __put_user((*host_array)[i], &array[i]);
4043     }
4044     g_free(*host_array);
4045     unlock_user(array, target_addr, 1);
4046 
4047     return 0;
4048 }
4049 
4050 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4051                                  abi_ulong target_arg)
4052 {
4053     union target_semun target_su = { .buf = target_arg };
4054     union semun arg;
4055     struct semid_ds dsarg;
4056     unsigned short *array = NULL;
4057     struct seminfo seminfo;
4058     abi_long ret = -TARGET_EINVAL;
4059     abi_long err;
4060     cmd &= 0xff;
4061 
4062     switch( cmd ) {
4063 	case GETVAL:
4064 	case SETVAL:
4065             /* In 64 bit cross-endian situations, we will erroneously pick up
4066              * the wrong half of the union for the "val" element.  To rectify
4067              * this, the entire 8-byte structure is byteswapped, followed by
4068 	     * a swap of the 4 byte val field. In other cases, the data is
4069 	     * already in proper host byte order. */
4070 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4071 		target_su.buf = tswapal(target_su.buf);
4072 		arg.val = tswap32(target_su.val);
4073 	    } else {
4074 		arg.val = target_su.val;
4075 	    }
4076             ret = get_errno(semctl(semid, semnum, cmd, arg));
4077             break;
4078 	case GETALL:
4079 	case SETALL:
4080             err = target_to_host_semarray(semid, &array, target_su.array);
4081             if (err)
4082                 return err;
4083             arg.array = array;
4084             ret = get_errno(semctl(semid, semnum, cmd, arg));
4085             err = host_to_target_semarray(semid, target_su.array, &array);
4086             if (err)
4087                 return err;
4088             break;
4089 	case IPC_STAT:
4090 	case IPC_SET:
4091 	case SEM_STAT:
4092             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4093             if (err)
4094                 return err;
4095             arg.buf = &dsarg;
4096             ret = get_errno(semctl(semid, semnum, cmd, arg));
4097             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4098             if (err)
4099                 return err;
4100             break;
4101 	case IPC_INFO:
4102 	case SEM_INFO:
4103             arg.__buf = &seminfo;
4104             ret = get_errno(semctl(semid, semnum, cmd, arg));
4105             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4106             if (err)
4107                 return err;
4108             break;
4109 	case IPC_RMID:
4110 	case GETPID:
4111 	case GETNCNT:
4112 	case GETZCNT:
4113             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4114             break;
4115     }
4116 
4117     return ret;
4118 }
4119 
/* Guest layout of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4125 
4126 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4127                                              abi_ulong target_addr,
4128                                              unsigned nsops)
4129 {
4130     struct target_sembuf *target_sembuf;
4131     int i;
4132 
4133     target_sembuf = lock_user(VERIFY_READ, target_addr,
4134                               nsops*sizeof(struct target_sembuf), 1);
4135     if (!target_sembuf)
4136         return -TARGET_EFAULT;
4137 
4138     for(i=0; i<nsops; i++) {
4139         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4140         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4141         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4142     }
4143 
4144     unlock_user(target_sembuf, target_addr, 0);
4145 
4146     return 0;
4147 }
4148 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop(2)/semtimedop(2).  @timeout is a guest timespec address
 * (0 for no timeout); @time64 selects the 64-bit timespec layout.
 * Falls back to the ipc(2) multiplexer if the host lacks the dedicated
 * semtimedop syscall.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Match the kernel's per-call operation limit. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4211 
/* Guest layout of struct msqid_ds (SysV message queue state). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    /* On 32-bit ABIs the time fields are padded to 64 bits. */
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4235 
4236 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4237                                                abi_ulong target_addr)
4238 {
4239     struct target_msqid_ds *target_md;
4240 
4241     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4242         return -TARGET_EFAULT;
4243     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4244         return -TARGET_EFAULT;
4245     host_md->msg_stime = tswapal(target_md->msg_stime);
4246     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4247     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4248     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4249     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4250     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4251     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4252     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4253     unlock_user_struct(target_md, target_addr, 0);
4254     return 0;
4255 }
4256 
4257 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4258                                                struct msqid_ds *host_md)
4259 {
4260     struct target_msqid_ds *target_md;
4261 
4262     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4263         return -TARGET_EFAULT;
4264     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4265         return -TARGET_EFAULT;
4266     target_md->msg_stime = tswapal(host_md->msg_stime);
4267     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4268     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4269     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4270     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4271     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4272     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4273     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4274     unlock_user_struct(target_md, target_addr, 1);
4275     return 0;
4276 }
4277 
/* Guest layout of struct msginfo (returned by msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4288 
/*
 * Copy a host msginfo into guest memory, field by field with byteswap.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
4306 
/*
 * Emulate msgctl(2).  The command selects how @ptr is interpreted
 * (guest msqid_ds, guest msginfo, or unused).  Unrecognised commands
 * return -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel overloads the msqid_ds argument for *_INFO. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4338 
/* Guest layout of struct msgbuf; mtext is variable-length in practice. */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
4343 
/*
 * Emulate msgsnd(2): copy the guest message into a host msgbuf
 * (byteswapping mtype) and send it.  Falls back to the ipc(2)
 * multiplexer if the host lacks the dedicated msgsnd syscall.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding mtext. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* The s390 sys_ipc variant has only five parameters. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4384 
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
/* Generic variant: msgp and msgtyp are packed into an on-stack array
 * (the historical msgrcv "kludge"), plus a trailing sixth argument. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4398 
/*
 * Emulate msgrcv(2): receive into a host msgbuf and copy the payload
 * plus byte-swapped mtype back into the guest msgbuf at msgp.
 * A positive return value is the number of mtext bytes received.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: a long mtype followed by up to msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* Hosts without a dedicated msgrcv syscall go through ipc(). */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* mtext starts right after the abi_long mtype in the guest
         * struct; lock it separately since its length is 'ret'. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4450 
4451 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4452                                                abi_ulong target_addr)
4453 {
4454     struct target_shmid_ds *target_sd;
4455 
4456     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4457         return -TARGET_EFAULT;
4458     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4459         return -TARGET_EFAULT;
4460     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4461     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4462     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4463     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4464     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4465     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4466     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4467     unlock_user_struct(target_sd, target_addr, 0);
4468     return 0;
4469 }
4470 
4471 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4472                                                struct shmid_ds *host_sd)
4473 {
4474     struct target_shmid_ds *target_sd;
4475 
4476     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4477         return -TARGET_EFAULT;
4478     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4479         return -TARGET_EFAULT;
4480     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4481     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4482     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4483     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4484     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4485     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4486     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4487     unlock_user_struct(target_sd, target_addr, 1);
4488     return 0;
4489 }
4490 
/* Guest layout of struct shminfo, returned by shmctl(IPC_INFO). */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4498 
4499 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4500                                               struct shminfo *host_shminfo)
4501 {
4502     struct target_shminfo *target_shminfo;
4503     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4504         return -TARGET_EFAULT;
4505     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4506     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4507     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4508     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4509     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4510     unlock_user_struct(target_shminfo, target_addr, 1);
4511     return 0;
4512 }
4513 
/* Guest layout of struct shm_info, returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4522 
4523 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4524                                                struct shm_info *host_shm_info)
4525 {
4526     struct target_shm_info *target_shm_info;
4527     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4528         return -TARGET_EFAULT;
4529     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4530     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4531     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4532     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4533     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4534     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4535     unlock_user_struct(target_shm_info, target_addr, 1);
4536     return 0;
4537 }
4538 
4539 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4540 {
4541     struct shmid_ds dsarg;
4542     struct shminfo shminfo;
4543     struct shm_info shm_info;
4544     abi_long ret = -TARGET_EINVAL;
4545 
4546     cmd &= 0xff;
4547 
4548     switch(cmd) {
4549     case IPC_STAT:
4550     case IPC_SET:
4551     case SHM_STAT:
4552         if (target_to_host_shmid_ds(&dsarg, buf))
4553             return -TARGET_EFAULT;
4554         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4555         if (host_to_target_shmid_ds(buf, &dsarg))
4556             return -TARGET_EFAULT;
4557         break;
4558     case IPC_INFO:
4559         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4560         if (host_to_target_shminfo(buf, &shminfo))
4561             return -TARGET_EFAULT;
4562         break;
4563     case SHM_INFO:
4564         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4565         if (host_to_target_shm_info(buf, &shm_info))
4566             return -TARGET_EFAULT;
4567         break;
4568     case IPC_RMID:
4569     case SHM_LOCK:
4570     case SHM_UNLOCK:
4571         ret = get_errno(shmctl(shmid, cmd, NULL));
4572         break;
4573     }
4574 
4575     return ret;
4576 }
4577 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default: the target's SHMLBA is one target page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4597 
/*
 * Emulate shmat(2): validate/round the guest address against the
 * target SHMLBA, attach the segment on the host (finding free guest
 * address space when shmaddr is 0), update the guest page flags, and
 * record the region so do_shmdt() can undo it later.
 * Returns the guest attach address, or a target errno on failure.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            /* SHM_RND: round the address down to an SHMLBA boundary. */
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* mmap_find_vma() only reserved the range; SHM_REMAP lets
             * the host shmat() replace that reservation. */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Make the new mapping visible to the guest's page tracking. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Record the region so a later shmdt() can drop the page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4670 
4671 static inline abi_long do_shmdt(abi_ulong shmaddr)
4672 {
4673     int i;
4674     abi_long rv;
4675 
4676     /* shmdt pointers are always untagged */
4677 
4678     mmap_lock();
4679 
4680     for (i = 0; i < N_SHM_REGIONS; ++i) {
4681         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4682             shm_regions[i].in_use = false;
4683             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4684             break;
4685         }
4686     }
4687     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4688 
4689     mmap_unlock();
4690 
4691     return rv;
4692 }
4693 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the legacy SysV ipc(2) syscall: 'call' carries the
 * operation in its low 16 bits and an interface version in the high
 * 16 bits; the remaining arguments are interpreted per operation.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style interface: ptr points at a kludge struct
                 * packing the msgp pointer and msgtyp together. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            /* The attach address is returned indirectly through the
             * guest pointer in 'third'. */
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4814 
/* kernel structure types definitions */

/*
 * X-macro expansion over syscall_types.h: the first pass turns each
 * STRUCT()/STRUCT_SPECIAL() entry into a STRUCT_<name> enumerator,
 * the second pass emits a thunk type-description array
 * struct_<name>_def[] for each non-special structure.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Upper bound on the fixed conversion buffer used by the ioctl thunks. */
#define MAX_STRUCT_SIZE 4096
4833 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Emulate the FS_IOC_FIEMAP ioctl: convert the guest struct fiemap
 * (and on return, its trailing extent array) between guest and host
 * layouts around the host ioctl.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject extent counts whose byte size would overflow a u32. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4922 
4923 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4924                                 int fd, int cmd, abi_long arg)
4925 {
4926     const argtype *arg_type = ie->arg_type;
4927     int target_size;
4928     void *argptr;
4929     int ret;
4930     struct ifconf *host_ifconf;
4931     uint32_t outbufsz;
4932     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4933     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4934     int target_ifreq_size;
4935     int nb_ifreq;
4936     int free_buf = 0;
4937     int i;
4938     int target_ifc_len;
4939     abi_long target_ifc_buf;
4940     int host_ifc_len;
4941     char *host_ifc_buf;
4942 
4943     assert(arg_type[0] == TYPE_PTR);
4944     assert(ie->access == IOC_RW);
4945 
4946     arg_type++;
4947     target_size = thunk_type_size(arg_type, 0);
4948 
4949     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4950     if (!argptr)
4951         return -TARGET_EFAULT;
4952     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4953     unlock_user(argptr, arg, 0);
4954 
4955     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4956     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4957     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4958 
4959     if (target_ifc_buf != 0) {
4960         target_ifc_len = host_ifconf->ifc_len;
4961         nb_ifreq = target_ifc_len / target_ifreq_size;
4962         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4963 
4964         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4965         if (outbufsz > MAX_STRUCT_SIZE) {
4966             /*
4967              * We can't fit all the extents into the fixed size buffer.
4968              * Allocate one that is large enough and use it instead.
4969              */
4970             host_ifconf = malloc(outbufsz);
4971             if (!host_ifconf) {
4972                 return -TARGET_ENOMEM;
4973             }
4974             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4975             free_buf = 1;
4976         }
4977         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4978 
4979         host_ifconf->ifc_len = host_ifc_len;
4980     } else {
4981       host_ifc_buf = NULL;
4982     }
4983     host_ifconf->ifc_buf = host_ifc_buf;
4984 
4985     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4986     if (!is_error(ret)) {
4987 	/* convert host ifc_len to target ifc_len */
4988 
4989         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4990         target_ifc_len = nb_ifreq * target_ifreq_size;
4991         host_ifconf->ifc_len = target_ifc_len;
4992 
4993 	/* restore target ifc_buf */
4994 
4995         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4996 
4997 	/* copy struct ifconf to target user */
4998 
4999         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5000         if (!argptr)
5001             return -TARGET_EFAULT;
5002         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5003         unlock_user(argptr, arg, target_size);
5004 
5005         if (target_ifc_buf != 0) {
5006             /* copy ifreq[] to target user */
5007             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5008             for (i = 0; i < nb_ifreq ; i++) {
5009                 thunk_convert(argptr + i * target_ifreq_size,
5010                               host_ifc_buf + i * sizeof(struct ifreq),
5011                               ifreq_arg_type, THUNK_TARGET);
5012             }
5013             unlock_user(argptr, target_ifc_buf, target_ifc_len);
5014         }
5015     }
5016 
5017     if (free_buf) {
5018         free(host_ifconf);
5019     }
5020 
5021     return ret;
5022 }
5023 
5024 #if defined(CONFIG_USBFS)
5025 #if HOST_LONG_BITS > 64
5026 #error USBDEVFS thunks do not support >64 bit hosts yet.
5027 #endif
/* Bookkeeping for one in-flight guest URB.  The kernel is handed a
 * pointer to host_urb, so when it is reaped the containing live_urb is
 * recovered via offsetof(struct live_urb, host_urb).  target_urb_adr
 * doubles as the (64-bit) hashtable key and must stay first.
 */
struct live_urb {
    uint64_t target_urb_adr;    /* guest address of the guest urb (hash key) */
    uint64_t target_buf_adr;    /* guest address of the data buffer */
    char *target_buf_ptr;       /* locked host view of the guest buffer */
    struct usbdevfs_urb host_urb; /* host copy handed to the kernel */
};
5034 
5035 static GHashTable *usbdevfs_urb_hashtable(void)
5036 {
5037     static GHashTable *urb_hashtable;
5038 
5039     if (!urb_hashtable) {
5040         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5041     }
5042     return urb_hashtable;
5043 }
5044 
/* Track a submitted URB; keyed by its leading target_urb_adr field. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
5050 
5051 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5052 {
5053     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5054     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5055 }
5056 
/* Stop tracking a URB (does not free it). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
5062 
/*
 * USBDEVFS_REAPURB{,NDELAY}: the kernel returns the usbdevfs_urb
 * pointer it was given at submit time.  That pointer is the host_urb
 * member of a live_urb, so offsetof() recovers the wrapper; the
 * results are copied back to the guest urb and buffer, and the guest
 * urb address is written back through 'arg' as the reaped handle.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* Recover the live_urb wrapper from the embedded host_urb pointer. */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the guest data buffer, copying any received data back. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5122 
5123 static abi_long
5124 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5125                              uint8_t *buf_temp __attribute__((unused)),
5126                              int fd, int cmd, abi_long arg)
5127 {
5128     struct live_urb *lurb;
5129 
5130     /* map target address back to host URB with metadata. */
5131     lurb = urb_hashtable_lookup(arg);
5132     if (!lurb) {
5133         return -TARGET_EFAULT;
5134     }
5135     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5136 }
5137 
/*
 * USBDEVFS_SUBMITURB: build a host copy of the guest urb, lock the
 * guest data buffer for the lifetime of the URB, and hand the host
 * copy to the kernel.  On success the live_urb is tracked in the
 * hashtable until it is reaped; on failure everything is released.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* After conversion, host_urb.buffer still holds the guest address. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: release the buffer lock and metadata. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Keep the URB alive until it is reaped. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5198 #endif /* CONFIG_USBFS */
5199 
5200 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5201                             int cmd, abi_long arg)
5202 {
5203     void *argptr;
5204     struct dm_ioctl *host_dm;
5205     abi_long guest_data;
5206     uint32_t guest_data_size;
5207     int target_size;
5208     const argtype *arg_type = ie->arg_type;
5209     abi_long ret;
5210     void *big_buf = NULL;
5211     char *host_data;
5212 
5213     arg_type++;
5214     target_size = thunk_type_size(arg_type, 0);
5215     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5216     if (!argptr) {
5217         ret = -TARGET_EFAULT;
5218         goto out;
5219     }
5220     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5221     unlock_user(argptr, arg, 0);
5222 
5223     /* buf_temp is too small, so fetch things into a bigger buffer */
5224     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5225     memcpy(big_buf, buf_temp, target_size);
5226     buf_temp = big_buf;
5227     host_dm = big_buf;
5228 
5229     guest_data = arg + host_dm->data_start;
5230     if ((guest_data - arg) < 0) {
5231         ret = -TARGET_EINVAL;
5232         goto out;
5233     }
5234     guest_data_size = host_dm->data_size - host_dm->data_start;
5235     host_data = (char*)host_dm + host_dm->data_start;
5236 
5237     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5238     if (!argptr) {
5239         ret = -TARGET_EFAULT;
5240         goto out;
5241     }
5242 
5243     switch (ie->host_cmd) {
5244     case DM_REMOVE_ALL:
5245     case DM_LIST_DEVICES:
5246     case DM_DEV_CREATE:
5247     case DM_DEV_REMOVE:
5248     case DM_DEV_SUSPEND:
5249     case DM_DEV_STATUS:
5250     case DM_DEV_WAIT:
5251     case DM_TABLE_STATUS:
5252     case DM_TABLE_CLEAR:
5253     case DM_TABLE_DEPS:
5254     case DM_LIST_VERSIONS:
5255         /* no input data */
5256         break;
5257     case DM_DEV_RENAME:
5258     case DM_DEV_SET_GEOMETRY:
5259         /* data contains only strings */
5260         memcpy(host_data, argptr, guest_data_size);
5261         break;
5262     case DM_TARGET_MSG:
5263         memcpy(host_data, argptr, guest_data_size);
5264         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5265         break;
5266     case DM_TABLE_LOAD:
5267     {
5268         void *gspec = argptr;
5269         void *cur_data = host_data;
5270         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5271         int spec_size = thunk_type_size(arg_type, 0);
5272         int i;
5273 
5274         for (i = 0; i < host_dm->target_count; i++) {
5275             struct dm_target_spec *spec = cur_data;
5276             uint32_t next;
5277             int slen;
5278 
5279             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5280             slen = strlen((char*)gspec + spec_size) + 1;
5281             next = spec->next;
5282             spec->next = sizeof(*spec) + slen;
5283             strcpy((char*)&spec[1], gspec + spec_size);
5284             gspec += next;
5285             cur_data += spec->next;
5286         }
5287         break;
5288     }
5289     default:
5290         ret = -TARGET_EINVAL;
5291         unlock_user(argptr, guest_data, 0);
5292         goto out;
5293     }
5294     unlock_user(argptr, guest_data, 0);
5295 
5296     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5297     if (!is_error(ret)) {
5298         guest_data = arg + host_dm->data_start;
5299         guest_data_size = host_dm->data_size - host_dm->data_start;
5300         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5301         switch (ie->host_cmd) {
5302         case DM_REMOVE_ALL:
5303         case DM_DEV_CREATE:
5304         case DM_DEV_REMOVE:
5305         case DM_DEV_RENAME:
5306         case DM_DEV_SUSPEND:
5307         case DM_DEV_STATUS:
5308         case DM_TABLE_LOAD:
5309         case DM_TABLE_CLEAR:
5310         case DM_TARGET_MSG:
5311         case DM_DEV_SET_GEOMETRY:
5312             /* no return data */
5313             break;
5314         case DM_LIST_DEVICES:
5315         {
5316             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5317             uint32_t remaining_data = guest_data_size;
5318             void *cur_data = argptr;
5319             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5320             int nl_size = 12; /* can't use thunk_size due to alignment */
5321 
5322             while (1) {
5323                 uint32_t next = nl->next;
5324                 if (next) {
5325                     nl->next = nl_size + (strlen(nl->name) + 1);
5326                 }
5327                 if (remaining_data < nl->next) {
5328                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5329                     break;
5330                 }
5331                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5332                 strcpy(cur_data + nl_size, nl->name);
5333                 cur_data += nl->next;
5334                 remaining_data -= nl->next;
5335                 if (!next) {
5336                     break;
5337                 }
5338                 nl = (void*)nl + next;
5339             }
5340             break;
5341         }
5342         case DM_DEV_WAIT:
5343         case DM_TABLE_STATUS:
5344         {
5345             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5346             void *cur_data = argptr;
5347             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5348             int spec_size = thunk_type_size(arg_type, 0);
5349             int i;
5350 
5351             for (i = 0; i < host_dm->target_count; i++) {
5352                 uint32_t next = spec->next;
5353                 int slen = strlen((char*)&spec[1]) + 1;
5354                 spec->next = (cur_data - argptr) + spec_size + slen;
5355                 if (guest_data_size < spec->next) {
5356                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5357                     break;
5358                 }
5359                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5360                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5361                 cur_data = argptr + spec->next;
5362                 spec = (void*)host_dm + host_dm->data_start + next;
5363             }
5364             break;
5365         }
5366         case DM_TABLE_DEPS:
5367         {
5368             void *hdata = (void*)host_dm + host_dm->data_start;
5369             int count = *(uint32_t*)hdata;
5370             uint64_t *hdev = hdata + 8;
5371             uint64_t *gdev = argptr + 8;
5372             int i;
5373 
5374             *(uint32_t*)argptr = tswap32(count);
5375             for (i = 0; i < count; i++) {
5376                 *gdev = tswap64(*hdev);
5377                 gdev++;
5378                 hdev++;
5379             }
5380             break;
5381         }
5382         case DM_LIST_VERSIONS:
5383         {
5384             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5385             uint32_t remaining_data = guest_data_size;
5386             void *cur_data = argptr;
5387             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5388             int vers_size = thunk_type_size(arg_type, 0);
5389 
5390             while (1) {
5391                 uint32_t next = vers->next;
5392                 if (next) {
5393                     vers->next = vers_size + (strlen(vers->name) + 1);
5394                 }
5395                 if (remaining_data < vers->next) {
5396                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5397                     break;
5398                 }
5399                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5400                 strcpy(cur_data + vers_size, vers->name);
5401                 cur_data += vers->next;
5402                 remaining_data -= vers->next;
5403                 if (!next) {
5404                     break;
5405                 }
5406                 vers = (void*)vers + next;
5407             }
5408             break;
5409         }
5410         default:
5411             unlock_user(argptr, guest_data, 0);
5412             ret = -TARGET_EINVAL;
5413             goto out;
5414         }
5415         unlock_user(argptr, guest_data, guest_data_size);
5416 
5417         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5418         if (!argptr) {
5419             ret = -TARGET_EFAULT;
5420             goto out;
5421         }
5422         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5423         unlock_user(argptr, arg, target_size);
5424     }
5425 out:
5426     g_free(big_buf);
5427     return ret;
5428 }
5429 
5430 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5431                                int cmd, abi_long arg)
5432 {
5433     void *argptr;
5434     int target_size;
5435     const argtype *arg_type = ie->arg_type;
5436     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5437     abi_long ret;
5438 
5439     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5440     struct blkpg_partition host_part;
5441 
5442     /* Read and convert blkpg */
5443     arg_type++;
5444     target_size = thunk_type_size(arg_type, 0);
5445     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5446     if (!argptr) {
5447         ret = -TARGET_EFAULT;
5448         goto out;
5449     }
5450     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5451     unlock_user(argptr, arg, 0);
5452 
5453     switch (host_blkpg->op) {
5454     case BLKPG_ADD_PARTITION:
5455     case BLKPG_DEL_PARTITION:
5456         /* payload is struct blkpg_partition */
5457         break;
5458     default:
5459         /* Unknown opcode */
5460         ret = -TARGET_EINVAL;
5461         goto out;
5462     }
5463 
5464     /* Read and convert blkpg->data */
5465     arg = (abi_long)(uintptr_t)host_blkpg->data;
5466     target_size = thunk_type_size(part_arg_type, 0);
5467     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5468     if (!argptr) {
5469         ret = -TARGET_EFAULT;
5470         goto out;
5471     }
5472     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5473     unlock_user(argptr, arg, 0);
5474 
5475     /* Swizzle the data pointer to our local copy and call! */
5476     host_blkpg->data = &host_part;
5477     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5478 
5479 out:
5480     return ret;
5481 }
5482 
/*
 * Emulate route ioctls taking a struct rtentry.
 *
 * struct rtentry contains a char *rt_dev pointer to a device-name
 * string which the generic thunk machinery cannot convert, so the
 * struct is converted field by field here and rt_dev is locked in as
 * a host string for the duration of the call, then released.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev: lock the guest string and store the host pointer
             * directly instead of thunk-converting the field. */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL rt_dev is passed through unchanged. */
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* rtentry always has an rt_dev field, so the loop set these. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5548 
5549 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5550                                      int fd, int cmd, abi_long arg)
5551 {
5552     int sig = target_to_host_signal(arg);
5553     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5554 }
5555 
5556 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5557                                     int fd, int cmd, abi_long arg)
5558 {
5559     struct timeval tv;
5560     abi_long ret;
5561 
5562     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5563     if (is_error(ret)) {
5564         return ret;
5565     }
5566 
5567     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5568         if (copy_to_user_timeval(arg, &tv)) {
5569             return -TARGET_EFAULT;
5570         }
5571     } else {
5572         if (copy_to_user_timeval64(arg, &tv)) {
5573             return -TARGET_EFAULT;
5574         }
5575     }
5576 
5577     return ret;
5578 }
5579 
5580 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5581                                       int fd, int cmd, abi_long arg)
5582 {
5583     struct timespec ts;
5584     abi_long ret;
5585 
5586     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5587     if (is_error(ret)) {
5588         return ret;
5589     }
5590 
5591     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5592         if (host_to_target_timespec(arg, &ts)) {
5593             return -TARGET_EFAULT;
5594         }
5595     } else{
5596         if (host_to_target_timespec64(arg, &ts)) {
5597             return -TARGET_EFAULT;
5598         }
5599     }
5600 
5601     return ret;
5602 }
5603 
5604 #ifdef TIOCGPTPEER
5605 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5606                                      int fd, int cmd, abi_long arg)
5607 {
5608     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5609     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5610 }
5611 #endif
5612 
5613 #ifdef HAVE_DRM_H
5614 
/*
 * Release the guest buffers for the three drm_version strings
 * (name/date/desc) locked by target_to_host_drmversion().  When 'copy'
 * is true the host contents are copied back to the guest; when false
 * the buffers are discarded unchanged.
 *
 * NOTE(review): the target_ver pointer and length fields are used here
 * without tswap — verify this is correct for cross-endian targets.
 */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                                copy ? host_ver->desc_len : 0);
}
5626 
/*
 * Prepare a host drm_version from the guest's target_drm_version:
 * fetch the three buffer lengths and lock the corresponding guest
 * buffers for writing so the host DRM_IOCTL_VERSION can fill them in.
 *
 * Returns 0 on success, or -EFAULT if any guest buffer cannot be
 * locked; on failure, buffers locked so far are released.
 *
 * NOTE(review): the pointers and lengths handed to lock_user() are the
 * raw target_ver fields rather than the __get_user()-converted copies
 * — verify this is correct for cross-endian targets.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Drop whatever was locked before the failure, copying nothing. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5664 
/*
 * Copy the results of a successful DRM_IOCTL_VERSION back into the
 * guest structure, then write back and release the three string
 * buffers locked by target_to_host_drmversion().
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5677 
5678 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5679                              int fd, int cmd, abi_long arg)
5680 {
5681     struct drm_version *ver;
5682     struct target_drm_version *target_ver;
5683     abi_long ret;
5684 
5685     switch (ie->host_cmd) {
5686     case DRM_IOCTL_VERSION:
5687         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5688             return -TARGET_EFAULT;
5689         }
5690         ver = (struct drm_version *)buf_temp;
5691         ret = target_to_host_drmversion(ver, target_ver);
5692         if (!is_error(ret)) {
5693             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5694             if (is_error(ret)) {
5695                 unlock_drm_version(ver, target_ver, false);
5696             } else {
5697                 host_to_target_drmversion(target_ver, ver);
5698             }
5699         }
5700         unlock_user_struct(target_ver, arg, 0);
5701         return ret;
5702     }
5703     return -TARGET_ENOSYS;
5704 }
5705 
5706 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5707                                            struct drm_i915_getparam *gparam,
5708                                            int fd, abi_long arg)
5709 {
5710     abi_long ret;
5711     int value;
5712     struct target_drm_i915_getparam *target_gparam;
5713 
5714     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5715         return -TARGET_EFAULT;
5716     }
5717 
5718     __get_user(gparam->param, &target_gparam->param);
5719     gparam->value = &value;
5720     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5721     put_user_s32(value, target_gparam->value);
5722 
5723     unlock_user_struct(target_gparam, arg, 0);
5724     return ret;
5725 }
5726 
5727 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5728                                   int fd, int cmd, abi_long arg)
5729 {
5730     switch (ie->host_cmd) {
5731     case DRM_IOCTL_I915_GETPARAM:
5732         return do_ioctl_drm_i915_getparam(ie,
5733                                           (struct drm_i915_getparam *)buf_temp,
5734                                           fd, arg);
5735     default:
5736         return -TARGET_ENOSYS;
5737     }
5738 }
5739 
5740 #endif
5741 
5742 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5743                                         int fd, int cmd, abi_long arg)
5744 {
5745     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5746     struct tun_filter *target_filter;
5747     char *target_addr;
5748 
5749     assert(ie->access == IOC_W);
5750 
5751     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5752     if (!target_filter) {
5753         return -TARGET_EFAULT;
5754     }
5755     filter->flags = tswap16(target_filter->flags);
5756     filter->count = tswap16(target_filter->count);
5757     unlock_user(target_filter, arg, 0);
5758 
5759     if (filter->count) {
5760         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5761             MAX_STRUCT_SIZE) {
5762             return -TARGET_EFAULT;
5763         }
5764 
5765         target_addr = lock_user(VERIFY_READ,
5766                                 arg + offsetof(struct tun_filter, addr),
5767                                 filter->count * ETH_ALEN, 1);
5768         if (!target_addr) {
5769             return -TARGET_EFAULT;
5770         }
5771         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5772         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5773     }
5774 
5775     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5776 }
5777 
/*
 * Table of translated ioctls, expanded from ioctls.h.  The entry with
 * target_cmd == 0 terminates the table (see do_ioctl()).
 */
IOCTLEntry ioctl_entries[] = {
/* Generic entry: converted via the arg_type thunk description. */
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
/* Entry handled by a custom do_ioctl_*() callback. */
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
/* Known but unimplemented: host_cmd 0 makes do_ioctl() return ENOSYS. */
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, }, /* end-of-table marker */
};
5788 
5789 /* ??? Implement proper locking for ioctls.  */
5790 /* do_ioctl() Must return target values and target errnos. */
5791 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5792 {
5793     const IOCTLEntry *ie;
5794     const argtype *arg_type;
5795     abi_long ret;
5796     uint8_t buf_temp[MAX_STRUCT_SIZE];
5797     int target_size;
5798     void *argptr;
5799 
5800     ie = ioctl_entries;
5801     for(;;) {
5802         if (ie->target_cmd == 0) {
5803             qemu_log_mask(
5804                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5805             return -TARGET_ENOSYS;
5806         }
5807         if (ie->target_cmd == cmd)
5808             break;
5809         ie++;
5810     }
5811     arg_type = ie->arg_type;
5812     if (ie->do_ioctl) {
5813         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5814     } else if (!ie->host_cmd) {
5815         /* Some architectures define BSD ioctls in their headers
5816            that are not implemented in Linux.  */
5817         return -TARGET_ENOSYS;
5818     }
5819 
5820     switch(arg_type[0]) {
5821     case TYPE_NULL:
5822         /* no argument */
5823         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5824         break;
5825     case TYPE_PTRVOID:
5826     case TYPE_INT:
5827     case TYPE_LONG:
5828     case TYPE_ULONG:
5829         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5830         break;
5831     case TYPE_PTR:
5832         arg_type++;
5833         target_size = thunk_type_size(arg_type, 0);
5834         switch(ie->access) {
5835         case IOC_R:
5836             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5837             if (!is_error(ret)) {
5838                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5839                 if (!argptr)
5840                     return -TARGET_EFAULT;
5841                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5842                 unlock_user(argptr, arg, target_size);
5843             }
5844             break;
5845         case IOC_W:
5846             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5847             if (!argptr)
5848                 return -TARGET_EFAULT;
5849             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5850             unlock_user(argptr, arg, 0);
5851             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5852             break;
5853         default:
5854         case IOC_RW:
5855             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5856             if (!argptr)
5857                 return -TARGET_EFAULT;
5858             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5859             unlock_user(argptr, arg, 0);
5860             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5861             if (!is_error(ret)) {
5862                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5863                 if (!argptr)
5864                     return -TARGET_EFAULT;
5865                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5866                 unlock_user(argptr, arg, target_size);
5867             }
5868             break;
5869         }
5870         break;
5871     default:
5872         qemu_log_mask(LOG_UNIMP,
5873                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5874                       (long)cmd, arg_type[0]);
5875         ret = -TARGET_ENOSYS;
5876         break;
5877     }
5878     return ret;
5879 }
5880 
/* Input-mode (c_iflag) bits: target <-> host translation table. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};
5899 
5900 static const bitmask_transtbl oflag_tbl[] = {
5901 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5902 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5903 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5904 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5905 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5906 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5907 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5908 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5909 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5910 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5911 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5912 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5913 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5914 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5915 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5916 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5917 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5918 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5919 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5920 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5921 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5922 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5923 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5924 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5925 	{ 0, 0, 0, 0 }
5926 };
5927 
5928 static const bitmask_transtbl cflag_tbl[] = {
5929 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5930 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5931 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5932 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5933 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5934 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5935 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5936 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5937 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5938 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5939 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5940 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5941 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5942 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5943 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5944 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5945 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5946 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5947 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5948 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5949 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5950 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5951 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5952 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5953 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5954 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5955 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5956 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5957 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5958 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5959 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5960 	{ 0, 0, 0, 0 }
5961 };
5962 
5963 static const bitmask_transtbl lflag_tbl[] = {
5964   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5965   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5966   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5967   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5968   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5969   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5970   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5971   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5972   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5973   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5974   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5975   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5976   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5977   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5978   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5979   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5980   { 0, 0, 0, 0 }
5981 };
5982 
/*
 * Thunk conversion: guest struct target_termios -> host termios.
 * Mode flags go through the bitmask tables above; control characters
 * are remapped index by index since TARGET_V* need not equal V*.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Any host slot without a target counterpart stays zero. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
6017 
/*
 * Thunk conversion: host termios -> guest struct target_termios.
 * Inverse of target_to_host_termios(): flags are translated through
 * the bitmask tables, control characters remapped index by index.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Any target slot without a host counterpart stays zero. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
6052 
/* Conversion descriptor used by the generic ioctl machinery to
   translate struct termios between guest and host layouts. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
6059 
/* Guest <-> host translation table for mmap() flag bits.
   Entries are { target_mask, target_bits, host_mask, host_bits };
   the table is terminated by an all-zero entry. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6082 
6083 /*
6084  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6085  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6086  */
6087 #if defined(TARGET_I386)
6088 
6089 /* NOTE: there is really one LDT for all the threads */
6090 static uint8_t *ldt_table;
6091 
6092 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6093 {
6094     int size;
6095     void *p;
6096 
6097     if (!ldt_table)
6098         return 0;
6099     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6100     if (size > bytecount)
6101         size = bytecount;
6102     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6103     if (!p)
6104         return -TARGET_EFAULT;
6105     /* ??? Should this by byteswapped?  */
6106     memcpy(p, ldt_table, size);
6107     unlock_user(p, ptr, size);
6108     return size;
6109 }
6110 
6111 /* XXX: add locking support */
/*
 * Install one LDT entry for the guest (modify_ldt func 1 or 0x11).
 * 'oldmode' selects the legacy struct layout semantics: in old mode
 * the 'useable' bit is ignored and an empty descriptor is always
 * written when base and limit are zero.
 * Returns 0 on success, -TARGET_EINVAL for bad arguments,
 * -TARGET_EFAULT if the guest pointer is unreadable, and
 * -TARGET_ENOMEM if the LDT backing store cannot be mapped.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    /* The guest must pass exactly one user_desc structure. */
    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of an x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6202 
6203 /* specific and weird i386 syscalls */
6204 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6205                               unsigned long bytecount)
6206 {
6207     abi_long ret;
6208 
6209     switch (func) {
6210     case 0:
6211         ret = read_ldt(ptr, bytecount);
6212         break;
6213     case 1:
6214         ret = write_ldt(env, ptr, bytecount, 1);
6215         break;
6216     case 0x11:
6217         ret = write_ldt(env, ptr, bytecount, 0);
6218         break;
6219     default:
6220         ret = -TARGET_ENOSYS;
6221         break;
6222     }
6223     return ret;
6224 }
6225 
6226 #if defined(TARGET_ABI32)
/*
 * Implement set_thread_area for 32-bit x86 guests: install a TLS
 * descriptor into the emulated GDT.  If the guest passes entry_number
 * == -1 the first free TLS slot is chosen and written back to the
 * guest structure.  Returns 0 on success, -TARGET_EFAULT if the guest
 * pointer is unusable, or -TARGET_EINVAL for an out-of-range entry or
 * inconsistent flags.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "pick a free TLS slot for me". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of an x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6311 
/*
 * Implement get_thread_area for 32-bit x86 guests: read the TLS
 * descriptor selected by the guest's entry_number out of the emulated
 * GDT and unpack it back into user_desc form.  Returns 0 on success,
 * -TARGET_EFAULT if the guest pointer is unusable, or -TARGET_EINVAL
 * for an out-of-range entry number.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the descriptor fields (inverse of the packing done in
       do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6358 
/* arch_prctl is not available to 32-bit x86 guests. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6363 #else
6364 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6365 {
6366     abi_long ret = 0;
6367     abi_ulong val;
6368     int idx;
6369 
6370     switch(code) {
6371     case TARGET_ARCH_SET_GS:
6372     case TARGET_ARCH_SET_FS:
6373         if (code == TARGET_ARCH_SET_GS)
6374             idx = R_GS;
6375         else
6376             idx = R_FS;
6377         cpu_x86_load_seg(env, idx, 0);
6378         env->segs[idx].base = addr;
6379         break;
6380     case TARGET_ARCH_GET_GS:
6381     case TARGET_ARCH_GET_FS:
6382         if (code == TARGET_ARCH_GET_GS)
6383             idx = R_GS;
6384         else
6385             idx = R_FS;
6386         val = env->segs[idx].base;
6387         if (put_user(val, addr, abi_ulong))
6388             ret = -TARGET_EFAULT;
6389         break;
6390     default:
6391         ret = -TARGET_EINVAL;
6392         break;
6393     }
6394     return ret;
6395 }
#endif /* defined(TARGET_ABI32) */
6397 
6398 #endif /* defined(TARGET_I386) */
6399 
#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation so that the parent-side setup in
   do_fork() appears atomic to the newly created thread. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to clone_func(). */
typedef struct {
    CPUArchState *env;        /* CPU state for the new thread */
    pthread_mutex_t mutex;    /* protects cond below */
    pthread_cond_t cond;      /* signalled once the child has started */
    pthread_t thread;
    uint32_t tid;             /* host tid, filled in by the child */
    abi_ulong child_tidptr;   /* guest address to store tid, if nonzero */
    abi_ulong parent_tidptr;  /* guest address to store tid, if nonzero */
    sigset_t sigmask;         /* signal mask to restore in the child */
} new_thread_info;
6414 
/*
 * Start routine for threads created by do_fork() with CLONE_VM.
 * Registers the new host thread with RCU and TCG, publishes its tid
 * to the guest addresses requested via CLONE_CHILD_SETTID /
 * CLONE_PARENT_SETTID, restores the signal mask saved by the parent,
 * signals the parent that startup is complete, then waits for the
 * parent to finish TLS setup (by briefly taking clone_lock) before
 * entering the CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6448 
6449 /* do_fork() Must return host values and target errnos (unlike most
6450    do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    /* CLONE_VM: implement the new task as a host thread that shares
       this process's address space. */
    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the results of the pthread_attr_* calls are
           overwritten without being checked; only pthread_create's
           return value is acted upon. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6593 
6594 /* warning : doesn't handle linux specific flags... */
/*
 * Translate a guest fcntl command to the host value.  Commands with
 * the same numbering pass through; lock commands are promoted to the
 * 64-bit F_*LK64 variants.  Returns -TARGET_EINVAL for commands we do
 * not know how to translate.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6700 
/* Expands to switch cases converting the flock lock types between
   guest and host; the direction is chosen by how each user defines
   TRANSTBL_CONVERT below. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6707 
/* Convert a guest flock type (F_RDLCK/F_WRLCK/F_UNLCK) to the host
   value; returns -TARGET_EINVAL for anything unrecognized. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6715 
/* Convert a host flock type to the guest value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6726 
6727 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6728                                             abi_ulong target_flock_addr)
6729 {
6730     struct target_flock *target_fl;
6731     int l_type;
6732 
6733     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6734         return -TARGET_EFAULT;
6735     }
6736 
6737     __get_user(l_type, &target_fl->l_type);
6738     l_type = target_to_host_flock(l_type);
6739     if (l_type < 0) {
6740         return l_type;
6741     }
6742     fl->l_type = l_type;
6743     __get_user(fl->l_whence, &target_fl->l_whence);
6744     __get_user(fl->l_start, &target_fl->l_start);
6745     __get_user(fl->l_len, &target_fl->l_len);
6746     __get_user(fl->l_pid, &target_fl->l_pid);
6747     unlock_user_struct(target_fl, target_flock_addr, 0);
6748     return 0;
6749 }
6750 
/*
 * Copy a host struct flock64 out to a guest struct target_flock.
 * Returns 0 on success or -TARGET_EFAULT if the guest address is not
 * writable.  Unknown host lock types are passed through unchanged.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6770 
6771 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6772 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6773 
6774 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6775 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6776                                                    abi_ulong target_flock_addr)
6777 {
6778     struct target_oabi_flock64 *target_fl;
6779     int l_type;
6780 
6781     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6782         return -TARGET_EFAULT;
6783     }
6784 
6785     __get_user(l_type, &target_fl->l_type);
6786     l_type = target_to_host_flock(l_type);
6787     if (l_type < 0) {
6788         return l_type;
6789     }
6790     fl->l_type = l_type;
6791     __get_user(fl->l_whence, &target_fl->l_whence);
6792     __get_user(fl->l_start, &target_fl->l_start);
6793     __get_user(fl->l_len, &target_fl->l_len);
6794     __get_user(fl->l_pid, &target_fl->l_pid);
6795     unlock_user_struct(target_fl, target_flock_addr, 0);
6796     return 0;
6797 }
6798 
/*
 * Copy a host struct flock64 out to a guest OABI (old ARM ABI)
 * struct flock64.  Returns 0 on success or -TARGET_EFAULT if the
 * guest address is not writable.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6818 #endif
6819 
6820 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6821                                               abi_ulong target_flock_addr)
6822 {
6823     struct target_flock64 *target_fl;
6824     int l_type;
6825 
6826     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6827         return -TARGET_EFAULT;
6828     }
6829 
6830     __get_user(l_type, &target_fl->l_type);
6831     l_type = target_to_host_flock(l_type);
6832     if (l_type < 0) {
6833         return l_type;
6834     }
6835     fl->l_type = l_type;
6836     __get_user(fl->l_whence, &target_fl->l_whence);
6837     __get_user(fl->l_start, &target_fl->l_start);
6838     __get_user(fl->l_len, &target_fl->l_len);
6839     __get_user(fl->l_pid, &target_fl->l_pid);
6840     unlock_user_struct(target_fl, target_flock_addr, 0);
6841     return 0;
6842 }
6843 
/*
 * Copy a host struct flock64 out to a guest struct target_flock64.
 * Returns 0 on success or -TARGET_EFAULT if the guest address is not
 * writable.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6863 
/*
 * Implement the fcntl syscall for the guest: translate the command
 * and its argument (flock structures, flag bitmasks, signal numbers,
 * f_owner_ex) to host form, perform the host fcntl, and translate
 * the result back.  Unrecognized commands are passed through with
 * the untranslated guest command number.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate the returned O_* flag bits to guest values. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* Plain integer argument; no translation needed. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6982 
6983 #ifdef USE_UID16
6984 
/* Narrow a 32-bit UID to the 16-bit ABI range; out-of-range UIDs
 * become the classic "overflow" UID 65534.
 */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
6992 
/* Narrow a 32-bit GID to the 16-bit ABI range; out-of-range GIDs
 * become the "overflow" GID 65534.
 */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
7000 
/* Widen a 16-bit UID; the 16-bit sentinel -1 (0xffff, "no change")
 * must stay -1 when widened rather than becoming 65535.
 */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
7008 
/* Widen a 16-bit GID; the 16-bit sentinel -1 (0xffff, "no change")
 * must stay -1 when widened rather than becoming 65535.
 */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit UID/GID between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}
7020 
7021 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7022 
7023 #else /* !USE_UID16 */
/* 32-bit UID syscalls available: no narrowing needed. */
static inline int high2lowuid(int uid)
{
    return uid;
}
/* 32-bit GID syscalls available: no narrowing needed. */
static inline int high2lowgid(int gid)
{
    return gid;
}
/* 32-bit UID syscalls available: no widening needed. */
static inline int low2highuid(int uid)
{
    return uid;
}
/* 32-bit GID syscalls available: no widening needed. */
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit UID/GID between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
7044 
7045 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7046 
7047 #endif /* USE_UID16 */
7048 
7049 /* We must do direct syscalls for setting UID/GID, because we want to
7050  * implement the Linux system call semantics of "change only for this thread",
7051  * not the libc/POSIX semantics of "change for all threads in process".
7052  * (See http://ewontfix.com/17/ for more details.)
7053  * We use the 32-bit version of the syscalls if present; if it is not
7054  * then either the host architecture supports 32-bit UIDs natively with
7055  * the standard syscall, or the 16-bit UID is the best we can do.
7056  */
7057 #ifdef __NR_setuid32
7058 #define __NR_sys_setuid __NR_setuid32
7059 #else
7060 #define __NR_sys_setuid __NR_setuid
7061 #endif
7062 #ifdef __NR_setgid32
7063 #define __NR_sys_setgid __NR_setgid32
7064 #else
7065 #define __NR_sys_setgid __NR_setgid
7066 #endif
7067 #ifdef __NR_setresuid32
7068 #define __NR_sys_setresuid __NR_setresuid32
7069 #else
7070 #define __NR_sys_setresuid __NR_setresuid
7071 #endif
7072 #ifdef __NR_setresgid32
7073 #define __NR_sys_setresgid __NR_setresgid32
7074 #else
7075 #define __NR_sys_setresgid __NR_setresgid
7076 #endif
7077 
7078 _syscall1(int, sys_setuid, uid_t, uid)
7079 _syscall1(int, sys_setgid, gid_t, gid)
7080 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7081 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7082 
/*
 * One-time initialization of syscall emulation state:
 *  - register all thunk struct layouts (via syscall_types.h),
 *  - build the target->host errno reverse-lookup table,
 *  - patch computed sizes into, and sanity-check, the ioctl table.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Expand each STRUCT(...) entry in syscall_types.h into a thunk
 * registration call, then drop the temporary macros again. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* Only pointer arguments have a thunk-described payload whose
             * size we can compute; anything else is a table bug. */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            /* Splice the computed size into the command's size field. */
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7134 
7135 #ifdef TARGET_NR_truncate64
/*
 * truncate64: on some 32-bit ABIs a 64-bit syscall argument must sit in
 * an aligned (even/odd) register pair, which shifts the argument slots
 * by one; regpairs_aligned() tells us whether to skip arg2 and take the
 * 64-bit offset from arg3/arg4 instead of arg2/arg3.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
7147 #endif
7148 
7149 #ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair alignment handling as
 * target_truncate64() above, but operating on a file descriptor.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
7161 #endif
7162 
7163 #if defined(TARGET_NR_timer_settime) || \
7164     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7165 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7166                                                  abi_ulong target_addr)
7167 {
7168     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7169                                 offsetof(struct target_itimerspec,
7170                                          it_interval)) ||
7171         target_to_host_timespec(&host_its->it_value, target_addr +
7172                                 offsetof(struct target_itimerspec,
7173                                          it_value))) {
7174         return -TARGET_EFAULT;
7175     }
7176 
7177     return 0;
7178 }
7179 #endif
7180 
7181 #if defined(TARGET_NR_timer_settime64) || \
7182     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7183 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7184                                                    abi_ulong target_addr)
7185 {
7186     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7187                                   offsetof(struct target__kernel_itimerspec,
7188                                            it_interval)) ||
7189         target_to_host_timespec64(&host_its->it_value, target_addr +
7190                                   offsetof(struct target__kernel_itimerspec,
7191                                            it_value))) {
7192         return -TARGET_EFAULT;
7193     }
7194 
7195     return 0;
7196 }
7197 #endif
7198 
7199 #if ((defined(TARGET_NR_timerfd_gettime) || \
7200       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7201       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7202 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7203                                                  struct itimerspec *host_its)
7204 {
7205     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7206                                                        it_interval),
7207                                 &host_its->it_interval) ||
7208         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7209                                                        it_value),
7210                                 &host_its->it_value)) {
7211         return -TARGET_EFAULT;
7212     }
7213     return 0;
7214 }
7215 #endif
7216 
7217 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7218       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7219       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7220 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7221                                                    struct itimerspec *host_its)
7222 {
7223     if (host_to_target_timespec64(target_addr +
7224                                   offsetof(struct target__kernel_itimerspec,
7225                                            it_interval),
7226                                   &host_its->it_interval) ||
7227         host_to_target_timespec64(target_addr +
7228                                   offsetof(struct target__kernel_itimerspec,
7229                                            it_value),
7230                                   &host_its->it_value)) {
7231         return -TARGET_EFAULT;
7232     }
7233     return 0;
7234 }
7235 #endif
7236 
7237 #if defined(TARGET_NR_adjtimex) || \
7238     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest struct target_timex at target_addr into *host_tx,
 * byte-swapping each field via __get_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7273 
/*
 * Copy *host_tx out to a guest struct target_timex at target_addr,
 * byte-swapping each field via __put_user.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7308 #endif
7309 
7310 
7311 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Copy a guest struct target__kernel_timex at target_addr into *host_tx.
 * The embedded 'time' member uses a 64-bit time_t layout and is converted
 * separately via copy_from_user_timeval64(); the remaining fields are
 * swapped with __get_user.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7350 
/*
 * Copy *host_tx out to a guest struct target__kernel_timex at
 * target_addr.  The embedded 'time' member is converted separately via
 * copy_to_user_timeval64(); the remaining fields are swapped with
 * __put_user.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

   if (copy_to_user_timeval64(target_addr +
                              offsetof(struct target__kernel_timex, time),
                              &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7389 #endif
7390 
7391 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7392                                                abi_ulong target_addr)
7393 {
7394     struct target_sigevent *target_sevp;
7395 
7396     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7397         return -TARGET_EFAULT;
7398     }
7399 
7400     /* This union is awkward on 64 bit systems because it has a 32 bit
7401      * integer and a pointer in it; we follow the conversion approach
7402      * used for handling sigval types in signal.c so the guest should get
7403      * the correct value back even if we did a 64 bit byteswap and it's
7404      * using the 32 bit integer.
7405      */
7406     host_sevp->sigev_value.sival_ptr =
7407         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7408     host_sevp->sigev_signo =
7409         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7410     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7411     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7412 
7413     unlock_user_struct(target_sevp, target_addr, 1);
7414     return 0;
7415 }
7416 
7417 #if defined(TARGET_NR_mlockall)
7418 static inline int target_to_host_mlockall_arg(int arg)
7419 {
7420     int result = 0;
7421 
7422     if (arg & TARGET_MCL_CURRENT) {
7423         result |= MCL_CURRENT;
7424     }
7425     if (arg & TARGET_MCL_FUTURE) {
7426         result |= MCL_FUTURE;
7427     }
7428 #ifdef MCL_ONFAULT
7429     if (arg & TARGET_MCL_ONFAULT) {
7430         result |= MCL_ONFAULT;
7431     }
7432 #endif
7433 
7434     return result;
7435 }
7436 #endif
7437 
7438 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7439      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7440      defined(TARGET_NR_newfstatat))
/*
 * Write a host struct stat out to guest memory in the target's stat64
 * (or plain stat, if the target lacks a stat64) layout, byte-swapping
 * every field.  ARM EABI32 guests get the special target_eabi_stat64
 * layout.  Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets duplicate the inode in a second field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        /* Nanosecond timestamp fields need st_atim et al., which the
         * host libc only exposes under these feature-test macros. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7513 #endif
7514 
7515 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a statx result out to guest memory at target_addr, swapping
 * every field.  The remainder of the target struct is zeroed first so
 * reserved/unpopulated fields are not left as stale guest data.
 * Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7554 #endif
7555 
/*
 * Invoke the host futex syscall, choosing between the plain futex and
 * futex_time64 variants depending on the host's word size and which
 * syscall numbers the host kernel headers define.  The timeout pointer
 * is a host struct timespec.  Unreachable only if neither variant is
 * compiled in.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7580 
/*
 * Signal-safe variant of do_sys_futex(): same host-syscall selection
 * logic, but routed through the safe_futex* wrappers and with the
 * result converted via get_errno().  Unlike do_sys_futex(), falls back
 * to -TARGET_ENOSYS (rather than asserting) if no futex syscall is
 * available on the host.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7605 
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  On the other hand, local futexes are probably useless because
   guest atomic operations won't work either.  */
7611 #if defined(TARGET_NR_futex)
7612 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7613                     target_ulong timeout, target_ulong uaddr2, int val3)
7614 {
7615     struct timespec ts, *pts;
7616     int base_op;
7617 
7618     /* ??? We assume FUTEX_* constants are the same on both host
7619        and target.  */
7620 #ifdef FUTEX_CMD_MASK
7621     base_op = op & FUTEX_CMD_MASK;
7622 #else
7623     base_op = op;
7624 #endif
7625     switch (base_op) {
7626     case FUTEX_WAIT:
7627     case FUTEX_WAIT_BITSET:
7628         if (timeout) {
7629             pts = &ts;
7630             target_to_host_timespec(pts, timeout);
7631         } else {
7632             pts = NULL;
7633         }
7634         return do_safe_futex(g2h(cpu, uaddr),
7635                              op, tswap32(val), pts, NULL, val3);
7636     case FUTEX_WAKE:
7637         return do_safe_futex(g2h(cpu, uaddr),
7638                              op, val, NULL, NULL, 0);
7639     case FUTEX_FD:
7640         return do_safe_futex(g2h(cpu, uaddr),
7641                              op, val, NULL, NULL, 0);
7642     case FUTEX_REQUEUE:
7643     case FUTEX_CMP_REQUEUE:
7644     case FUTEX_WAKE_OP:
7645         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7646            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7647            But the prototype takes a `struct timespec *'; insert casts
7648            to satisfy the compiler.  We do not need to tswap TIMEOUT
7649            since it's not compared to guest memory.  */
7650         pts = (struct timespec *)(uintptr_t) timeout;
7651         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7652                              (base_op == FUTEX_CMP_REQUEUE
7653                               ? tswap32(val3) : val3));
7654     default:
7655         return -TARGET_ENOSYS;
7656     }
7657 }
7658 #endif
7659 
7660 #if defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex_time64 syscall: identical to do_futex() but
 * the guest timeout uses the 64-bit time_t layout, converted with
 * target_to_host_timespec64().
 */
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
7708 #endif
7709 
7710 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): translate the guest path at @pathname
 * (relative to @dirfd) into an opaque file handle written back to the
 * guest buffer at @handle, storing the mount id at @mount_id.
 * Returns 0 on success or a negative -TARGET_<errno>.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* The first field of the guest struct is handle_bytes (u32): the
       size of the caller-provided handle payload buffer. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Host-side scratch handle, zeroed so any bytes the kernel leaves
       untouched are deterministic when copied back to the guest. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    /* Only the two documented header fields need byte swapping; the
       payload is opaque and copied verbatim above. */
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7762 #endif
7763 
7764 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): read the opaque file handle from guest
 * memory at @handle and open it on the host, translating the open
 * flags through fcntl_flags_tbl.
 * Returns the new host fd or a negative -TARGET_<errno>.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes (u32) is the first field of the guest struct and
       gives the size of the opaque payload that follows. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Work on a host copy: fix up the two byte-order sensitive header
       fields, leaving the opaque payload bytes as-is. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7796 #endif
7797 
7798 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7799 
/*
 * Emulate signalfd4(2) (and signalfd(2) with flags == 0): convert the
 * guest signal mask and flag bits to host values, create the signalfd,
 * and register an fd translator so reads return target-format
 * signalfd_siginfo records.
 * Returns the signalfd descriptor or a negative -TARGET_<errno>.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only SFD_NONBLOCK and SFD_CLOEXEC equivalents are valid. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Data read from this fd must be converted to target layout. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7827 #endif
7828 
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits. */
        int target_sig = host_to_target_signal(WTERMSIG(status));
        return target_sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; keep the 0x7f marker below. */
        int target_sig = host_to_target_signal(WSTOPSIG(status));
        return (target_sig << 8) | (status & 0xff);
    }
    /* Normal exit (or continue): no signal number to translate. */
    return status;
}
7842 
7843 static int open_self_cmdline(void *cpu_env, int fd)
7844 {
7845     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7846     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7847     int i;
7848 
7849     for (i = 0; i < bprm->argc; i++) {
7850         size_t len = strlen(bprm->argv[i]) + 1;
7851 
7852         if (write(fd, bprm->argv[i], len) != len) {
7853             return -1;
7854         }
7855     }
7856 
7857     return 0;
7858 }
7859 
/*
 * Synthesize /proc/self/maps for the guest: walk the host's own
 * mappings and emit only the ranges that are also valid guest
 * addresses, printed in guest (h2g) address terms.
 * Always returns 0.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the range end to one past the last valid guest
               address if the host mapping extends beyond it. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip host mappings whose pages the guest page table does
               not record with these permissions. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            /* Label the guest stack mapping the way the kernel does. */
            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so pathnames line up at column 73, as procfs does. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
#endif

    return 0;
}
7921 
/*
 * Synthesize /proc/self/stat for the guest.  Only the fields QEMU can
 * meaningfully provide (pid, comm, start of stack) carry real values;
 * every other field is reported as 0.
 * Returns 0 on success, -1 if a write fails.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    /* Emit 44 space-separated fields, matching proc(5) "stat". */
    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name: basename of argv[0], truncated to 15 chars
               like the kernel's comm field */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
7953 
/*
 * Synthesize /proc/self/auxv: copy the ELF auxiliary vector that was
 * saved on the guest stack at exec time out to @fd.
 * Always returns 0, even if the auxv region cannot be locked.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        /* Loop to cope with short writes. */
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Rewind so the caller reads the file from the beginning. */
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the write loop, so
           this unlock uses end-of-buffer values rather than the
           originals; appears harmless for a read-only lock, but worth
           confirming against lock_user's contract. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
7983 
/*
 * Return nonzero iff @filename names the /proc entry @entry for the
 * current process, either via "/proc/self/<entry>" or via an explicit
 * "/proc/<pid>/<entry>" whose pid matches getpid().
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    const char *p = filename;
    char pid_prefix[80];

    if (strncmp(p, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    p += strlen(proc_prefix);

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* Numeric pid: accept only if it is our own, with a slash. */
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(p, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        p += strlen(pid_prefix);
    } else {
        return 0;
    }

    /* The remainder must match the entry exactly. */
    return strcmp(p, entry) == 0;
}
8007 
8008 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8009     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Return nonzero iff @filename is exactly the fake /proc path @entry. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8014 #endif
8015 
8016 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Synthesize /proc/net/route when host and guest endianness differ:
 * copy the host file through, byte-swapping the three 32-bit address
 * fields (destination, gateway, mask) so the guest sees them in its
 * own byte order.  Returns 0 on success, -1 if the host file cannot
 * be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        /* Skip malformed lines rather than emitting garbage. */
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
8059 #endif
8060 
8061 #if defined(TARGET_SPARC)
/* Synthesize /proc/cpuinfo contents for a sparc guest. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char cpuinfo[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", cpuinfo);
    return 0;
}
8067 #endif
8068 
8069 #if defined(TARGET_HPPA)
/* Synthesize /proc/cpuinfo contents for an hppa guest. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char *const rows[] = {
        "cpu family\t: PA-RISC 1.1e\n",
        "cpu\t\t: PA7300LC (PCX-L2)\n",
        "capabilities\t: os32\n",
        "model\t\t: 9000/778/B160L\n",
        "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n",
    };
    size_t i;

    for (i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
        dprintf(fd, "%s", rows[i]);
    }
    return 0;
}
8079 #endif
8080 
8081 #if defined(TARGET_M68K)
/* Synthesize /proc/hardware contents for an m68k guest. */
static int open_hardware(void *cpu_env, int fd)
{
    static const char model_line[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", model_line);
    return 0;
}
8087 #endif
8088 
/*
 * Common implementation for open(2)/openat(2).  Guest /proc files that
 * QEMU must emulate (maps, stat, auxv, cmdline, plus some per-target
 * entries) are serviced by filling an unlinked temporary file with
 * synthetic contents; every other path goes to the host through
 * safe_openat().  Returns an fd or a negative error value (the caller
 * wraps it with get_errno()).
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: prefer the fd the guest binary was loaded from. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    /* Find a matching fake entry; the loop ends on the NULL sentinel. */
    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the contents alive and the
           file should never be visible in the filesystem. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill function's errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads from the start of the contents. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
8154 
8155 #define TIMER_MAGIC 0x0caf0000
8156 #define TIMER_MAGIC_MASK 0xffff0000
8157 
8158 /* Convert QEMU provided timer ID back to internal 16bit index format */
8159 static target_timer_t get_timer_id(abi_long arg)
8160 {
8161     target_timer_t timerid = arg;
8162 
8163     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8164         return -TARGET_EINVAL;
8165     }
8166 
8167     timerid &= 0xffff;
8168 
8169     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8170         return -TARGET_EINVAL;
8171     }
8172 
8173     return timerid;
8174 }
8175 
/*
 * Read a guest CPU affinity mask (@target_size bytes of abi_ulong
 * words at @target_addr) into the host mask @host_mask (@host_size
 * bytes of unsigned long words), translating word size and byte order
 * one bit at a time.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    /* Host bits beyond the guest mask must read as clear. */
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        /* Replicate each set guest bit into the host word layout. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
8209 
/*
 * Write the host CPU affinity mask @host_mask (@host_size bytes of
 * unsigned long words) out to the guest buffer at @target_addr
 * (@target_size bytes of abi_ulong words), translating word size and
 * byte order one bit at a time.  Host bits beyond target_size are
 * silently dropped (the caller asserts host_size >= target_size).
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        /* Gather this guest word's bits from the host mask. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
8242 
8243 /* This is an internal helper for do_syscall so that it is easier
8244  * to have a single return point, so that actions, such as logging
8245  * of syscall results, can be performed.
8246  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8247  */
8248 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8249                             abi_long arg2, abi_long arg3, abi_long arg4,
8250                             abi_long arg5, abi_long arg6, abi_long arg7,
8251                             abi_long arg8)
8252 {
8253     CPUState *cpu = env_cpu(cpu_env);
8254     abi_long ret;
8255 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8256     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8257     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8258     || defined(TARGET_NR_statx)
8259     struct stat st;
8260 #endif
8261 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8262     || defined(TARGET_NR_fstatfs)
8263     struct statfs stfs;
8264 #endif
8265     void *p;
8266 
8267     switch(num) {
8268     case TARGET_NR_exit:
8269         /* In old applications this may be used to implement _exit(2).
8270            However in threaded applications it is used for thread termination,
8271            and _exit_group is used for application termination.
8272            Do thread termination if we have more then one thread.  */
8273 
8274         if (block_signals()) {
8275             return -TARGET_ERESTARTSYS;
8276         }
8277 
8278         pthread_mutex_lock(&clone_lock);
8279 
8280         if (CPU_NEXT(first_cpu)) {
8281             TaskState *ts = cpu->opaque;
8282 
8283             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8284             object_unref(OBJECT(cpu));
8285             /*
8286              * At this point the CPU should be unrealized and removed
8287              * from cpu lists. We can clean-up the rest of the thread
8288              * data without the lock held.
8289              */
8290 
8291             pthread_mutex_unlock(&clone_lock);
8292 
8293             if (ts->child_tidptr) {
8294                 put_user_u32(0, ts->child_tidptr);
8295                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8296                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8297             }
8298             thread_cpu = NULL;
8299             g_free(ts);
8300             rcu_unregister_thread();
8301             pthread_exit(NULL);
8302         }
8303 
8304         pthread_mutex_unlock(&clone_lock);
8305         preexit_cleanup(cpu_env, arg1);
8306         _exit(arg1);
8307         return 0; /* avoid warning */
8308     case TARGET_NR_read:
8309         if (arg2 == 0 && arg3 == 0) {
8310             return get_errno(safe_read(arg1, 0, 0));
8311         } else {
8312             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8313                 return -TARGET_EFAULT;
8314             ret = get_errno(safe_read(arg1, p, arg3));
8315             if (ret >= 0 &&
8316                 fd_trans_host_to_target_data(arg1)) {
8317                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8318             }
8319             unlock_user(p, arg2, ret);
8320         }
8321         return ret;
8322     case TARGET_NR_write:
8323         if (arg2 == 0 && arg3 == 0) {
8324             return get_errno(safe_write(arg1, 0, 0));
8325         }
8326         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8327             return -TARGET_EFAULT;
8328         if (fd_trans_target_to_host_data(arg1)) {
8329             void *copy = g_malloc(arg3);
8330             memcpy(copy, p, arg3);
8331             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8332             if (ret >= 0) {
8333                 ret = get_errno(safe_write(arg1, copy, ret));
8334             }
8335             g_free(copy);
8336         } else {
8337             ret = get_errno(safe_write(arg1, p, arg3));
8338         }
8339         unlock_user(p, arg2, 0);
8340         return ret;
8341 
8342 #ifdef TARGET_NR_open
8343     case TARGET_NR_open:
8344         if (!(p = lock_user_string(arg1)))
8345             return -TARGET_EFAULT;
8346         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8347                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8348                                   arg3));
8349         fd_trans_unregister(ret);
8350         unlock_user(p, arg1, 0);
8351         return ret;
8352 #endif
8353     case TARGET_NR_openat:
8354         if (!(p = lock_user_string(arg2)))
8355             return -TARGET_EFAULT;
8356         ret = get_errno(do_openat(cpu_env, arg1, p,
8357                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8358                                   arg4));
8359         fd_trans_unregister(ret);
8360         unlock_user(p, arg2, 0);
8361         return ret;
8362 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8363     case TARGET_NR_name_to_handle_at:
8364         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8365         return ret;
8366 #endif
8367 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8368     case TARGET_NR_open_by_handle_at:
8369         ret = do_open_by_handle_at(arg1, arg2, arg3);
8370         fd_trans_unregister(ret);
8371         return ret;
8372 #endif
8373     case TARGET_NR_close:
8374         fd_trans_unregister(arg1);
8375         return get_errno(close(arg1));
8376 
8377     case TARGET_NR_brk:
8378         return do_brk(arg1);
8379 #ifdef TARGET_NR_fork
8380     case TARGET_NR_fork:
8381         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8382 #endif
8383 #ifdef TARGET_NR_waitpid
8384     case TARGET_NR_waitpid:
8385         {
8386             int status;
8387             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8388             if (!is_error(ret) && arg2 && ret
8389                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8390                 return -TARGET_EFAULT;
8391         }
8392         return ret;
8393 #endif
8394 #ifdef TARGET_NR_waitid
8395     case TARGET_NR_waitid:
8396         {
8397             siginfo_t info;
8398             info.si_pid = 0;
8399             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8400             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8401                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8402                     return -TARGET_EFAULT;
8403                 host_to_target_siginfo(p, &info);
8404                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8405             }
8406         }
8407         return ret;
8408 #endif
8409 #ifdef TARGET_NR_creat /* not on alpha */
8410     case TARGET_NR_creat:
8411         if (!(p = lock_user_string(arg1)))
8412             return -TARGET_EFAULT;
8413         ret = get_errno(creat(p, arg2));
8414         fd_trans_unregister(ret);
8415         unlock_user(p, arg1, 0);
8416         return ret;
8417 #endif
8418 #ifdef TARGET_NR_link
8419     case TARGET_NR_link:
8420         {
8421             void * p2;
8422             p = lock_user_string(arg1);
8423             p2 = lock_user_string(arg2);
8424             if (!p || !p2)
8425                 ret = -TARGET_EFAULT;
8426             else
8427                 ret = get_errno(link(p, p2));
8428             unlock_user(p2, arg2, 0);
8429             unlock_user(p, arg1, 0);
8430         }
8431         return ret;
8432 #endif
8433 #if defined(TARGET_NR_linkat)
8434     case TARGET_NR_linkat:
8435         {
8436             void * p2 = NULL;
8437             if (!arg2 || !arg4)
8438                 return -TARGET_EFAULT;
8439             p  = lock_user_string(arg2);
8440             p2 = lock_user_string(arg4);
8441             if (!p || !p2)
8442                 ret = -TARGET_EFAULT;
8443             else
8444                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8445             unlock_user(p, arg2, 0);
8446             unlock_user(p2, arg4, 0);
8447         }
8448         return ret;
8449 #endif
8450 #ifdef TARGET_NR_unlink
8451     case TARGET_NR_unlink:
8452         if (!(p = lock_user_string(arg1)))
8453             return -TARGET_EFAULT;
8454         ret = get_errno(unlink(p));
8455         unlock_user(p, arg1, 0);
8456         return ret;
8457 #endif
8458 #if defined(TARGET_NR_unlinkat)
8459     case TARGET_NR_unlinkat:
8460         if (!(p = lock_user_string(arg2)))
8461             return -TARGET_EFAULT;
8462         ret = get_errno(unlinkat(arg1, p, arg3));
8463         unlock_user(p, arg2, 0);
8464         return ret;
8465 #endif
8466     case TARGET_NR_execve:
8467         {
8468             char **argp, **envp;
8469             int argc, envc;
8470             abi_ulong gp;
8471             abi_ulong guest_argp;
8472             abi_ulong guest_envp;
8473             abi_ulong addr;
8474             char **q;
8475             int total_size = 0;
8476 
8477             argc = 0;
8478             guest_argp = arg2;
8479             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8480                 if (get_user_ual(addr, gp))
8481                     return -TARGET_EFAULT;
8482                 if (!addr)
8483                     break;
8484                 argc++;
8485             }
8486             envc = 0;
8487             guest_envp = arg3;
8488             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8489                 if (get_user_ual(addr, gp))
8490                     return -TARGET_EFAULT;
8491                 if (!addr)
8492                     break;
8493                 envc++;
8494             }
8495 
8496             argp = g_new0(char *, argc + 1);
8497             envp = g_new0(char *, envc + 1);
8498 
8499             for (gp = guest_argp, q = argp; gp;
8500                   gp += sizeof(abi_ulong), q++) {
8501                 if (get_user_ual(addr, gp))
8502                     goto execve_efault;
8503                 if (!addr)
8504                     break;
8505                 if (!(*q = lock_user_string(addr)))
8506                     goto execve_efault;
8507                 total_size += strlen(*q) + 1;
8508             }
8509             *q = NULL;
8510 
8511             for (gp = guest_envp, q = envp; gp;
8512                   gp += sizeof(abi_ulong), q++) {
8513                 if (get_user_ual(addr, gp))
8514                     goto execve_efault;
8515                 if (!addr)
8516                     break;
8517                 if (!(*q = lock_user_string(addr)))
8518                     goto execve_efault;
8519                 total_size += strlen(*q) + 1;
8520             }
8521             *q = NULL;
8522 
8523             if (!(p = lock_user_string(arg1)))
8524                 goto execve_efault;
8525             /* Although execve() is not an interruptible syscall it is
8526              * a special case where we must use the safe_syscall wrapper:
8527              * if we allow a signal to happen before we make the host
8528              * syscall then we will 'lose' it, because at the point of
8529              * execve the process leaves QEMU's control. So we use the
8530              * safe syscall wrapper to ensure that we either take the
8531              * signal as a guest signal, or else it does not happen
8532              * before the execve completes and makes it the other
8533              * program's problem.
8534              */
8535             ret = get_errno(safe_execve(p, argp, envp));
8536             unlock_user(p, arg1, 0);
8537 
8538             goto execve_end;
8539 
8540         execve_efault:
8541             ret = -TARGET_EFAULT;
8542 
8543         execve_end:
8544             for (gp = guest_argp, q = argp; *q;
8545                   gp += sizeof(abi_ulong), q++) {
8546                 if (get_user_ual(addr, gp)
8547                     || !addr)
8548                     break;
8549                 unlock_user(*q, addr, 0);
8550             }
8551             for (gp = guest_envp, q = envp; *q;
8552                   gp += sizeof(abi_ulong), q++) {
8553                 if (get_user_ual(addr, gp)
8554                     || !addr)
8555                     break;
8556                 unlock_user(*q, addr, 0);
8557             }
8558 
8559             g_free(argp);
8560             g_free(envp);
8561         }
8562         return ret;
8563     case TARGET_NR_chdir:
8564         if (!(p = lock_user_string(arg1)))
8565             return -TARGET_EFAULT;
8566         ret = get_errno(chdir(p));
8567         unlock_user(p, arg1, 0);
8568         return ret;
8569 #ifdef TARGET_NR_time
8570     case TARGET_NR_time:
8571         {
8572             time_t host_time;
8573             ret = get_errno(time(&host_time));
8574             if (!is_error(ret)
8575                 && arg1
8576                 && put_user_sal(host_time, arg1))
8577                 return -TARGET_EFAULT;
8578         }
8579         return ret;
8580 #endif
8581 #ifdef TARGET_NR_mknod
8582     case TARGET_NR_mknod:
8583         if (!(p = lock_user_string(arg1)))
8584             return -TARGET_EFAULT;
8585         ret = get_errno(mknod(p, arg2, arg3));
8586         unlock_user(p, arg1, 0);
8587         return ret;
8588 #endif
8589 #if defined(TARGET_NR_mknodat)
8590     case TARGET_NR_mknodat:
8591         if (!(p = lock_user_string(arg2)))
8592             return -TARGET_EFAULT;
8593         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8594         unlock_user(p, arg2, 0);
8595         return ret;
8596 #endif
8597 #ifdef TARGET_NR_chmod
8598     case TARGET_NR_chmod:
8599         if (!(p = lock_user_string(arg1)))
8600             return -TARGET_EFAULT;
8601         ret = get_errno(chmod(p, arg2));
8602         unlock_user(p, arg1, 0);
8603         return ret;
8604 #endif
8605 #ifdef TARGET_NR_lseek
8606     case TARGET_NR_lseek:
8607         return get_errno(lseek(arg1, arg2, arg3));
8608 #endif
8609 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8610     /* Alpha specific */
8611     case TARGET_NR_getxpid:
8612         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8613         return get_errno(getpid());
8614 #endif
8615 #ifdef TARGET_NR_getpid
8616     case TARGET_NR_getpid:
8617         return get_errno(getpid());
8618 #endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            /* The source (arg1) may legitimately be NULL. */
            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            /* The mount point (arg2) is mandatory. */
            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            /* The filesystem type (arg3) is optional as well. */
            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        /* Both spellings of the old single-argument umount share one body. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            /* Implemented via clock_settime() with a zero nanosecond part. */
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* No get_errno(): alarm(2) returns a count, not an error code. */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        /* pause(2) always returns -EINTR once a signal is delivered. */
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            /* A NULL times pointer is passed through as-is to utime(). */
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            /* Convert the two guest timevals (access, modification) if given. */
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            /* path() is QEMU's guest-path translation helper (qemu/path.h). */
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync(2) has no failure mode; always report success. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Signal numbers differ between targets; translate before sending. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            /* Lock both paths; unlock_user() tolerates the NULL failure case. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                /* arg5 carries the RENAME_* flags, passed through unchanged. */
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Copy any fd translator registered on the old fd to the new one. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* pipe2 adds flags (arg2), translated via the fcntl flag table. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            /* times() itself returns a clock_t; convert that to target units. */
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        /* A NULL (zero) path disables process accounting. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
8924 #endif
8925     case TARGET_NR_ioctl:
8926         return do_ioctl(arg1, arg2, arg3);
8927 #ifdef TARGET_NR_fcntl
8928     case TARGET_NR_fcntl:
8929         return do_fcntl(arg1, arg2, arg3);
8930 #endif
8931     case TARGET_NR_setpgid:
8932         return get_errno(setpgid(arg1, arg2));
8933     case TARGET_NR_umask:
8934         return get_errno(umask(arg1));
8935     case TARGET_NR_chroot:
8936         if (!(p = lock_user_string(arg1)))
8937             return -TARGET_EFAULT;
8938         ret = get_errno(chroot(p));
8939         unlock_user(p, arg1, 0);
8940         return ret;
8941 #ifdef TARGET_NR_dup2
8942     case TARGET_NR_dup2:
8943         ret = get_errno(dup2(arg1, arg2));
8944         if (ret >= 0) {
8945             fd_trans_dup(arg1, arg2);
8946         }
8947         return ret;
8948 #endif
8949 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8950     case TARGET_NR_dup3:
8951     {
8952         int host_flags;
8953 
8954         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8955             return -EINVAL;
8956         }
8957         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8958         ret = get_errno(dup3(arg1, arg2, host_flags));
8959         if (ret >= 0) {
8960             fd_trans_dup(arg1, arg2);
8961         }
8962         return ret;
8963     }
8964 #endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /*
             * Old-style sigaction. The old_sigaction struct layout differs
             * per target, hence the three per-architecture variants below.
             */
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
            }
#elif defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

	    ret = get_errno(do_sigaction(arg1, pact, &oact));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act.ka_restorer = 0;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else
                oact = NULL;
            ret = get_errno(do_sigaction(arg1, act, oact));
            /* Shared unlock path for both the success and failure cases. */
	rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            /* "how" 0 with a NULL set just queries the current mask. */
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                /* The old-style call returns the previous mask as its result. */
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha passes the new mask by value (arg2), not by pointer. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* NULL set: only query the old mask; "how" is not used. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* The rt_ variant requires sigsetsize (arg4) to match exactly. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* NULL set: only query the old mask; "how" is not used. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                /* Old-style call: write back using the old sigset layout. */
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
            /* The suspend mask is stashed in the TaskState for later use. */
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            /* A NULL timeout pointer is forwarded as NULL to the syscall. */
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* On success the return value is a signal number; convert. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            /* time64 variant: the timeout uses the 64-bit timespec layout. */
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            /* Convert the guest siginfo before queueing the signal. */
            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            /* Thread-directed variant: arg1=tgid, arg2=tid, arg3=sig. */
            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Block host signals while the guest signal frame is torn down. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limits: pretend success, enforce nothing. */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                /* Convert the host limits back into the guest's layout. */
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                /* host_to_target_rusage writes to the guest buffer at arg2
                 * and returns 0 or -TARGET_EFAULT. */
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                /* Either guest output pointer may be NULL. */
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            /* NULL guest pointers are passed through to the host as NULL. */
            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Old-style select: arg1 points at a block of arguments. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        /* Same as pselect6 but with a 64-bit time_t timeout struct. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            /* NOTE(review): assumes unlock_user() is safe on a NULL host
             * pointer when the corresponding lock failed -- confirm. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            /* arg1 = guest path, arg2 = guest buffer, arg3 = buffer size. */
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* /proc/self/exe is intercepted so the guest sees the path
                 * of the emulated binary (exec_path), not of QEMU itself. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        /*
         * Like TARGET_NR_readlink above but relative to the directory fd
         * in arg1: arg2 = guest path, arg3 = guest buffer, arg4 = size.
         * The /proc/self/exe interception mirrors the readlink case so
         * both entry points behave identically: at most arg4 bytes are
         * written, no NUL terminator is added, and the byte count written
         * is returned (matching readlinkat(2) semantics).
         */
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Cap at the buffer size; we cannot NUL terminate. */
                    ret = MIN(strlen(real), arg4);
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* arg1 = guest path of swap device/file, arg2 = swap flags. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* Only RESTART2 carries a string argument in arg4. */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /*
             * Old-style mmap ABI: arg1 points at a block of six abi_ulong
             * values in guest memory (addr, len, prot, flags, fd, offset)
             * instead of passing them in registers.
             */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 passes the file offset in units of (1 << MMAP_SHIFT). */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Widen the request to cover the whole stack region. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* Shared conversion path; TARGET_NR_fstatfs jumps here too. */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* 64-bit variant; note the guest buffer is arg3 here, not arg2. */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
/* Socket-family syscalls: thin dispatchers into the do_* helpers defined
 * earlier in this file, which handle sockaddr/msghdr layout conversion. */
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* Plain accept is accept4 with no flags. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv is recvfrom with a NULL source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* arg1 = guest buffer, arg2 = length, arg3 = flags. */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * syslog(int type, char *bufp, int len): arg1 is the action,
             * arg2 the guest buffer pointer and arg3 the length.  The
             * length sanity checks for the READ actions must therefore
             * inspect arg3 -- previously this read arg2 (the buffer
             * pointer), so the EINVAL/zero-length short-circuits tested
             * the wrong value.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* No buffer needed; arg3 carries the level for
                 * CONSOLE_LEVEL and is ignored by the other actions. */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, len, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, len));
                    unlock_user(p, arg2, len);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            /* arg2 = new value (may be NULL), arg3 = old value out (may
             * be NULL).  A target_itimerval is two target_timevals laid
             * out back to back, hence the offset arithmetic below. */
            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        /* Shared conversion path for stat/lstat/fstat results. */
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so padding and unconverted fields are clean. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps only when both host and target
                 * declare them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: recurse with the number taken from arg1. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* Only collect rusage if the guest asked for it. */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* ret == 0 (e.g. WNOHANG with nothing ready) means the
                 * status word was not written; skip the copy-out then. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                /* Field-by-field conversion to the guest's layout. */
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    /* Multiplexed SysV IPC entry point (older ABIs); do_ipc() demuxes to
     * the individual sem/msg/shm operations below.
     */
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    /* SysV semaphores: semget needs no argument translation. */
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    /* semop is semtimedop with no timeout; the final flag selects the
     * time64 timespec layout (see the _time64 case below).
     */
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    /* SysV message queues; struct translation handled in the helpers. */
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    /* SysV shared memory; shmat/shmctl need guest-address handling. */
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         * Only the guest argument order differs below; the roles of
         * do_fork()'s parameters are fixed by its definition elsewhere
         * in this file.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run atexit-style guest cleanup before terminating all threads. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    /* x86-only calls: LDT manipulation, and vm86 mode on 32-bit only. */
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* Translate guest timex in, call, translate the (kernel-
             * updated) struct back out on success.
             */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx;

            /* Translate the guest timex; EFAULT on an unreadable buffer. */
            if (target_to_host_timex(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            /* On success the kernel updates the struct; copy it back to
             * the guest.  (The previous code routed everything through a
             * local pointer and then null-checked it, but the pointer
             * always held &htx, so the check was dead code.)
             */
            if (!is_error(ret) && host_to_target_timex(arg2, &htx) != 0) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            /* Same as clock_adjtime above, but with the 64-bit time_t
             * guest layout on both directions of the conversion.
             */
            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* No host llseek (64-bit hosts): emulate by combining the
             * guest's hi/lo offset halves into a plain lseek.
             */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success, write the resulting 64-bit offset to *arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        /* 32-bit guest on 64-bit host: host and target dirent layouts
         * differ in field widths, so read into a bounce buffer and
         * repack record-by-record into the guest buffer.
         */
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
		struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
		int count1, tnamelen;

		count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
		tde = target_dirp;
                /* NOTE(review): unlike the other two variants below, this
                 * loop trusts the kernel-supplied d_reclen (asserts instead
                 * of breaking when a record would overrun len/count).
                 */
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
		    count1 += treclen;
                }
		ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        /* Same-width host and target: convert in place, only byte-swapping
         * the numeric fields of each record.
         */
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    /* +2: NUL terminator plus the trailing d_type byte. */
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        /* linux_dirent64 layout matches between host and target, so only
         * byte-swap the numeric fields of each record in place.
         */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* Stop rather than walk past the kernel-filled region. */
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    /* do_ppoll flags: (ppoll?, time64?) — poll is the plain variant. */
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* lock_iovec translates the guest iovec array and pins the
             * buffers; the final argument selects copy-back direction.
             */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec failed and left the cause in errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* The file offset arrives as two guest words; recombine
                 * per the target's low/high convention.
                 */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest size up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            /* NOTE(review): mask_size derives from the guest-supplied arg2
             * with no upper bound before alloca — confirm callers cannot
             * pass a size large enough to blow the host stack.
             */
            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest size up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* Either output pointer may be NULL in the guest; only fetch
             * (and later copy back) the ones that were requested.
             */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            /* The kernel would fault on a NULL param; report EINVAL to
             * match native behaviour for a zero guest pointer.
             */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* The conversion helper's status becomes the syscall
                 * result (0 or -TARGET_EFAULT).
                 */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Same as above, but with the 64-bit time_t layout. */
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;

            /* Fail with EFAULT on an unreadable request pointer instead of
             * sleeping on an uninitialized timespec (the previous code
             * ignored the conversion result).
             */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            /* On interruption, report the remaining time to the guest;
             * propagate EFAULT if the remainder buffer is unwritable
             * (previously the copy-out result was silently dropped).
             */
            if (is_error(ret) && arg2 &&
                host_to_target_timespec(arg2, &rem)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            /* The host writes the signal into a local; copy it out to the
             * guest only on success and only if a buffer was supplied.
             */
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_s32(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            /* Task names are at most 16 bytes including the NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#endif
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
        {
            /* Report the FPU mode directly from the emulated CP0 state. */
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
        case TARGET_PR_SET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                            TARGET_PR_FP_MODE_FRE;

            /* If nothing to change, return right away, successfully.  */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
            /* Check the value is valid */
            if (arg2 & ~known_bits) {
                return -TARGET_EOPNOTSUPP;
            }
            /* Setting FRE without FR is not supported.  */
            if (new_fre && !new_fr) {
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                /* FR1 is not supported */
                return -TARGET_EOPNOTSUPP;
            }
            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                /* cannot set FR=0 */
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                /* Cannot set FRE=1 */
                return -TARGET_EOPNOTSUPP;
            }

            /* When the FR mode flips, re-pair the 32-bit halves of the
             * even/odd FP register banks so register contents survive
             * the mode change.
             */
            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32 ; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            /* Commit the new mode to CP0 and the translator hflags. */
            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                /* ZCR_EL1 stores VQ - 1 in its low four bits. */
                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                /* Shrinking the vector length truncates live state. */
                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                /* Return the vector length actually set, in bytes. */
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    /* NOTE: deliberately shadows the outer 'ret'; this one
                     * accumulates RNG failure status only.
                     */
                    int ret = 0;
                    Error *err = NULL;

                    /* arg2 == 0 means "reset all keys". */
                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto.  The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
10998         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10999             {
11000                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
11001                 CPUARMState *env = cpu_env;
11002                 ARMCPU *cpu = env_archcpu(env);
11003 
11004                 if (cpu_isar_feature(aa64_mte, cpu)) {
11005                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
11006                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
11007                 }
11008 
11009                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
11010                     return -TARGET_EINVAL;
11011                 }
11012                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
11013 
11014                 if (cpu_isar_feature(aa64_mte, cpu)) {
11015                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
11016                     case TARGET_PR_MTE_TCF_NONE:
11017                     case TARGET_PR_MTE_TCF_SYNC:
11018                     case TARGET_PR_MTE_TCF_ASYNC:
11019                         break;
11020                     default:
11021                         return -EINVAL;
11022                     }
11023 
11024                     /*
11025                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
11026                      * Note that the syscall values are consistent with hw.
11027                      */
11028                     env->cp15.sctlr_el[1] =
11029                         deposit64(env->cp15.sctlr_el[1], 38, 2,
11030                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
11031 
11032                     /*
11033                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
11034                      * Note that the syscall uses an include mask,
11035                      * and hardware uses an exclude mask -- invert.
11036                      */
11037                     env->cp15.gcr_el1 =
11038                         deposit64(env->cp15.gcr_el1, 0, 16,
11039                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
11040                     arm_rebuild_hflags(env);
11041                 }
11042                 return 0;
11043             }
        case TARGET_PR_GET_TAGGED_ADDR_CTRL:
            {
                abi_long ret = 0;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                /* This prctl takes no extra arguments. */
                if (arg2 || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (env->tagged_addr_enable) {
                    ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
                }
                if (cpu_isar_feature(aa64_mte, cpu)) {
                    /*
                     * See above: TCF is read back from SCTLR_EL1[TCF0],
                     * and the hardware exclude mask in GCR_EL1 is
                     * inverted back into the include mask the guest set.
                     */
                    ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
                            << TARGET_PR_MTE_TCF_SHIFT);
                    ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
                                    ~env->cp15.gcr_el1);
                }
                return ret;
            }
11065 #endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            /* Pass the option straight through to the host kernel. */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
11074         }
11075         break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        /* Architecture-specific prctl; handled entirely by the helper. */
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /*
         * On ABIs where 64-bit syscall arguments must sit in aligned
         * register pairs, the offset halves arrive one slot later;
         * shift them down before use.
         */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back the number of bytes actually read. */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Read-only guest buffer: nothing to copy back. */
        unlock_user(p, arg2, 0);
        return ret;
#endif
11116     case TARGET_NR_getcwd:
11117         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11118             return -TARGET_EFAULT;
11119         ret = get_errno(sys_getcwd1(p, arg2));
11120         unlock_user(p, arg1, ret);
11121         return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        /*
         * Capability get/set: convert the guest header (and, when
         * present, the data array) to host layout, run the host
         * syscall, then convert the results back.
         */
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        /* arg2 (the data pointer) may legitimately be NULL. */
        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            /* For capset, byte-swap the guest data into host layout. */
            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* Swap the results back into guest byte order. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                /* capset read the guest buffer; nothing to copy back. */
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2,
                              get_sp_from_cpustate((CPUArchState *)cpu_env));

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        /* sendfile with an abi_long-sized guest offset pointer. */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        /* On success, write the updated offset back to the guest. */
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        /* Same as above, but the guest offset is always 64-bit. */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Emulated via do_fork with the vfork clone flag combination. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        /* getrlimit with target<->host resource and limit conversion. */
	struct rlimit rlim;
	int resource = target_to_host_resource(arg1);
	ret = get_errno(getrlimit(resource, &rlim));
	if (!is_error(ret)) {
	    struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
	}
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* Path-based 64-bit truncate; target_truncate64 decodes args. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        /* Descriptor-based variant, same argument decoding in helper. */
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        /* 64-bit stat family: host struct stat is converted to the
         * guest layout by host_to_target_stat64(). */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() may redirect into the emulated sysroot prefix. */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* Both syscall names share this one implementation. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                /* Try the host's native statx first. */
                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Only fall through to the fstatat emulation below if
                 * the host kernel lacks statx entirely. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Emulate via fstatat: fill in the subset of statx fields
             * that a plain struct stat can provide, zeroing the rest. */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                /* Only whole seconds are available from struct stat. */
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* chown on the link itself; legacy ids widened via low2high*. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* Legacy narrow-id syscalls: host ids narrowed via high2low*. */
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11411     case TARGET_NR_getgroups:
11412         {
11413             int gidsetsize = arg1;
11414             target_id *target_grouplist;
11415             gid_t *grouplist;
11416             int i;
11417 
11418             grouplist = alloca(gidsetsize * sizeof(gid_t));
11419             ret = get_errno(getgroups(gidsetsize, grouplist));
11420             if (gidsetsize == 0)
11421                 return ret;
11422             if (!is_error(ret)) {
11423                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11424                 if (!target_grouplist)
11425                     return -TARGET_EFAULT;
11426                 for(i = 0;i < ret; i++)
11427                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11428                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11429             }
11430         }
11431         return ret;
11432     case TARGET_NR_setgroups:
11433         {
11434             int gidsetsize = arg1;
11435             target_id *target_grouplist;
11436             gid_t *grouplist = NULL;
11437             int i;
11438             if (gidsetsize) {
11439                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11440                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11441                 if (!target_grouplist) {
11442                     return -TARGET_EFAULT;
11443                 }
11444                 for (i = 0; i < gidsetsize; i++) {
11445                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11446                 }
11447                 unlock_user(target_grouplist, arg2, 0);
11448             }
11449             return get_errno(setgroups(gidsetsize, grouplist));
11450         }
11451     case TARGET_NR_fchown:
11452         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11453 #if defined(TARGET_NR_fchownat)
11454     case TARGET_NR_fchownat:
11455         if (!(p = lock_user_string(arg2)))
11456             return -TARGET_EFAULT;
11457         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11458                                  low2highgid(arg4), arg5));
11459         unlock_user(p, arg2, 0);
11460         return ret;
11461 #endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Store the three ids at the guest-supplied pointers. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /*
         * Fix: the guard previously tested TARGET_NR_getresgid; a
         * target defining setresgid but not getresgid would silently
         * lose this case.  Guard on the syscall actually handled here.
         */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Write the three gids out to the guest pointers. */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        /* Legacy narrow-id chown; ids widened via low2high*. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid never fail; the old id is returned. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
11518 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit-uid variant: ids pass through without conversion. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
11531 
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
         }
        /* The syscall result is the real uid; the effective uid is
         * handed back in register a4 (set above). */
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxgid:
         {
            uid_t egid;
            egid=getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
         }
        /* Likewise: real gid as result, effective gid in a4. */
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                /* The exception status bits live in the hardware fpcr
                 * (see osf_setsysinfo); merge them into the software
                 * control word before handing it to the guest. */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Choose a si_code; later checks take precedence
                     * because each assignment overwrites the last. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    /* Deliver SIGFPE to the guest at its current pc. */
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Map the OSF how-value onto the host constants. */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* Success: the old mask is the syscall return value. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
11708 
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        /* 32-bit-uid variants: host ids pass through unconverted. */
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* 32-bit-uid variant of getgroups (no id narrowing). */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist = NULL;
            int i;

            /*
             * Bound the guest-controlled size before allocating; the
             * previous unchecked alloca() could overflow the stack for
             * huge or negative counts.  NGROUPS_MAX is the kernel's
             * upper bound on the group list.  A negative size is left
             * for the host getgroups() to reject with EINVAL.
             */
            if (gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            /* With size 0 the syscall only reports the group count. */
            if (!is_error(ret) && gidsetsize != 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    g_free(grouplist);
                    return -TARGET_EFAULT;
                }
                /* Only 'ret' entries were filled in by the kernel. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            g_free(grouplist);
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* 32-bit-uid variant of setgroups (no id widening). */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist = NULL;
            int i;

            /*
             * Reject out-of-range sizes (kernel limit is NGROUPS_MAX)
             * before allocating: the previous unchecked alloca() could
             * smash the stack.  Also skip guest memory access entirely
             * for gidsetsize == 0, matching TARGET_NR_setgroups --
             * setgroups(0, NULL) is valid and drops all supplementary
             * groups, and must not fault on a NULL guest pointer.
             */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    g_free(grouplist);
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            ret = get_errno(setgroups(gidsetsize, grouplist));
            g_free(grouplist);
            return ret;
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* 32-bit-uid variants: ids pass through without narrowing. */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Store the three ids at the guest-supplied pointers. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        /* mincore(2): report which pages of [arg1, arg1+arg2) are resident
         * into the per-page byte vector at arg3. */
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /* NOTE(review): arg3 is mincore's *output* vector (one byte per
             * page), not a NUL-terminated string, so lock_user_string() looks
             * wrong here -- it should presumably be lock_user(VERIFY_WRITE,
             * arg3, DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE), 0). Also, the
             * unlock below passes ret (0 on success) as the length to copy
             * back, so on success no result bytes would be flushed to the
             * guest under a bounce-buffer configuration. Verify both. */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise() returns the error number directly (it does not
         * set errno), hence no get_errno() here. */
        return -host_to_target_errno(ret);
#endif
11866 
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
        /* On 32-bit ABIs the 64-bit offset/len arrive as register pairs,
         * and targets disagree on where the pairs sit; normalize the
         * arguments into (arg2,arg3)=offset, (arg4,arg5)=len, arg6=advice. */
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        /* posix_fadvise() returns the error number directly; no get_errno(). */
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x numbers the advice values differently from the generic
         * ABI: 6/7 mean DONTNEED/NOREUSE there; remap them, and turn the
         * generic 4/5 into deliberately invalid values. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
11929 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        /* Deliberately a no-op: always report success to the guest. */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    /* fcntl64: the 64-bit file-lock commands (F_*LK64) need struct flock64
     * converted between guest and host layouts; everything else is handed
     * off to the common do_fcntl() helper. */
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM guests lay out struct flock64 differently; switch to
         * the OABI conversion helpers when the CPU is not in EABI mode. */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            /* Read the guest lock description, query the host, then write
             * the (possibly updated) description back on success. */
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
	    break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            /* Set-lock commands only read the guest's flock64; nothing is
             * copied back. */
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
	    break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the guest's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* The 64-bit offset arrives as a register pair; some ABIs require
         * the pair to be aligned, shifting the arguments along by one. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
12010 #ifdef CONFIG_ATTR
12011 #ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    /* listxattr/llistxattr: arg1 = pathname, arg2 = output name buffer
     * (may be 0 for a size query), arg3 = buffer size. */
    {
        void *p, *b = 0;        /* local p shadows the outer 'p' */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        /* Flush up to arg3 bytes of the name list back to the guest. */
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    /* flistxattr: same as above but on an already-open fd (arg1). */
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        /* setxattr/lsetxattr: arg1 = path, arg2 = attr name, arg3 = value
         * (may be 0 with arg4 == 0), arg4 = size, arg5 = flags. */
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        /* fsetxattr: as setxattr but arg1 is an fd, so no pathname lock. */
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        /* getxattr/lgetxattr: arg1 = path, arg2 = attr name, arg3 = value
         * buffer (may be 0 for a size query), arg4 = buffer size. */
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* Flush up to arg4 bytes of the value back to the guest. */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        /* fgetxattr: as getxattr but on an open fd (arg1). */
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        /* removexattr/lremovexattr: arg1 = path, arg2 = attr name. */
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        /* fremovexattr: as removexattr but on an open fd (arg1). */
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
12170 #endif
12171 #endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
        /* Thread-local storage pointer setup; entirely target-specific. */
#if defined(TARGET_MIPS)
      /* MIPS keeps the TLS pointer in the UserLocal coprocessor register. */
      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the low 8 bits of the TLS value to be clear. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* m68k stores the TLS value in the per-thread TaskState. */
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not emulated; guests fall back to uname(2) for the domain name. */
        return -TARGET_ENOSYS;
#endif
12214 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    /* clock_settime(2): convert the guest timespec, then call the host. */
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    /* time64 variant: the guest passes a 64-bit-time_t kernel timespec
     * (used by 32-bit guests to avoid Y2038). */
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    /* clock_gettime(2): query the host, then copy the result out; a
     * copy-out fault is reported as EFAULT via the helper's return. */
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    /* time64 variant of clock_gettime. */
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    /* clock_getres(2): query the clock resolution and copy it out. */
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* The kernel permits a NULL res pointer, so only copy out --
             * and only report a fault -- when arg2 is non-NULL. Previously
             * a copy-out failure was silently ignored and the syscall
             * still reported success. */
            if (arg2 && host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    /* time64 variant of clock_getres for 32-bit guests. */
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* NULL res is valid; report EFAULT only for a failing copy-out
             * to a non-NULL pointer (previously ignored). */
            if (arg2 && host_to_target_timespec64(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    /* clock_nanosleep(2): arg1 = clock id, arg2 = flags (TIMER_ABSTIME or
     * 0), arg3 = requested interval, arg4 = optional remainder out-param. */
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* 'ts' doubles as the remainder buffer when arg4 is non-NULL. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    /* time64 variant: identical logic with 64-bit-time_t conversions. */
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12324 
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* Hand the kernel the host address backing the guest pointer so it
         * can clear the tid word there on thread exit. */
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        /* Translate the guest signal number before delivering. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12354 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        /* utimensat(2): arg1 = dirfd, arg2 = optional pathname, arg3 =
         * optional pair of timespecs (atime, mtime), arg4 = flags. */
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                /* NULL times means "set both timestamps to now". */
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                /* NULL path: operate on the fd itself (futimens-style). */
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        /* time64 variant: timespecs use the 64-bit-time_t kernel layout. */
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* All futex handling (op decoding, timeouts) lives in do_futex(). */
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            /* Register a translator so inotify events read from this fd
             * get converted to the guest's layout. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        /* Flags (O_NONBLOCK/O_CLOEXEC-style) need guest->host remapping. */
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* arg2 is a guest pathname. Previously a faulting pointer was not
         * detected and NULL was passed through to the host syscall; fail
         * with EFAULT instead, matching the other pathname cases. */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Both arguments are plain integers (fd, watch descriptor). */
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
12450 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        /* mq_open(3): arg1 = queue name, arg2 = open flags, arg3 = mode,
         * arg4 = optional struct mq_attr with queue limits. */
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the name is locked at arg1 - 1 but unlocked at
             * arg1 -- the off-by-one offset looks suspicious; confirm what
             * guest-address convention this is relying on. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* NOTE(review): same arg1 - 1 / arg1 mismatch as mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
12483 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        /* mq_timedsend(3): arg1 = mqd, arg2 = message buffer, arg3 = length,
         * arg4 = priority, arg5 = optional absolute timeout. */
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            /* Previously a faulting message buffer was not detected and a
             * NULL pointer was passed to the host call; fail with EFAULT. */
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        /* time64 variant of mq_timedsend: 64-bit-time_t timeout layout. */
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            /* Fail with EFAULT on a bad message buffer instead of passing
             * NULL through to the host call (previously unchecked). */
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
12526 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        /* mq_timedreceive(3): arg1 = mqd, arg2 = receive buffer, arg3 =
         * buffer size, arg4 = optional priority out-param, arg5 = optional
         * absolute timeout. */
        {
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): the host writes the received message into this
             * buffer, so VERIFY_READ looks wrong -- VERIFY_WRITE would be
             * expected; confirm against lock_user semantics. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            /* Fail with EFAULT on a bad buffer instead of passing NULL to
             * the host call (previously unchecked). */
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        /* time64 variant of mq_timedreceive. */
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            /* Fail with EFAULT on a bad buffer instead of passing NULL to
             * the host call (previously unchecked). */
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
12580 
12581     /* Not implemented for now... */
12582 /*     case TARGET_NR_mq_notify: */
12583 /*         break; */
12584 
12585     case TARGET_NR_mq_getsetattr:
12586         {
12587             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12588             ret = 0;
12589             if (arg2 != 0) {
12590                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12591                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12592                                            &posix_mq_attr_out));
12593             } else if (arg3 != 0) {
12594                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12595             }
12596             if (ret == 0 && arg3 != 0) {
12597                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12598             }
12599         }
12600         return ret;
12601 #endif
12602 
12603 #ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        /* tee(2): all four arguments are plain integers; pass through. */
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        /* splice(2): arg2/arg4 are optional guest pointers to 64-bit file
         * offsets, updated by the kernel; copy them in and back out. */
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* Write the (possibly advanced) offsets back to the guest. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        /* vmsplice(2): translate the guest iovec array before the call. */
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() reports its failure through errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
12655 #endif /* CONFIG_SPLICE */
12656 #ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Register a translator so the 64-bit counter reads/writes on
             * this fd are byte-swapped for the guest. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate the two flag bits the guest may set into host values. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
12682 #endif /* CONFIG_EVENTFD  */
12683 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12684     case TARGET_NR_fallocate:
12685 #if TARGET_ABI_BITS == 32
12686         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12687                                   target_offset64(arg5, arg6)));
12688 #else
12689         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12690 #endif
12691         return ret;
12692 #endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /*
         * MIPS passes the 64-bit offset/nbytes pairs in arg3..arg6 with the
         * flags in arg7 (presumably due to 64-bit register-pair alignment
         * rules in its calling convention — confirm against target ABI).
         */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd: same helper as signalfd4 with flags == 0. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        /* Only the flags need translating (EPOLL_CLOEXEC shares O_* bits). */
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)))
;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            /* The event struct only carries data for non-DEL operations. */
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
12774 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Cap maxevents so the host-side event buffer stays bounded. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Host-format buffer that the kernel fills in directly. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                /* The sigset size must match exactly, as the kernel checks. */
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* Byte-swap each returned event back into guest format. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /*
         * New limits for AS/DATA/STACK are deliberately not forwarded to the
         * host (rnewp stays NULL for those resources); presumably because
         * they would constrain the QEMU process itself — confirm intent.
         */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back out to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Let the host write the hostname straight into guest memory. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    /*
     * Kernel cmpxchg helper: compare the 32-bit word at guest address
     * arg6 with arg2 and, if equal, store arg1 there; the old memory
     * value is returned either way.
     */
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Unreadable guest address: deliver SIGSEGV to the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            /*
             * Return here instead of falling through: mem_value was never
             * read, so the compare-and-store below would use an
             * indeterminate value (undefined behavior).  The previous
             * code stored 0xdeadbeef into ret but still returned the
             * uninitialized mem_value.
             */
            return 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
12932 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Host timer handles are kept in the g_posix_timers table. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /*
                 * The guest-visible timer id is the table index tagged with
                 * TIMER_MAGIC; get_timer_id() reverses this encoding.
                 */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is only written back when the guest asked for it. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* Same as timer_settime but with 64-bit time_t itimerspec copies. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the table slot so it can be reused by timer_create. */
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
13101 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* Copy the current setting out only if the guest gave a buffer. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* 64-bit time_t variant of timerfd_gettime. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* A NULL new_value pointer is passed through to the host. */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* 64-bit time_t variant of timerfd_settime. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13181 
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* The name string must be copied from guest memory first. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* Drop any stale fd translator left over from a closed fd. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
13224 
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            /* Copy the optional in/out offsets in from guest memory. */
            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 arg5, arg6));
            /* Only write the offsets back if some bytes were copied. */
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
13260 
    default:
        /* Syscall not implemented by this emulation: log and fail. */
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
13267 
13268 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13269                     abi_long arg2, abi_long arg3, abi_long arg4,
13270                     abi_long arg5, abi_long arg6, abi_long arg7,
13271                     abi_long arg8)
13272 {
13273     CPUState *cpu = env_cpu(cpu_env);
13274     abi_long ret;
13275 
13276 #ifdef DEBUG_ERESTARTSYS
13277     /* Debug-only code for exercising the syscall-restart code paths
13278      * in the per-architecture cpu main loops: restart every syscall
13279      * the guest makes once before letting it through.
13280      */
13281     {
13282         static bool flag;
13283         flag = !flag;
13284         if (flag) {
13285             return -TARGET_ERESTARTSYS;
13286         }
13287     }
13288 #endif
13289 
13290     record_syscall_start(cpu, num, arg1,
13291                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13292 
13293     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13294         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13295     }
13296 
13297     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13298                       arg5, arg6, arg7, arg8);
13299 
13300     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13301         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13302                           arg3, arg4, arg5, arg6);
13303     }
13304 
13305     record_syscall_return(cpu, num, ret);
13306     return ret;
13307 }
13308