xref: /qemu/linux-user/syscall.c (revision 04f6fb89)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include <elf.h>
29 #include <endian.h>
30 #include <grp.h>
31 #include <sys/ipc.h>
32 #include <sys/msg.h>
33 #include <sys/wait.h>
34 #include <sys/mount.h>
35 #include <sys/file.h>
36 #include <sys/fsuid.h>
37 #include <sys/personality.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/swap.h>
41 #include <linux/capability.h>
42 #include <sched.h>
43 #include <sys/timex.h>
44 #include <sys/socket.h>
45 #include <linux/sockios.h>
46 #include <sys/un.h>
47 #include <sys/uio.h>
48 #include <poll.h>
49 #include <sys/times.h>
50 #include <sys/shm.h>
51 #include <sys/sem.h>
52 #include <sys/statfs.h>
53 #include <utime.h>
54 #include <sys/sysinfo.h>
55 #include <sys/signalfd.h>
56 //#include <sys/user.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <netinet/udp.h>
61 #include <linux/wireless.h>
62 #include <linux/icmp.h>
63 #include <linux/icmpv6.h>
64 #include <linux/if_tun.h>
65 #include <linux/in6.h>
66 #include <linux/errqueue.h>
67 #include <linux/random.h>
68 #ifdef CONFIG_TIMERFD
69 #include <sys/timerfd.h>
70 #endif
71 #ifdef CONFIG_EVENTFD
72 #include <sys/eventfd.h>
73 #endif
74 #ifdef CONFIG_EPOLL
75 #include <sys/epoll.h>
76 #endif
77 #ifdef CONFIG_ATTR
78 #include "qemu/xattr.h"
79 #endif
80 #ifdef CONFIG_SENDFILE
81 #include <sys/sendfile.h>
82 #endif
83 #ifdef HAVE_SYS_KCOV_H
84 #include <sys/kcov.h>
85 #endif
86 
87 #define termios host_termios
88 #define winsize host_winsize
89 #define termio host_termio
90 #define sgttyb host_sgttyb /* same as target */
91 #define tchars host_tchars /* same as target */
92 #define ltchars host_ltchars /* same as target */
93 
94 #include <linux/termios.h>
95 #include <linux/unistd.h>
96 #include <linux/cdrom.h>
97 #include <linux/hdreg.h>
98 #include <linux/soundcard.h>
99 #include <linux/kd.h>
100 #include <linux/mtio.h>
101 #include <linux/fs.h>
102 #include <linux/fd.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #if defined(CONFIG_USBFS)
108 #include <linux/usbdevice_fs.h>
109 #include <linux/usb/ch9.h>
110 #endif
111 #include <linux/vt.h>
112 #include <linux/dm-ioctl.h>
113 #include <linux/reboot.h>
114 #include <linux/route.h>
115 #include <linux/filter.h>
116 #include <linux/blkpg.h>
117 #include <netpacket/packet.h>
118 #include <linux/netlink.h>
119 #include <linux/if_alg.h>
120 #include <linux/rtc.h>
121 #include <sound/asound.h>
122 #ifdef HAVE_BTRFS_H
123 #include <linux/btrfs.h>
124 #endif
125 #ifdef HAVE_DRM_H
126 #include <libdrm/drm.h>
127 #include <libdrm/i915_drm.h>
128 #endif
129 #include "linux_loop.h"
130 #include "uname.h"
131 
132 #include "qemu.h"
133 #include "user-internals.h"
134 #include "strace.h"
135 #include "signal-common.h"
136 #include "loader.h"
137 #include "user-mmap.h"
138 #include "user/safe-syscall.h"
139 #include "qemu/guest-random.h"
140 #include "qemu/selfmap.h"
141 #include "user/syscall-trace.h"
142 #include "special-errno.h"
143 #include "qapi/error.h"
144 #include "fd-trans.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
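/*
 * Illustrative sketch (not part of the original file): one way the masks
 * above combine to classify a guest clone() request.  The helper name is
 * hypothetical; the real classification is done inline in do_fork() below.
 */
static inline bool clone_flags_look_like_pthread_create(unsigned int flags)
{
    /* every mandatory thread-creation flag must be present ... */
    if ((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) {
        return false;
    }
    /* ... and no bit outside the signal/optional/ignored sets may be set */
    return (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}
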
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
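/*
 * For illustration (not part of the original file): the first use below,
 * _syscall0(int, sys_gettid), expands to roughly
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * where the #define just before that use maps __NR_sys_gettid back to the
 * real __NR_gettid, i.e. a thin static wrapper that issues the raw
 * syscall() directly instead of going through any libc wrapper.
 */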
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we implement guest getdents via the host getdents, if available.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363     uint32_t size;
364     uint32_t sched_policy;
365     uint64_t sched_flags;
366     int32_t sched_nice;
367     uint32_t sched_priority;
368     uint64_t sched_runtime;
369     uint64_t sched_deadline;
370     uint64_t sched_period;
371     uint32_t sched_util_min;
372     uint32_t sched_util_max;
373 };
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
425 static const bitmask_transtbl fcntl_flags_tbl[] = {
426   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
427   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
428   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
429   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
430   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
431   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
432   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
433   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
434   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
435   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
436   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
437   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
439 #if defined(O_DIRECT)
440   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
441 #endif
442 #if defined(O_NOATIME)
443   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
444 #endif
445 #if defined(O_CLOEXEC)
446   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
447 #endif
448 #if defined(O_PATH)
449   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
450 #endif
451 #if defined(O_TMPFILE)
452   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
453 #endif
454   /* Don't terminate the list prematurely on 64-bit host+guest.  */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457 #endif
458 };
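/*
 * Each row above reads { target_mask, target_bits, host_mask, host_bits }:
 * the bitmask translation helpers set host_bits in the result whenever
 * (guest_flags & target_mask) == target_bits, and apply the reverse mapping
 * for host-to-guest, so e.g. a guest TARGET_O_RDWR in the access-mode field
 * becomes the host O_RDWR.
 */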
459 
460 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
461 
462 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
463 #if defined(__NR_utimensat)
464 #define __NR_sys_utimensat __NR_utimensat
465 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
466           const struct timespec *,tsp,int,flags)
467 #else
468 static int sys_utimensat(int dirfd, const char *pathname,
469                          const struct timespec times[2], int flags)
470 {
471     errno = ENOSYS;
472     return -1;
473 }
474 #endif
475 #endif /* TARGET_NR_utimensat */
476 
477 #ifdef TARGET_NR_renameat2
478 #if defined(__NR_renameat2)
479 #define __NR_sys_renameat2 __NR_renameat2
480 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
481           const char *, new, unsigned int, flags)
482 #else
483 static int sys_renameat2(int oldfd, const char *old,
484                          int newfd, const char *new, int flags)
485 {
486     if (flags == 0) {
487         return renameat(oldfd, old, newfd, new);
488     }
489     errno = ENOSYS;
490     return -1;
491 }
492 #endif
493 #endif /* TARGET_NR_renameat2 */
494 
495 #ifdef CONFIG_INOTIFY
496 #include <sys/inotify.h>
497 #else
498 /* Userspace can usually survive runtime without inotify */
499 #undef TARGET_NR_inotify_init
500 #undef TARGET_NR_inotify_init1
501 #undef TARGET_NR_inotify_add_watch
502 #undef TARGET_NR_inotify_rm_watch
503 #endif /* CONFIG_INOTIFY  */
504 
505 #if defined(TARGET_NR_prlimit64)
506 #ifndef __NR_prlimit64
507 # define __NR_prlimit64 -1
508 #endif
509 #define __NR_sys_prlimit64 __NR_prlimit64
510 /* The glibc rlimit structure may not match the one used by the underlying syscall */
511 struct host_rlimit64 {
512     uint64_t rlim_cur;
513     uint64_t rlim_max;
514 };
515 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
516           const struct host_rlimit64 *, new_limit,
517           struct host_rlimit64 *, old_limit)
518 #endif
519 
520 
521 #if defined(TARGET_NR_timer_create)
522 /* Maximum of 32 active POSIX timers allowed at any one time. */
523 #define GUEST_TIMER_MAX 32
524 static timer_t g_posix_timers[GUEST_TIMER_MAX];
525 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
526 
527 static inline int next_free_host_timer(void)
528 {
529     int k;
530     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
531         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
532             return k;
533         }
534     }
535     return -1;
536 }
537 
538 static inline void free_host_timer_slot(int id)
539 {
540     qatomic_store_release(g_posix_timer_allocated + id, 0);
541 }
542 #endif
543 
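/*
 * errnos.c.inc is simply a list of E(ERRNO) entries; including it twice
 * with two different definitions of E() generates the case labels for both
 * translation directions, so the host<->target errno table only has to be
 * written once.
 */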
544 static inline int host_to_target_errno(int host_errno)
545 {
546     switch (host_errno) {
547 #define E(X)  case X: return TARGET_##X;
548 #include "errnos.c.inc"
549 #undef E
550     default:
551         return host_errno;
552     }
553 }
554 
555 static inline int target_to_host_errno(int target_errno)
556 {
557     switch (target_errno) {
558 #define E(X)  case TARGET_##X: return X;
559 #include "errnos.c.inc"
560 #undef E
561     default:
562         return target_errno;
563     }
564 }
565 
566 abi_long get_errno(abi_long ret)
567 {
568     if (ret == -1)
569         return -host_to_target_errno(errno);
570     else
571         return ret;
572 }
573 
574 const char *target_strerror(int err)
575 {
576     if (err == QEMU_ERESTARTSYS) {
577         return "To be restarted";
578     }
579     if (err == QEMU_ESIGRETURN) {
580         return "Successful exit from sigreturn";
581     }
582 
583     return strerror(target_to_host_errno(err));
584 }
585 
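/*
 * Roughly the userspace counterpart of the kernel's check_zeroed_user()
 * convention for size-extensible syscall structures: if the guest passes a
 * structure larger than the version QEMU knows about (usize > ksize), every
 * extra byte must read back as zero.  Returns 1 when the tail is clean or
 * absent, 0 when a non-zero byte is found, and -TARGET_EFAULT if the guest
 * memory cannot be read.
 */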
586 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
587 {
588     int i;
589     uint8_t b;
590     if (usize <= ksize) {
591         return 1;
592     }
593     for (i = ksize; i < usize; i++) {
594         if (get_user_u8(b, addr + i)) {
595             return -TARGET_EFAULT;
596         }
597         if (b != 0) {
598             return 0;
599         }
600     }
601     return 1;
602 }
603 
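/*
 * The safe_syscallN wrappers below parallel the _syscallN ones, but go
 * through safe_syscall() from "user/safe-syscall.h" (included above) so
 * that a guest signal arriving while the host call is about to block can
 * interrupt it and leave the syscall restartable instead of being lost.
 */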
604 #define safe_syscall0(type, name) \
605 static type safe_##name(void) \
606 { \
607     return safe_syscall(__NR_##name); \
608 }
609 
610 #define safe_syscall1(type, name, type1, arg1) \
611 static type safe_##name(type1 arg1) \
612 { \
613     return safe_syscall(__NR_##name, arg1); \
614 }
615 
616 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
617 static type safe_##name(type1 arg1, type2 arg2) \
618 { \
619     return safe_syscall(__NR_##name, arg1, arg2); \
620 }
621 
622 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
623 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
624 { \
625     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
626 }
627 
628 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
629     type4, arg4) \
630 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
631 { \
632     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
633 }
634 
635 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
636     type4, arg4, type5, arg5) \
637 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
638     type5 arg5) \
639 { \
640     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
641 }
642 
643 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
644     type4, arg4, type5, arg5, type6, arg6) \
645 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
646     type5 arg5, type6 arg6) \
647 { \
648     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
649 }
650 
651 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
652 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
653 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
654               int, flags, mode_t, mode)
655 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
656 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
657               struct rusage *, rusage)
658 #endif
659 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
660               int, options, struct rusage *, rusage)
661 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
662 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
663               char **, argv, char **, envp, int, flags)
664 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
665     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
666 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
667               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
668 #endif
669 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
670 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
671               struct timespec *, tsp, const sigset_t *, sigmask,
672               size_t, sigsetsize)
673 #endif
674 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
675               int, maxevents, int, timeout, const sigset_t *, sigmask,
676               size_t, sigsetsize)
677 #if defined(__NR_futex)
678 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
679               const struct timespec *,timeout,int *,uaddr2,int,val3)
680 #endif
681 #if defined(__NR_futex_time64)
682 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
683               const struct timespec *,timeout,int *,uaddr2,int,val3)
684 #endif
685 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
686 safe_syscall2(int, kill, pid_t, pid, int, sig)
687 safe_syscall2(int, tkill, int, tid, int, sig)
688 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
689 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
690 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
692               unsigned long, pos_l, unsigned long, pos_h)
693 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
694               unsigned long, pos_l, unsigned long, pos_h)
695 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
696               socklen_t, addrlen)
697 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
698               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
699 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
700               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
701 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
702 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
703 safe_syscall2(int, flock, int, fd, int, operation)
704 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
705 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
706               const struct timespec *, uts, size_t, sigsetsize)
707 #endif
708 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
709               int, flags)
710 #if defined(TARGET_NR_nanosleep)
711 safe_syscall2(int, nanosleep, const struct timespec *, req,
712               struct timespec *, rem)
713 #endif
714 #if defined(TARGET_NR_clock_nanosleep) || \
715     defined(TARGET_NR_clock_nanosleep_time64)
716 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
717               const struct timespec *, req, struct timespec *, rem)
718 #endif
719 #ifdef __NR_ipc
720 #ifdef __s390x__
721 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
722               void *, ptr)
723 #else
724 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
725               void *, ptr, long, fifth)
726 #endif
727 #endif
728 #ifdef __NR_msgsnd
729 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
730               int, flags)
731 #endif
732 #ifdef __NR_msgrcv
733 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
734               long, msgtype, int, flags)
735 #endif
736 #ifdef __NR_semtimedop
737 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
738               unsigned, nsops, const struct timespec *, timeout)
739 #endif
740 #if defined(TARGET_NR_mq_timedsend) || \
741     defined(TARGET_NR_mq_timedsend_time64)
742 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
743               size_t, len, unsigned, prio, const struct timespec *, timeout)
744 #endif
745 #if defined(TARGET_NR_mq_timedreceive) || \
746     defined(TARGET_NR_mq_timedreceive_time64)
747 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
748               size_t, len, unsigned *, prio, const struct timespec *, timeout)
749 #endif
750 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
751 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
752               int, outfd, loff_t *, poutoff, size_t, length,
753               unsigned int, flags)
754 #endif
755 
756 /* We do ioctl like this rather than via safe_syscall3 to preserve the
757  * "third argument might be integer or pointer or not present" behaviour of
758  * the libc function.
759  */
760 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
761 /* Similarly for fcntl. Note that callers must always:
762  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
763  *  use the flock64 struct rather than unsuffixed flock
764  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
765  */
766 #ifdef __NR_fcntl64
767 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
768 #else
769 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
770 #endif
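/*
 * Illustrative usage (not part of the original file), following the
 * convention described above:
 *
 *     struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     abi_long ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * i.e. always the 64-bit command constants and struct flock64, so the same
 * code path uses 64-bit file offsets on both 32-bit and 64-bit hosts.
 */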
771 
772 static inline int host_to_target_sock_type(int host_type)
773 {
774     int target_type;
775 
776     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
777     case SOCK_DGRAM:
778         target_type = TARGET_SOCK_DGRAM;
779         break;
780     case SOCK_STREAM:
781         target_type = TARGET_SOCK_STREAM;
782         break;
783     default:
784         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
785         break;
786     }
787 
788 #if defined(SOCK_CLOEXEC)
789     if (host_type & SOCK_CLOEXEC) {
790         target_type |= TARGET_SOCK_CLOEXEC;
791     }
792 #endif
793 
794 #if defined(SOCK_NONBLOCK)
795     if (host_type & SOCK_NONBLOCK) {
796         target_type |= TARGET_SOCK_NONBLOCK;
797     }
798 #endif
799 
800     return target_type;
801 }
802 
803 static abi_ulong target_brk, initial_target_brk;
804 
805 void target_set_brk(abi_ulong new_brk)
806 {
807     target_brk = TARGET_PAGE_ALIGN(new_brk);
808     initial_target_brk = target_brk;
809 }
810 
811 /* do_brk() must return target values and target errnos. */
812 abi_long do_brk(abi_ulong brk_val)
813 {
814     abi_long mapped_addr;
815     abi_ulong new_brk;
816     abi_ulong old_brk;
817 
818     /* brk pointers are always untagged */
819 
820     /* do not allow the break to shrink below its initial value */
821     if (brk_val < initial_target_brk) {
822         return target_brk;
823     }
824 
825     new_brk = TARGET_PAGE_ALIGN(brk_val);
826     old_brk = TARGET_PAGE_ALIGN(target_brk);
827 
828     /* new and old target_brk might be on the same page */
829     if (new_brk == old_brk) {
830         target_brk = brk_val;
831         return target_brk;
832     }
833 
834     /* Release heap if necessary */
835     if (new_brk < old_brk) {
836         target_munmap(new_brk, old_brk - new_brk);
837 
838         target_brk = brk_val;
839         return target_brk;
840     }
841 
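    /*
     * Try to grow the heap in place.  MAP_FIXED_NOREPLACE makes the kernel
     * refuse rather than replace an existing mapping, so if something else
     * is already mapped just above the old break the mmap fails and we fall
     * through to report the unchanged break back to the guest.
     */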
842     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
843                               PROT_READ | PROT_WRITE,
844                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
845                               -1, 0);
846 
847     if (mapped_addr == old_brk) {
848         target_brk = brk_val;
849         return target_brk;
850     }
851 
852 #if defined(TARGET_ALPHA)
853     /* We (partially) emulate OSF/1 on Alpha, which requires we
854        return a proper errno, not an unchanged brk value.  */
855     return -TARGET_ENOMEM;
856 #endif
857     /* For everything else, return the previous break. */
858     return target_brk;
859 }
860 
861 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
862     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
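/*
 * A guest fd_set is just an array of abi_ulong bit words in guest byte
 * order, while the host fd_set layout is whatever the host libc uses, so
 * the helpers below move descriptors one bit at a time through
 * FD_SET()/FD_ISSET() instead of assuming the two representations share a
 * word size or endianness.
 */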
863 static inline abi_long copy_from_user_fdset(fd_set *fds,
864                                             abi_ulong target_fds_addr,
865                                             int n)
866 {
867     int i, nw, j, k;
868     abi_ulong b, *target_fds;
869 
870     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
871     if (!(target_fds = lock_user(VERIFY_READ,
872                                  target_fds_addr,
873                                  sizeof(abi_ulong) * nw,
874                                  1)))
875         return -TARGET_EFAULT;
876 
877     FD_ZERO(fds);
878     k = 0;
879     for (i = 0; i < nw; i++) {
880         /* grab the abi_ulong */
881         __get_user(b, &target_fds[i]);
882         for (j = 0; j < TARGET_ABI_BITS; j++) {
883             /* check the bit inside the abi_ulong */
884             if ((b >> j) & 1)
885                 FD_SET(k, fds);
886             k++;
887         }
888     }
889 
890     unlock_user(target_fds, target_fds_addr, 0);
891 
892     return 0;
893 }
894 
895 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
896                                                  abi_ulong target_fds_addr,
897                                                  int n)
898 {
899     if (target_fds_addr) {
900         if (copy_from_user_fdset(fds, target_fds_addr, n))
901             return -TARGET_EFAULT;
902         *fds_ptr = fds;
903     } else {
904         *fds_ptr = NULL;
905     }
906     return 0;
907 }
908 
909 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
910                                           const fd_set *fds,
911                                           int n)
912 {
913     int i, nw, j, k;
914     abi_long v;
915     abi_ulong *target_fds;
916 
917     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
918     if (!(target_fds = lock_user(VERIFY_WRITE,
919                                  target_fds_addr,
920                                  sizeof(abi_ulong) * nw,
921                                  0)))
922         return -TARGET_EFAULT;
923 
924     k = 0;
925     for (i = 0; i < nw; i++) {
926         v = 0;
927         for (j = 0; j < TARGET_ABI_BITS; j++) {
928             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
929             k++;
930         }
931         __put_user(v, &target_fds[i]);
932     }
933 
934     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
935 
936     return 0;
937 }
938 #endif
939 
940 #if defined(__alpha__)
941 #define HOST_HZ 1024
942 #else
943 #define HOST_HZ 100
944 #endif
945 
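/*
 * clock_t values are expressed in scheduler ticks, and host and guest may
 * tick at different rates; e.g. 2048 ticks from an Alpha host (HOST_HZ
 * 1024) are reported to a 100 Hz guest as 2048 * 100 / 1024 = 200 ticks.
 */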
946 static inline abi_long host_to_target_clock_t(long ticks)
947 {
948 #if HOST_HZ == TARGET_HZ
949     return ticks;
950 #else
951     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
952 #endif
953 }
954 
955 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
956                                              const struct rusage *rusage)
957 {
958     struct target_rusage *target_rusage;
959 
960     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
961         return -TARGET_EFAULT;
962     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
963     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
964     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
965     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
966     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
967     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
968     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
969     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
970     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
971     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
972     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
973     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
974     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
975     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
976     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
977     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
978     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
979     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
980     unlock_user_struct(target_rusage, target_addr, 1);
981 
982     return 0;
983 }
984 
985 #ifdef TARGET_NR_setrlimit
986 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
987 {
988     abi_ulong target_rlim_swap;
989     rlim_t result;
990 
991     target_rlim_swap = tswapal(target_rlim);
992     if (target_rlim_swap == TARGET_RLIM_INFINITY)
993         return RLIM_INFINITY;
994 
995     result = target_rlim_swap;
996     if (target_rlim_swap != (rlim_t)result)
997         return RLIM_INFINITY;
998 
999     return result;
1000 }
1001 #endif
1002 
1003 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1004 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1005 {
1006     abi_ulong target_rlim_swap;
1007     abi_ulong result;
1008 
1009     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1010         target_rlim_swap = TARGET_RLIM_INFINITY;
1011     else
1012         target_rlim_swap = rlim;
1013     result = tswapal(target_rlim_swap);
1014 
1015     return result;
1016 }
1017 #endif
1018 
1019 static inline int target_to_host_resource(int code)
1020 {
1021     switch (code) {
1022     case TARGET_RLIMIT_AS:
1023         return RLIMIT_AS;
1024     case TARGET_RLIMIT_CORE:
1025         return RLIMIT_CORE;
1026     case TARGET_RLIMIT_CPU:
1027         return RLIMIT_CPU;
1028     case TARGET_RLIMIT_DATA:
1029         return RLIMIT_DATA;
1030     case TARGET_RLIMIT_FSIZE:
1031         return RLIMIT_FSIZE;
1032     case TARGET_RLIMIT_LOCKS:
1033         return RLIMIT_LOCKS;
1034     case TARGET_RLIMIT_MEMLOCK:
1035         return RLIMIT_MEMLOCK;
1036     case TARGET_RLIMIT_MSGQUEUE:
1037         return RLIMIT_MSGQUEUE;
1038     case TARGET_RLIMIT_NICE:
1039         return RLIMIT_NICE;
1040     case TARGET_RLIMIT_NOFILE:
1041         return RLIMIT_NOFILE;
1042     case TARGET_RLIMIT_NPROC:
1043         return RLIMIT_NPROC;
1044     case TARGET_RLIMIT_RSS:
1045         return RLIMIT_RSS;
1046     case TARGET_RLIMIT_RTPRIO:
1047         return RLIMIT_RTPRIO;
1048 #ifdef RLIMIT_RTTIME
1049     case TARGET_RLIMIT_RTTIME:
1050         return RLIMIT_RTTIME;
1051 #endif
1052     case TARGET_RLIMIT_SIGPENDING:
1053         return RLIMIT_SIGPENDING;
1054     case TARGET_RLIMIT_STACK:
1055         return RLIMIT_STACK;
1056     default:
1057         return code;
1058     }
1059 }
1060 
1061 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1062                                               abi_ulong target_tv_addr)
1063 {
1064     struct target_timeval *target_tv;
1065 
1066     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1067         return -TARGET_EFAULT;
1068     }
1069 
1070     __get_user(tv->tv_sec, &target_tv->tv_sec);
1071     __get_user(tv->tv_usec, &target_tv->tv_usec);
1072 
1073     unlock_user_struct(target_tv, target_tv_addr, 0);
1074 
1075     return 0;
1076 }
1077 
1078 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1079                                             const struct timeval *tv)
1080 {
1081     struct target_timeval *target_tv;
1082 
1083     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1084         return -TARGET_EFAULT;
1085     }
1086 
1087     __put_user(tv->tv_sec, &target_tv->tv_sec);
1088     __put_user(tv->tv_usec, &target_tv->tv_usec);
1089 
1090     unlock_user_struct(target_tv, target_tv_addr, 1);
1091 
1092     return 0;
1093 }
1094 
1095 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1096 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1097                                                 abi_ulong target_tv_addr)
1098 {
1099     struct target__kernel_sock_timeval *target_tv;
1100 
1101     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1102         return -TARGET_EFAULT;
1103     }
1104 
1105     __get_user(tv->tv_sec, &target_tv->tv_sec);
1106     __get_user(tv->tv_usec, &target_tv->tv_usec);
1107 
1108     unlock_user_struct(target_tv, target_tv_addr, 0);
1109 
1110     return 0;
1111 }
1112 #endif
1113 
1114 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1115                                               const struct timeval *tv)
1116 {
1117     struct target__kernel_sock_timeval *target_tv;
1118 
1119     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1120         return -TARGET_EFAULT;
1121     }
1122 
1123     __put_user(tv->tv_sec, &target_tv->tv_sec);
1124     __put_user(tv->tv_usec, &target_tv->tv_usec);
1125 
1126     unlock_user_struct(target_tv, target_tv_addr, 1);
1127 
1128     return 0;
1129 }
1130 
1131 #if defined(TARGET_NR_futex) || \
1132     defined(TARGET_NR_rt_sigtimedwait) || \
1133     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1134     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1135     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1136     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1137     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1138     defined(TARGET_NR_timer_settime) || \
1139     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1140 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1141                                                abi_ulong target_addr)
1142 {
1143     struct target_timespec *target_ts;
1144 
1145     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1146         return -TARGET_EFAULT;
1147     }
1148     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1149     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1150     unlock_user_struct(target_ts, target_addr, 0);
1151     return 0;
1152 }
1153 #endif
1154 
1155 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1156     defined(TARGET_NR_timer_settime64) || \
1157     defined(TARGET_NR_mq_timedsend_time64) || \
1158     defined(TARGET_NR_mq_timedreceive_time64) || \
1159     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1160     defined(TARGET_NR_clock_nanosleep_time64) || \
1161     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1162     defined(TARGET_NR_utimensat) || \
1163     defined(TARGET_NR_utimensat_time64) || \
1164     defined(TARGET_NR_semtimedop_time64) || \
1165     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1166 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1167                                                  abi_ulong target_addr)
1168 {
1169     struct target__kernel_timespec *target_ts;
1170 
1171     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1172         return -TARGET_EFAULT;
1173     }
1174     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1175     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1176     /* in 32bit mode, this drops the padding */
1177     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1178     unlock_user_struct(target_ts, target_addr, 0);
1179     return 0;
1180 }
1181 #endif
1182 
1183 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1184                                                struct timespec *host_ts)
1185 {
1186     struct target_timespec *target_ts;
1187 
1188     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1189         return -TARGET_EFAULT;
1190     }
1191     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1192     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1193     unlock_user_struct(target_ts, target_addr, 1);
1194     return 0;
1195 }
1196 
1197 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1198                                                  struct timespec *host_ts)
1199 {
1200     struct target__kernel_timespec *target_ts;
1201 
1202     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1203         return -TARGET_EFAULT;
1204     }
1205     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1206     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1207     unlock_user_struct(target_ts, target_addr, 1);
1208     return 0;
1209 }
1210 
1211 #if defined(TARGET_NR_gettimeofday)
1212 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1213                                              struct timezone *tz)
1214 {
1215     struct target_timezone *target_tz;
1216 
1217     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1218         return -TARGET_EFAULT;
1219     }
1220 
1221     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1222     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1223 
1224     unlock_user_struct(target_tz, target_tz_addr, 1);
1225 
1226     return 0;
1227 }
1228 #endif
1229 
1230 #if defined(TARGET_NR_settimeofday)
1231 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1232                                                abi_ulong target_tz_addr)
1233 {
1234     struct target_timezone *target_tz;
1235 
1236     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1237         return -TARGET_EFAULT;
1238     }
1239 
1240     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1241     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1242 
1243     unlock_user_struct(target_tz, target_tz_addr, 0);
1244 
1245     return 0;
1246 }
1247 #endif
1248 
1249 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1250 #include <mqueue.h>
1251 
1252 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1253                                               abi_ulong target_mq_attr_addr)
1254 {
1255     struct target_mq_attr *target_mq_attr;
1256 
1257     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1258                           target_mq_attr_addr, 1))
1259         return -TARGET_EFAULT;
1260 
1261     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1262     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1263     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1264     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1265 
1266     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1267 
1268     return 0;
1269 }
1270 
1271 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1272                                             const struct mq_attr *attr)
1273 {
1274     struct target_mq_attr *target_mq_attr;
1275 
1276     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1277                           target_mq_attr_addr, 0))
1278         return -TARGET_EFAULT;
1279 
1280     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1281     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1282     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1283     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1284 
1285     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1286 
1287     return 0;
1288 }
1289 #endif
1290 
1291 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1292 /* do_select() must return target values and target errnos. */
1293 static abi_long do_select(int n,
1294                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1295                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1296 {
1297     fd_set rfds, wfds, efds;
1298     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1299     struct timeval tv;
1300     struct timespec ts, *ts_ptr;
1301     abi_long ret;
1302 
1303     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1304     if (ret) {
1305         return ret;
1306     }
1307     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1308     if (ret) {
1309         return ret;
1310     }
1311     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1312     if (ret) {
1313         return ret;
1314     }
1315 
1316     if (target_tv_addr) {
1317         if (copy_from_user_timeval(&tv, target_tv_addr))
1318             return -TARGET_EFAULT;
1319         ts.tv_sec = tv.tv_sec;
1320         ts.tv_nsec = tv.tv_usec * 1000;
1321         ts_ptr = &ts;
1322     } else {
1323         ts_ptr = NULL;
1324     }
1325 
1326     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1327                                   ts_ptr, NULL));
1328 
1329     if (!is_error(ret)) {
1330         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1331             return -TARGET_EFAULT;
1332         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1333             return -TARGET_EFAULT;
1334         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1335             return -TARGET_EFAULT;
1336 
1337         if (target_tv_addr) {
1338             tv.tv_sec = ts.tv_sec;
1339             tv.tv_usec = ts.tv_nsec / 1000;
1340             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1341                 return -TARGET_EFAULT;
1342             }
1343         }
1344     }
1345 
1346     return ret;
1347 }
1348 
1349 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1350 static abi_long do_old_select(abi_ulong arg1)
1351 {
1352     struct target_sel_arg_struct *sel;
1353     abi_ulong inp, outp, exp, tvp;
1354     long nsel;
1355 
1356     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1357         return -TARGET_EFAULT;
1358     }
1359 
1360     nsel = tswapal(sel->n);
1361     inp = tswapal(sel->inp);
1362     outp = tswapal(sel->outp);
1363     exp = tswapal(sel->exp);
1364     tvp = tswapal(sel->tvp);
1365 
1366     unlock_user_struct(sel, arg1, 0);
1367 
1368     return do_select(nsel, inp, outp, exp, tvp);
1369 }
1370 #endif
1371 #endif
1372 
1373 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1374 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1375                             abi_long arg4, abi_long arg5, abi_long arg6,
1376                             bool time64)
1377 {
1378     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1379     fd_set rfds, wfds, efds;
1380     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1381     struct timespec ts, *ts_ptr;
1382     abi_long ret;
1383 
1384     /*
1385      * The 6th arg is actually two args smashed together,
1386      * so we cannot use the C library.
1387      */
1388     struct {
1389         sigset_t *set;
1390         size_t size;
1391     } sig, *sig_ptr;
1392 
1393     abi_ulong arg_sigset, arg_sigsize, *arg7;
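    /*
     * arg7 is used below to read the guest-side pair { sigset pointer,
     * sigset size } that pselect6 packs into its sixth argument; both
     * halves are abi_ulong, hence the tswapal() unpacking rather than a
     * read through a host struct.
     */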
1394 
1395     n = arg1;
1396     rfd_addr = arg2;
1397     wfd_addr = arg3;
1398     efd_addr = arg4;
1399     ts_addr = arg5;
1400 
1401     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1402     if (ret) {
1403         return ret;
1404     }
1405     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1406     if (ret) {
1407         return ret;
1408     }
1409     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1410     if (ret) {
1411         return ret;
1412     }
1413 
1414     /*
1415      * This takes a timespec, and not a timeval, so we cannot
1416      * use the do_select() helper ...
1417      */
1418     if (ts_addr) {
1419         if (time64) {
1420             if (target_to_host_timespec64(&ts, ts_addr)) {
1421                 return -TARGET_EFAULT;
1422             }
1423         } else {
1424             if (target_to_host_timespec(&ts, ts_addr)) {
1425                 return -TARGET_EFAULT;
1426             }
1427         }
1428         ts_ptr = &ts;
1429     } else {
1430         ts_ptr = NULL;
1431     }
1432 
1433     /* Extract the two packed args for the sigset */
1434     sig_ptr = NULL;
1435     if (arg6) {
1436         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1437         if (!arg7) {
1438             return -TARGET_EFAULT;
1439         }
1440         arg_sigset = tswapal(arg7[0]);
1441         arg_sigsize = tswapal(arg7[1]);
1442         unlock_user(arg7, arg6, 0);
1443 
1444         if (arg_sigset) {
1445             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1446             if (ret != 0) {
1447                 return ret;
1448             }
1449             sig_ptr = &sig;
1450             sig.size = SIGSET_T_SIZE;
1451         }
1452     }
1453 
1454     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1455                                   ts_ptr, sig_ptr));
1456 
1457     if (sig_ptr) {
1458         finish_sigsuspend_mask(ret);
1459     }
1460 
1461     if (!is_error(ret)) {
1462         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1463             return -TARGET_EFAULT;
1464         }
1465         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1466             return -TARGET_EFAULT;
1467         }
1468         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1469             return -TARGET_EFAULT;
1470         }
1471         if (time64) {
1472             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1473                 return -TARGET_EFAULT;
1474             }
1475         } else {
1476             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1477                 return -TARGET_EFAULT;
1478             }
1479         }
1480     }
1481     return ret;
1482 }
1483 #endif
1484 
1485 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1486     defined(TARGET_NR_ppoll_time64)
1487 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1488                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1489 {
1490     struct target_pollfd *target_pfd;
1491     unsigned int nfds = arg2;
1492     struct pollfd *pfd;
1493     unsigned int i;
1494     abi_long ret;
1495 
1496     pfd = NULL;
1497     target_pfd = NULL;
1498     if (nfds) {
1499         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1500             return -TARGET_EINVAL;
1501         }
1502         target_pfd = lock_user(VERIFY_WRITE, arg1,
1503                                sizeof(struct target_pollfd) * nfds, 1);
1504         if (!target_pfd) {
1505             return -TARGET_EFAULT;
1506         }
1507 
1508         pfd = alloca(sizeof(struct pollfd) * nfds);
1509         for (i = 0; i < nfds; i++) {
1510             pfd[i].fd = tswap32(target_pfd[i].fd);
1511             pfd[i].events = tswap16(target_pfd[i].events);
1512         }
1513     }
1514     if (ppoll) {
1515         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1516         sigset_t *set = NULL;
1517 
1518         if (arg3) {
1519             if (time64) {
1520                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1521                     unlock_user(target_pfd, arg1, 0);
1522                     return -TARGET_EFAULT;
1523                 }
1524             } else {
1525                 if (target_to_host_timespec(timeout_ts, arg3)) {
1526                     unlock_user(target_pfd, arg1, 0);
1527                     return -TARGET_EFAULT;
1528                 }
1529             }
1530         } else {
1531             timeout_ts = NULL;
1532         }
1533 
1534         if (arg4) {
1535             ret = process_sigsuspend_mask(&set, arg4, arg5);
1536             if (ret != 0) {
1537                 unlock_user(target_pfd, arg1, 0);
1538                 return ret;
1539             }
1540         }
1541 
1542         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1543                                    set, SIGSET_T_SIZE));
1544 
1545         if (set) {
1546             finish_sigsuspend_mask(ret);
1547         }
1548         if (!is_error(ret) && arg3) {
1549             if (time64) {
1550                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (host_to_target_timespec(arg3, timeout_ts)) {
1555                     return -TARGET_EFAULT;
1556                 }
1557             }
1558         }
1559     } else {
1560         struct timespec ts, *pts;
1561 
1562         if (arg3 >= 0) {
1563             /* Convert ms to secs, ns */
1564             ts.tv_sec = arg3 / 1000;
1565             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1566             pts = &ts;
1567         } else {
1568             /* -ve poll() timeout means "infinite" */
1569             pts = NULL;
1570         }
1571         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1572     }
1573 
1574     if (!is_error(ret)) {
1575         for (i = 0; i < nfds; i++) {
1576             target_pfd[i].revents = tswap16(pfd[i].revents);
1577         }
1578     }
1579     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1580     return ret;
1581 }
1582 #endif
1583 
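/* Create a host pipe with pipe2() and hand both descriptors to the
 * guest.  For the original pipe syscall, Alpha, MIPS, SH4 and SPARC
 * return the second descriptor in a register rather than storing it;
 * pipe2 (and all other targets) store both into the guest array.
 */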
1584 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1585                         int flags, int is_pipe2)
1586 {
1587     int host_pipe[2];
1588     abi_long ret;
1589     ret = pipe2(host_pipe, flags);
1590 
1591     if (is_error(ret))
1592         return get_errno(ret);
1593 
1594     /* Several targets have special calling conventions for the original
1595        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1596     if (!is_pipe2) {
1597 #if defined(TARGET_ALPHA)
1598         cpu_env->ir[IR_A4] = host_pipe[1];
1599         return host_pipe[0];
1600 #elif defined(TARGET_MIPS)
1601         cpu_env->active_tc.gpr[3] = host_pipe[1];
1602         return host_pipe[0];
1603 #elif defined(TARGET_SH4)
1604         cpu_env->gregs[1] = host_pipe[1];
1605         return host_pipe[0];
1606 #elif defined(TARGET_SPARC)
1607         cpu_env->regwptr[1] = host_pipe[1];
1608         return host_pipe[0];
1609 #endif
1610     }
1611 
1612     if (put_user_s32(host_pipe[0], pipedes)
1613         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1614         return -TARGET_EFAULT;
1615     return get_errno(ret);
1616 }
1617 
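/* Convert a guest ip_mreq/ip_mreqn multicast request to the host
 * ip_mreqn layout; the interface index is only read (and byte-swapped)
 * when the guest passed a full struct target_ip_mreqn.
 */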
1618 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1619                                               abi_ulong target_addr,
1620                                               socklen_t len)
1621 {
1622     struct target_ip_mreqn *target_smreqn;
1623 
1624     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1625     if (!target_smreqn)
1626         return -TARGET_EFAULT;
1627     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1628     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1629     if (len == sizeof(struct target_ip_mreqn))
1630         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1631     unlock_user(target_smreqn, target_addr, 0);
1632 
1633     return 0;
1634 }
1635 
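/* Copy a sockaddr from guest memory into a host buffer, byte-swapping
 * the family and the family-specific fields the host will interpret
 * (netlink pid/groups, packet ifindex/hatype, IPv6 scope id).  AF_UNIX
 * sun_path lengths are fixed up below, and sockets with a registered
 * fd translator are converted by that translator instead.
 */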
1636 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1637                                                abi_ulong target_addr,
1638                                                socklen_t len)
1639 {
1640     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1641     sa_family_t sa_family;
1642     struct target_sockaddr *target_saddr;
1643 
1644     if (fd_trans_target_to_host_addr(fd)) {
1645         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1646     }
1647 
1648     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1649     if (!target_saddr)
1650         return -TARGET_EFAULT;
1651 
1652     sa_family = tswap16(target_saddr->sa_family);
1653 
1654     /* Oops. The caller might send an incomplete sun_path; sun_path
1655      * must be terminated by \0 (see the manual page), but
1656      * unfortunately it is quite common to specify sockaddr_un
1657      * length as "strlen(x->sun_path)" while it should be
1658      * "strlen(...) + 1". We'll fix that here if needed.
1659      * The Linux kernel applies the same fixup.
1660      */
1661 
1662     if (sa_family == AF_UNIX) {
1663         if (len < unix_maxlen && len > 0) {
1664             char *cp = (char *)target_saddr;
1665 
1666             if (cp[len - 1] && !cp[len])
1667                 len++;
1668         }
1669         if (len > unix_maxlen)
1670             len = unix_maxlen;
1671     }
1672 
1673     memcpy(addr, target_saddr, len);
1674     addr->sa_family = sa_family;
1675     if (sa_family == AF_NETLINK) {
1676         struct sockaddr_nl *nladdr;
1677 
1678         nladdr = (struct sockaddr_nl *)addr;
1679         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1680         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1681     } else if (sa_family == AF_PACKET) {
1682         struct target_sockaddr_ll *lladdr;
1683 
1684         lladdr = (struct target_sockaddr_ll *)addr;
1685         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1686         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1687     } else if (sa_family == AF_INET6) {
1688         struct sockaddr_in6 *in6addr;
1689 
1690         in6addr = (struct sockaddr_in6 *)addr;
1691         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1692     }
1693     unlock_user(target_saddr, target_addr, 0);
1694 
1695     return 0;
1696 }
1697 
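/* Copy a sockaddr produced by the host back into guest memory,
 * byte-swapping the family and the family-specific fields the guest
 * will interpret (netlink, packet and IPv6 addresses).
 */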
1698 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1699                                                struct sockaddr *addr,
1700                                                socklen_t len)
1701 {
1702     struct target_sockaddr *target_saddr;
1703 
1704     if (len == 0) {
1705         return 0;
1706     }
1707     assert(addr);
1708 
1709     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1710     if (!target_saddr)
1711         return -TARGET_EFAULT;
1712     memcpy(target_saddr, addr, len);
1713     if (len >= offsetof(struct target_sockaddr, sa_family) +
1714         sizeof(target_saddr->sa_family)) {
1715         target_saddr->sa_family = tswap16(addr->sa_family);
1716     }
1717     if (addr->sa_family == AF_NETLINK &&
1718         len >= sizeof(struct target_sockaddr_nl)) {
1719         struct target_sockaddr_nl *target_nl =
1720                (struct target_sockaddr_nl *)target_saddr;
1721         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1722         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1723     } else if (addr->sa_family == AF_PACKET) {
1724         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1725         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1726         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1727     } else if (addr->sa_family == AF_INET6 &&
1728                len >= sizeof(struct target_sockaddr_in6)) {
1729         struct target_sockaddr_in6 *target_in6 =
1730                (struct target_sockaddr_in6 *)target_saddr;
1731         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1732     }
1733     unlock_user(target_saddr, target_addr, len);
1734 
1735     return 0;
1736 }
1737 
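/* Convert the ancillary data (control messages) of a guest msghdr into
 * host format.  SCM_RIGHTS descriptors, SCM_CREDENTIALS and SOL_ALG
 * payloads are converted field by field; any other payload type is
 * copied verbatim with a LOG_UNIMP warning.
 */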
1738 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1739                                            struct target_msghdr *target_msgh)
1740 {
1741     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1742     abi_long msg_controllen;
1743     abi_ulong target_cmsg_addr;
1744     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1745     socklen_t space = 0;
1746 
1747     msg_controllen = tswapal(target_msgh->msg_controllen);
1748     if (msg_controllen < sizeof (struct target_cmsghdr))
1749         goto the_end;
1750     target_cmsg_addr = tswapal(target_msgh->msg_control);
1751     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1752     target_cmsg_start = target_cmsg;
1753     if (!target_cmsg)
1754         return -TARGET_EFAULT;
1755 
1756     while (cmsg && target_cmsg) {
1757         void *data = CMSG_DATA(cmsg);
1758         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1759 
1760         int len = tswapal(target_cmsg->cmsg_len)
1761             - sizeof(struct target_cmsghdr);
1762 
1763         space += CMSG_SPACE(len);
1764         if (space > msgh->msg_controllen) {
1765             space -= CMSG_SPACE(len);
1766             /* This is a QEMU bug, since we allocated the payload
1767              * area ourselves (unlike overflow in host-to-target
1768              * conversion, which is just the guest giving us a buffer
1769              * that's too small). It can't happen for the payload types
1770              * we currently support; if it becomes an issue in future
1771              * we would need to improve our allocation strategy to
1772              * something more intelligent than "twice the size of the
1773              * target buffer we're reading from".
1774              */
1775             qemu_log_mask(LOG_UNIMP,
1776                           ("Unsupported ancillary data %d/%d: "
1777                            "unhandled msg size\n"),
1778                           tswap32(target_cmsg->cmsg_level),
1779                           tswap32(target_cmsg->cmsg_type));
1780             break;
1781         }
1782 
1783         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1784             cmsg->cmsg_level = SOL_SOCKET;
1785         } else {
1786             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1787         }
1788         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1789         cmsg->cmsg_len = CMSG_LEN(len);
1790 
1791         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1792             int *fd = (int *)data;
1793             int *target_fd = (int *)target_data;
1794             int i, numfds = len / sizeof(int);
1795 
1796             for (i = 0; i < numfds; i++) {
1797                 __get_user(fd[i], target_fd + i);
1798             }
1799         } else if (cmsg->cmsg_level == SOL_SOCKET
1800                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1801             struct ucred *cred = (struct ucred *)data;
1802             struct target_ucred *target_cred =
1803                 (struct target_ucred *)target_data;
1804 
1805             __get_user(cred->pid, &target_cred->pid);
1806             __get_user(cred->uid, &target_cred->uid);
1807             __get_user(cred->gid, &target_cred->gid);
1808         } else if (cmsg->cmsg_level == SOL_ALG) {
1809             uint32_t *dst = (uint32_t *)data;
1810 
1811             memcpy(dst, target_data, len);
1812             /* fix endianness of first 32-bit word */
1813             if (len >= sizeof(uint32_t)) {
1814                 *dst = tswap32(*dst);
1815             }
1816         } else {
1817             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1818                           cmsg->cmsg_level, cmsg->cmsg_type);
1819             memcpy(data, target_data, len);
1820         }
1821 
1822         cmsg = CMSG_NXTHDR(msgh, cmsg);
1823         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1824                                          target_cmsg_start);
1825     }
1826     unlock_user(target_cmsg, target_cmsg_addr, 0);
1827  the_end:
1828     msgh->msg_controllen = space;
1829     return 0;
1830 }
1831 
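/* Convert host ancillary data back into the guest msghdr, truncating
 * to the guest-supplied control buffer (reported via MSG_CTRUNC) and
 * byte-swapping the payload types we know about (SCM_RIGHTS,
 * SO_TIMESTAMP, SCM_CREDENTIALS, IP/IPv6 hop limits and RECVERR data).
 */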
1832 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1833                                            struct msghdr *msgh)
1834 {
1835     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1836     abi_long msg_controllen;
1837     abi_ulong target_cmsg_addr;
1838     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1839     socklen_t space = 0;
1840 
1841     msg_controllen = tswapal(target_msgh->msg_controllen);
1842     if (msg_controllen < sizeof (struct target_cmsghdr))
1843         goto the_end;
1844     target_cmsg_addr = tswapal(target_msgh->msg_control);
1845     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1846     target_cmsg_start = target_cmsg;
1847     if (!target_cmsg)
1848         return -TARGET_EFAULT;
1849 
1850     while (cmsg && target_cmsg) {
1851         void *data = CMSG_DATA(cmsg);
1852         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1853 
1854         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1855         int tgt_len, tgt_space;
1856 
1857         /* We never copy a half-header but may copy half-data;
1858          * this is Linux's behaviour in put_cmsg(). Note that
1859          * truncation here is a guest problem (which we report
1860          * to the guest via the CTRUNC bit), unlike truncation
1861          * in target_to_host_cmsg, which is a QEMU bug.
1862          */
1863         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1864             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1865             break;
1866         }
1867 
1868         if (cmsg->cmsg_level == SOL_SOCKET) {
1869             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1870         } else {
1871             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1872         }
1873         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1874 
1875         /* Payload types which need a different size of payload on
1876          * the target must adjust tgt_len here.
1877          */
1878         tgt_len = len;
1879         switch (cmsg->cmsg_level) {
1880         case SOL_SOCKET:
1881             switch (cmsg->cmsg_type) {
1882             case SO_TIMESTAMP:
1883                 tgt_len = sizeof(struct target_timeval);
1884                 break;
1885             default:
1886                 break;
1887             }
1888             break;
1889         default:
1890             break;
1891         }
1892 
1893         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1894             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1895             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1896         }
1897 
1898         /* We must now copy-and-convert len bytes of payload
1899          * into tgt_len bytes of destination space. Bear in mind
1900          * that in both source and destination we may be dealing
1901          * with a truncated value!
1902          */
1903         switch (cmsg->cmsg_level) {
1904         case SOL_SOCKET:
1905             switch (cmsg->cmsg_type) {
1906             case SCM_RIGHTS:
1907             {
1908                 int *fd = (int *)data;
1909                 int *target_fd = (int *)target_data;
1910                 int i, numfds = tgt_len / sizeof(int);
1911 
1912                 for (i = 0; i < numfds; i++) {
1913                     __put_user(fd[i], target_fd + i);
1914                 }
1915                 break;
1916             }
1917             case SO_TIMESTAMP:
1918             {
1919                 struct timeval *tv = (struct timeval *)data;
1920                 struct target_timeval *target_tv =
1921                     (struct target_timeval *)target_data;
1922 
1923                 if (len != sizeof(struct timeval) ||
1924                     tgt_len != sizeof(struct target_timeval)) {
1925                     goto unimplemented;
1926                 }
1927 
1928                 /* copy struct timeval to target */
1929                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1930                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1931                 break;
1932             }
1933             case SCM_CREDENTIALS:
1934             {
1935                 struct ucred *cred = (struct ucred *)data;
1936                 struct target_ucred *target_cred =
1937                     (struct target_ucred *)target_data;
1938 
1939                 __put_user(cred->pid, &target_cred->pid);
1940                 __put_user(cred->uid, &target_cred->uid);
1941                 __put_user(cred->gid, &target_cred->gid);
1942                 break;
1943             }
1944             default:
1945                 goto unimplemented;
1946             }
1947             break;
1948 
1949         case SOL_IP:
1950             switch (cmsg->cmsg_type) {
1951             case IP_TTL:
1952             {
1953                 uint32_t *v = (uint32_t *)data;
1954                 uint32_t *t_int = (uint32_t *)target_data;
1955 
1956                 if (len != sizeof(uint32_t) ||
1957                     tgt_len != sizeof(uint32_t)) {
1958                     goto unimplemented;
1959                 }
1960                 __put_user(*v, t_int);
1961                 break;
1962             }
1963             case IP_RECVERR:
1964             {
1965                 struct errhdr_t {
1966                    struct sock_extended_err ee;
1967                    struct sockaddr_in offender;
1968                 };
1969                 struct errhdr_t *errh = (struct errhdr_t *)data;
1970                 struct errhdr_t *target_errh =
1971                     (struct errhdr_t *)target_data;
1972 
1973                 if (len != sizeof(struct errhdr_t) ||
1974                     tgt_len != sizeof(struct errhdr_t)) {
1975                     goto unimplemented;
1976                 }
1977                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1978                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1979                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1980                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1981                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1982                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1983                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1984                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1985                     (void *) &errh->offender, sizeof(errh->offender));
1986                 break;
1987             }
1988             default:
1989                 goto unimplemented;
1990             }
1991             break;
1992 
1993         case SOL_IPV6:
1994             switch (cmsg->cmsg_type) {
1995             case IPV6_HOPLIMIT:
1996             {
1997                 uint32_t *v = (uint32_t *)data;
1998                 uint32_t *t_int = (uint32_t *)target_data;
1999 
2000                 if (len != sizeof(uint32_t) ||
2001                     tgt_len != sizeof(uint32_t)) {
2002                     goto unimplemented;
2003                 }
2004                 __put_user(*v, t_int);
2005                 break;
2006             }
2007             case IPV6_RECVERR:
2008             {
2009                 struct errhdr6_t {
2010                    struct sock_extended_err ee;
2011                    struct sockaddr_in6 offender;
2012                 };
2013                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2014                 struct errhdr6_t *target_errh =
2015                     (struct errhdr6_t *)target_data;
2016 
2017                 if (len != sizeof(struct errhdr6_t) ||
2018                     tgt_len != sizeof(struct errhdr6_t)) {
2019                     goto unimplemented;
2020                 }
2021                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2022                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2023                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2024                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2025                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2026                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2027                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2028                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2029                     (void *) &errh->offender, sizeof(errh->offender));
2030                 break;
2031             }
2032             default:
2033                 goto unimplemented;
2034             }
2035             break;
2036 
2037         default:
2038         unimplemented:
2039             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2040                           cmsg->cmsg_level, cmsg->cmsg_type);
2041             memcpy(target_data, data, MIN(len, tgt_len));
2042             if (tgt_len > len) {
2043                 memset(target_data + len, 0, tgt_len - len);
2044             }
2045         }
2046 
2047         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2048         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2049         if (msg_controllen < tgt_space) {
2050             tgt_space = msg_controllen;
2051         }
2052         msg_controllen -= tgt_space;
2053         space += tgt_space;
2054         cmsg = CMSG_NXTHDR(msgh, cmsg);
2055         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2056                                          target_cmsg_start);
2057     }
2058     unlock_user(target_cmsg, target_cmsg_addr, space);
2059  the_end:
2060     target_msgh->msg_controllen = tswapal(space);
2061     return 0;
2062 }
2063 
2064 /* do_setsockopt() must return target values and target errnos. */
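/* Option values are read from guest memory and converted as needed
 * (multicast requests, socket filters, timeouts, interface names, ...)
 * before calling the host setsockopt(); unsupported levels or options
 * log a LOG_UNIMP message and return -TARGET_ENOPROTOOPT.
 */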
2065 static abi_long do_setsockopt(int sockfd, int level, int optname,
2066                               abi_ulong optval_addr, socklen_t optlen)
2067 {
2068     abi_long ret;
2069     int val;
2070     struct ip_mreqn *ip_mreq;
2071     struct ip_mreq_source *ip_mreq_source;
2072 
2073     switch(level) {
2074     case SOL_TCP:
2075     case SOL_UDP:
2076         /* TCP and UDP options all take an 'int' value.  */
2077         if (optlen < sizeof(uint32_t))
2078             return -TARGET_EINVAL;
2079 
2080         if (get_user_u32(val, optval_addr))
2081             return -TARGET_EFAULT;
2082         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2083         break;
2084     case SOL_IP:
2085         switch(optname) {
2086         case IP_TOS:
2087         case IP_TTL:
2088         case IP_HDRINCL:
2089         case IP_ROUTER_ALERT:
2090         case IP_RECVOPTS:
2091         case IP_RETOPTS:
2092         case IP_PKTINFO:
2093         case IP_MTU_DISCOVER:
2094         case IP_RECVERR:
2095         case IP_RECVTTL:
2096         case IP_RECVTOS:
2097 #ifdef IP_FREEBIND
2098         case IP_FREEBIND:
2099 #endif
2100         case IP_MULTICAST_TTL:
2101         case IP_MULTICAST_LOOP:
2102             val = 0;
2103             if (optlen >= sizeof(uint32_t)) {
2104                 if (get_user_u32(val, optval_addr))
2105                     return -TARGET_EFAULT;
2106             } else if (optlen >= 1) {
2107                 if (get_user_u8(val, optval_addr))
2108                     return -TARGET_EFAULT;
2109             }
2110             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2111             break;
2112         case IP_ADD_MEMBERSHIP:
2113         case IP_DROP_MEMBERSHIP:
2114             if (optlen < sizeof (struct target_ip_mreq) ||
2115                 optlen > sizeof (struct target_ip_mreqn))
2116                 return -TARGET_EINVAL;
2117 
2118             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2119             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2120             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2121             break;
2122 
2123         case IP_BLOCK_SOURCE:
2124         case IP_UNBLOCK_SOURCE:
2125         case IP_ADD_SOURCE_MEMBERSHIP:
2126         case IP_DROP_SOURCE_MEMBERSHIP:
2127             if (optlen != sizeof (struct target_ip_mreq_source))
2128                 return -TARGET_EINVAL;
2129 
2130             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2131             if (!ip_mreq_source) {
2132                 return -TARGET_EFAULT;
2133             }
2134             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2135             unlock_user(ip_mreq_source, optval_addr, 0);
2136             break;
2137 
2138         default:
2139             goto unimplemented;
2140         }
2141         break;
2142     case SOL_IPV6:
2143         switch (optname) {
2144         case IPV6_MTU_DISCOVER:
2145         case IPV6_MTU:
2146         case IPV6_V6ONLY:
2147         case IPV6_RECVPKTINFO:
2148         case IPV6_UNICAST_HOPS:
2149         case IPV6_MULTICAST_HOPS:
2150         case IPV6_MULTICAST_LOOP:
2151         case IPV6_RECVERR:
2152         case IPV6_RECVHOPLIMIT:
2153         case IPV6_2292HOPLIMIT:
2154         case IPV6_CHECKSUM:
2155         case IPV6_ADDRFORM:
2156         case IPV6_2292PKTINFO:
2157         case IPV6_RECVTCLASS:
2158         case IPV6_RECVRTHDR:
2159         case IPV6_2292RTHDR:
2160         case IPV6_RECVHOPOPTS:
2161         case IPV6_2292HOPOPTS:
2162         case IPV6_RECVDSTOPTS:
2163         case IPV6_2292DSTOPTS:
2164         case IPV6_TCLASS:
2165         case IPV6_ADDR_PREFERENCES:
2166 #ifdef IPV6_RECVPATHMTU
2167         case IPV6_RECVPATHMTU:
2168 #endif
2169 #ifdef IPV6_TRANSPARENT
2170         case IPV6_TRANSPARENT:
2171 #endif
2172 #ifdef IPV6_FREEBIND
2173         case IPV6_FREEBIND:
2174 #endif
2175 #ifdef IPV6_RECVORIGDSTADDR
2176         case IPV6_RECVORIGDSTADDR:
2177 #endif
2178             val = 0;
2179             if (optlen < sizeof(uint32_t)) {
2180                 return -TARGET_EINVAL;
2181             }
2182             if (get_user_u32(val, optval_addr)) {
2183                 return -TARGET_EFAULT;
2184             }
2185             ret = get_errno(setsockopt(sockfd, level, optname,
2186                                        &val, sizeof(val)));
2187             break;
2188         case IPV6_PKTINFO:
2189         {
2190             struct in6_pktinfo pki;
2191 
2192             if (optlen < sizeof(pki)) {
2193                 return -TARGET_EINVAL;
2194             }
2195 
2196             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2197                 return -TARGET_EFAULT;
2198             }
2199 
2200             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2201 
2202             ret = get_errno(setsockopt(sockfd, level, optname,
2203                                        &pki, sizeof(pki)));
2204             break;
2205         }
2206         case IPV6_ADD_MEMBERSHIP:
2207         case IPV6_DROP_MEMBERSHIP:
2208         {
2209             struct ipv6_mreq ipv6mreq;
2210 
2211             if (optlen < sizeof(ipv6mreq)) {
2212                 return -TARGET_EINVAL;
2213             }
2214 
2215             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2216                 return -TARGET_EFAULT;
2217             }
2218 
2219             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2220 
2221             ret = get_errno(setsockopt(sockfd, level, optname,
2222                                        &ipv6mreq, sizeof(ipv6mreq)));
2223             break;
2224         }
2225         default:
2226             goto unimplemented;
2227         }
2228         break;
2229     case SOL_ICMPV6:
2230         switch (optname) {
2231         case ICMPV6_FILTER:
2232         {
2233             struct icmp6_filter icmp6f;
2234 
2235             if (optlen > sizeof(icmp6f)) {
2236                 optlen = sizeof(icmp6f);
2237             }
2238 
2239             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2240                 return -TARGET_EFAULT;
2241             }
2242 
2243             for (val = 0; val < 8; val++) {
2244                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2245             }
2246 
2247             ret = get_errno(setsockopt(sockfd, level, optname,
2248                                        &icmp6f, optlen));
2249             break;
2250         }
2251         default:
2252             goto unimplemented;
2253         }
2254         break;
2255     case SOL_RAW:
2256         switch (optname) {
2257         case ICMP_FILTER:
2258         case IPV6_CHECKSUM:
2259             /* these take a u32 value */
2260             if (optlen < sizeof(uint32_t)) {
2261                 return -TARGET_EINVAL;
2262             }
2263 
2264             if (get_user_u32(val, optval_addr)) {
2265                 return -TARGET_EFAULT;
2266             }
2267             ret = get_errno(setsockopt(sockfd, level, optname,
2268                                        &val, sizeof(val)));
2269             break;
2270 
2271         default:
2272             goto unimplemented;
2273         }
2274         break;
2275 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2276     case SOL_ALG:
2277         switch (optname) {
2278         case ALG_SET_KEY:
2279         {
2280             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2281             if (!alg_key) {
2282                 return -TARGET_EFAULT;
2283             }
2284             ret = get_errno(setsockopt(sockfd, level, optname,
2285                                        alg_key, optlen));
2286             unlock_user(alg_key, optval_addr, optlen);
2287             break;
2288         }
2289         case ALG_SET_AEAD_AUTHSIZE:
2290         {
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        NULL, optlen));
2293             break;
2294         }
2295         default:
2296             goto unimplemented;
2297         }
2298         break;
2299 #endif
2300     case TARGET_SOL_SOCKET:
2301         switch (optname) {
2302         case TARGET_SO_RCVTIMEO:
2303         {
2304                 struct timeval tv;
2305 
2306                 optname = SO_RCVTIMEO;
2307 
2308 set_timeout:
2309                 if (optlen != sizeof(struct target_timeval)) {
2310                     return -TARGET_EINVAL;
2311                 }
2312 
2313                 if (copy_from_user_timeval(&tv, optval_addr)) {
2314                     return -TARGET_EFAULT;
2315                 }
2316 
2317                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2318                                 &tv, sizeof(tv)));
2319                 return ret;
2320         }
2321         case TARGET_SO_SNDTIMEO:
2322                 optname = SO_SNDTIMEO;
2323                 goto set_timeout;
2324         case TARGET_SO_ATTACH_FILTER:
2325         {
2326                 struct target_sock_fprog *tfprog;
2327                 struct target_sock_filter *tfilter;
2328                 struct sock_fprog fprog;
2329                 struct sock_filter *filter;
2330                 int i;
2331 
2332                 if (optlen != sizeof(*tfprog)) {
2333                     return -TARGET_EINVAL;
2334                 }
2335                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2336                     return -TARGET_EFAULT;
2337                 }
2338                 if (!lock_user_struct(VERIFY_READ, tfilter,
2339                                       tswapal(tfprog->filter), 0)) {
2340                     unlock_user_struct(tfprog, optval_addr, 1);
2341                     return -TARGET_EFAULT;
2342                 }
2343 
2344                 fprog.len = tswap16(tfprog->len);
2345                 filter = g_try_new(struct sock_filter, fprog.len);
2346                 if (filter == NULL) {
2347                     unlock_user_struct(tfilter, tfprog->filter, 1);
2348                     unlock_user_struct(tfprog, optval_addr, 1);
2349                     return -TARGET_ENOMEM;
2350                 }
2351                 for (i = 0; i < fprog.len; i++) {
2352                     filter[i].code = tswap16(tfilter[i].code);
2353                     filter[i].jt = tfilter[i].jt;
2354                     filter[i].jf = tfilter[i].jf;
2355                     filter[i].k = tswap32(tfilter[i].k);
2356                 }
2357                 fprog.filter = filter;
2358 
2359                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2360                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2361                 g_free(filter);
2362 
2363                 unlock_user_struct(tfilter, tfprog->filter, 1);
2364                 unlock_user_struct(tfprog, optval_addr, 1);
2365                 return ret;
2366         }
2367         case TARGET_SO_BINDTODEVICE:
2368         {
2369                 char *dev_ifname, *addr_ifname;
2370 
2371                 if (optlen > IFNAMSIZ - 1) {
2372                     optlen = IFNAMSIZ - 1;
2373                 }
2374                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2375                 if (!dev_ifname) {
2376                     return -TARGET_EFAULT;
2377                 }
2378                 optname = SO_BINDTODEVICE;
2379                 addr_ifname = alloca(IFNAMSIZ);
2380                 memcpy(addr_ifname, dev_ifname, optlen);
2381                 addr_ifname[optlen] = 0;
2382                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2383                                            addr_ifname, optlen));
2384                 unlock_user(dev_ifname, optval_addr, 0);
2385                 return ret;
2386         }
2387         case TARGET_SO_LINGER:
2388         {
2389                 struct linger lg;
2390                 struct target_linger *tlg;
2391 
2392                 if (optlen != sizeof(struct target_linger)) {
2393                     return -TARGET_EINVAL;
2394                 }
2395                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2396                     return -TARGET_EFAULT;
2397                 }
2398                 __get_user(lg.l_onoff, &tlg->l_onoff);
2399                 __get_user(lg.l_linger, &tlg->l_linger);
2400                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2401                                 &lg, sizeof(lg)));
2402                 unlock_user_struct(tlg, optval_addr, 0);
2403                 return ret;
2404         }
2405             /* Options with 'int' argument.  */
2406         case TARGET_SO_DEBUG:
2407                 optname = SO_DEBUG;
2408                 break;
2409         case TARGET_SO_REUSEADDR:
2410                 optname = SO_REUSEADDR;
2411                 break;
2412 #ifdef SO_REUSEPORT
2413         case TARGET_SO_REUSEPORT:
2414                 optname = SO_REUSEPORT;
2415                 break;
2416 #endif
2417         case TARGET_SO_TYPE:
2418                 optname = SO_TYPE;
2419                 break;
2420         case TARGET_SO_ERROR:
2421                 optname = SO_ERROR;
2422                 break;
2423         case TARGET_SO_DONTROUTE:
2424                 optname = SO_DONTROUTE;
2425                 break;
2426         case TARGET_SO_BROADCAST:
2427                 optname = SO_BROADCAST;
2428                 break;
2429         case TARGET_SO_SNDBUF:
2430                 optname = SO_SNDBUF;
2431                 break;
2432         case TARGET_SO_SNDBUFFORCE:
2433                 optname = SO_SNDBUFFORCE;
2434                 break;
2435         case TARGET_SO_RCVBUF:
2436                 optname = SO_RCVBUF;
2437                 break;
2438         case TARGET_SO_RCVBUFFORCE:
2439                 optname = SO_RCVBUFFORCE;
2440                 break;
2441         case TARGET_SO_KEEPALIVE:
2442                 optname = SO_KEEPALIVE;
2443                 break;
2444         case TARGET_SO_OOBINLINE:
2445                 optname = SO_OOBINLINE;
2446                 break;
2447         case TARGET_SO_NO_CHECK:
2448                 optname = SO_NO_CHECK;
2449                 break;
2450         case TARGET_SO_PRIORITY:
2451                 optname = SO_PRIORITY;
2452                 break;
2453 #ifdef SO_BSDCOMPAT
2454         case TARGET_SO_BSDCOMPAT:
2455                 optname = SO_BSDCOMPAT;
2456                 break;
2457 #endif
2458         case TARGET_SO_PASSCRED:
2459                 optname = SO_PASSCRED;
2460                 break;
2461         case TARGET_SO_PASSSEC:
2462                 optname = SO_PASSSEC;
2463                 break;
2464         case TARGET_SO_TIMESTAMP:
2465                 optname = SO_TIMESTAMP;
2466                 break;
2467         case TARGET_SO_RCVLOWAT:
2468                 optname = SO_RCVLOWAT;
2469                 break;
2470         default:
2471             goto unimplemented;
2472         }
2473         if (optlen < sizeof(uint32_t))
2474             return -TARGET_EINVAL;
2475 
2476         if (get_user_u32(val, optval_addr))
2477             return -TARGET_EFAULT;
2478         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2479         break;
2480 #ifdef SOL_NETLINK
2481     case SOL_NETLINK:
2482         switch (optname) {
2483         case NETLINK_PKTINFO:
2484         case NETLINK_ADD_MEMBERSHIP:
2485         case NETLINK_DROP_MEMBERSHIP:
2486         case NETLINK_BROADCAST_ERROR:
2487         case NETLINK_NO_ENOBUFS:
2488 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2489         case NETLINK_LISTEN_ALL_NSID:
2490         case NETLINK_CAP_ACK:
2491 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2493         case NETLINK_EXT_ACK:
2494 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2496         case NETLINK_GET_STRICT_CHK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2498             break;
2499         default:
2500             goto unimplemented;
2501         }
2502         val = 0;
2503         if (optlen < sizeof(uint32_t)) {
2504             return -TARGET_EINVAL;
2505         }
2506         if (get_user_u32(val, optval_addr)) {
2507             return -TARGET_EFAULT;
2508         }
2509         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2510                                    sizeof(val)));
2511         break;
2512 #endif /* SOL_NETLINK */
2513     default:
2514     unimplemented:
2515         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2516                       level, optname);
2517         ret = -TARGET_ENOPROTOOPT;
2518     }
2519     return ret;
2520 }
2521 
2522 /* do_getsockopt() must return target values and target errnos. */
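/* Results from the host getsockopt() are converted back to guest
 * layout before being stored; unsupported options log a LOG_UNIMP
 * message and return -TARGET_EOPNOTSUPP.
 */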
2523 static abi_long do_getsockopt(int sockfd, int level, int optname,
2524                               abi_ulong optval_addr, abi_ulong optlen)
2525 {
2526     abi_long ret;
2527     int len, val;
2528     socklen_t lv;
2529 
2530     switch(level) {
2531     case TARGET_SOL_SOCKET:
2532         level = SOL_SOCKET;
2533         switch (optname) {
2534         /* These don't just return a single integer */
2535         case TARGET_SO_PEERNAME:
2536             goto unimplemented;
2537         case TARGET_SO_RCVTIMEO: {
2538             struct timeval tv;
2539             socklen_t tvlen;
2540 
2541             optname = SO_RCVTIMEO;
2542 
2543 get_timeout:
2544             if (get_user_u32(len, optlen)) {
2545                 return -TARGET_EFAULT;
2546             }
2547             if (len < 0) {
2548                 return -TARGET_EINVAL;
2549             }
2550 
2551             tvlen = sizeof(tv);
2552             ret = get_errno(getsockopt(sockfd, level, optname,
2553                                        &tv, &tvlen));
2554             if (ret < 0) {
2555                 return ret;
2556             }
2557             if (len > sizeof(struct target_timeval)) {
2558                 len = sizeof(struct target_timeval);
2559             }
2560             if (copy_to_user_timeval(optval_addr, &tv)) {
2561                 return -TARGET_EFAULT;
2562             }
2563             if (put_user_u32(len, optlen)) {
2564                 return -TARGET_EFAULT;
2565             }
2566             break;
2567         }
2568         case TARGET_SO_SNDTIMEO:
2569             optname = SO_SNDTIMEO;
2570             goto get_timeout;
2571         case TARGET_SO_PEERCRED: {
2572             struct ucred cr;
2573             socklen_t crlen;
2574             struct target_ucred *tcr;
2575 
2576             if (get_user_u32(len, optlen)) {
2577                 return -TARGET_EFAULT;
2578             }
2579             if (len < 0) {
2580                 return -TARGET_EINVAL;
2581             }
2582 
2583             crlen = sizeof(cr);
2584             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2585                                        &cr, &crlen));
2586             if (ret < 0) {
2587                 return ret;
2588             }
2589             if (len > crlen) {
2590                 len = crlen;
2591             }
2592             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2593                 return -TARGET_EFAULT;
2594             }
2595             __put_user(cr.pid, &tcr->pid);
2596             __put_user(cr.uid, &tcr->uid);
2597             __put_user(cr.gid, &tcr->gid);
2598             unlock_user_struct(tcr, optval_addr, 1);
2599             if (put_user_u32(len, optlen)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             break;
2603         }
2604         case TARGET_SO_PEERSEC: {
2605             char *name;
2606 
2607             if (get_user_u32(len, optlen)) {
2608                 return -TARGET_EFAULT;
2609             }
2610             if (len < 0) {
2611                 return -TARGET_EINVAL;
2612             }
2613             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2614             if (!name) {
2615                 return -TARGET_EFAULT;
2616             }
2617             lv = len;
2618             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2619                                        name, &lv));
2620             if (put_user_u32(lv, optlen)) {
2621                 ret = -TARGET_EFAULT;
2622             }
2623             unlock_user(name, optval_addr, lv);
2624             break;
2625         }
2626         case TARGET_SO_LINGER:
2627         {
2628             struct linger lg;
2629             socklen_t lglen;
2630             struct target_linger *tlg;
2631 
2632             if (get_user_u32(len, optlen)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             if (len < 0) {
2636                 return -TARGET_EINVAL;
2637             }
2638 
2639             lglen = sizeof(lg);
2640             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2641                                        &lg, &lglen));
2642             if (ret < 0) {
2643                 return ret;
2644             }
2645             if (len > lglen) {
2646                 len = lglen;
2647             }
2648             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2649                 return -TARGET_EFAULT;
2650             }
2651             __put_user(lg.l_onoff, &tlg->l_onoff);
2652             __put_user(lg.l_linger, &tlg->l_linger);
2653             unlock_user_struct(tlg, optval_addr, 1);
2654             if (put_user_u32(len, optlen)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             break;
2658         }
2659         /* Options with 'int' argument.  */
2660         case TARGET_SO_DEBUG:
2661             optname = SO_DEBUG;
2662             goto int_case;
2663         case TARGET_SO_REUSEADDR:
2664             optname = SO_REUSEADDR;
2665             goto int_case;
2666 #ifdef SO_REUSEPORT
2667         case TARGET_SO_REUSEPORT:
2668             optname = SO_REUSEPORT;
2669             goto int_case;
2670 #endif
2671         case TARGET_SO_TYPE:
2672             optname = SO_TYPE;
2673             goto int_case;
2674         case TARGET_SO_ERROR:
2675             optname = SO_ERROR;
2676             goto int_case;
2677         case TARGET_SO_DONTROUTE:
2678             optname = SO_DONTROUTE;
2679             goto int_case;
2680         case TARGET_SO_BROADCAST:
2681             optname = SO_BROADCAST;
2682             goto int_case;
2683         case TARGET_SO_SNDBUF:
2684             optname = SO_SNDBUF;
2685             goto int_case;
2686         case TARGET_SO_RCVBUF:
2687             optname = SO_RCVBUF;
2688             goto int_case;
2689         case TARGET_SO_KEEPALIVE:
2690             optname = SO_KEEPALIVE;
2691             goto int_case;
2692         case TARGET_SO_OOBINLINE:
2693             optname = SO_OOBINLINE;
2694             goto int_case;
2695         case TARGET_SO_NO_CHECK:
2696             optname = SO_NO_CHECK;
2697             goto int_case;
2698         case TARGET_SO_PRIORITY:
2699             optname = SO_PRIORITY;
2700             goto int_case;
2701 #ifdef SO_BSDCOMPAT
2702         case TARGET_SO_BSDCOMPAT:
2703             optname = SO_BSDCOMPAT;
2704             goto int_case;
2705 #endif
2706         case TARGET_SO_PASSCRED:
2707             optname = SO_PASSCRED;
2708             goto int_case;
2709         case TARGET_SO_TIMESTAMP:
2710             optname = SO_TIMESTAMP;
2711             goto int_case;
2712         case TARGET_SO_RCVLOWAT:
2713             optname = SO_RCVLOWAT;
2714             goto int_case;
2715         case TARGET_SO_ACCEPTCONN:
2716             optname = SO_ACCEPTCONN;
2717             goto int_case;
2718         case TARGET_SO_PROTOCOL:
2719             optname = SO_PROTOCOL;
2720             goto int_case;
2721         case TARGET_SO_DOMAIN:
2722             optname = SO_DOMAIN;
2723             goto int_case;
2724         default:
2725             goto int_case;
2726         }
2727         break;
2728     case SOL_TCP:
2729     case SOL_UDP:
2730         /* TCP and UDP options all take an 'int' value.  */
2731     int_case:
2732         if (get_user_u32(len, optlen))
2733             return -TARGET_EFAULT;
2734         if (len < 0)
2735             return -TARGET_EINVAL;
2736         lv = sizeof(lv);
2737         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2738         if (ret < 0)
2739             return ret;
2740         switch (optname) {
2741         case SO_TYPE:
2742             val = host_to_target_sock_type(val);
2743             break;
2744         case SO_ERROR:
2745             val = host_to_target_errno(val);
2746             break;
2747         }
2748         if (len > lv)
2749             len = lv;
2750         if (len == 4) {
2751             if (put_user_u32(val, optval_addr))
2752                 return -TARGET_EFAULT;
2753         } else {
2754             if (put_user_u8(val, optval_addr))
2755                 return -TARGET_EFAULT;
2756         }
2757         if (put_user_u32(len, optlen))
2758             return -TARGET_EFAULT;
2759         break;
2760     case SOL_IP:
2761         switch(optname) {
2762         case IP_TOS:
2763         case IP_TTL:
2764         case IP_HDRINCL:
2765         case IP_ROUTER_ALERT:
2766         case IP_RECVOPTS:
2767         case IP_RETOPTS:
2768         case IP_PKTINFO:
2769         case IP_MTU_DISCOVER:
2770         case IP_RECVERR:
2771         case IP_RECVTOS:
2772 #ifdef IP_FREEBIND
2773         case IP_FREEBIND:
2774 #endif
2775         case IP_MULTICAST_TTL:
2776         case IP_MULTICAST_LOOP:
2777             if (get_user_u32(len, optlen))
2778                 return -TARGET_EFAULT;
2779             if (len < 0)
2780                 return -TARGET_EINVAL;
2781             lv = sizeof(lv);
2782             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2783             if (ret < 0)
2784                 return ret;
2785             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2786                 len = 1;
2787                 if (put_user_u32(len, optlen)
2788                     || put_user_u8(val, optval_addr))
2789                     return -TARGET_EFAULT;
2790             } else {
2791                 if (len > sizeof(int))
2792                     len = sizeof(int);
2793                 if (put_user_u32(len, optlen)
2794                     || put_user_u32(val, optval_addr))
2795                     return -TARGET_EFAULT;
2796             }
2797             break;
2798         default:
2799             ret = -TARGET_ENOPROTOOPT;
2800             break;
2801         }
2802         break;
2803     case SOL_IPV6:
2804         switch (optname) {
2805         case IPV6_MTU_DISCOVER:
2806         case IPV6_MTU:
2807         case IPV6_V6ONLY:
2808         case IPV6_RECVPKTINFO:
2809         case IPV6_UNICAST_HOPS:
2810         case IPV6_MULTICAST_HOPS:
2811         case IPV6_MULTICAST_LOOP:
2812         case IPV6_RECVERR:
2813         case IPV6_RECVHOPLIMIT:
2814         case IPV6_2292HOPLIMIT:
2815         case IPV6_CHECKSUM:
2816         case IPV6_ADDRFORM:
2817         case IPV6_2292PKTINFO:
2818         case IPV6_RECVTCLASS:
2819         case IPV6_RECVRTHDR:
2820         case IPV6_2292RTHDR:
2821         case IPV6_RECVHOPOPTS:
2822         case IPV6_2292HOPOPTS:
2823         case IPV6_RECVDSTOPTS:
2824         case IPV6_2292DSTOPTS:
2825         case IPV6_TCLASS:
2826         case IPV6_ADDR_PREFERENCES:
2827 #ifdef IPV6_RECVPATHMTU
2828         case IPV6_RECVPATHMTU:
2829 #endif
2830 #ifdef IPV6_TRANSPARENT
2831         case IPV6_TRANSPARENT:
2832 #endif
2833 #ifdef IPV6_FREEBIND
2834         case IPV6_FREEBIND:
2835 #endif
2836 #ifdef IPV6_RECVORIGDSTADDR
2837         case IPV6_RECVORIGDSTADDR:
2838 #endif
2839             if (get_user_u32(len, optlen))
2840                 return -TARGET_EFAULT;
2841             if (len < 0)
2842                 return -TARGET_EINVAL;
2843             lv = sizeof(lv);
2844             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2845             if (ret < 0)
2846                 return ret;
2847             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2848                 len = 1;
2849                 if (put_user_u32(len, optlen)
2850                     || put_user_u8(val, optval_addr))
2851                     return -TARGET_EFAULT;
2852             } else {
2853                 if (len > sizeof(int))
2854                     len = sizeof(int);
2855                 if (put_user_u32(len, optlen)
2856                     || put_user_u32(val, optval_addr))
2857                     return -TARGET_EFAULT;
2858             }
2859             break;
2860         default:
2861             ret = -TARGET_ENOPROTOOPT;
2862             break;
2863         }
2864         break;
2865 #ifdef SOL_NETLINK
2866     case SOL_NETLINK:
2867         switch (optname) {
2868         case NETLINK_PKTINFO:
2869         case NETLINK_BROADCAST_ERROR:
2870         case NETLINK_NO_ENOBUFS:
2871 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2872         case NETLINK_LISTEN_ALL_NSID:
2873         case NETLINK_CAP_ACK:
2874 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2875 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2876         case NETLINK_EXT_ACK:
2877 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2878 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2879         case NETLINK_GET_STRICT_CHK:
2880 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2881             if (get_user_u32(len, optlen)) {
2882                 return -TARGET_EFAULT;
2883             }
2884             if (len != sizeof(val)) {
2885                 return -TARGET_EINVAL;
2886             }
2887             lv = len;
2888             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2889             if (ret < 0) {
2890                 return ret;
2891             }
2892             if (put_user_u32(lv, optlen)
2893                 || put_user_u32(val, optval_addr)) {
2894                 return -TARGET_EFAULT;
2895             }
2896             break;
2897 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2898         case NETLINK_LIST_MEMBERSHIPS:
2899         {
2900             uint32_t *results;
2901             int i;
2902             if (get_user_u32(len, optlen)) {
2903                 return -TARGET_EFAULT;
2904             }
2905             if (len < 0) {
2906                 return -TARGET_EINVAL;
2907             }
2908             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2909             if (!results && len > 0) {
2910                 return -TARGET_EFAULT;
2911             }
2912             lv = len;
2913             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2914             if (ret < 0) {
2915                 unlock_user(results, optval_addr, 0);
2916                 return ret;
2917             }
2918             /* swap host endianness to target endianness. */
2919             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2920                 results[i] = tswap32(results[i]);
2921             }
2922             if (put_user_u32(lv, optlen)) {
2923                 return -TARGET_EFAULT;
2924             }
2925             unlock_user(results, optval_addr, 0);
2926             break;
2927         }
2928 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2929         default:
2930             goto unimplemented;
2931         }
2932         break;
2933 #endif /* SOL_NETLINK */
2934     default:
2935     unimplemented:
2936         qemu_log_mask(LOG_UNIMP,
2937                       "getsockopt level=%d optname=%d not yet supported\n",
2938                       level, optname);
2939         ret = -TARGET_EOPNOTSUPP;
2940         break;
2941     }
2942     return ret;
2943 }
2944 
2945 /* Convert target low/high pair representing file offset into the host
2946  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2947  * as the kernel doesn't handle them either.
2948  */
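/* For example, with a 32-bit guest on a 64-bit host the two target
 * halves are combined into one 64-bit offset, which then fits entirely
 * in *hlow while *hhigh ends up as 0.
 */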
2949 static void target_to_host_low_high(abi_ulong tlow,
2950                                     abi_ulong thigh,
2951                                     unsigned long *hlow,
2952                                     unsigned long *hhigh)
2953 {
2954     uint64_t off = tlow |
2955         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2956         TARGET_LONG_BITS / 2;
2957 
2958     *hlow = off;
2959     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2960 }
2961 
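/* Build a host iovec array from a guest iovec array: validate the
 * count against IOV_MAX, lock each guest buffer and clamp the total
 * length.  On error NULL is returned with errno set (a zero count
 * returns NULL with errno cleared); a bad buffer after the first one
 * makes that and all following entries zero-length so that a partial
 * transfer results.
 */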
2962 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2963                                 abi_ulong count, int copy)
2964 {
2965     struct target_iovec *target_vec;
2966     struct iovec *vec;
2967     abi_ulong total_len, max_len;
2968     int i;
2969     int err = 0;
2970     bool bad_address = false;
2971 
2972     if (count == 0) {
2973         errno = 0;
2974         return NULL;
2975     }
2976     if (count > IOV_MAX) {
2977         errno = EINVAL;
2978         return NULL;
2979     }
2980 
2981     vec = g_try_new0(struct iovec, count);
2982     if (vec == NULL) {
2983         errno = ENOMEM;
2984         return NULL;
2985     }
2986 
2987     target_vec = lock_user(VERIFY_READ, target_addr,
2988                            count * sizeof(struct target_iovec), 1);
2989     if (target_vec == NULL) {
2990         err = EFAULT;
2991         goto fail2;
2992     }
2993 
2994     /* ??? If host page size > target page size, this will result in a
2995        value larger than what we can actually support.  */
2996     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2997     total_len = 0;
2998 
2999     for (i = 0; i < count; i++) {
3000         abi_ulong base = tswapal(target_vec[i].iov_base);
3001         abi_long len = tswapal(target_vec[i].iov_len);
3002 
3003         if (len < 0) {
3004             err = EINVAL;
3005             goto fail;
3006         } else if (len == 0) {
3007             /* Zero length pointer is ignored.  */
3008             vec[i].iov_base = 0;
3009         } else {
3010             vec[i].iov_base = lock_user(type, base, len, copy);
3011             /* If the first buffer pointer is bad, this is a fault.  But
3012              * subsequent bad buffers will result in a partial write; this
3013              * is realized by filling the vector with null pointers and
3014              * zero lengths. */
3015             if (!vec[i].iov_base) {
3016                 if (i == 0) {
3017                     err = EFAULT;
3018                     goto fail;
3019                 } else {
3020                     bad_address = true;
3021                 }
3022             }
3023             if (bad_address) {
3024                 len = 0;
3025             }
3026             if (len > max_len - total_len) {
3027                 len = max_len - total_len;
3028             }
3029         }
3030         vec[i].iov_len = len;
3031         total_len += len;
3032     }
3033 
3034     unlock_user(target_vec, target_addr, 0);
3035     return vec;
3036 
3037  fail:
3038     while (--i >= 0) {
3039         if (tswapal(target_vec[i].iov_len) > 0) {
3040             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3041         }
3042     }
3043     unlock_user(target_vec, target_addr, 0);
3044  fail2:
3045     g_free(vec);
3046     errno = err;
3047     return NULL;
3048 }
3049 
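/* Undo lock_iovec(): release the locked guest buffers and free the
 * host iovec array; 'copy' selects whether modified data is written
 * back to guest memory.
 */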
3050 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3051                          abi_ulong count, int copy)
3052 {
3053     struct target_iovec *target_vec;
3054     int i;
3055 
3056     target_vec = lock_user(VERIFY_READ, target_addr,
3057                            count * sizeof(struct target_iovec), 1);
3058     if (target_vec) {
3059         for (i = 0; i < count; i++) {
3060             abi_ulong base = tswapal(target_vec[i].iov_base);
3061             abi_long len = tswapal(target_vec[i].iov_len);
3062             if (len < 0) {
3063                 break;
3064             }
3065             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3066         }
3067         unlock_user(target_vec, target_addr, 0);
3068     }
3069 
3070     g_free(vec);
3071 }
3072 
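/* Translate the guest socket type and its SOCK_CLOEXEC/SOCK_NONBLOCK
 * flags into host values; returns -TARGET_EINVAL if a requested flag
 * cannot be expressed on this host.
 */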
3073 static inline int target_to_host_sock_type(int *type)
3074 {
3075     int host_type = 0;
3076     int target_type = *type;
3077 
3078     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3079     case TARGET_SOCK_DGRAM:
3080         host_type = SOCK_DGRAM;
3081         break;
3082     case TARGET_SOCK_STREAM:
3083         host_type = SOCK_STREAM;
3084         break;
3085     default:
3086         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3087         break;
3088     }
3089     if (target_type & TARGET_SOCK_CLOEXEC) {
3090 #if defined(SOCK_CLOEXEC)
3091         host_type |= SOCK_CLOEXEC;
3092 #else
3093         return -TARGET_EINVAL;
3094 #endif
3095     }
3096     if (target_type & TARGET_SOCK_NONBLOCK) {
3097 #if defined(SOCK_NONBLOCK)
3098         host_type |= SOCK_NONBLOCK;
3099 #elif !defined(O_NONBLOCK)
3100         return -TARGET_EINVAL;
3101 #endif
3102     }
3103     *type = host_type;
3104     return 0;
3105 }
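
/*
 * Note: if the host lacks SOCK_NONBLOCK but has O_NONBLOCK, the non-blocking
 * flag is accepted above without being mapped, and is emulated after socket
 * creation via fcntl() in sock_flags_fixup() below.
 */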
3106 
3107 /* Try to emulate socket type flags after socket creation.  */
3108 static int sock_flags_fixup(int fd, int target_type)
3109 {
3110 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3111     if (target_type & TARGET_SOCK_NONBLOCK) {
3112         int flags = fcntl(fd, F_GETFL);
3113         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3114             close(fd);
3115             return -TARGET_EINVAL;
3116         }
3117     }
3118 #endif
3119     return fd;
3120 }
3121 
3122 /* do_socket() Must return target values and target errnos. */
3123 static abi_long do_socket(int domain, int type, int protocol)
3124 {
3125     int target_type = type;
3126     int ret;
3127 
3128     ret = target_to_host_sock_type(&type);
3129     if (ret) {
3130         return ret;
3131     }
3132 
3133     if (domain == PF_NETLINK && !(
3134 #ifdef CONFIG_RTNETLINK
3135          protocol == NETLINK_ROUTE ||
3136 #endif
3137          protocol == NETLINK_KOBJECT_UEVENT ||
3138          protocol == NETLINK_AUDIT)) {
3139         return -TARGET_EPROTONOSUPPORT;
3140     }
3141 
3142     if (domain == AF_PACKET ||
3143         (domain == AF_INET && type == SOCK_PACKET)) {
3144         protocol = tswap16(protocol);
3145     }
3146 
3147     ret = get_errno(socket(domain, type, protocol));
3148     if (ret >= 0) {
3149         ret = sock_flags_fixup(ret, target_type);
3150         if (type == SOCK_PACKET) {
3151             /* Handle an obsolete case:
3152              * if the socket type is SOCK_PACKET, the socket is bound by name.
3153              */
3154             fd_trans_register(ret, &target_packet_trans);
3155         } else if (domain == PF_NETLINK) {
3156             switch (protocol) {
3157 #ifdef CONFIG_RTNETLINK
3158             case NETLINK_ROUTE:
3159                 fd_trans_register(ret, &target_netlink_route_trans);
3160                 break;
3161 #endif
3162             case NETLINK_KOBJECT_UEVENT:
3163                 /* nothing to do: messages are strings */
3164                 break;
3165             case NETLINK_AUDIT:
3166                 fd_trans_register(ret, &target_netlink_audit_trans);
3167                 break;
3168             default:
3169                 g_assert_not_reached();
3170             }
3171         }
3172     }
3173     return ret;
3174 }
3175 
3176 /* do_bind() Must return target values and target errnos. */
3177 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3178                         socklen_t addrlen)
3179 {
3180     void *addr;
3181     abi_long ret;
3182 
3183     if ((int)addrlen < 0) {
3184         return -TARGET_EINVAL;
3185     }
3186 
3187     addr = alloca(addrlen+1);
3188 
3189     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3190     if (ret)
3191         return ret;
3192 
3193     return get_errno(bind(sockfd, addr, addrlen));
3194 }
3195 
3196 /* do_connect() Must return target values and target errnos. */
3197 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3198                            socklen_t addrlen)
3199 {
3200     void *addr;
3201     abi_long ret;
3202 
3203     if ((int)addrlen < 0) {
3204         return -TARGET_EINVAL;
3205     }
3206 
3207     addr = alloca(addrlen+1);
3208 
3209     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3210     if (ret)
3211         return ret;
3212 
3213     return get_errno(safe_connect(sockfd, addr, addrlen));
3214 }
3215 
3216 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3217 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3218                                       int flags, int send)
3219 {
3220     abi_long ret, len;
3221     struct msghdr msg;
3222     abi_ulong count;
3223     struct iovec *vec;
3224     abi_ulong target_vec;
3225 
3226     if (msgp->msg_name) {
3227         msg.msg_namelen = tswap32(msgp->msg_namelen);
3228         msg.msg_name = alloca(msg.msg_namelen+1);
3229         ret = target_to_host_sockaddr(fd, msg.msg_name,
3230                                       tswapal(msgp->msg_name),
3231                                       msg.msg_namelen);
3232         if (ret == -TARGET_EFAULT) {
3233             /* For connected sockets msg_name and msg_namelen must
3234              * be ignored, so returning EFAULT immediately is wrong.
3235              * Instead, pass a bad msg_name to the host kernel, and
3236              * let it decide whether to return EFAULT or not.
3237              */
3238             msg.msg_name = (void *)-1;
3239         } else if (ret) {
3240             goto out2;
3241         }
3242     } else {
3243         msg.msg_name = NULL;
3244         msg.msg_namelen = 0;
3245     }
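    /*
     * Control messages can grow when converted to the host's layout (e.g. a
     * 64-bit host serving a 32-bit target), so over-allocate the host
     * control buffer relative to the target's msg_controllen.
     */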
3246     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3247     msg.msg_control = alloca(msg.msg_controllen);
3248     memset(msg.msg_control, 0, msg.msg_controllen);
3249 
3250     msg.msg_flags = tswap32(msgp->msg_flags);
3251 
3252     count = tswapal(msgp->msg_iovlen);
3253     target_vec = tswapal(msgp->msg_iov);
3254 
3255     if (count > IOV_MAX) {
3256         /* sendmsg/recvmsg return a different errno for this condition than
3257          * readv/writev, so we must catch it here before lock_iovec() does.
3258          */
3259         ret = -TARGET_EMSGSIZE;
3260         goto out2;
3261     }
3262 
3263     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3264                      target_vec, count, send);
3265     if (vec == NULL) {
3266         ret = -host_to_target_errno(errno);
3267         /* allow sending a packet without any iov, e.g. with the MSG_MORE flag */
3268         if (!send || ret) {
3269             goto out2;
3270         }
3271     }
3272     msg.msg_iovlen = count;
3273     msg.msg_iov = vec;
3274 
3275     if (send) {
3276         if (fd_trans_target_to_host_data(fd)) {
3277             void *host_msg;
3278 
3279             host_msg = g_malloc(msg.msg_iov->iov_len);
3280             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3281             ret = fd_trans_target_to_host_data(fd)(host_msg,
3282                                                    msg.msg_iov->iov_len);
3283             if (ret >= 0) {
3284                 msg.msg_iov->iov_base = host_msg;
3285                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3286             }
3287             g_free(host_msg);
3288         } else {
3289             ret = target_to_host_cmsg(&msg, msgp);
3290             if (ret == 0) {
3291                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3292             }
3293         }
3294     } else {
3295         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3296         if (!is_error(ret)) {
3297             len = ret;
3298             if (fd_trans_host_to_target_data(fd)) {
3299                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3300                                                MIN(msg.msg_iov->iov_len, len));
3301             }
3302             if (!is_error(ret)) {
3303                 ret = host_to_target_cmsg(msgp, &msg);
3304             }
3305             if (!is_error(ret)) {
3306                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3307                 msgp->msg_flags = tswap32(msg.msg_flags);
3308                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3309                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3310                                     msg.msg_name, msg.msg_namelen);
3311                     if (ret) {
3312                         goto out;
3313                     }
3314                 }
3315 
3316                 ret = len;
3317             }
3318         }
3319     }
3320 
3321 out:
3322     if (vec) {
3323         unlock_iovec(vec, target_vec, count, !send);
3324     }
3325 out2:
3326     return ret;
3327 }
3328 
3329 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3330                                int flags, int send)
3331 {
3332     abi_long ret;
3333     struct target_msghdr *msgp;
3334 
3335     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3336                           msgp,
3337                           target_msg,
3338                           send ? 1 : 0)) {
3339         return -TARGET_EFAULT;
3340     }
3341     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3342     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3343     return ret;
3344 }
3345 
3346 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3347  * so it might not have this *mmsg-specific flag either.
3348  */
3349 #ifndef MSG_WAITFORONE
3350 #define MSG_WAITFORONE 0x10000
3351 #endif
3352 
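/*
 * sendmmsg/recvmmsg are emulated as a loop of do_sendrecvmsg_locked() over
 * the locked mmsghdr array; on partial success the number of datagrams
 * already processed is returned rather than the error, as the kernel does.
 */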
3353 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3354                                 unsigned int vlen, unsigned int flags,
3355                                 int send)
3356 {
3357     struct target_mmsghdr *mmsgp;
3358     abi_long ret = 0;
3359     int i;
3360 
3361     if (vlen > UIO_MAXIOV) {
3362         vlen = UIO_MAXIOV;
3363     }
3364 
3365     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3366     if (!mmsgp) {
3367         return -TARGET_EFAULT;
3368     }
3369 
3370     for (i = 0; i < vlen; i++) {
3371         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3372         if (is_error(ret)) {
3373             break;
3374         }
3375         mmsgp[i].msg_len = tswap32(ret);
3376         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3377         if (flags & MSG_WAITFORONE) {
3378             flags |= MSG_DONTWAIT;
3379         }
3380     }
3381 
3382     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3383 
3384     /* Return the number of datagrams sent or received if we handled
3385      * any at all; otherwise return the error.
3386      */
3387     if (i) {
3388         return i;
3389     }
3390     return ret;
3391 }
3392 
3393 /* do_accept4() Must return target values and target errnos. */
3394 static abi_long do_accept4(int fd, abi_ulong target_addr,
3395                            abi_ulong target_addrlen_addr, int flags)
3396 {
3397     socklen_t addrlen, ret_addrlen;
3398     void *addr;
3399     abi_long ret;
3400     int host_flags;
3401 
3402     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3403         return -TARGET_EINVAL;
3404     }
3405 
3406     host_flags = 0;
3407     if (flags & TARGET_SOCK_NONBLOCK) {
3408         host_flags |= SOCK_NONBLOCK;
3409     }
3410     if (flags & TARGET_SOCK_CLOEXEC) {
3411         host_flags |= SOCK_CLOEXEC;
3412     }
3413 
3414     if (target_addr == 0) {
3415         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3416     }
3417 
3418     /* Linux returns EFAULT if the addrlen pointer is invalid */
3419     if (get_user_u32(addrlen, target_addrlen_addr))
3420         return -TARGET_EFAULT;
3421 
3422     if ((int)addrlen < 0) {
3423         return -TARGET_EINVAL;
3424     }
3425 
3426     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3427         return -TARGET_EFAULT;
3428     }
3429 
3430     addr = alloca(addrlen);
3431 
3432     ret_addrlen = addrlen;
3433     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3434     if (!is_error(ret)) {
3435         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3436         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3437             ret = -TARGET_EFAULT;
3438         }
3439     }
3440     return ret;
3441 }
3442 
3443 /* do_getpeername() Must return target values and target errnos. */
3444 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3445                                abi_ulong target_addrlen_addr)
3446 {
3447     socklen_t addrlen, ret_addrlen;
3448     void *addr;
3449     abi_long ret;
3450 
3451     if (get_user_u32(addrlen, target_addrlen_addr))
3452         return -TARGET_EFAULT;
3453 
3454     if ((int)addrlen < 0) {
3455         return -TARGET_EINVAL;
3456     }
3457 
3458     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3459         return -TARGET_EFAULT;
3460     }
3461 
3462     addr = alloca(addrlen);
3463 
3464     ret_addrlen = addrlen;
3465     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3466     if (!is_error(ret)) {
3467         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3468         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3469             ret = -TARGET_EFAULT;
3470         }
3471     }
3472     return ret;
3473 }
3474 
3475 /* do_getsockname() Must return target values and target errnos. */
3476 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3477                                abi_ulong target_addrlen_addr)
3478 {
3479     socklen_t addrlen, ret_addrlen;
3480     void *addr;
3481     abi_long ret;
3482 
3483     if (get_user_u32(addrlen, target_addrlen_addr))
3484         return -TARGET_EFAULT;
3485 
3486     if ((int)addrlen < 0) {
3487         return -TARGET_EINVAL;
3488     }
3489 
3490     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3491         return -TARGET_EFAULT;
3492     }
3493 
3494     addr = alloca(addrlen);
3495 
3496     ret_addrlen = addrlen;
3497     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3498     if (!is_error(ret)) {
3499         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3500         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3501             ret = -TARGET_EFAULT;
3502         }
3503     }
3504     return ret;
3505 }
3506 
3507 /* do_socketpair() Must return target values and target errnos. */
3508 static abi_long do_socketpair(int domain, int type, int protocol,
3509                               abi_ulong target_tab_addr)
3510 {
3511     int tab[2];
3512     abi_long ret;
3513 
3514     target_to_host_sock_type(&type);
3515 
3516     ret = get_errno(socketpair(domain, type, protocol, tab));
3517     if (!is_error(ret)) {
3518         if (put_user_s32(tab[0], target_tab_addr)
3519             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3520             ret = -TARGET_EFAULT;
3521     }
3522     return ret;
3523 }
3524 
3525 /* do_sendto() Must return target values and target errnos. */
3526 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3527                           abi_ulong target_addr, socklen_t addrlen)
3528 {
3529     void *addr;
3530     void *host_msg;
3531     void *copy_msg = NULL;
3532     abi_long ret;
3533 
3534     if ((int)addrlen < 0) {
3535         return -TARGET_EINVAL;
3536     }
3537 
3538     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3539     if (!host_msg)
3540         return -TARGET_EFAULT;
3541     if (fd_trans_target_to_host_data(fd)) {
3542         copy_msg = host_msg;
3543         host_msg = g_malloc(len);
3544         memcpy(host_msg, copy_msg, len);
3545         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3546         if (ret < 0) {
3547             goto fail;
3548         }
3549     }
3550     if (target_addr) {
3551         addr = alloca(addrlen+1);
3552         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3553         if (ret) {
3554             goto fail;
3555         }
3556         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3557     } else {
3558         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3559     }
3560 fail:
3561     if (copy_msg) {
3562         g_free(host_msg);
3563         host_msg = copy_msg;
3564     }
3565     unlock_user(host_msg, msg, 0);
3566     return ret;
3567 }
3568 
3569 /* do_recvfrom() Must return target values and target errnos. */
3570 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3571                             abi_ulong target_addr,
3572                             abi_ulong target_addrlen)
3573 {
3574     socklen_t addrlen, ret_addrlen;
3575     void *addr;
3576     void *host_msg;
3577     abi_long ret;
3578 
3579     if (!msg) {
3580         host_msg = NULL;
3581     } else {
3582         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3583         if (!host_msg) {
3584             return -TARGET_EFAULT;
3585         }
3586     }
3587     if (target_addr) {
3588         if (get_user_u32(addrlen, target_addrlen)) {
3589             ret = -TARGET_EFAULT;
3590             goto fail;
3591         }
3592         if ((int)addrlen < 0) {
3593             ret = -TARGET_EINVAL;
3594             goto fail;
3595         }
3596         addr = alloca(addrlen);
3597         ret_addrlen = addrlen;
3598         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3599                                       addr, &ret_addrlen));
3600     } else {
3601         addr = NULL; /* To keep compiler quiet.  */
3602         addrlen = 0; /* To keep compiler quiet.  */
3603         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3604     }
3605     if (!is_error(ret)) {
3606         if (fd_trans_host_to_target_data(fd)) {
3607             abi_long trans;
3608             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3609             if (is_error(trans)) {
3610                 ret = trans;
3611                 goto fail;
3612             }
3613         }
3614         if (target_addr) {
3615             host_to_target_sockaddr(target_addr, addr,
3616                                     MIN(addrlen, ret_addrlen));
3617             if (put_user_u32(ret_addrlen, target_addrlen)) {
3618                 ret = -TARGET_EFAULT;
3619                 goto fail;
3620             }
3621         }
3622         unlock_user(host_msg, msg, len);
3623     } else {
3624 fail:
3625         unlock_user(host_msg, msg, 0);
3626     }
3627     return ret;
3628 }
3629 
3630 #ifdef TARGET_NR_socketcall
3631 /* do_socketcall() must return target values and target errnos. */
3632 static abi_long do_socketcall(int num, abi_ulong vptr)
3633 {
3634     static const unsigned nargs[] = { /* number of arguments per operation */
3635         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3636         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3637         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3638         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3639         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3640         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3641         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3642         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3643         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3644         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3645         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3646         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3647         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3648         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3649         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3650         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3651         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3652         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3653         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3654         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3655     };
3656     abi_long a[6]; /* max 6 args */
3657     unsigned i;
3658 
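    /*
     * Example: a guest socketcall(SYS_SOCKET, args) arrives here with
     * num == TARGET_SYS_SOCKET and vptr pointing at three consecutive
     * abi_longs {domain, type, protocol}, which are fetched below and
     * passed to do_socket().
     */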
3659     /* check the range of the first argument num */
3660     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3661     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3662         return -TARGET_EINVAL;
3663     }
3664     /* ensure we have space for args */
3665     if (nargs[num] > ARRAY_SIZE(a)) {
3666         return -TARGET_EINVAL;
3667     }
3668     /* collect the arguments in a[] according to nargs[] */
3669     for (i = 0; i < nargs[num]; ++i) {
3670         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3671             return -TARGET_EFAULT;
3672         }
3673     }
3674     /* now when we have the args, invoke the appropriate underlying function */
3675     switch (num) {
3676     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3677         return do_socket(a[0], a[1], a[2]);
3678     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3679         return do_bind(a[0], a[1], a[2]);
3680     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3681         return do_connect(a[0], a[1], a[2]);
3682     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3683         return get_errno(listen(a[0], a[1]));
3684     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3685         return do_accept4(a[0], a[1], a[2], 0);
3686     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3687         return do_getsockname(a[0], a[1], a[2]);
3688     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3689         return do_getpeername(a[0], a[1], a[2]);
3690     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3691         return do_socketpair(a[0], a[1], a[2], a[3]);
3692     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3693         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3694     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3695         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3696     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3697         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3698     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3699         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3700     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3701         return get_errno(shutdown(a[0], a[1]));
3702     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3703         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3704     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3705         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3706     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3707         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3708     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3709         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3710     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3711         return do_accept4(a[0], a[1], a[2], a[3]);
3712     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3713         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3714     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3715         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3716     default:
3717         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3718         return -TARGET_EINVAL;
3719     }
3720 }
3721 #endif
3722 
3723 #ifndef TARGET_SEMID64_DS
3724 /* asm-generic version of this struct */
3725 struct target_semid64_ds
3726 {
3727   struct target_ipc_perm sem_perm;
3728   abi_ulong sem_otime;
3729 #if TARGET_ABI_BITS == 32
3730   abi_ulong __unused1;
3731 #endif
3732   abi_ulong sem_ctime;
3733 #if TARGET_ABI_BITS == 32
3734   abi_ulong __unused2;
3735 #endif
3736   abi_ulong sem_nsems;
3737   abi_ulong __unused3;
3738   abi_ulong __unused4;
3739 };
3740 #endif
3741 
3742 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3743                                                abi_ulong target_addr)
3744 {
3745     struct target_ipc_perm *target_ip;
3746     struct target_semid64_ds *target_sd;
3747 
3748     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3749         return -TARGET_EFAULT;
3750     target_ip = &(target_sd->sem_perm);
3751     host_ip->__key = tswap32(target_ip->__key);
3752     host_ip->uid = tswap32(target_ip->uid);
3753     host_ip->gid = tswap32(target_ip->gid);
3754     host_ip->cuid = tswap32(target_ip->cuid);
3755     host_ip->cgid = tswap32(target_ip->cgid);
3756 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3757     host_ip->mode = tswap32(target_ip->mode);
3758 #else
3759     host_ip->mode = tswap16(target_ip->mode);
3760 #endif
3761 #if defined(TARGET_PPC)
3762     host_ip->__seq = tswap32(target_ip->__seq);
3763 #else
3764     host_ip->__seq = tswap16(target_ip->__seq);
3765 #endif
3766     unlock_user_struct(target_sd, target_addr, 0);
3767     return 0;
3768 }
3769 
3770 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3771                                                struct ipc_perm *host_ip)
3772 {
3773     struct target_ipc_perm *target_ip;
3774     struct target_semid64_ds *target_sd;
3775 
3776     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3777         return -TARGET_EFAULT;
3778     target_ip = &(target_sd->sem_perm);
3779     target_ip->__key = tswap32(host_ip->__key);
3780     target_ip->uid = tswap32(host_ip->uid);
3781     target_ip->gid = tswap32(host_ip->gid);
3782     target_ip->cuid = tswap32(host_ip->cuid);
3783     target_ip->cgid = tswap32(host_ip->cgid);
3784 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3785     target_ip->mode = tswap32(host_ip->mode);
3786 #else
3787     target_ip->mode = tswap16(host_ip->mode);
3788 #endif
3789 #if defined(TARGET_PPC)
3790     target_ip->__seq = tswap32(host_ip->__seq);
3791 #else
3792     target_ip->__seq = tswap16(host_ip->__seq);
3793 #endif
3794     unlock_user_struct(target_sd, target_addr, 1);
3795     return 0;
3796 }
3797 
3798 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3799                                                abi_ulong target_addr)
3800 {
3801     struct target_semid64_ds *target_sd;
3802 
3803     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3804         return -TARGET_EFAULT;
3805     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3806         return -TARGET_EFAULT;
3807     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3808     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3809     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3810     unlock_user_struct(target_sd, target_addr, 0);
3811     return 0;
3812 }
3813 
3814 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3815                                                struct semid_ds *host_sd)
3816 {
3817     struct target_semid64_ds *target_sd;
3818 
3819     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3820         return -TARGET_EFAULT;
3821     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3822         return -TARGET_EFAULT;
3823     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3824     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3825     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3826     unlock_user_struct(target_sd, target_addr, 1);
3827     return 0;
3828 }
3829 
3830 struct target_seminfo {
3831     int semmap;
3832     int semmni;
3833     int semmns;
3834     int semmnu;
3835     int semmsl;
3836     int semopm;
3837     int semume;
3838     int semusz;
3839     int semvmx;
3840     int semaem;
3841 };
3842 
3843 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3844                                               struct seminfo *host_seminfo)
3845 {
3846     struct target_seminfo *target_seminfo;
3847     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3848         return -TARGET_EFAULT;
3849     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3850     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3851     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3852     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3853     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3854     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3855     __put_user(host_seminfo->semume, &target_seminfo->semume);
3856     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3857     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3858     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3859     unlock_user_struct(target_seminfo, target_addr, 1);
3860     return 0;
3861 }
3862 
3863 union semun {
3864     int val;
3865     struct semid_ds *buf;
3866     unsigned short *array;
3867     struct seminfo *__buf;
3868 };
3869 
3870 union target_semun {
3871     int val;
3872     abi_ulong buf;
3873     abi_ulong array;
3874     abi_ulong __buf;
3875 };
3876 
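/*
 * Helpers for GETALL/SETALL: the number of semaphores is not passed in by
 * the caller, so both directions first query the kernel with IPC_STAT to
 * learn sem_nsems before copying the array between guest and host.
 */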
3877 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3878                                                abi_ulong target_addr)
3879 {
3880     int nsems;
3881     unsigned short *array;
3882     union semun semun;
3883     struct semid_ds semid_ds;
3884     int i, ret;
3885 
3886     semun.buf = &semid_ds;
3887 
3888     ret = semctl(semid, 0, IPC_STAT, semun);
3889     if (ret == -1)
3890         return get_errno(ret);
3891 
3892     nsems = semid_ds.sem_nsems;
3893 
3894     *host_array = g_try_new(unsigned short, nsems);
3895     if (!*host_array) {
3896         return -TARGET_ENOMEM;
3897     }
3898     array = lock_user(VERIFY_READ, target_addr,
3899                       nsems*sizeof(unsigned short), 1);
3900     if (!array) {
3901         g_free(*host_array);
3902         return -TARGET_EFAULT;
3903     }
3904 
3905     for (i = 0; i < nsems; i++) {
3906         __get_user((*host_array)[i], &array[i]);
3907     }
3908     unlock_user(array, target_addr, 0);
3909 
3910     return 0;
3911 }
3912 
3913 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3914                                                unsigned short **host_array)
3915 {
3916     int nsems;
3917     unsigned short *array;
3918     union semun semun;
3919     struct semid_ds semid_ds;
3920     int i, ret;
3921 
3922     semun.buf = &semid_ds;
3923 
3924     ret = semctl(semid, 0, IPC_STAT, semun);
3925     if (ret == -1)
3926         return get_errno(ret);
3927 
3928     nsems = semid_ds.sem_nsems;
3929 
3930     array = lock_user(VERIFY_WRITE, target_addr,
3931                       nsems*sizeof(unsigned short), 0);
3932     if (!array)
3933         return -TARGET_EFAULT;
3934 
3935     for (i = 0; i < nsems; i++) {
3936         __put_user((*host_array)[i], &array[i]);
3937     }
3938     g_free(*host_array);
3939     unlock_user(array, target_addr, 1);
3940 
3941     return 0;
3942 }
3943 
3944 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3945                                  abi_ulong target_arg)
3946 {
3947     union target_semun target_su = { .buf = target_arg };
3948     union semun arg;
3949     struct semid_ds dsarg;
3950     unsigned short *array = NULL;
3951     struct seminfo seminfo;
3952     abi_long ret = -TARGET_EINVAL;
3953     abi_long err;
3954     cmd &= 0xff;
3955 
3956     switch (cmd) {
3957         case GETVAL:
3958         case SETVAL:
3959             /* In 64 bit cross-endian situations, we will erroneously pick up
3960              * the wrong half of the union for the "val" element.  To rectify
3961              * this, the entire 8-byte structure is byteswapped, followed by
3962              * a swap of the 4 byte val field. In other cases, the data is
3963              * already in proper host byte order. */
3964             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3965                 target_su.buf = tswapal(target_su.buf);
3966                 arg.val = tswap32(target_su.val);
3967             } else {
3968                 arg.val = target_su.val;
3969             }
3970             ret = get_errno(semctl(semid, semnum, cmd, arg));
3971             break;
3972         case GETALL:
3973         case SETALL:
3974             err = target_to_host_semarray(semid, &array, target_su.array);
3975             if (err)
3976                 return err;
3977             arg.array = array;
3978             ret = get_errno(semctl(semid, semnum, cmd, arg));
3979             err = host_to_target_semarray(semid, target_su.array, &array);
3980             if (err)
3981                 return err;
3982             break;
3983         case IPC_STAT:
3984         case IPC_SET:
3985         case SEM_STAT:
3986             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3987             if (err)
3988                 return err;
3989             arg.buf = &dsarg;
3990             ret = get_errno(semctl(semid, semnum, cmd, arg));
3991             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3992             if (err)
3993                 return err;
3994             break;
3995         case IPC_INFO:
3996         case SEM_INFO:
3997             arg.__buf = &seminfo;
3998             ret = get_errno(semctl(semid, semnum, cmd, arg));
3999             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4000             if (err)
4001                 return err;
4002             break;
4003         case IPC_RMID:
4004         case GETPID:
4005         case GETNCNT:
4006         case GETZCNT:
4007             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4008             break;
4009     }
4010 
4011     return ret;
4012 }
4013 
4014 struct target_sembuf {
4015     unsigned short sem_num;
4016     short sem_op;
4017     short sem_flg;
4018 };
4019 
4020 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4021                                              abi_ulong target_addr,
4022                                              unsigned nsops)
4023 {
4024     struct target_sembuf *target_sembuf;
4025     int i;
4026 
4027     target_sembuf = lock_user(VERIFY_READ, target_addr,
4028                               nsops*sizeof(struct target_sembuf), 1);
4029     if (!target_sembuf)
4030         return -TARGET_EFAULT;
4031 
4032     for (i = 0; i < nsops; i++) {
4033         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4034         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4035         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4036     }
4037 
4038     unlock_user(target_sembuf, target_addr, 0);
4039 
4040     return 0;
4041 }
4042 
4043 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4044     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4045 
4046 /*
4047  * This macro is required to handle the s390 variants, which pass the
4048  * arguments in a different order than the default variant.
4049  */
4050 #ifdef __s390x__
4051 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4052   (__nsops), (__timeout), (__sops)
4053 #else
4054 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4055   (__nsops), 0, (__sops), (__timeout)
4056 #endif
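
/*
 * Illustrative expansion (not additional code): with the generic layout the
 * safe_ipc() fallback below becomes
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, timeout)
 * while the s390x layout yields
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, timeout, sops)
 * matching the five-argument s390 sys_ipc convention.
 */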
4057 
4058 static inline abi_long do_semtimedop(int semid,
4059                                      abi_long ptr,
4060                                      unsigned nsops,
4061                                      abi_long timeout, bool time64)
4062 {
4063     struct sembuf *sops;
4064     struct timespec ts, *pts = NULL;
4065     abi_long ret;
4066 
4067     if (timeout) {
4068         pts = &ts;
4069         if (time64) {
4070             if (target_to_host_timespec64(pts, timeout)) {
4071                 return -TARGET_EFAULT;
4072             }
4073         } else {
4074             if (target_to_host_timespec(pts, timeout)) {
4075                 return -TARGET_EFAULT;
4076             }
4077         }
4078     }
4079 
4080     if (nsops > TARGET_SEMOPM) {
4081         return -TARGET_E2BIG;
4082     }
4083 
4084     sops = g_new(struct sembuf, nsops);
4085 
4086     if (target_to_host_sembuf(sops, ptr, nsops)) {
4087         g_free(sops);
4088         return -TARGET_EFAULT;
4089     }
4090 
4091     ret = -TARGET_ENOSYS;
4092 #ifdef __NR_semtimedop
4093     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4094 #endif
4095 #ifdef __NR_ipc
4096     if (ret == -TARGET_ENOSYS) {
4097         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4098                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4099     }
4100 #endif
4101     g_free(sops);
4102     return ret;
4103 }
4104 #endif
4105 
4106 struct target_msqid_ds
4107 {
4108     struct target_ipc_perm msg_perm;
4109     abi_ulong msg_stime;
4110 #if TARGET_ABI_BITS == 32
4111     abi_ulong __unused1;
4112 #endif
4113     abi_ulong msg_rtime;
4114 #if TARGET_ABI_BITS == 32
4115     abi_ulong __unused2;
4116 #endif
4117     abi_ulong msg_ctime;
4118 #if TARGET_ABI_BITS == 32
4119     abi_ulong __unused3;
4120 #endif
4121     abi_ulong __msg_cbytes;
4122     abi_ulong msg_qnum;
4123     abi_ulong msg_qbytes;
4124     abi_ulong msg_lspid;
4125     abi_ulong msg_lrpid;
4126     abi_ulong __unused4;
4127     abi_ulong __unused5;
4128 };
4129 
4130 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4131                                                abi_ulong target_addr)
4132 {
4133     struct target_msqid_ds *target_md;
4134 
4135     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4136         return -TARGET_EFAULT;
4137     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4138         return -TARGET_EFAULT;
4139     host_md->msg_stime = tswapal(target_md->msg_stime);
4140     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4141     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4142     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4143     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4144     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4145     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4146     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4147     unlock_user_struct(target_md, target_addr, 0);
4148     return 0;
4149 }
4150 
4151 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4152                                                struct msqid_ds *host_md)
4153 {
4154     struct target_msqid_ds *target_md;
4155 
4156     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4157         return -TARGET_EFAULT;
4158     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4159         return -TARGET_EFAULT;
4160     target_md->msg_stime = tswapal(host_md->msg_stime);
4161     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4162     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4163     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4164     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4165     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4166     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4167     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4168     unlock_user_struct(target_md, target_addr, 1);
4169     return 0;
4170 }
4171 
4172 struct target_msginfo {
4173     int msgpool;
4174     int msgmap;
4175     int msgmax;
4176     int msgmnb;
4177     int msgmni;
4178     int msgssz;
4179     int msgtql;
4180     unsigned short int msgseg;
4181 };
4182 
4183 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4184                                               struct msginfo *host_msginfo)
4185 {
4186     struct target_msginfo *target_msginfo;
4187     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4188         return -TARGET_EFAULT;
4189     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4190     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4191     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4192     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4193     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4194     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4195     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4196     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4197     unlock_user_struct(target_msginfo, target_addr, 1);
4198     return 0;
4199 }
4200 
4201 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4202 {
4203     struct msqid_ds dsarg;
4204     struct msginfo msginfo;
4205     abi_long ret = -TARGET_EINVAL;
4206 
4207     cmd &= 0xff;
4208 
4209     switch (cmd) {
4210     case IPC_STAT:
4211     case IPC_SET:
4212     case MSG_STAT:
4213         if (target_to_host_msqid_ds(&dsarg,ptr))
4214             return -TARGET_EFAULT;
4215         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4216         if (host_to_target_msqid_ds(ptr,&dsarg))
4217             return -TARGET_EFAULT;
4218         break;
4219     case IPC_RMID:
4220         ret = get_errno(msgctl(msgid, cmd, NULL));
4221         break;
4222     case IPC_INFO:
4223     case MSG_INFO:
4224         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4225         if (host_to_target_msginfo(ptr, &msginfo))
4226             return -TARGET_EFAULT;
4227         break;
4228     }
4229 
4230     return ret;
4231 }
4232 
4233 struct target_msgbuf {
4234     abi_long mtype;
4235     char mtext[1];
4236 };
4237 
4238 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4239                                  ssize_t msgsz, int msgflg)
4240 {
4241     struct target_msgbuf *target_mb;
4242     struct msgbuf *host_mb;
4243     abi_long ret = 0;
4244 
4245     if (msgsz < 0) {
4246         return -TARGET_EINVAL;
4247     }
4248 
4249     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4250         return -TARGET_EFAULT;
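    /* The host struct msgbuf is a long mtype followed by the message text,
     * hence the msgsz + sizeof(long) allocation. */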
4251     host_mb = g_try_malloc(msgsz + sizeof(long));
4252     if (!host_mb) {
4253         unlock_user_struct(target_mb, msgp, 0);
4254         return -TARGET_ENOMEM;
4255     }
4256     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4257     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4258     ret = -TARGET_ENOSYS;
4259 #ifdef __NR_msgsnd
4260     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4261 #endif
4262 #ifdef __NR_ipc
4263     if (ret == -TARGET_ENOSYS) {
4264 #ifdef __s390x__
4265         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4266                                  host_mb));
4267 #else
4268         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4269                                  host_mb, 0));
4270 #endif
4271     }
4272 #endif
4273     g_free(host_mb);
4274     unlock_user_struct(target_mb, msgp, 0);
4275 
4276     return ret;
4277 }
4278 
4279 #ifdef __NR_ipc
4280 #if defined(__sparc__)
4281 /* On SPARC, msgrcv does not use the kludge on the final two arguments.  */
4282 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4283 #elif defined(__s390x__)
4284 /* The s390 sys_ipc variant has only five parameters.  */
4285 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4286     ((long int[]){(long int)__msgp, __msgtyp})
4287 #else
4288 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4289     ((long int[]){(long int)__msgp, __msgtyp}), 0
4290 #endif
4291 #endif
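
/*
 * Illustrative expansions of MSGRCV_ARGS(host_mb, msgtyp) in the safe_ipc()
 * fallback below: SPARC passes host_mb and msgtyp directly; s390x packs them
 * into a two-element long array (five-argument sys_ipc); the default case
 * passes the same array followed by a trailing 0.
 */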
4292 
4293 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4294                                  ssize_t msgsz, abi_long msgtyp,
4295                                  int msgflg)
4296 {
4297     struct target_msgbuf *target_mb;
4298     char *target_mtext;
4299     struct msgbuf *host_mb;
4300     abi_long ret = 0;
4301 
4302     if (msgsz < 0) {
4303         return -TARGET_EINVAL;
4304     }
4305 
4306     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4307         return -TARGET_EFAULT;
4308 
4309     host_mb = g_try_malloc(msgsz + sizeof(long));
4310     if (!host_mb) {
4311         ret = -TARGET_ENOMEM;
4312         goto end;
4313     }
4314     ret = -TARGET_ENOSYS;
4315 #ifdef __NR_msgrcv
4316     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4317 #endif
4318 #ifdef __NR_ipc
4319     if (ret == -TARGET_ENOSYS) {
4320         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4321                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4322     }
4323 #endif
4324 
4325     if (ret > 0) {
4326         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4327         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4328         if (!target_mtext) {
4329             ret = -TARGET_EFAULT;
4330             goto end;
4331         }
4332         memcpy(target_mb->mtext, host_mb->mtext, ret);
4333         unlock_user(target_mtext, target_mtext_addr, ret);
4334     }
4335 
4336     target_mb->mtype = tswapal(host_mb->mtype);
4337 
4338 end:
4339     if (target_mb)
4340         unlock_user_struct(target_mb, msgp, 1);
4341     g_free(host_mb);
4342     return ret;
4343 }
4344 
4345 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4346                                                abi_ulong target_addr)
4347 {
4348     struct target_shmid_ds *target_sd;
4349 
4350     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4351         return -TARGET_EFAULT;
4352     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4353         return -TARGET_EFAULT;
4354     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4355     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4356     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4357     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4358     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4359     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4360     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4361     unlock_user_struct(target_sd, target_addr, 0);
4362     return 0;
4363 }
4364 
4365 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4366                                                struct shmid_ds *host_sd)
4367 {
4368     struct target_shmid_ds *target_sd;
4369 
4370     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4371         return -TARGET_EFAULT;
4372     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4373         return -TARGET_EFAULT;
4374     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4375     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4376     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4377     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4378     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4379     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4380     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4381     unlock_user_struct(target_sd, target_addr, 1);
4382     return 0;
4383 }
4384 
4385 struct  target_shminfo {
4386     abi_ulong shmmax;
4387     abi_ulong shmmin;
4388     abi_ulong shmmni;
4389     abi_ulong shmseg;
4390     abi_ulong shmall;
4391 };
4392 
4393 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4394                                               struct shminfo *host_shminfo)
4395 {
4396     struct target_shminfo *target_shminfo;
4397     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4398         return -TARGET_EFAULT;
4399     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4400     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4401     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4402     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4403     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4404     unlock_user_struct(target_shminfo, target_addr, 1);
4405     return 0;
4406 }
4407 
4408 struct target_shm_info {
4409     int used_ids;
4410     abi_ulong shm_tot;
4411     abi_ulong shm_rss;
4412     abi_ulong shm_swp;
4413     abi_ulong swap_attempts;
4414     abi_ulong swap_successes;
4415 };
4416 
4417 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4418                                                struct shm_info *host_shm_info)
4419 {
4420     struct target_shm_info *target_shm_info;
4421     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4422         return -TARGET_EFAULT;
4423     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4424     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4425     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4426     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4427     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4428     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4429     unlock_user_struct(target_shm_info, target_addr, 1);
4430     return 0;
4431 }
4432 
4433 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4434 {
4435     struct shmid_ds dsarg;
4436     struct shminfo shminfo;
4437     struct shm_info shm_info;
4438     abi_long ret = -TARGET_EINVAL;
4439 
4440     cmd &= 0xff;
4441 
4442     switch (cmd) {
4443     case IPC_STAT:
4444     case IPC_SET:
4445     case SHM_STAT:
4446         if (target_to_host_shmid_ds(&dsarg, buf))
4447             return -TARGET_EFAULT;
4448         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4449         if (host_to_target_shmid_ds(buf, &dsarg))
4450             return -TARGET_EFAULT;
4451         break;
4452     case IPC_INFO:
4453         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4454         if (host_to_target_shminfo(buf, &shminfo))
4455             return -TARGET_EFAULT;
4456         break;
4457     case SHM_INFO:
4458         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4459         if (host_to_target_shm_info(buf, &shm_info))
4460             return -TARGET_EFAULT;
4461         break;
4462     case IPC_RMID:
4463     case SHM_LOCK:
4464     case SHM_UNLOCK:
4465         ret = get_errno(shmctl(shmid, cmd, NULL));
4466         break;
4467     }
4468 
4469     return ret;
4470 }
4471 
4472 #ifdef TARGET_NR_ipc
4473 /* ??? This only works with linear mappings.  */
4474 /* do_ipc() must return target values and target errnos. */
4475 static abi_long do_ipc(CPUArchState *cpu_env,
4476                        unsigned int call, abi_long first,
4477                        abi_long second, abi_long third,
4478                        abi_long ptr, abi_long fifth)
4479 {
4480     int version;
4481     abi_long ret = 0;
4482 
4483     version = call >> 16;
4484     call &= 0xffff;
4485 
4486     switch (call) {
4487     case IPCOP_semop:
4488         ret = do_semtimedop(first, ptr, second, 0, false);
4489         break;
4490     case IPCOP_semtimedop:
4491     /*
4492      * The s390 sys_ipc variant has only five parameters instead of six
4493      * (as in the default variant); the only difference is the handling of
4494      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4495      * to a struct timespec while the generic variant uses the fifth parameter.
4496      */
4497 #if defined(TARGET_S390X)
4498         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4499 #else
4500         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4501 #endif
4502         break;
4503 
4504     case IPCOP_semget:
4505         ret = get_errno(semget(first, second, third));
4506         break;
4507 
4508     case IPCOP_semctl: {
4509         /* The semun argument to semctl is passed by value, so dereference the
4510          * ptr argument. */
4511         abi_ulong atptr;
4512         get_user_ual(atptr, ptr);
4513         ret = do_semctl(first, second, third, atptr);
4514         break;
4515     }
4516 
4517     case IPCOP_msgget:
4518         ret = get_errno(msgget(first, second));
4519         break;
4520 
4521     case IPCOP_msgsnd:
4522         ret = do_msgsnd(first, ptr, second, third);
4523         break;
4524 
4525     case IPCOP_msgctl:
4526         ret = do_msgctl(first, second, ptr);
4527         break;
4528 
4529     case IPCOP_msgrcv:
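        /*
         * version 0 is the historical interface where ptr points at a
         * { msgp, msgtyp } kludge structure rather than directly at the
         * message buffer.
         */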
4530         switch (version) {
4531         case 0:
4532             {
4533                 struct target_ipc_kludge {
4534                     abi_long msgp;
4535                     abi_long msgtyp;
4536                 } *tmp;
4537 
4538                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4539                     ret = -TARGET_EFAULT;
4540                     break;
4541                 }
4542 
4543                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4544 
4545                 unlock_user_struct(tmp, ptr, 0);
4546                 break;
4547             }
4548         default:
4549             ret = do_msgrcv(first, ptr, second, fifth, third);
4550         }
4551         break;
4552 
4553     case IPCOP_shmat:
4554         switch (version) {
4555         default:
4556         {
4557             abi_ulong raddr;
4558             raddr = target_shmat(cpu_env, first, ptr, second);
4559             if (is_error(raddr))
4560                 return get_errno(raddr);
4561             if (put_user_ual(raddr, third))
4562                 return -TARGET_EFAULT;
4563             break;
4564         }
4565         case 1:
4566             ret = -TARGET_EINVAL;
4567             break;
4568         }
4569         break;
4570     case IPCOP_shmdt:
4571         ret = target_shmdt(ptr);
4572         break;
4573 
4574     case IPCOP_shmget:
4575         /* IPC_* flag values are the same on all linux platforms */
4576         ret = get_errno(shmget(first, second, third));
4577         break;
4578 
4579     /* IPC_* and SHM_* command values are the same on all linux platforms */
4580     case IPCOP_shmctl:
4581         ret = do_shmctl(first, second, ptr);
4582         break;
4583     default:
4584         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4585                       call, version);
4586         ret = -TARGET_ENOSYS;
4587         break;
4588     }
4589     return ret;
4590 }
4591 #endif
4592 
4593 /* kernel structure types definitions */
4594 
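/*
 * syscall_types.h is included twice with different definitions of STRUCT()
 * (an X-macro pattern): the first pass builds the STRUCT_* enum ids, the
 * second emits a struct_<name>_def[] argtype array describing each layout
 * for the thunk converter used by the ioctl emulation.
 */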
4595 #define STRUCT(name, ...) STRUCT_ ## name,
4596 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4597 enum {
4598 #include "syscall_types.h"
4599 STRUCT_MAX
4600 };
4601 #undef STRUCT
4602 #undef STRUCT_SPECIAL
4603 
4604 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4605 #define STRUCT_SPECIAL(name)
4606 #include "syscall_types.h"
4607 #undef STRUCT
4608 #undef STRUCT_SPECIAL
4609 
4610 #define MAX_STRUCT_SIZE 4096
4611 
4612 #ifdef CONFIG_FIEMAP
4613 /* So fiemap access checks don't overflow on 32 bit systems.
4614  * This is very slightly smaller than the limit imposed by
4615  * the underlying kernel.
4616  */
4617 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4618                             / sizeof(struct fiemap_extent))
4619 
4620 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4621                                        int fd, int cmd, abi_long arg)
4622 {
4623     /* The parameter for this ioctl is a struct fiemap followed
4624      * by an array of struct fiemap_extent whose size is set
4625      * in fiemap->fm_extent_count. The array is filled in by the
4626      * ioctl.
4627      */
4628     int target_size_in, target_size_out;
4629     struct fiemap *fm;
4630     const argtype *arg_type = ie->arg_type;
4631     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4632     void *argptr, *p;
4633     abi_long ret;
4634     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4635     uint32_t outbufsz;
4636     int free_fm = 0;
4637 
4638     assert(arg_type[0] == TYPE_PTR);
4639     assert(ie->access == IOC_RW);
4640     arg_type++;
4641     target_size_in = thunk_type_size(arg_type, 0);
4642     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4643     if (!argptr) {
4644         return -TARGET_EFAULT;
4645     }
4646     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4647     unlock_user(argptr, arg, 0);
4648     fm = (struct fiemap *)buf_temp;
4649     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4650         return -TARGET_EINVAL;
4651     }
4652 
4653     outbufsz = sizeof (*fm) +
4654         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4655 
4656     if (outbufsz > MAX_STRUCT_SIZE) {
4657         /* We can't fit all the extents into the fixed size buffer.
4658          * Allocate one that is large enough and use it instead.
4659          */
4660         fm = g_try_malloc(outbufsz);
4661         if (!fm) {
4662             return -TARGET_ENOMEM;
4663         }
4664         memcpy(fm, buf_temp, sizeof(struct fiemap));
4665         free_fm = 1;
4666     }
4667     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4668     if (!is_error(ret)) {
4669         target_size_out = target_size_in;
4670         /* An extent_count of 0 means we were only counting the extents
4671          * so there are no structs to copy
4672          */
4673         if (fm->fm_extent_count != 0) {
4674             target_size_out += fm->fm_mapped_extents * extent_size;
4675         }
4676         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4677         if (!argptr) {
4678             ret = -TARGET_EFAULT;
4679         } else {
4680             /* Convert the struct fiemap */
4681             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4682             if (fm->fm_extent_count != 0) {
4683                 p = argptr + target_size_in;
4684                 /* ...and then all the struct fiemap_extents */
4685                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4686                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4687                                   THUNK_TARGET);
4688                     p += extent_size;
4689                 }
4690             }
4691             unlock_user(argptr, arg, target_size_out);
4692         }
4693     }
4694     if (free_fm) {
4695         g_free(fm);
4696     }
4697     return ret;
4698 }
4699 #endif
4700 
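/*
 * SIOCGIFCONF: the guest and host struct ifreq layouts may differ in
 * size, so size the host buffer from the guest's ifc_len, issue the
 * ioctl, then convert each returned ifreq and the final ifc_len back
 * into the guest layout.
 */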
4701 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4702                                 int fd, int cmd, abi_long arg)
4703 {
4704     const argtype *arg_type = ie->arg_type;
4705     int target_size;
4706     void *argptr;
4707     int ret;
4708     struct ifconf *host_ifconf;
4709     uint32_t outbufsz;
4710     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4711     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4712     int target_ifreq_size;
4713     int nb_ifreq;
4714     int free_buf = 0;
4715     int i;
4716     int target_ifc_len;
4717     abi_long target_ifc_buf;
4718     int host_ifc_len;
4719     char *host_ifc_buf;
4720 
4721     assert(arg_type[0] == TYPE_PTR);
4722     assert(ie->access == IOC_RW);
4723 
4724     arg_type++;
4725     target_size = thunk_type_size(arg_type, 0);
4726 
4727     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4728     if (!argptr)
4729         return -TARGET_EFAULT;
4730     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4731     unlock_user(argptr, arg, 0);
4732 
4733     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4734     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4735     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4736 
4737     if (target_ifc_buf != 0) {
4738         target_ifc_len = host_ifconf->ifc_len;
4739         nb_ifreq = target_ifc_len / target_ifreq_size;
4740         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4741 
4742         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4743         if (outbufsz > MAX_STRUCT_SIZE) {
4744             /*
4745              * We can't fit all the ifreq entries into the fixed size buffer.
4746              * Allocate one that is large enough and use it instead.
4747              */
4748             host_ifconf = g_try_malloc(outbufsz);
4749             if (!host_ifconf) {
4750                 return -TARGET_ENOMEM;
4751             }
4752             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4753             free_buf = 1;
4754         }
4755         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4756 
4757         host_ifconf->ifc_len = host_ifc_len;
4758     } else {
4759         host_ifc_buf = NULL;
4760     }
4761     host_ifconf->ifc_buf = host_ifc_buf;
4762 
4763     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4764     if (!is_error(ret)) {
4765 	/* convert host ifc_len to target ifc_len */
4766         /* convert host ifc_len to target ifc_len */
4767         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4768         target_ifc_len = nb_ifreq * target_ifreq_size;
4769         host_ifconf->ifc_len = target_ifc_len;
4770 
4771         /* restore target ifc_buf */
4772 
4773         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4774 
4775         /* copy struct ifconf to target user */
4776 
4777         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4778         if (!argptr)
4779             return -TARGET_EFAULT;
4780         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4781         unlock_user(argptr, arg, target_size);
4782 
4783         if (target_ifc_buf != 0) {
4784             /* copy ifreq[] to target user */
4785             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4786             for (i = 0; i < nb_ifreq ; i++) {
4787                 thunk_convert(argptr + i * target_ifreq_size,
4788                               host_ifc_buf + i * sizeof(struct ifreq),
4789                               ifreq_arg_type, THUNK_TARGET);
4790             }
4791             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4792         }
4793     }
4794 
4795     if (free_buf) {
4796         g_free(host_ifconf);
4797     }
4798 
4799     return ret;
4800 }
4801 
4802 #if defined(CONFIG_USBFS)
4803 #if HOST_LONG_BITS > 64
4804 #error USBDEVFS thunks do not support >64 bit hosts yet.
4805 #endif
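/*
 * Book-keeping for asynchronous USBDEVFS URBs: wrap each host urb in a
 * live_urb that remembers the guest urb address and the locked guest
 * buffer.  Submitted URBs are kept in a hash table keyed by the guest
 * urb address so that DISCARDURB can find them, and REAPURB recovers
 * the wrapper from the host urb pointer the kernel returns.
 */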
4806 struct live_urb {
4807     uint64_t target_urb_adr;
4808     uint64_t target_buf_adr;
4809     char *target_buf_ptr;
4810     struct usbdevfs_urb host_urb;
4811 };
4812 
4813 static GHashTable *usbdevfs_urb_hashtable(void)
4814 {
4815     static GHashTable *urb_hashtable;
4816 
4817     if (!urb_hashtable) {
4818         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4819     }
4820     return urb_hashtable;
4821 }
4822 
4823 static void urb_hashtable_insert(struct live_urb *urb)
4824 {
4825     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4826     g_hash_table_insert(urb_hashtable, urb, urb);
4827 }
4828 
4829 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4830 {
4831     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4832     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4833 }
4834 
4835 static void urb_hashtable_remove(struct live_urb *urb)
4836 {
4837     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4838     g_hash_table_remove(urb_hashtable, urb);
4839 }
4840 
4841 static abi_long
4842 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4843                           int fd, int cmd, abi_long arg)
4844 {
4845     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4846     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4847     struct live_urb *lurb;
4848     void *argptr;
4849     uint64_t hurb;
4850     int target_size;
4851     uintptr_t target_urb_adr;
4852     abi_long ret;
4853 
4854     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4855 
4856     memset(buf_temp, 0, sizeof(uint64_t));
4857     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4858     if (is_error(ret)) {
4859         return ret;
4860     }
4861 
4862     memcpy(&hurb, buf_temp, sizeof(uint64_t));
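    /*
     * The kernel hands back the address of our host_urb member; step
     * back to the enclosing live_urb (container_of style) to recover
     * the guest urb address and locked buffer we stashed at submit time.
     */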
4863     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4864     if (!lurb->target_urb_adr) {
4865         return -TARGET_EFAULT;
4866     }
4867     urb_hashtable_remove(lurb);
4868     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4869         lurb->host_urb.buffer_length);
4870     lurb->target_buf_ptr = NULL;
4871 
4872     /* restore the guest buffer pointer */
4873     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4874 
4875     /* update the guest urb struct */
4876     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4877     if (!argptr) {
4878         g_free(lurb);
4879         return -TARGET_EFAULT;
4880     }
4881     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4882     unlock_user(argptr, lurb->target_urb_adr, target_size);
4883 
4884     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4885     /* write back the urb handle */
4886     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4887     if (!argptr) {
4888         g_free(lurb);
4889         return -TARGET_EFAULT;
4890     }
4891 
4892     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4893     target_urb_adr = lurb->target_urb_adr;
4894     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4895     unlock_user(argptr, arg, target_size);
4896 
4897     g_free(lurb);
4898     return ret;
4899 }
4900 
4901 static abi_long
4902 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4903                              uint8_t *buf_temp __attribute__((unused)),
4904                              int fd, int cmd, abi_long arg)
4905 {
4906     struct live_urb *lurb;
4907 
4908     /* map target address back to host URB with metadata. */
4909     lurb = urb_hashtable_lookup(arg);
4910     if (!lurb) {
4911         return -TARGET_EFAULT;
4912     }
4913     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4914 }
4915 
4916 static abi_long
4917 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4918                             int fd, int cmd, abi_long arg)
4919 {
4920     const argtype *arg_type = ie->arg_type;
4921     int target_size;
4922     abi_long ret;
4923     void *argptr;
4924     int rw_dir;
4925     struct live_urb *lurb;
4926 
4927     /*
4928      * Each submitted URB needs to map to a unique ID for the
4929      * kernel, and that unique ID needs to be a pointer to
4930      * host memory.  Hence, we need to malloc for each URB.
4931      * Isochronous transfers have a variable-length struct.
4932      */
4933     arg_type++;
4934     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4935 
4936     /* construct host copy of urb and metadata */
4937     lurb = g_try_new0(struct live_urb, 1);
4938     if (!lurb) {
4939         return -TARGET_ENOMEM;
4940     }
4941 
4942     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4943     if (!argptr) {
4944         g_free(lurb);
4945         return -TARGET_EFAULT;
4946     }
4947     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4948     unlock_user(argptr, arg, 0);
4949 
4950     lurb->target_urb_adr = arg;
4951     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4952 
4953     /* buffer space used depends on endpoint type so lock the entire buffer */
4954     /* control type urbs should check the buffer contents for true direction */
4955     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4956     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4957         lurb->host_urb.buffer_length, 1);
4958     if (lurb->target_buf_ptr == NULL) {
4959         g_free(lurb);
4960         return -TARGET_EFAULT;
4961     }
4962 
4963     /* update buffer pointer in host copy */
4964     lurb->host_urb.buffer = lurb->target_buf_ptr;
4965 
4966     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4967     if (is_error(ret)) {
4968         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4969         g_free(lurb);
4970     } else {
4971         urb_hashtable_insert(lurb);
4972     }
4973 
4974     return ret;
4975 }
4976 #endif /* CONFIG_USBFS */
4977 
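/*
 * Device-mapper ioctls pass a struct dm_ioctl header followed by a
 * variable-length, command-specific payload starting at data_start.
 * The header is converted with the generic thunk machinery; the
 * payload is converted by hand in both directions because its layout
 * depends on the command.
 */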
4978 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4979                             int cmd, abi_long arg)
4980 {
4981     void *argptr;
4982     struct dm_ioctl *host_dm;
4983     abi_long guest_data;
4984     uint32_t guest_data_size;
4985     int target_size;
4986     const argtype *arg_type = ie->arg_type;
4987     abi_long ret;
4988     void *big_buf = NULL;
4989     char *host_data;
4990 
4991     arg_type++;
4992     target_size = thunk_type_size(arg_type, 0);
4993     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4994     if (!argptr) {
4995         ret = -TARGET_EFAULT;
4996         goto out;
4997     }
4998     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4999     unlock_user(argptr, arg, 0);
5000 
5001     /* buf_temp is too small, so fetch things into a bigger buffer */
5002     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5003     memcpy(big_buf, buf_temp, target_size);
5004     buf_temp = big_buf;
5005     host_dm = big_buf;
5006 
5007     guest_data = arg + host_dm->data_start;
5008     if ((guest_data - arg) < 0) {
5009         ret = -TARGET_EINVAL;
5010         goto out;
5011     }
5012     guest_data_size = host_dm->data_size - host_dm->data_start;
5013     host_data = (char*)host_dm + host_dm->data_start;
5014 
5015     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5016     if (!argptr) {
5017         ret = -TARGET_EFAULT;
5018         goto out;
5019     }
5020 
5021     switch (ie->host_cmd) {
5022     case DM_REMOVE_ALL:
5023     case DM_LIST_DEVICES:
5024     case DM_DEV_CREATE:
5025     case DM_DEV_REMOVE:
5026     case DM_DEV_SUSPEND:
5027     case DM_DEV_STATUS:
5028     case DM_DEV_WAIT:
5029     case DM_TABLE_STATUS:
5030     case DM_TABLE_CLEAR:
5031     case DM_TABLE_DEPS:
5032     case DM_LIST_VERSIONS:
5033         /* no input data */
5034         break;
5035     case DM_DEV_RENAME:
5036     case DM_DEV_SET_GEOMETRY:
5037         /* data contains only strings */
5038         memcpy(host_data, argptr, guest_data_size);
5039         break;
5040     case DM_TARGET_MSG:
5041         memcpy(host_data, argptr, guest_data_size);
5042         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5043         break;
5044     case DM_TABLE_LOAD:
5045     {
5046         void *gspec = argptr;
5047         void *cur_data = host_data;
5048         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5049         int spec_size = thunk_type_size(dm_arg_type, 0);
5050         int i;
5051 
5052         for (i = 0; i < host_dm->target_count; i++) {
5053             struct dm_target_spec *spec = cur_data;
5054             uint32_t next;
5055             int slen;
5056 
5057             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5058             slen = strlen((char*)gspec + spec_size) + 1;
5059             next = spec->next;
5060             spec->next = sizeof(*spec) + slen;
5061             strcpy((char*)&spec[1], gspec + spec_size);
5062             gspec += next;
5063             cur_data += spec->next;
5064         }
5065         break;
5066     }
5067     default:
5068         ret = -TARGET_EINVAL;
5069         unlock_user(argptr, guest_data, 0);
5070         goto out;
5071     }
5072     unlock_user(argptr, guest_data, 0);
5073 
5074     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5075     if (!is_error(ret)) {
5076         guest_data = arg + host_dm->data_start;
5077         guest_data_size = host_dm->data_size - host_dm->data_start;
5078         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5079         switch (ie->host_cmd) {
5080         case DM_REMOVE_ALL:
5081         case DM_DEV_CREATE:
5082         case DM_DEV_REMOVE:
5083         case DM_DEV_RENAME:
5084         case DM_DEV_SUSPEND:
5085         case DM_DEV_STATUS:
5086         case DM_TABLE_LOAD:
5087         case DM_TABLE_CLEAR:
5088         case DM_TARGET_MSG:
5089         case DM_DEV_SET_GEOMETRY:
5090             /* no return data */
5091             break;
5092         case DM_LIST_DEVICES:
5093         {
5094             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5095             uint32_t remaining_data = guest_data_size;
5096             void *cur_data = argptr;
5097             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5098             int nl_size = 12; /* can't use thunk_size due to alignment */
5099 
5100             while (1) {
5101                 uint32_t next = nl->next;
5102                 if (next) {
5103                     nl->next = nl_size + (strlen(nl->name) + 1);
5104                 }
5105                 if (remaining_data < nl->next) {
5106                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5107                     break;
5108                 }
5109                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5110                 strcpy(cur_data + nl_size, nl->name);
5111                 cur_data += nl->next;
5112                 remaining_data -= nl->next;
5113                 if (!next) {
5114                     break;
5115                 }
5116                 nl = (void*)nl + next;
5117             }
5118             break;
5119         }
5120         case DM_DEV_WAIT:
5121         case DM_TABLE_STATUS:
5122         {
5123             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5124             void *cur_data = argptr;
5125             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5126             int spec_size = thunk_type_size(dm_arg_type, 0);
5127             int i;
5128 
5129             for (i = 0; i < host_dm->target_count; i++) {
5130                 uint32_t next = spec->next;
5131                 int slen = strlen((char*)&spec[1]) + 1;
5132                 spec->next = (cur_data - argptr) + spec_size + slen;
5133                 if (guest_data_size < spec->next) {
5134                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5135                     break;
5136                 }
5137                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5138                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5139                 cur_data = argptr + spec->next;
5140                 spec = (void*)host_dm + host_dm->data_start + next;
5141             }
5142             break;
5143         }
5144         case DM_TABLE_DEPS:
5145         {
5146             void *hdata = (void*)host_dm + host_dm->data_start;
5147             int count = *(uint32_t*)hdata;
5148             uint64_t *hdev = hdata + 8;
5149             uint64_t *gdev = argptr + 8;
5150             int i;
5151 
5152             *(uint32_t*)argptr = tswap32(count);
5153             for (i = 0; i < count; i++) {
5154                 *gdev = tswap64(*hdev);
5155                 gdev++;
5156                 hdev++;
5157             }
5158             break;
5159         }
5160         case DM_LIST_VERSIONS:
5161         {
5162             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5163             uint32_t remaining_data = guest_data_size;
5164             void *cur_data = argptr;
5165             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5166             int vers_size = thunk_type_size(dm_arg_type, 0);
5167 
5168             while (1) {
5169                 uint32_t next = vers->next;
5170                 if (next) {
5171                     vers->next = vers_size + (strlen(vers->name) + 1);
5172                 }
5173                 if (remaining_data < vers->next) {
5174                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5175                     break;
5176                 }
5177                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5178                 strcpy(cur_data + vers_size, vers->name);
5179                 cur_data += vers->next;
5180                 remaining_data -= vers->next;
5181                 if (!next) {
5182                     break;
5183                 }
5184                 vers = (void*)vers + next;
5185             }
5186             break;
5187         }
5188         default:
5189             unlock_user(argptr, guest_data, 0);
5190             ret = -TARGET_EINVAL;
5191             goto out;
5192         }
5193         unlock_user(argptr, guest_data, guest_data_size);
5194 
5195         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5196         if (!argptr) {
5197             ret = -TARGET_EFAULT;
5198             goto out;
5199         }
5200         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5201         unlock_user(argptr, arg, target_size);
5202     }
5203 out:
5204     g_free(big_buf);
5205     return ret;
5206 }
5207 
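/*
 * BLKPG: struct blkpg_ioctl_arg carries a guest pointer to a
 * struct blkpg_partition payload.  Convert the outer struct, fetch and
 * convert the partition data it points to, then call the host ioctl
 * with the data pointer redirected to our local copy.
 */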
5208 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5209                                int cmd, abi_long arg)
5210 {
5211     void *argptr;
5212     int target_size;
5213     const argtype *arg_type = ie->arg_type;
5214     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5215     abi_long ret;
5216 
5217     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5218     struct blkpg_partition host_part;
5219 
5220     /* Read and convert blkpg */
5221     arg_type++;
5222     target_size = thunk_type_size(arg_type, 0);
5223     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5224     if (!argptr) {
5225         ret = -TARGET_EFAULT;
5226         goto out;
5227     }
5228     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5229     unlock_user(argptr, arg, 0);
5230 
5231     switch (host_blkpg->op) {
5232     case BLKPG_ADD_PARTITION:
5233     case BLKPG_DEL_PARTITION:
5234         /* payload is struct blkpg_partition */
5235         break;
5236     default:
5237         /* Unknown opcode */
5238         ret = -TARGET_EINVAL;
5239         goto out;
5240     }
5241 
5242     /* Read and convert blkpg->data */
5243     arg = (abi_long)(uintptr_t)host_blkpg->data;
5244     target_size = thunk_type_size(part_arg_type, 0);
5245     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5246     if (!argptr) {
5247         ret = -TARGET_EFAULT;
5248         goto out;
5249     }
5250     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5251     unlock_user(argptr, arg, 0);
5252 
5253     /* Swizzle the data pointer to our local copy and call! */
5254     host_blkpg->data = &host_part;
5255     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5256 
5257 out:
5258     return ret;
5259 }
5260 
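/*
 * Handler for rtentry-based routing ioctls (SIOCADDRT and friends):
 * rt_dev is a pointer to a device name string, which the generic
 * struct thunk cannot follow, so convert the struct field by field and
 * lock the guest string separately for the host call.
 */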
5261 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5262                                 int fd, int cmd, abi_long arg)
5263 {
5264     const argtype *arg_type = ie->arg_type;
5265     const StructEntry *se;
5266     const argtype *field_types;
5267     const int *dst_offsets, *src_offsets;
5268     int target_size;
5269     void *argptr;
5270     abi_ulong *target_rt_dev_ptr = NULL;
5271     unsigned long *host_rt_dev_ptr = NULL;
5272     abi_long ret;
5273     int i;
5274 
5275     assert(ie->access == IOC_W);
5276     assert(*arg_type == TYPE_PTR);
5277     arg_type++;
5278     assert(*arg_type == TYPE_STRUCT);
5279     target_size = thunk_type_size(arg_type, 0);
5280     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5281     if (!argptr) {
5282         return -TARGET_EFAULT;
5283     }
5284     arg_type++;
5285     assert(*arg_type == (int)STRUCT_rtentry);
5286     se = struct_entries + *arg_type++;
5287     assert(se->convert[0] == NULL);
5288     /* Convert the struct field by field here so that we can catch the rt_dev string. */
5289     field_types = se->field_types;
5290     dst_offsets = se->field_offsets[THUNK_HOST];
5291     src_offsets = se->field_offsets[THUNK_TARGET];
5292     for (i = 0; i < se->nb_fields; i++) {
5293         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5294             assert(*field_types == TYPE_PTRVOID);
5295             target_rt_dev_ptr = argptr + src_offsets[i];
5296             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5297             if (*target_rt_dev_ptr != 0) {
5298                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5299                                                   tswapal(*target_rt_dev_ptr));
5300                 if (!*host_rt_dev_ptr) {
5301                     unlock_user(argptr, arg, 0);
5302                     return -TARGET_EFAULT;
5303                 }
5304             } else {
5305                 *host_rt_dev_ptr = 0;
5306             }
5307             field_types++;
5308             continue;
5309         }
5310         field_types = thunk_convert(buf_temp + dst_offsets[i],
5311                                     argptr + src_offsets[i],
5312                                     field_types, THUNK_HOST);
5313     }
5314     unlock_user(argptr, arg, 0);
5315 
5316     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5317 
5318     assert(host_rt_dev_ptr != NULL);
5319     assert(target_rt_dev_ptr != NULL);
5320     if (*host_rt_dev_ptr != 0) {
5321         unlock_user((void *)*host_rt_dev_ptr,
5322                     *target_rt_dev_ptr, 0);
5323     }
5324     return ret;
5325 }
5326 
5327 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5328                                      int fd, int cmd, abi_long arg)
5329 {
5330     int sig = target_to_host_signal(arg);
5331     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5332 }
5333 
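/*
 * SIOCGSTAMP/SIOCGSTAMPNS come in an "old" variant using the target's
 * native timeval/timespec layout and a newer 64-bit time variant;
 * fetch the host timestamp once and write it back in whichever layout
 * the guest command requested.
 */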
5334 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5335                                     int fd, int cmd, abi_long arg)
5336 {
5337     struct timeval tv;
5338     abi_long ret;
5339 
5340     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5341     if (is_error(ret)) {
5342         return ret;
5343     }
5344 
5345     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5346         if (copy_to_user_timeval(arg, &tv)) {
5347             return -TARGET_EFAULT;
5348         }
5349     } else {
5350         if (copy_to_user_timeval64(arg, &tv)) {
5351             return -TARGET_EFAULT;
5352         }
5353     }
5354 
5355     return ret;
5356 }
5357 
5358 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5359                                       int fd, int cmd, abi_long arg)
5360 {
5361     struct timespec ts;
5362     abi_long ret;
5363 
5364     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5365     if (is_error(ret)) {
5366         return ret;
5367     }
5368 
5369     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5370         if (host_to_target_timespec(arg, &ts)) {
5371             return -TARGET_EFAULT;
5372         }
5373     } else {
5374         if (host_to_target_timespec64(arg, &ts)) {
5375             return -TARGET_EFAULT;
5376         }
5377     }
5378 
5379     return ret;
5380 }
5381 
5382 #ifdef TIOCGPTPEER
5383 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5384                                      int fd, int cmd, abi_long arg)
5385 {
5386     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5387     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5388 }
5389 #endif
5390 
5391 #ifdef HAVE_DRM_H
5392 
5393 static void unlock_drm_version(struct drm_version *host_ver,
5394                                struct target_drm_version *target_ver,
5395                                bool copy)
5396 {
5397     unlock_user(host_ver->name, target_ver->name,
5398                                 copy ? host_ver->name_len : 0);
5399     unlock_user(host_ver->date, target_ver->date,
5400                                 copy ? host_ver->date_len : 0);
5401     unlock_user(host_ver->desc, target_ver->desc,
5402                                 copy ? host_ver->desc_len : 0);
5403 }
5404 
5405 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5406                                           struct target_drm_version *target_ver)
5407 {
5408     memset(host_ver, 0, sizeof(*host_ver));
5409 
5410     __get_user(host_ver->name_len, &target_ver->name_len);
5411     if (host_ver->name_len) {
5412         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5413                                    target_ver->name_len, 0);
5414         if (!host_ver->name) {
5415             return -EFAULT;
5416         }
5417     }
5418 
5419     __get_user(host_ver->date_len, &target_ver->date_len);
5420     if (host_ver->date_len) {
5421         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5422                                    target_ver->date_len, 0);
5423         if (!host_ver->date) {
5424             goto err;
5425         }
5426     }
5427 
5428     __get_user(host_ver->desc_len, &target_ver->desc_len);
5429     if (host_ver->desc_len) {
5430         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5431                                    target_ver->desc_len, 0);
5432         if (!host_ver->desc) {
5433             goto err;
5434         }
5435     }
5436 
5437     return 0;
5438 err:
5439     unlock_drm_version(host_ver, target_ver, false);
5440     return -EFAULT;
5441 }
5442 
5443 static inline void host_to_target_drmversion(
5444                                           struct target_drm_version *target_ver,
5445                                           struct drm_version *host_ver)
5446 {
5447     __put_user(host_ver->version_major, &target_ver->version_major);
5448     __put_user(host_ver->version_minor, &target_ver->version_minor);
5449     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5450     __put_user(host_ver->name_len, &target_ver->name_len);
5451     __put_user(host_ver->date_len, &target_ver->date_len);
5452     __put_user(host_ver->desc_len, &target_ver->desc_len);
5453     unlock_drm_version(host_ver, target_ver, true);
5454 }
5455 
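/*
 * DRM_IOCTL_VERSION fills in three variable-length strings through
 * guest-supplied pointers: lock each guest buffer, point the host
 * struct at the locked memory, and copy the string lengths back once
 * the ioctl completes.
 */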
5456 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5457                              int fd, int cmd, abi_long arg)
5458 {
5459     struct drm_version *ver;
5460     struct target_drm_version *target_ver;
5461     abi_long ret;
5462 
5463     switch (ie->host_cmd) {
5464     case DRM_IOCTL_VERSION:
5465         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5466             return -TARGET_EFAULT;
5467         }
5468         ver = (struct drm_version *)buf_temp;
5469         ret = target_to_host_drmversion(ver, target_ver);
5470         if (!is_error(ret)) {
5471             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5472             if (is_error(ret)) {
5473                 unlock_drm_version(ver, target_ver, false);
5474             } else {
5475                 host_to_target_drmversion(target_ver, ver);
5476             }
5477         }
5478         unlock_user_struct(target_ver, arg, 0);
5479         return ret;
5480     }
5481     return -TARGET_ENOSYS;
5482 }
5483 
5484 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5485                                            struct drm_i915_getparam *gparam,
5486                                            int fd, abi_long arg)
5487 {
5488     abi_long ret;
5489     int value;
5490     struct target_drm_i915_getparam *target_gparam;
5491 
5492     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5493         return -TARGET_EFAULT;
5494     }
5495 
5496     __get_user(gparam->param, &target_gparam->param);
5497     gparam->value = &value;
5498     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5499     put_user_s32(value, target_gparam->value);
5500 
5501     unlock_user_struct(target_gparam, arg, 0);
5502     return ret;
5503 }
5504 
5505 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5506                                   int fd, int cmd, abi_long arg)
5507 {
5508     switch (ie->host_cmd) {
5509     case DRM_IOCTL_I915_GETPARAM:
5510         return do_ioctl_drm_i915_getparam(ie,
5511                                           (struct drm_i915_getparam *)buf_temp,
5512                                           fd, arg);
5513     default:
5514         return -TARGET_ENOSYS;
5515     }
5516 }
5517 
5518 #endif
5519 
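/*
 * TUNSETTXFILTER: struct tun_filter is followed by a variable-length
 * array of MAC addresses, which the generic thunk cannot describe.
 * Byte-swap the header fields, bound the address array against
 * MAX_STRUCT_SIZE, and copy the addresses verbatim.
 */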
5520 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5521                                         int fd, int cmd, abi_long arg)
5522 {
5523     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5524     struct tun_filter *target_filter;
5525     char *target_addr;
5526 
5527     assert(ie->access == IOC_W);
5528 
5529     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5530     if (!target_filter) {
5531         return -TARGET_EFAULT;
5532     }
5533     filter->flags = tswap16(target_filter->flags);
5534     filter->count = tswap16(target_filter->count);
5535     unlock_user(target_filter, arg, 0);
5536 
5537     if (filter->count) {
5538         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5539             MAX_STRUCT_SIZE) {
5540             return -TARGET_EFAULT;
5541         }
5542 
5543         target_addr = lock_user(VERIFY_READ,
5544                                 arg + offsetof(struct tun_filter, addr),
5545                                 filter->count * ETH_ALEN, 1);
5546         if (!target_addr) {
5547             return -TARGET_EFAULT;
5548         }
5549         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5550         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5551     }
5552 
5553     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5554 }
5555 
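/*
 * The ioctl dispatch table is generated from ioctls.h: IOCTL() entries
 * are converted generically from the recorded arg_type, IOCTL_SPECIAL()
 * entries name a custom do_ioctl handler, and IOCTL_IGNORE() entries
 * are target commands with no host equivalent (host_cmd stays 0).
 */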
5556 IOCTLEntry ioctl_entries[] = {
5557 #define IOCTL(cmd, access, ...) \
5558     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5559 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5560     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5561 #define IOCTL_IGNORE(cmd) \
5562     { TARGET_ ## cmd, 0, #cmd },
5563 #include "ioctls.h"
5564     { 0, 0, },
5565 };
5566 
5567 /* ??? Implement proper locking for ioctls.  */
5568 /* do_ioctl() must return target values and target errnos. */
5569 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5570 {
5571     const IOCTLEntry *ie;
5572     const argtype *arg_type;
5573     abi_long ret;
5574     uint8_t buf_temp[MAX_STRUCT_SIZE];
5575     int target_size;
5576     void *argptr;
5577 
5578     ie = ioctl_entries;
5579     for(;;) {
5580         if (ie->target_cmd == 0) {
5581             qemu_log_mask(
5582                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5583             return -TARGET_ENOTTY;
5584         }
5585         if (ie->target_cmd == cmd)
5586             break;
5587         ie++;
5588     }
5589     arg_type = ie->arg_type;
5590     if (ie->do_ioctl) {
5591         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5592     } else if (!ie->host_cmd) {
5593         /* Some architectures define BSD ioctls in their headers
5594            that are not implemented in Linux.  */
5595         return -TARGET_ENOTTY;
5596     }
5597 
5598     switch(arg_type[0]) {
5599     case TYPE_NULL:
5600         /* no argument */
5601         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5602         break;
5603     case TYPE_PTRVOID:
5604     case TYPE_INT:
5605     case TYPE_LONG:
5606     case TYPE_ULONG:
5607         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5608         break;
5609     case TYPE_PTR:
5610         arg_type++;
5611         target_size = thunk_type_size(arg_type, 0);
5612         switch(ie->access) {
5613         case IOC_R:
5614             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5615             if (!is_error(ret)) {
5616                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5617                 if (!argptr)
5618                     return -TARGET_EFAULT;
5619                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5620                 unlock_user(argptr, arg, target_size);
5621             }
5622             break;
5623         case IOC_W:
5624             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5625             if (!argptr)
5626                 return -TARGET_EFAULT;
5627             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5628             unlock_user(argptr, arg, 0);
5629             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5630             break;
5631         default:
5632         case IOC_RW:
5633             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5634             if (!argptr)
5635                 return -TARGET_EFAULT;
5636             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5637             unlock_user(argptr, arg, 0);
5638             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5639             if (!is_error(ret)) {
5640                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5641                 if (!argptr)
5642                     return -TARGET_EFAULT;
5643                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5644                 unlock_user(argptr, arg, target_size);
5645             }
5646             break;
5647         }
5648         break;
5649     default:
5650         qemu_log_mask(LOG_UNIMP,
5651                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5652                       (long)cmd, arg_type[0]);
5653         ret = -TARGET_ENOTTY;
5654         break;
5655     }
5656     return ret;
5657 }
5658 
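/*
 * Terminal flag translation tables: each entry pairs a (target mask,
 * target value) with the corresponding (host mask, host value) for use
 * with target_to_host_bitmask() and host_to_target_bitmask().
 */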
5659 static const bitmask_transtbl iflag_tbl[] = {
5660         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5661         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5662         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5663         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5664         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5665         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5666         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5667         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5668         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5669         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5670         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5671         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5672         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5673         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5674         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5675 };
5676 
5677 static const bitmask_transtbl oflag_tbl[] = {
5678 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5679 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5680 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5681 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5682 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5683 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5684 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5685 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5686 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5687 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5688 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5689 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5690 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5691 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5692 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5693 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5694 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5695 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5696 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5697 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5698 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5699 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5700 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5701 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5702 };
5703 
5704 static const bitmask_transtbl cflag_tbl[] = {
5705 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5706 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5707 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5708 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5709 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5710 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5711 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5712 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5713 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5714 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5715 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5716 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5717 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5718 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5719 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5720 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5721 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5722 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5723 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5724 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5725 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5726 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5727 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5728 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5729 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5730 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5731 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5732 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5733 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5734 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5735 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5736 };
5737 
5738 static const bitmask_transtbl lflag_tbl[] = {
5739   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5740   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5741   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5742   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5743   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5744   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5745   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5746   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5747   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5748   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5749   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5750   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5751   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5752   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5753   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5754   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5755 };
5756 
5757 static void target_to_host_termios (void *dst, const void *src)
5758 {
5759     struct host_termios *host = dst;
5760     const struct target_termios *target = src;
5761 
5762     host->c_iflag =
5763         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5764     host->c_oflag =
5765         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5766     host->c_cflag =
5767         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5768     host->c_lflag =
5769         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5770     host->c_line = target->c_line;
5771 
5772     memset(host->c_cc, 0, sizeof(host->c_cc));
5773     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5774     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5775     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5776     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5777     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5778     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5779     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5780     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5781     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5782     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5783     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5784     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5785     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5786     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5787     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5788     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5789     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5790 }
5791 
5792 static void host_to_target_termios (void *dst, const void *src)
5793 {
5794     struct target_termios *target = dst;
5795     const struct host_termios *host = src;
5796 
5797     target->c_iflag =
5798         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5799     target->c_oflag =
5800         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5801     target->c_cflag =
5802         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5803     target->c_lflag =
5804         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5805     target->c_line = host->c_line;
5806 
5807     memset(target->c_cc, 0, sizeof(target->c_cc));
5808     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5809     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5810     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5811     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5812     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5813     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5814     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5815     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5816     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5817     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5818     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5819     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5820     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5821     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5822     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5823     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5824     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5825 }
5826 
5827 static const StructEntry struct_termios_def = {
5828     .convert = { host_to_target_termios, target_to_host_termios },
5829     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5830     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5831     .print = print_termios,
5832 };
5833 
5834 /* If the host does not provide these bits, they may be safely discarded. */
5835 #ifndef MAP_SYNC
5836 #define MAP_SYNC 0
5837 #endif
5838 #ifndef MAP_UNINITIALIZED
5839 #define MAP_UNINITIALIZED 0
5840 #endif
5841 
5842 static const bitmask_transtbl mmap_flags_tbl[] = {
5843     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5844     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5845       MAP_ANONYMOUS, MAP_ANONYMOUS },
5846     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5847       MAP_GROWSDOWN, MAP_GROWSDOWN },
5848     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5849       MAP_DENYWRITE, MAP_DENYWRITE },
5850     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5851       MAP_EXECUTABLE, MAP_EXECUTABLE },
5852     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5853     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5854       MAP_NORESERVE, MAP_NORESERVE },
5855     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5856     /* MAP_STACK has been ignored by the kernel for quite some time.
5857        Recognize it for the target insofar as we do not want to pass
5858        it through to the host.  */
5859     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5860     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5861     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5862     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5863       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5864     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5865       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5866 };
5867 
5868 /*
5869  * Arrange for legacy / undefined architecture specific flags to be
5870  * ignored by mmap handling code.
5871  */
5872 #ifndef TARGET_MAP_32BIT
5873 #define TARGET_MAP_32BIT 0
5874 #endif
5875 #ifndef TARGET_MAP_HUGE_2MB
5876 #define TARGET_MAP_HUGE_2MB 0
5877 #endif
5878 #ifndef TARGET_MAP_HUGE_1GB
5879 #define TARGET_MAP_HUGE_1GB 0
5880 #endif
5881 
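/*
 * Translate the guest's mmap flags.  The mapping type is handled
 * explicitly: MAP_SHARED_VALIDATE is the only type that accepts
 * MAP_SYNC and rejects flags outside the legacy set, while the
 * remaining flag bits are translated through mmap_flags_tbl.
 */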
5882 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5883                         int target_flags, int fd, off_t offset)
5884 {
5885     /*
5886      * The historical set of flags that all mmap types implicitly support.
5887      */
5888     enum {
5889         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5890                                | TARGET_MAP_PRIVATE
5891                                | TARGET_MAP_FIXED
5892                                | TARGET_MAP_ANONYMOUS
5893                                | TARGET_MAP_DENYWRITE
5894                                | TARGET_MAP_EXECUTABLE
5895                                | TARGET_MAP_UNINITIALIZED
5896                                | TARGET_MAP_GROWSDOWN
5897                                | TARGET_MAP_LOCKED
5898                                | TARGET_MAP_NORESERVE
5899                                | TARGET_MAP_POPULATE
5900                                | TARGET_MAP_NONBLOCK
5901                                | TARGET_MAP_STACK
5902                                | TARGET_MAP_HUGETLB
5903                                | TARGET_MAP_32BIT
5904                                | TARGET_MAP_HUGE_2MB
5905                                | TARGET_MAP_HUGE_1GB
5906     };
5907     int host_flags;
5908 
5909     switch (target_flags & TARGET_MAP_TYPE) {
5910     case TARGET_MAP_PRIVATE:
5911         host_flags = MAP_PRIVATE;
5912         break;
5913     case TARGET_MAP_SHARED:
5914         host_flags = MAP_SHARED;
5915         break;
5916     case TARGET_MAP_SHARED_VALIDATE:
5917         /*
5918          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5919          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5920          */
5921         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5922             return -TARGET_EOPNOTSUPP;
5923         }
5924         host_flags = MAP_SHARED_VALIDATE;
5925         if (target_flags & TARGET_MAP_SYNC) {
5926             host_flags |= MAP_SYNC;
5927         }
5928         break;
5929     default:
5930         return -TARGET_EINVAL;
5931     }
5932     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5933 
5934     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5935 }
5936 
5937 /*
5938  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
5939  *       TARGET_I386 is defined if TARGET_X86_64 is defined.
5940  */
5941 #if defined(TARGET_I386)
5942 
5943 /* NOTE: there is really only one LDT, shared by all the threads */
5944 static uint8_t *ldt_table;
5945 
5946 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5947 {
5948     int size;
5949     void *p;
5950 
5951     if (!ldt_table)
5952         return 0;
5953     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5954     if (size > bytecount)
5955         size = bytecount;
5956     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5957     if (!p)
5958         return -TARGET_EFAULT;
5959     /* ??? Should this be byteswapped?  */
5960     memcpy(p, ldt_table, size);
5961     unlock_user(p, ptr, size);
5962     return size;
5963 }
5964 
5965 /* XXX: add locking support */
5966 static abi_long write_ldt(CPUX86State *env,
5967                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5968 {
5969     struct target_modify_ldt_ldt_s ldt_info;
5970     struct target_modify_ldt_ldt_s *target_ldt_info;
5971     int seg_32bit, contents, read_exec_only, limit_in_pages;
5972     int seg_not_present, useable, lm;
5973     uint32_t *lp, entry_1, entry_2;
5974 
5975     if (bytecount != sizeof(ldt_info))
5976         return -TARGET_EINVAL;
5977     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5978         return -TARGET_EFAULT;
5979     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5980     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5981     ldt_info.limit = tswap32(target_ldt_info->limit);
5982     ldt_info.flags = tswap32(target_ldt_info->flags);
5983     unlock_user_struct(target_ldt_info, ptr, 0);
5984 
5985     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5986         return -TARGET_EINVAL;
5987     seg_32bit = ldt_info.flags & 1;
5988     contents = (ldt_info.flags >> 1) & 3;
5989     read_exec_only = (ldt_info.flags >> 3) & 1;
5990     limit_in_pages = (ldt_info.flags >> 4) & 1;
5991     seg_not_present = (ldt_info.flags >> 5) & 1;
5992     useable = (ldt_info.flags >> 6) & 1;
5993 #ifdef TARGET_ABI32
5994     lm = 0;
5995 #else
5996     lm = (ldt_info.flags >> 7) & 1;
5997 #endif
5998     if (contents == 3) {
5999         if (oldmode)
6000             return -TARGET_EINVAL;
6001         if (seg_not_present == 0)
6002             return -TARGET_EINVAL;
6003     }
6004     /* allocate the LDT */
6005     if (!ldt_table) {
6006         env->ldt.base = target_mmap(0,
6007                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6008                                     PROT_READ|PROT_WRITE,
6009                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6010         if (env->ldt.base == -1)
6011             return -TARGET_ENOMEM;
6012         memset(g2h_untagged(env->ldt.base), 0,
6013                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6014         env->ldt.limit = 0xffff;
6015         ldt_table = g2h_untagged(env->ldt.base);
6016     }
6017 
6018     /* NOTE: same code as Linux kernel */
6019     /* Allow LDTs to be cleared by the user. */
6020     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6021         if (oldmode ||
6022             (contents == 0		&&
6023              read_exec_only == 1	&&
6024              seg_32bit == 0		&&
6025              limit_in_pages == 0	&&
6026              seg_not_present == 1	&&
6027              useable == 0 )) {
6028             entry_1 = 0;
6029             entry_2 = 0;
6030             goto install;
6031         }
6032     }
6033 
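    /*
     * Pack base, limit and attribute bits into the two 32-bit words of
     * an x86 segment descriptor: entry_1 holds base 15..0 and limit
     * 15..0, entry_2 holds the remaining base/limit bits plus the
     * access flags.
     */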
6034     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6035         (ldt_info.limit & 0x0ffff);
6036     entry_2 = (ldt_info.base_addr & 0xff000000) |
6037         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6038         (ldt_info.limit & 0xf0000) |
6039         ((read_exec_only ^ 1) << 9) |
6040         (contents << 10) |
6041         ((seg_not_present ^ 1) << 15) |
6042         (seg_32bit << 22) |
6043         (limit_in_pages << 23) |
6044         (lm << 21) |
6045         0x7000;
6046     if (!oldmode)
6047         entry_2 |= (useable << 20);
6048 
6049     /* Install the new entry ...  */
6050 install:
6051     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6052     lp[0] = tswap32(entry_1);
6053     lp[1] = tswap32(entry_2);
6054     return 0;
6055 }
6056 
6057 /* specific and weird i386 syscalls */
6058 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6059                               unsigned long bytecount)
6060 {
6061     abi_long ret;
6062 
6063     switch (func) {
6064     case 0:
6065         ret = read_ldt(ptr, bytecount);
6066         break;
6067     case 1:
6068         ret = write_ldt(env, ptr, bytecount, 1);
6069         break;
6070     case 0x11:
6071         ret = write_ldt(env, ptr, bytecount, 0);
6072         break;
6073     default:
6074         ret = -TARGET_ENOSYS;
6075         break;
6076     }
6077     return ret;
6078 }
6079 
6080 #if defined(TARGET_ABI32)
6081 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6082 {
6083     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6084     struct target_modify_ldt_ldt_s ldt_info;
6085     struct target_modify_ldt_ldt_s *target_ldt_info;
6086     int seg_32bit, contents, read_exec_only, limit_in_pages;
6087     int seg_not_present, useable, lm;
6088     uint32_t *lp, entry_1, entry_2;
6089     int i;
6090 
6091     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6092     if (!target_ldt_info)
6093         return -TARGET_EFAULT;
6094     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6095     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6096     ldt_info.limit = tswap32(target_ldt_info->limit);
6097     ldt_info.flags = tswap32(target_ldt_info->flags);
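    /*
     * An entry_number of -1 asks us to pick a free TLS slot: scan the
     * GDT's TLS range for an empty descriptor and report the chosen
     * index back to the guest.
     */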
6098     if (ldt_info.entry_number == -1) {
6099         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6100             if (gdt_table[i] == 0) {
6101                 ldt_info.entry_number = i;
6102                 target_ldt_info->entry_number = tswap32(i);
6103                 break;
6104             }
6105         }
6106     }
6107     unlock_user_struct(target_ldt_info, ptr, 1);
6108 
6109     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6110         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6111            return -TARGET_EINVAL;
6112     seg_32bit = ldt_info.flags & 1;
6113     contents = (ldt_info.flags >> 1) & 3;
6114     read_exec_only = (ldt_info.flags >> 3) & 1;
6115     limit_in_pages = (ldt_info.flags >> 4) & 1;
6116     seg_not_present = (ldt_info.flags >> 5) & 1;
6117     useable = (ldt_info.flags >> 6) & 1;
6118 #ifdef TARGET_ABI32
6119     lm = 0;
6120 #else
6121     lm = (ldt_info.flags >> 7) & 1;
6122 #endif
6123 
6124     if (contents == 3) {
6125         if (seg_not_present == 0)
6126             return -TARGET_EINVAL;
6127     }
6128 
6129     /* NOTE: same code as Linux kernel */
6130     /* Allow LDTs to be cleared by the user. */
6131     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6132         if ((contents == 0             &&
6133              read_exec_only == 1       &&
6134              seg_32bit == 0            &&
6135              limit_in_pages == 0       &&
6136              seg_not_present == 1      &&
6137              useable == 0 )) {
6138             entry_1 = 0;
6139             entry_2 = 0;
6140             goto install;
6141         }
6142     }
6143 
6144     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6145         (ldt_info.limit & 0x0ffff);
6146     entry_2 = (ldt_info.base_addr & 0xff000000) |
6147         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6148         (ldt_info.limit & 0xf0000) |
6149         ((read_exec_only ^ 1) << 9) |
6150         (contents << 10) |
6151         ((seg_not_present ^ 1) << 15) |
6152         (seg_32bit << 22) |
6153         (limit_in_pages << 23) |
6154         (useable << 20) |
6155         (lm << 21) |
6156         0x7000;
6157 
6158     /* Install the new entry ...  */
6159 install:
6160     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6161     lp[0] = tswap32(entry_1);
6162     lp[1] = tswap32(entry_2);
6163     return 0;
6164 }
6165 
6166 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6167 {
6168     struct target_modify_ldt_ldt_s *target_ldt_info;
6169     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6170     uint32_t base_addr, limit, flags;
6171     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6172     int seg_not_present, useable, lm;
6173     uint32_t *lp, entry_1, entry_2;
6174 
6175     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6176     if (!target_ldt_info)
6177         return -TARGET_EFAULT;
6178     idx = tswap32(target_ldt_info->entry_number);
6179     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6180         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6181         unlock_user_struct(target_ldt_info, ptr, 1);
6182         return -TARGET_EINVAL;
6183     }
6184     lp = (uint32_t *)(gdt_table + idx);
6185     entry_1 = tswap32(lp[0]);
6186     entry_2 = tswap32(lp[1]);
6187 
6188     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6189     contents = (entry_2 >> 10) & 3;
6190     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6191     seg_32bit = (entry_2 >> 22) & 1;
6192     limit_in_pages = (entry_2 >> 23) & 1;
6193     useable = (entry_2 >> 20) & 1;
6194 #ifdef TARGET_ABI32
6195     lm = 0;
6196 #else
6197     lm = (entry_2 >> 21) & 1;
6198 #endif
6199     flags = (seg_32bit << 0) | (contents << 1) |
6200         (read_exec_only << 3) | (limit_in_pages << 4) |
6201         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6202     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6203     base_addr = (entry_1 >> 16) |
6204         (entry_2 & 0xff000000) |
6205         ((entry_2 & 0xff) << 16);
6206     target_ldt_info->base_addr = tswapal(base_addr);
6207     target_ldt_info->limit = tswap32(limit);
6208     target_ldt_info->flags = tswap32(flags);
6209     unlock_user_struct(target_ldt_info, ptr, 1);
6210     return 0;
6211 }
6212 
6213 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6214 {
6215     return -TARGET_ENOSYS;
6216 }
6217 #else
6218 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6219 {
6220     abi_long ret = 0;
6221     abi_ulong val;
6222     int idx;
6223 
6224     switch(code) {
6225     case TARGET_ARCH_SET_GS:
6226     case TARGET_ARCH_SET_FS:
6227         if (code == TARGET_ARCH_SET_GS)
6228             idx = R_GS;
6229         else
6230             idx = R_FS;
6231         cpu_x86_load_seg(env, idx, 0);
6232         env->segs[idx].base = addr;
6233         break;
6234     case TARGET_ARCH_GET_GS:
6235     case TARGET_ARCH_GET_FS:
6236         if (code == TARGET_ARCH_GET_GS)
6237             idx = R_GS;
6238         else
6239             idx = R_FS;
6240         val = env->segs[idx].base;
6241         if (put_user(val, addr, abi_ulong))
6242             ret = -TARGET_EFAULT;
6243         break;
6244     default:
6245         ret = -TARGET_EINVAL;
6246         break;
6247     }
6248     return ret;
6249 }
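/*
 * This is what a 64-bit guest libc reaches when it sets up TLS: a call such
 * as arch_prctl(ARCH_SET_FS, tcb) lands here and simply updates
 * env->segs[R_FS].base, with no host syscall involved.
 */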
6250 #endif /* defined(TARGET_ABI32) */
6251 #endif /* defined(TARGET_I386) */
6252 
6253 /*
6254  * These constants are generic.  Supply any that are missing from the host.
6255  */
6256 #ifndef PR_SET_NAME
6257 # define PR_SET_NAME    15
6258 # define PR_GET_NAME    16
6259 #endif
6260 #ifndef PR_SET_FP_MODE
6261 # define PR_SET_FP_MODE 45
6262 # define PR_GET_FP_MODE 46
6263 # define PR_FP_MODE_FR   (1 << 0)
6264 # define PR_FP_MODE_FRE  (1 << 1)
6265 #endif
6266 #ifndef PR_SVE_SET_VL
6267 # define PR_SVE_SET_VL  50
6268 # define PR_SVE_GET_VL  51
6269 # define PR_SVE_VL_LEN_MASK  0xffff
6270 # define PR_SVE_VL_INHERIT   (1 << 17)
6271 #endif
6272 #ifndef PR_PAC_RESET_KEYS
6273 # define PR_PAC_RESET_KEYS  54
6274 # define PR_PAC_APIAKEY   (1 << 0)
6275 # define PR_PAC_APIBKEY   (1 << 1)
6276 # define PR_PAC_APDAKEY   (1 << 2)
6277 # define PR_PAC_APDBKEY   (1 << 3)
6278 # define PR_PAC_APGAKEY   (1 << 4)
6279 #endif
6280 #ifndef PR_SET_TAGGED_ADDR_CTRL
6281 # define PR_SET_TAGGED_ADDR_CTRL 55
6282 # define PR_GET_TAGGED_ADDR_CTRL 56
6283 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6284 #endif
6285 #ifndef PR_MTE_TCF_SHIFT
6286 # define PR_MTE_TCF_SHIFT       1
6287 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6288 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6289 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6290 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6291 # define PR_MTE_TAG_SHIFT       3
6292 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6293 #endif
6294 #ifndef PR_SET_IO_FLUSHER
6295 # define PR_SET_IO_FLUSHER 57
6296 # define PR_GET_IO_FLUSHER 58
6297 #endif
6298 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6299 # define PR_SET_SYSCALL_USER_DISPATCH 59
6300 #endif
6301 #ifndef PR_SME_SET_VL
6302 # define PR_SME_SET_VL  63
6303 # define PR_SME_GET_VL  64
6304 # define PR_SME_VL_LEN_MASK  0xffff
6305 # define PR_SME_VL_INHERIT   (1 << 17)
6306 #endif
6307 
6308 #include "target_prctl.h"
6309 
6310 static abi_long do_prctl_inval0(CPUArchState *env)
6311 {
6312     return -TARGET_EINVAL;
6313 }
6314 
6315 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6316 {
6317     return -TARGET_EINVAL;
6318 }
6319 
6320 #ifndef do_prctl_get_fp_mode
6321 #define do_prctl_get_fp_mode do_prctl_inval0
6322 #endif
6323 #ifndef do_prctl_set_fp_mode
6324 #define do_prctl_set_fp_mode do_prctl_inval1
6325 #endif
6326 #ifndef do_prctl_sve_get_vl
6327 #define do_prctl_sve_get_vl do_prctl_inval0
6328 #endif
6329 #ifndef do_prctl_sve_set_vl
6330 #define do_prctl_sve_set_vl do_prctl_inval1
6331 #endif
6332 #ifndef do_prctl_reset_keys
6333 #define do_prctl_reset_keys do_prctl_inval1
6334 #endif
6335 #ifndef do_prctl_set_tagged_addr_ctrl
6336 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6337 #endif
6338 #ifndef do_prctl_get_tagged_addr_ctrl
6339 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6340 #endif
6341 #ifndef do_prctl_get_unalign
6342 #define do_prctl_get_unalign do_prctl_inval1
6343 #endif
6344 #ifndef do_prctl_set_unalign
6345 #define do_prctl_set_unalign do_prctl_inval1
6346 #endif
6347 #ifndef do_prctl_sme_get_vl
6348 #define do_prctl_sme_get_vl do_prctl_inval0
6349 #endif
6350 #ifndef do_prctl_sme_set_vl
6351 #define do_prctl_sme_set_vl do_prctl_inval1
6352 #endif
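/*
 * Targets supply their own handlers for the hooks above in target_prctl.h;
 * anything left undefined falls back to the -TARGET_EINVAL stubs.  A minimal
 * sketch of the shape such a handler takes (the helper name is hypothetical):
 *
 *     static abi_long do_prctl_get_fp_mode(CPUArchState *env)
 *     {
 *         return target_fp_mode_bits(env);    // hypothetical helper
 *     }
 *     #define do_prctl_get_fp_mode do_prctl_get_fp_mode
 */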
6353 
6354 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6355                          abi_long arg3, abi_long arg4, abi_long arg5)
6356 {
6357     abi_long ret;
6358 
6359     switch (option) {
6360     case PR_GET_PDEATHSIG:
6361         {
6362             int deathsig;
6363             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6364                                   arg3, arg4, arg5));
6365             if (!is_error(ret) &&
6366                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6367                 return -TARGET_EFAULT;
6368             }
6369             return ret;
6370         }
6371     case PR_SET_PDEATHSIG:
6372         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6373                                arg3, arg4, arg5));
6374     case PR_GET_NAME:
6375         {
6376             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6377             if (!name) {
6378                 return -TARGET_EFAULT;
6379             }
6380             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6381                                   arg3, arg4, arg5));
6382             unlock_user(name, arg2, 16);
6383             return ret;
6384         }
6385     case PR_SET_NAME:
6386         {
6387             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6388             if (!name) {
6389                 return -TARGET_EFAULT;
6390             }
6391             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6392                                   arg3, arg4, arg5));
6393             unlock_user(name, arg2, 0);
6394             return ret;
6395         }
6396     case PR_GET_FP_MODE:
6397         return do_prctl_get_fp_mode(env);
6398     case PR_SET_FP_MODE:
6399         return do_prctl_set_fp_mode(env, arg2);
6400     case PR_SVE_GET_VL:
6401         return do_prctl_sve_get_vl(env);
6402     case PR_SVE_SET_VL:
6403         return do_prctl_sve_set_vl(env, arg2);
6404     case PR_SME_GET_VL:
6405         return do_prctl_sme_get_vl(env);
6406     case PR_SME_SET_VL:
6407         return do_prctl_sme_set_vl(env, arg2);
6408     case PR_PAC_RESET_KEYS:
6409         if (arg3 || arg4 || arg5) {
6410             return -TARGET_EINVAL;
6411         }
6412         return do_prctl_reset_keys(env, arg2);
6413     case PR_SET_TAGGED_ADDR_CTRL:
6414         if (arg3 || arg4 || arg5) {
6415             return -TARGET_EINVAL;
6416         }
6417         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6418     case PR_GET_TAGGED_ADDR_CTRL:
6419         if (arg2 || arg3 || arg4 || arg5) {
6420             return -TARGET_EINVAL;
6421         }
6422         return do_prctl_get_tagged_addr_ctrl(env);
6423 
6424     case PR_GET_UNALIGN:
6425         return do_prctl_get_unalign(env, arg2);
6426     case PR_SET_UNALIGN:
6427         return do_prctl_set_unalign(env, arg2);
6428 
6429     case PR_CAP_AMBIENT:
6430     case PR_CAPBSET_READ:
6431     case PR_CAPBSET_DROP:
6432     case PR_GET_DUMPABLE:
6433     case PR_SET_DUMPABLE:
6434     case PR_GET_KEEPCAPS:
6435     case PR_SET_KEEPCAPS:
6436     case PR_GET_SECUREBITS:
6437     case PR_SET_SECUREBITS:
6438     case PR_GET_TIMING:
6439     case PR_SET_TIMING:
6440     case PR_GET_TIMERSLACK:
6441     case PR_SET_TIMERSLACK:
6442     case PR_MCE_KILL:
6443     case PR_MCE_KILL_GET:
6444     case PR_GET_NO_NEW_PRIVS:
6445     case PR_SET_NO_NEW_PRIVS:
6446     case PR_GET_IO_FLUSHER:
6447     case PR_SET_IO_FLUSHER:
6448     case PR_SET_CHILD_SUBREAPER:
6449     case PR_GET_SPECULATION_CTRL:
6450     case PR_SET_SPECULATION_CTRL:
6451         /* These prctl options take no pointer arguments, so pass them straight on. */
6452         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6453 
6454     case PR_GET_CHILD_SUBREAPER:
6455         {
6456             int val;
6457             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6458                                   arg3, arg4, arg5));
6459             if (!is_error(ret) && put_user_s32(val, arg2)) {
6460                 return -TARGET_EFAULT;
6461             }
6462             return ret;
6463         }
6464 
6465     case PR_GET_TID_ADDRESS:
6466         {
6467             TaskState *ts = env_cpu(env)->opaque;
6468             return put_user_ual(ts->child_tidptr, arg2);
6469         }
6470 
6471     case PR_GET_FPEXC:
6472     case PR_SET_FPEXC:
6473         /* Was used for SPE on PowerPC. */
6474         return -TARGET_EINVAL;
6475 
6476     case PR_GET_ENDIAN:
6477     case PR_SET_ENDIAN:
6478     case PR_GET_FPEMU:
6479     case PR_SET_FPEMU:
6480     case PR_SET_MM:
6481     case PR_GET_SECCOMP:
6482     case PR_SET_SECCOMP:
6483     case PR_SET_SYSCALL_USER_DISPATCH:
6484     case PR_GET_THP_DISABLE:
6485     case PR_SET_THP_DISABLE:
6486     case PR_GET_TSC:
6487     case PR_SET_TSC:
6488         /* Refuse these so the guest cannot disable features QEMU relies on. */
6489         return -TARGET_EINVAL;
6490 
6491     default:
6492         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6493                       option);
6494         return -TARGET_EINVAL;
6495     }
6496 }
6497 
6498 #define NEW_STACK_SIZE 0x40000
6499 
6500 
6501 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6502 typedef struct {
6503     CPUArchState *env;
6504     pthread_mutex_t mutex;
6505     pthread_cond_t cond;
6506     pthread_t thread;
6507     uint32_t tid;
6508     abi_ulong child_tidptr;
6509     abi_ulong parent_tidptr;
6510     sigset_t sigmask;
6511 } new_thread_info;
6512 
6513 static void *clone_func(void *arg)
6514 {
6515     new_thread_info *info = arg;
6516     CPUArchState *env;
6517     CPUState *cpu;
6518     TaskState *ts;
6519 
6520     rcu_register_thread();
6521     tcg_register_thread();
6522     env = info->env;
6523     cpu = env_cpu(env);
6524     thread_cpu = cpu;
6525     ts = get_task_state(cpu);
6526     info->tid = sys_gettid();
6527     task_settid(ts);
6528     if (info->child_tidptr)
6529         put_user_u32(info->tid, info->child_tidptr);
6530     if (info->parent_tidptr)
6531         put_user_u32(info->tid, info->parent_tidptr);
6532     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6533     /* Enable signals.  */
6534     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6535     /* Signal to the parent that we're ready.  */
6536     pthread_mutex_lock(&info->mutex);
6537     pthread_cond_broadcast(&info->cond);
6538     pthread_mutex_unlock(&info->mutex);
6539     /* Wait until the parent has finished initializing the tls state.  */
6540     pthread_mutex_lock(&clone_lock);
6541     pthread_mutex_unlock(&clone_lock);
6542     cpu_loop(env);
6543     /* never exits */
6544     return NULL;
6545 }
6546 
6547 /* do_fork() must return host values and target errnos (unlike most
6548    do_*() functions). */
6549 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6550                    abi_ulong parent_tidptr, target_ulong newtls,
6551                    abi_ulong child_tidptr)
6552 {
6553     CPUState *cpu = env_cpu(env);
6554     int ret;
6555     TaskState *ts;
6556     CPUState *new_cpu;
6557     CPUArchState *new_env;
6558     sigset_t sigmask;
6559 
6560     flags &= ~CLONE_IGNORED_FLAGS;
6561 
6562     /* Emulate vfork() with fork() */
6563     if (flags & CLONE_VFORK)
6564         flags &= ~(CLONE_VFORK | CLONE_VM);
6565 
6566     if (flags & CLONE_VM) {
6567         TaskState *parent_ts = get_task_state(cpu);
6568         new_thread_info info;
6569         pthread_attr_t attr;
6570 
6571         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6572             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6573             return -TARGET_EINVAL;
6574         }
6575 
6576         ts = g_new0(TaskState, 1);
6577         init_task_state(ts);
6578 
6579         /* Grab a mutex so that thread setup appears atomic.  */
6580         pthread_mutex_lock(&clone_lock);
6581 
6582         /*
6583          * If this is our first additional thread, we need to ensure we
6584          * generate code for parallel execution and flush old translations.
6585          * Do this now so that the copy gets CF_PARALLEL too.
6586          */
6587         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6588             cpu->tcg_cflags |= CF_PARALLEL;
6589             tb_flush(cpu);
6590         }
6591 
6592         /* Create a new CPU instance. */
6593         new_env = cpu_copy(env);
6594         /* Init regs that differ from the parent.  */
6595         cpu_clone_regs_child(new_env, newsp, flags);
6596         cpu_clone_regs_parent(env, flags);
6597         new_cpu = env_cpu(new_env);
6598         new_cpu->opaque = ts;
6599         ts->bprm = parent_ts->bprm;
6600         ts->info = parent_ts->info;
6601         ts->signal_mask = parent_ts->signal_mask;
6602 
6603         if (flags & CLONE_CHILD_CLEARTID) {
6604             ts->child_tidptr = child_tidptr;
6605         }
6606 
6607         if (flags & CLONE_SETTLS) {
6608             cpu_set_tls (new_env, newtls);
6609         }
6610 
6611         memset(&info, 0, sizeof(info));
6612         pthread_mutex_init(&info.mutex, NULL);
6613         pthread_mutex_lock(&info.mutex);
6614         pthread_cond_init(&info.cond, NULL);
6615         info.env = new_env;
6616         if (flags & CLONE_CHILD_SETTID) {
6617             info.child_tidptr = child_tidptr;
6618         }
6619         if (flags & CLONE_PARENT_SETTID) {
6620             info.parent_tidptr = parent_tidptr;
6621         }
6622 
6623         ret = pthread_attr_init(&attr);
6624         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6625         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6626         /* It is not safe to deliver signals until the child has finished
6627            initializing, so temporarily block all signals.  */
6628         sigfillset(&sigmask);
6629         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6630         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6631 
6632         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6633         /* TODO: Free new CPU state if thread creation failed.  */
6634 
6635         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6636         pthread_attr_destroy(&attr);
6637         if (ret == 0) {
6638             /* Wait for the child to initialize.  */
6639             pthread_cond_wait(&info.cond, &info.mutex);
6640             ret = info.tid;
6641         } else {
6642             ret = -1;
6643         }
6644         pthread_mutex_unlock(&info.mutex);
6645         pthread_cond_destroy(&info.cond);
6646         pthread_mutex_destroy(&info.mutex);
6647         pthread_mutex_unlock(&clone_lock);
6648     } else {
6649         /* Without CLONE_VM, we treat this as a fork. */
6650         if (flags & CLONE_INVALID_FORK_FLAGS) {
6651             return -TARGET_EINVAL;
6652         }
6653 
6654         /* We can't support custom termination signals */
6655         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6656             return -TARGET_EINVAL;
6657         }
6658 
6659 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6660         if (flags & CLONE_PIDFD) {
6661             return -TARGET_EINVAL;
6662         }
6663 #endif
6664 
6665         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID. */
6666         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6667             return -TARGET_EINVAL;
6668         }
6669 
6670         if (block_signals()) {
6671             return -QEMU_ERESTARTSYS;
6672         }
6673 
6674         fork_start();
6675         ret = fork();
6676         if (ret == 0) {
6677             /* Child Process.  */
6678             cpu_clone_regs_child(env, newsp, flags);
6679             fork_end(ret);
6680             /* There is a race condition here.  The parent process could
6681                theoretically read the TID in the child process before the child
6682                tid is set.  This would require using either ptrace
6683                (not implemented) or having *_tidptr to point at a shared memory
6684                (not implemented) or having *_tidptr point at a shared memory
6685                the child process gets its own copy of the lock.  */
6686             if (flags & CLONE_CHILD_SETTID)
6687                 put_user_u32(sys_gettid(), child_tidptr);
6688             if (flags & CLONE_PARENT_SETTID)
6689                 put_user_u32(sys_gettid(), parent_tidptr);
6690             ts = get_task_state(cpu);
6691             if (flags & CLONE_SETTLS)
6692                 cpu_set_tls (env, newtls);
6693             if (flags & CLONE_CHILD_CLEARTID)
6694                 ts->child_tidptr = child_tidptr;
6695         } else {
6696             cpu_clone_regs_parent(env, flags);
6697             if (flags & CLONE_PIDFD) {
6698                 int pid_fd = 0;
6699 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6700                 int pid_child = ret;
6701                 pid_fd = pidfd_open(pid_child, 0);
6702                 if (pid_fd >= 0) {
6703                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6704                                                | FD_CLOEXEC);
6705                 } else {
6706                         pid_fd = 0;
6707                 }
6708 #endif
6709                 put_user_u32(pid_fd, parent_tidptr);
6710             }
6711             fork_end(ret);
6712         }
6713         g_assert(!cpu_in_exclusive_context(cpu));
6714     }
6715     return ret;
6716 }
6717 
6718 /* Warning: doesn't handle Linux-specific flags... */
6719 static int target_to_host_fcntl_cmd(int cmd)
6720 {
6721     int ret;
6722 
6723     switch(cmd) {
6724     case TARGET_F_DUPFD:
6725     case TARGET_F_GETFD:
6726     case TARGET_F_SETFD:
6727     case TARGET_F_GETFL:
6728     case TARGET_F_SETFL:
6729     case TARGET_F_OFD_GETLK:
6730     case TARGET_F_OFD_SETLK:
6731     case TARGET_F_OFD_SETLKW:
6732         ret = cmd;
6733         break;
6734     case TARGET_F_GETLK:
6735         ret = F_GETLK64;
6736         break;
6737     case TARGET_F_SETLK:
6738         ret = F_SETLK64;
6739         break;
6740     case TARGET_F_SETLKW:
6741         ret = F_SETLKW64;
6742         break;
6743     case TARGET_F_GETOWN:
6744         ret = F_GETOWN;
6745         break;
6746     case TARGET_F_SETOWN:
6747         ret = F_SETOWN;
6748         break;
6749     case TARGET_F_GETSIG:
6750         ret = F_GETSIG;
6751         break;
6752     case TARGET_F_SETSIG:
6753         ret = F_SETSIG;
6754         break;
6755 #if TARGET_ABI_BITS == 32
6756     case TARGET_F_GETLK64:
6757         ret = F_GETLK64;
6758         break;
6759     case TARGET_F_SETLK64:
6760         ret = F_SETLK64;
6761         break;
6762     case TARGET_F_SETLKW64:
6763         ret = F_SETLKW64;
6764         break;
6765 #endif
6766     case TARGET_F_SETLEASE:
6767         ret = F_SETLEASE;
6768         break;
6769     case TARGET_F_GETLEASE:
6770         ret = F_GETLEASE;
6771         break;
6772 #ifdef F_DUPFD_CLOEXEC
6773     case TARGET_F_DUPFD_CLOEXEC:
6774         ret = F_DUPFD_CLOEXEC;
6775         break;
6776 #endif
6777     case TARGET_F_NOTIFY:
6778         ret = F_NOTIFY;
6779         break;
6780 #ifdef F_GETOWN_EX
6781     case TARGET_F_GETOWN_EX:
6782         ret = F_GETOWN_EX;
6783         break;
6784 #endif
6785 #ifdef F_SETOWN_EX
6786     case TARGET_F_SETOWN_EX:
6787         ret = F_SETOWN_EX;
6788         break;
6789 #endif
6790 #ifdef F_SETPIPE_SZ
6791     case TARGET_F_SETPIPE_SZ:
6792         ret = F_SETPIPE_SZ;
6793         break;
6794     case TARGET_F_GETPIPE_SZ:
6795         ret = F_GETPIPE_SZ;
6796         break;
6797 #endif
6798 #ifdef F_ADD_SEALS
6799     case TARGET_F_ADD_SEALS:
6800         ret = F_ADD_SEALS;
6801         break;
6802     case TARGET_F_GET_SEALS:
6803         ret = F_GET_SEALS;
6804         break;
6805 #endif
6806     default:
6807         ret = -TARGET_EINVAL;
6808         break;
6809     }
6810 
6811 #if defined(__powerpc64__)
6812     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6813      * are not supported by the kernel. The glibc fcntl wrapper adjusts
6814      * them to 5, 6 and 7 before making the syscall(). Since we make the
6815      * syscall directly, adjust to what the kernel supports.
6816      */
6817     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6818         ret -= F_GETLK64 - 5;
6819     }
6820 #endif
6821 
6822     return ret;
6823 }
6824 
6825 #define FLOCK_TRANSTBL \
6826     switch (type) { \
6827     TRANSTBL_CONVERT(F_RDLCK); \
6828     TRANSTBL_CONVERT(F_WRLCK); \
6829     TRANSTBL_CONVERT(F_UNLCK); \
6830     }
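/*
 * FLOCK_TRANSTBL is an X-macro: each user below defines TRANSTBL_CONVERT
 * before expanding it, so in target_to_host_flock() the F_RDLCK line becomes
 * "case TARGET_F_RDLCK: return F_RDLCK;", and in host_to_target_flock() the
 * mapping runs the other way.
 */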
6831 
6832 static int target_to_host_flock(int type)
6833 {
6834 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6835     FLOCK_TRANSTBL
6836 #undef  TRANSTBL_CONVERT
6837     return -TARGET_EINVAL;
6838 }
6839 
6840 static int host_to_target_flock(int type)
6841 {
6842 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6843     FLOCK_TRANSTBL
6844 #undef  TRANSTBL_CONVERT
6845     /* If we don't know how to convert the value coming
6846      * from the host, we copy it to the target field as-is.
6847      */
6848     return type;
6849 }
6850 
6851 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6852                                             abi_ulong target_flock_addr)
6853 {
6854     struct target_flock *target_fl;
6855     int l_type;
6856 
6857     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6858         return -TARGET_EFAULT;
6859     }
6860 
6861     __get_user(l_type, &target_fl->l_type);
6862     l_type = target_to_host_flock(l_type);
6863     if (l_type < 0) {
6864         return l_type;
6865     }
6866     fl->l_type = l_type;
6867     __get_user(fl->l_whence, &target_fl->l_whence);
6868     __get_user(fl->l_start, &target_fl->l_start);
6869     __get_user(fl->l_len, &target_fl->l_len);
6870     __get_user(fl->l_pid, &target_fl->l_pid);
6871     unlock_user_struct(target_fl, target_flock_addr, 0);
6872     return 0;
6873 }
6874 
6875 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6876                                           const struct flock64 *fl)
6877 {
6878     struct target_flock *target_fl;
6879     short l_type;
6880 
6881     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6882         return -TARGET_EFAULT;
6883     }
6884 
6885     l_type = host_to_target_flock(fl->l_type);
6886     __put_user(l_type, &target_fl->l_type);
6887     __put_user(fl->l_whence, &target_fl->l_whence);
6888     __put_user(fl->l_start, &target_fl->l_start);
6889     __put_user(fl->l_len, &target_fl->l_len);
6890     __put_user(fl->l_pid, &target_fl->l_pid);
6891     unlock_user_struct(target_fl, target_flock_addr, 1);
6892     return 0;
6893 }
6894 
6895 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6896 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6897 
6898 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6899 struct target_oabi_flock64 {
6900     abi_short l_type;
6901     abi_short l_whence;
6902     abi_llong l_start;
6903     abi_llong l_len;
6904     abi_int   l_pid;
6905 } QEMU_PACKED;
6906 
6907 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6908                                                    abi_ulong target_flock_addr)
6909 {
6910     struct target_oabi_flock64 *target_fl;
6911     int l_type;
6912 
6913     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6914         return -TARGET_EFAULT;
6915     }
6916 
6917     __get_user(l_type, &target_fl->l_type);
6918     l_type = target_to_host_flock(l_type);
6919     if (l_type < 0) {
6920         return l_type;
6921     }
6922     fl->l_type = l_type;
6923     __get_user(fl->l_whence, &target_fl->l_whence);
6924     __get_user(fl->l_start, &target_fl->l_start);
6925     __get_user(fl->l_len, &target_fl->l_len);
6926     __get_user(fl->l_pid, &target_fl->l_pid);
6927     unlock_user_struct(target_fl, target_flock_addr, 0);
6928     return 0;
6929 }
6930 
6931 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6932                                                  const struct flock64 *fl)
6933 {
6934     struct target_oabi_flock64 *target_fl;
6935     short l_type;
6936 
6937     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6938         return -TARGET_EFAULT;
6939     }
6940 
6941     l_type = host_to_target_flock(fl->l_type);
6942     __put_user(l_type, &target_fl->l_type);
6943     __put_user(fl->l_whence, &target_fl->l_whence);
6944     __put_user(fl->l_start, &target_fl->l_start);
6945     __put_user(fl->l_len, &target_fl->l_len);
6946     __put_user(fl->l_pid, &target_fl->l_pid);
6947     unlock_user_struct(target_fl, target_flock_addr, 1);
6948     return 0;
6949 }
6950 #endif
6951 
6952 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6953                                               abi_ulong target_flock_addr)
6954 {
6955     struct target_flock64 *target_fl;
6956     int l_type;
6957 
6958     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6959         return -TARGET_EFAULT;
6960     }
6961 
6962     __get_user(l_type, &target_fl->l_type);
6963     l_type = target_to_host_flock(l_type);
6964     if (l_type < 0) {
6965         return l_type;
6966     }
6967     fl->l_type = l_type;
6968     __get_user(fl->l_whence, &target_fl->l_whence);
6969     __get_user(fl->l_start, &target_fl->l_start);
6970     __get_user(fl->l_len, &target_fl->l_len);
6971     __get_user(fl->l_pid, &target_fl->l_pid);
6972     unlock_user_struct(target_fl, target_flock_addr, 0);
6973     return 0;
6974 }
6975 
6976 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6977                                             const struct flock64 *fl)
6978 {
6979     struct target_flock64 *target_fl;
6980     short l_type;
6981 
6982     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6983         return -TARGET_EFAULT;
6984     }
6985 
6986     l_type = host_to_target_flock(fl->l_type);
6987     __put_user(l_type, &target_fl->l_type);
6988     __put_user(fl->l_whence, &target_fl->l_whence);
6989     __put_user(fl->l_start, &target_fl->l_start);
6990     __put_user(fl->l_len, &target_fl->l_len);
6991     __put_user(fl->l_pid, &target_fl->l_pid);
6992     unlock_user_struct(target_fl, target_flock_addr, 1);
6993     return 0;
6994 }
6995 
6996 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6997 {
6998     struct flock64 fl64;
6999 #ifdef F_GETOWN_EX
7000     struct f_owner_ex fox;
7001     struct target_f_owner_ex *target_fox;
7002 #endif
7003     abi_long ret;
7004     int host_cmd = target_to_host_fcntl_cmd(cmd);
7005 
7006     if (host_cmd == -TARGET_EINVAL)
7007         return host_cmd;
7008 
7009     switch(cmd) {
7010     case TARGET_F_GETLK:
7011         ret = copy_from_user_flock(&fl64, arg);
7012         if (ret) {
7013             return ret;
7014         }
7015         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7016         if (ret == 0) {
7017             ret = copy_to_user_flock(arg, &fl64);
7018         }
7019         break;
7020 
7021     case TARGET_F_SETLK:
7022     case TARGET_F_SETLKW:
7023         ret = copy_from_user_flock(&fl64, arg);
7024         if (ret) {
7025             return ret;
7026         }
7027         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7028         break;
7029 
7030     case TARGET_F_GETLK64:
7031     case TARGET_F_OFD_GETLK:
7032         ret = copy_from_user_flock64(&fl64, arg);
7033         if (ret) {
7034             return ret;
7035         }
7036         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7037         if (ret == 0) {
7038             ret = copy_to_user_flock64(arg, &fl64);
7039         }
7040         break;
7041     case TARGET_F_SETLK64:
7042     case TARGET_F_SETLKW64:
7043     case TARGET_F_OFD_SETLK:
7044     case TARGET_F_OFD_SETLKW:
7045         ret = copy_from_user_flock64(&fl64, arg);
7046         if (ret) {
7047             return ret;
7048         }
7049         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7050         break;
7051 
7052     case TARGET_F_GETFL:
7053         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7054         if (ret >= 0) {
7055             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7056             /* Tell 32-bit guests that the fd uses O_LARGEFILE on 64-bit hosts. */
7057             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7058                 ret |= TARGET_O_LARGEFILE;
7059             }
7060         }
7061         break;
7062 
7063     case TARGET_F_SETFL:
7064         ret = get_errno(safe_fcntl(fd, host_cmd,
7065                                    target_to_host_bitmask(arg,
7066                                                           fcntl_flags_tbl)));
7067         break;
7068 
7069 #ifdef F_GETOWN_EX
7070     case TARGET_F_GETOWN_EX:
7071         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7072         if (ret >= 0) {
7073             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7074                 return -TARGET_EFAULT;
7075             target_fox->type = tswap32(fox.type);
7076             target_fox->pid = tswap32(fox.pid);
7077             unlock_user_struct(target_fox, arg, 1);
7078         }
7079         break;
7080 #endif
7081 
7082 #ifdef F_SETOWN_EX
7083     case TARGET_F_SETOWN_EX:
7084         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7085             return -TARGET_EFAULT;
7086         fox.type = tswap32(target_fox->type);
7087         fox.pid = tswap32(target_fox->pid);
7088         unlock_user_struct(target_fox, arg, 0);
7089         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7090         break;
7091 #endif
7092 
7093     case TARGET_F_SETSIG:
7094         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7095         break;
7096 
7097     case TARGET_F_GETSIG:
7098         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7099         break;
7100 
7101     case TARGET_F_SETOWN:
7102     case TARGET_F_GETOWN:
7103     case TARGET_F_SETLEASE:
7104     case TARGET_F_GETLEASE:
7105     case TARGET_F_SETPIPE_SZ:
7106     case TARGET_F_GETPIPE_SZ:
7107     case TARGET_F_ADD_SEALS:
7108     case TARGET_F_GET_SEALS:
7109         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7110         break;
7111 
7112     default:
7113         ret = get_errno(safe_fcntl(fd, cmd, arg));
7114         break;
7115     }
7116     return ret;
7117 }
7118 
7119 #ifdef USE_UID16
7120 
7121 static inline int high2lowuid(int uid)
7122 {
7123     if (uid > 65535)
7124         return 65534;
7125     else
7126         return uid;
7127 }
7128 
7129 static inline int high2lowgid(int gid)
7130 {
7131     if (gid > 65535)
7132         return 65534;
7133     else
7134         return gid;
7135 }
7136 
7137 static inline int low2highuid(int uid)
7138 {
7139     if ((int16_t)uid == -1)
7140         return -1;
7141     else
7142         return uid;
7143 }
7144 
7145 static inline int low2highgid(int gid)
7146 {
7147     if ((int16_t)gid == -1)
7148         return -1;
7149     else
7150         return gid;
7151 }
7152 static inline int tswapid(int id)
7153 {
7154     return tswap16(id);
7155 }
7156 
7157 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7158 
7159 #else /* !USE_UID16 */
7160 static inline int high2lowuid(int uid)
7161 {
7162     return uid;
7163 }
7164 static inline int high2lowgid(int gid)
7165 {
7166     return gid;
7167 }
7168 static inline int low2highuid(int uid)
7169 {
7170     return uid;
7171 }
7172 static inline int low2highgid(int gid)
7173 {
7174     return gid;
7175 }
7176 static inline int tswapid(int id)
7177 {
7178     return tswap32(id);
7179 }
7180 
7181 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7182 
7183 #endif /* USE_UID16 */
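/*
 * Example of the 16-bit clamping above: with USE_UID16 a host uid of 100000
 * does not fit in 16 bits and is reported to the guest as 65534 (the
 * kernel's traditional overflowuid), while low2highuid() keeps a 16-bit -1
 * meaning "no change" when widened back to the host type.
 */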
7184 
7185 /* We must do direct syscalls for setting UID/GID, because we want to
7186  * implement the Linux system call semantics of "change only for this thread",
7187  * not the libc/POSIX semantics of "change for all threads in process".
7188  * (See http://ewontfix.com/17/ for more details.)
7189  * We use the 32-bit version of the syscalls if present; if it is not
7190  * then either the host architecture supports 32-bit UIDs natively with
7191  * the standard syscall, or the 16-bit UID is the best we can do.
7192  */
7193 #ifdef __NR_setuid32
7194 #define __NR_sys_setuid __NR_setuid32
7195 #else
7196 #define __NR_sys_setuid __NR_setuid
7197 #endif
7198 #ifdef __NR_setgid32
7199 #define __NR_sys_setgid __NR_setgid32
7200 #else
7201 #define __NR_sys_setgid __NR_setgid
7202 #endif
7203 #ifdef __NR_setresuid32
7204 #define __NR_sys_setresuid __NR_setresuid32
7205 #else
7206 #define __NR_sys_setresuid __NR_setresuid
7207 #endif
7208 #ifdef __NR_setresgid32
7209 #define __NR_sys_setresgid __NR_setresgid32
7210 #else
7211 #define __NR_sys_setresgid __NR_setresgid
7212 #endif
7213 
7214 _syscall1(int, sys_setuid, uid_t, uid)
7215 _syscall1(int, sys_setgid, gid_t, gid)
7216 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7217 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7218 
7219 void syscall_init(void)
7220 {
7221     IOCTLEntry *ie;
7222     const argtype *arg_type;
7223     int size;
7224 
7225     thunk_init(STRUCT_MAX);
7226 
7227 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7228 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7229 #include "syscall_types.h"
7230 #undef STRUCT
7231 #undef STRUCT_SPECIAL
7232 
7233     /* We patch the ioctl size if necessary. We rely on the fact that
7234        no ioctl has all the bits set to '1' in the size field. */
7235     ie = ioctl_entries;
7236     while (ie->target_cmd != 0) {
7237         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7238             TARGET_IOC_SIZEMASK) {
7239             arg_type = ie->arg_type;
7240             if (arg_type[0] != TYPE_PTR) {
7241                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7242                         ie->target_cmd);
7243                 exit(1);
7244             }
7245             arg_type++;
7246             size = thunk_type_size(arg_type, 0);
7247             ie->target_cmd = (ie->target_cmd &
7248                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7249                 (size << TARGET_IOC_SIZESHIFT);
7250         }
7251 
7252         /* automatic consistency check if same arch */
7253 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7254     (defined(__x86_64__) && defined(TARGET_X86_64))
7255         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7256             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7257                     ie->name, ie->target_cmd, ie->host_cmd);
7258         }
7259 #endif
7260         ie++;
7261     }
7262 }
7263 
7264 #ifdef TARGET_NR_truncate64
7265 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7266                                          abi_long arg2,
7267                                          abi_long arg3,
7268                                          abi_long arg4)
7269 {
7270     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7271         arg2 = arg3;
7272         arg3 = arg4;
7273     }
7274     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7275 }
7276 #endif
7277 
7278 #ifdef TARGET_NR_ftruncate64
7279 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7280                                           abi_long arg2,
7281                                           abi_long arg3,
7282                                           abi_long arg4)
7283 {
7284     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7285         arg2 = arg3;
7286         arg3 = arg4;
7287     }
7288     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7289 }
7290 #endif
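/*
 * regpairs_aligned() is true for ABIs that pass 64-bit syscall arguments in
 * aligned (even/odd) register pairs.  On such targets the 64-bit offset
 * starts one argument slot later, which is why the helpers above shift
 * arg3/arg4 down before target_offset64() glues the two halves together.
 */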
7291 
7292 #if defined(TARGET_NR_timer_settime) || \
7293     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7294 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7295                                                  abi_ulong target_addr)
7296 {
7297     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7298                                 offsetof(struct target_itimerspec,
7299                                          it_interval)) ||
7300         target_to_host_timespec(&host_its->it_value, target_addr +
7301                                 offsetof(struct target_itimerspec,
7302                                          it_value))) {
7303         return -TARGET_EFAULT;
7304     }
7305 
7306     return 0;
7307 }
7308 #endif
7309 
7310 #if defined(TARGET_NR_timer_settime64) || \
7311     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7312 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7313                                                    abi_ulong target_addr)
7314 {
7315     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7316                                   offsetof(struct target__kernel_itimerspec,
7317                                            it_interval)) ||
7318         target_to_host_timespec64(&host_its->it_value, target_addr +
7319                                   offsetof(struct target__kernel_itimerspec,
7320                                            it_value))) {
7321         return -TARGET_EFAULT;
7322     }
7323 
7324     return 0;
7325 }
7326 #endif
7327 
7328 #if ((defined(TARGET_NR_timerfd_gettime) || \
7329       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7330       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7331 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7332                                                  struct itimerspec *host_its)
7333 {
7334     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7335                                                        it_interval),
7336                                 &host_its->it_interval) ||
7337         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7338                                                        it_value),
7339                                 &host_its->it_value)) {
7340         return -TARGET_EFAULT;
7341     }
7342     return 0;
7343 }
7344 #endif
7345 
7346 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7347       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7348       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7349 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7350                                                    struct itimerspec *host_its)
7351 {
7352     if (host_to_target_timespec64(target_addr +
7353                                   offsetof(struct target__kernel_itimerspec,
7354                                            it_interval),
7355                                   &host_its->it_interval) ||
7356         host_to_target_timespec64(target_addr +
7357                                   offsetof(struct target__kernel_itimerspec,
7358                                            it_value),
7359                                   &host_its->it_value)) {
7360         return -TARGET_EFAULT;
7361     }
7362     return 0;
7363 }
7364 #endif
7365 
7366 #if defined(TARGET_NR_adjtimex) || \
7367     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7368 static inline abi_long target_to_host_timex(struct timex *host_tx,
7369                                             abi_long target_addr)
7370 {
7371     struct target_timex *target_tx;
7372 
7373     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7374         return -TARGET_EFAULT;
7375     }
7376 
7377     __get_user(host_tx->modes, &target_tx->modes);
7378     __get_user(host_tx->offset, &target_tx->offset);
7379     __get_user(host_tx->freq, &target_tx->freq);
7380     __get_user(host_tx->maxerror, &target_tx->maxerror);
7381     __get_user(host_tx->esterror, &target_tx->esterror);
7382     __get_user(host_tx->status, &target_tx->status);
7383     __get_user(host_tx->constant, &target_tx->constant);
7384     __get_user(host_tx->precision, &target_tx->precision);
7385     __get_user(host_tx->tolerance, &target_tx->tolerance);
7386     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7387     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7388     __get_user(host_tx->tick, &target_tx->tick);
7389     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7390     __get_user(host_tx->jitter, &target_tx->jitter);
7391     __get_user(host_tx->shift, &target_tx->shift);
7392     __get_user(host_tx->stabil, &target_tx->stabil);
7393     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7394     __get_user(host_tx->calcnt, &target_tx->calcnt);
7395     __get_user(host_tx->errcnt, &target_tx->errcnt);
7396     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7397     __get_user(host_tx->tai, &target_tx->tai);
7398 
7399     unlock_user_struct(target_tx, target_addr, 0);
7400     return 0;
7401 }
7402 
7403 static inline abi_long host_to_target_timex(abi_long target_addr,
7404                                             struct timex *host_tx)
7405 {
7406     struct target_timex *target_tx;
7407 
7408     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7409         return -TARGET_EFAULT;
7410     }
7411 
7412     __put_user(host_tx->modes, &target_tx->modes);
7413     __put_user(host_tx->offset, &target_tx->offset);
7414     __put_user(host_tx->freq, &target_tx->freq);
7415     __put_user(host_tx->maxerror, &target_tx->maxerror);
7416     __put_user(host_tx->esterror, &target_tx->esterror);
7417     __put_user(host_tx->status, &target_tx->status);
7418     __put_user(host_tx->constant, &target_tx->constant);
7419     __put_user(host_tx->precision, &target_tx->precision);
7420     __put_user(host_tx->tolerance, &target_tx->tolerance);
7421     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7422     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7423     __put_user(host_tx->tick, &target_tx->tick);
7424     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7425     __put_user(host_tx->jitter, &target_tx->jitter);
7426     __put_user(host_tx->shift, &target_tx->shift);
7427     __put_user(host_tx->stabil, &target_tx->stabil);
7428     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7429     __put_user(host_tx->calcnt, &target_tx->calcnt);
7430     __put_user(host_tx->errcnt, &target_tx->errcnt);
7431     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7432     __put_user(host_tx->tai, &target_tx->tai);
7433 
7434     unlock_user_struct(target_tx, target_addr, 1);
7435     return 0;
7436 }
7437 #endif
7438 
7439 
7440 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7441 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7442                                               abi_long target_addr)
7443 {
7444     struct target__kernel_timex *target_tx;
7445 
7446     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7447                                  offsetof(struct target__kernel_timex,
7448                                           time))) {
7449         return -TARGET_EFAULT;
7450     }
7451 
7452     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7453         return -TARGET_EFAULT;
7454     }
7455 
7456     __get_user(host_tx->modes, &target_tx->modes);
7457     __get_user(host_tx->offset, &target_tx->offset);
7458     __get_user(host_tx->freq, &target_tx->freq);
7459     __get_user(host_tx->maxerror, &target_tx->maxerror);
7460     __get_user(host_tx->esterror, &target_tx->esterror);
7461     __get_user(host_tx->status, &target_tx->status);
7462     __get_user(host_tx->constant, &target_tx->constant);
7463     __get_user(host_tx->precision, &target_tx->precision);
7464     __get_user(host_tx->tolerance, &target_tx->tolerance);
7465     __get_user(host_tx->tick, &target_tx->tick);
7466     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7467     __get_user(host_tx->jitter, &target_tx->jitter);
7468     __get_user(host_tx->shift, &target_tx->shift);
7469     __get_user(host_tx->stabil, &target_tx->stabil);
7470     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7471     __get_user(host_tx->calcnt, &target_tx->calcnt);
7472     __get_user(host_tx->errcnt, &target_tx->errcnt);
7473     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7474     __get_user(host_tx->tai, &target_tx->tai);
7475 
7476     unlock_user_struct(target_tx, target_addr, 0);
7477     return 0;
7478 }
7479 
7480 static inline abi_long host_to_target_timex64(abi_long target_addr,
7481                                               struct timex *host_tx)
7482 {
7483     struct target__kernel_timex *target_tx;
7484 
7485    if (copy_to_user_timeval64(target_addr +
7486                               offsetof(struct target__kernel_timex, time),
7487                               &host_tx->time)) {
7488         return -TARGET_EFAULT;
7489     }
7490 
7491     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7492         return -TARGET_EFAULT;
7493     }
7494 
7495     __put_user(host_tx->modes, &target_tx->modes);
7496     __put_user(host_tx->offset, &target_tx->offset);
7497     __put_user(host_tx->freq, &target_tx->freq);
7498     __put_user(host_tx->maxerror, &target_tx->maxerror);
7499     __put_user(host_tx->esterror, &target_tx->esterror);
7500     __put_user(host_tx->status, &target_tx->status);
7501     __put_user(host_tx->constant, &target_tx->constant);
7502     __put_user(host_tx->precision, &target_tx->precision);
7503     __put_user(host_tx->tolerance, &target_tx->tolerance);
7504     __put_user(host_tx->tick, &target_tx->tick);
7505     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7506     __put_user(host_tx->jitter, &target_tx->jitter);
7507     __put_user(host_tx->shift, &target_tx->shift);
7508     __put_user(host_tx->stabil, &target_tx->stabil);
7509     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7510     __put_user(host_tx->calcnt, &target_tx->calcnt);
7511     __put_user(host_tx->errcnt, &target_tx->errcnt);
7512     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7513     __put_user(host_tx->tai, &target_tx->tai);
7514 
7515     unlock_user_struct(target_tx, target_addr, 1);
7516     return 0;
7517 }
7518 #endif
7519 
7520 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7521 #define sigev_notify_thread_id _sigev_un._tid
7522 #endif
7523 
7524 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7525                                                abi_ulong target_addr)
7526 {
7527     struct target_sigevent *target_sevp;
7528 
7529     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7530         return -TARGET_EFAULT;
7531     }
7532 
7533     /* This union is awkward on 64 bit systems because it has a 32 bit
7534      * integer and a pointer in it; we follow the conversion approach
7535      * used for handling sigval types in signal.c so the guest should get
7536      * the correct value back even if we did a 64 bit byteswap and it's
7537      * using the 32 bit integer.
7538      */
7539     host_sevp->sigev_value.sival_ptr =
7540         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7541     host_sevp->sigev_signo =
7542         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7543     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7544     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7545 
7546     unlock_user_struct(target_sevp, target_addr, 1);
7547     return 0;
7548 }
7549 
7550 #if defined(TARGET_NR_mlockall)
7551 static inline int target_to_host_mlockall_arg(int arg)
7552 {
7553     int result = 0;
7554 
7555     if (arg & TARGET_MCL_CURRENT) {
7556         result |= MCL_CURRENT;
7557     }
7558     if (arg & TARGET_MCL_FUTURE) {
7559         result |= MCL_FUTURE;
7560     }
7561 #ifdef MCL_ONFAULT
7562     if (arg & TARGET_MCL_ONFAULT) {
7563         result |= MCL_ONFAULT;
7564     }
7565 #endif
7566 
7567     return result;
7568 }
7569 #endif
7570 
7571 static inline int target_to_host_msync_arg(abi_long arg)
7572 {
7573     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7574            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7575            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7576            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7577 }
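/*
 * For example, a guest msync() flag word of TARGET_MS_SYNC |
 * TARGET_MS_INVALIDATE maps to MS_SYNC | MS_INVALIDATE; any bits the
 * translation above does not know about are passed through untouched so the
 * host kernel can reject them itself.
 */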
7578 
7579 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7580      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7581      defined(TARGET_NR_newfstatat))
7582 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7583                                              abi_ulong target_addr,
7584                                              struct stat *host_st)
7585 {
7586 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7587     if (cpu_env->eabi) {
7588         struct target_eabi_stat64 *target_st;
7589 
7590         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7591             return -TARGET_EFAULT;
7592         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7593         __put_user(host_st->st_dev, &target_st->st_dev);
7594         __put_user(host_st->st_ino, &target_st->st_ino);
7595 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7596         __put_user(host_st->st_ino, &target_st->__st_ino);
7597 #endif
7598         __put_user(host_st->st_mode, &target_st->st_mode);
7599         __put_user(host_st->st_nlink, &target_st->st_nlink);
7600         __put_user(host_st->st_uid, &target_st->st_uid);
7601         __put_user(host_st->st_gid, &target_st->st_gid);
7602         __put_user(host_st->st_rdev, &target_st->st_rdev);
7603         __put_user(host_st->st_size, &target_st->st_size);
7604         __put_user(host_st->st_blksize, &target_st->st_blksize);
7605         __put_user(host_st->st_blocks, &target_st->st_blocks);
7606         __put_user(host_st->st_atime, &target_st->target_st_atime);
7607         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7608         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7609 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7610         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7611         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7612         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7613 #endif
7614         unlock_user_struct(target_st, target_addr, 1);
7615     } else
7616 #endif
7617     {
7618 #if defined(TARGET_HAS_STRUCT_STAT64)
7619         struct target_stat64 *target_st;
7620 #else
7621         struct target_stat *target_st;
7622 #endif
7623 
7624         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7625             return -TARGET_EFAULT;
7626         memset(target_st, 0, sizeof(*target_st));
7627         __put_user(host_st->st_dev, &target_st->st_dev);
7628         __put_user(host_st->st_ino, &target_st->st_ino);
7629 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7630         __put_user(host_st->st_ino, &target_st->__st_ino);
7631 #endif
7632         __put_user(host_st->st_mode, &target_st->st_mode);
7633         __put_user(host_st->st_nlink, &target_st->st_nlink);
7634         __put_user(host_st->st_uid, &target_st->st_uid);
7635         __put_user(host_st->st_gid, &target_st->st_gid);
7636         __put_user(host_st->st_rdev, &target_st->st_rdev);
7637         /* XXX: better use of kernel struct */
7638         __put_user(host_st->st_size, &target_st->st_size);
7639         __put_user(host_st->st_blksize, &target_st->st_blksize);
7640         __put_user(host_st->st_blocks, &target_st->st_blocks);
7641         __put_user(host_st->st_atime, &target_st->target_st_atime);
7642         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7643         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7644 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7645         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7646         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7647         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7648 #endif
7649         unlock_user_struct(target_st, target_addr, 1);
7650     }
7651 
7652     return 0;
7653 }
7654 #endif
7655 
7656 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7657 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7658                                             abi_ulong target_addr)
7659 {
7660     struct target_statx *target_stx;
7661 
7662     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7663         return -TARGET_EFAULT;
7664     }
7665     memset(target_stx, 0, sizeof(*target_stx));
7666 
7667     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7668     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7669     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7670     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7671     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7672     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7673     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7674     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7675     __put_user(host_stx->stx_size, &target_stx->stx_size);
7676     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7677     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7678     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7679     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7680     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7681     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7682     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7683     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7684     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7685     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7686     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7687     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7688     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7689     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7690 
7691     unlock_user_struct(target_stx, target_addr, 1);
7692 
7693     return 0;
7694 }
7695 #endif
7696 
7697 static int do_sys_futex(int *uaddr, int op, int val,
7698                          const struct timespec *timeout, int *uaddr2,
7699                          int val3)
7700 {
7701 #if HOST_LONG_BITS == 64
7702 #if defined(__NR_futex)
7703     /* always a 64-bit time_t; the host does not define a _time64 variant */
7704     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7705 
7706 #endif
7707 #else /* HOST_LONG_BITS == 64 */
7708 #if defined(__NR_futex_time64)
7709     if (sizeof(timeout->tv_sec) == 8) {
7710         /* _time64 variant on a 32-bit host */
7711         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7712     }
7713 #endif
7714 #if defined(__NR_futex)
7715     /* legacy variant on a 32-bit host */
7716     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7717 #endif
7718 #endif /* HOST_LONG_BITS == 64 */
7719     g_assert_not_reached();
7720 }
7721 
7722 static int do_safe_futex(int *uaddr, int op, int val,
7723                          const struct timespec *timeout, int *uaddr2,
7724                          int val3)
7725 {
7726 #if HOST_LONG_BITS == 64
7727 #if defined(__NR_futex)
7728     /* always a 64-bit time_t; the host does not define a _time64 variant */
7729     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7730 #endif
7731 #else /* HOST_LONG_BITS == 64 */
7732 #if defined(__NR_futex_time64)
7733     if (sizeof(timeout->tv_sec) == 8) {
7734         /* _time64 variant on a 32-bit host */
7735         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7736                                            val3));
7737     }
7738 #endif
7739 #if defined(__NR_futex)
7740     /* legacy variant on a 32-bit host */
7741     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7742 #endif
7743 #endif /* HOST_LONG_BITS == 64 */
7744     return -TARGET_ENOSYS;
7745 }
7746 
7747 /* ??? Using host futex calls even when target atomic operations
7748    are not really atomic probably breaks things.  However, implementing
7749    futexes locally would make futexes shared between multiple processes
7750    tricky.  They are probably useless in that case anyway, since guest
7751    atomic operations won't work either.  */
7752 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7753 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7754                     int op, int val, target_ulong timeout,
7755                     target_ulong uaddr2, int val3)
7756 {
7757     struct timespec ts, *pts = NULL;
7758     void *haddr2 = NULL;
7759     int base_op;
7760 
7761     /* We assume FUTEX_* constants are the same on both host and target. */
7762 #ifdef FUTEX_CMD_MASK
7763     base_op = op & FUTEX_CMD_MASK;
7764 #else
7765     base_op = op;
7766 #endif
7767     switch (base_op) {
7768     case FUTEX_WAIT:
7769     case FUTEX_WAIT_BITSET:
7770         val = tswap32(val);
7771         break;
7772     case FUTEX_WAIT_REQUEUE_PI:
7773         val = tswap32(val);
7774         haddr2 = g2h(cpu, uaddr2);
7775         break;
7776     case FUTEX_LOCK_PI:
7777     case FUTEX_LOCK_PI2:
7778         break;
7779     case FUTEX_WAKE:
7780     case FUTEX_WAKE_BITSET:
7781     case FUTEX_TRYLOCK_PI:
7782     case FUTEX_UNLOCK_PI:
7783         timeout = 0;
7784         break;
7785     case FUTEX_FD:
7786         val = target_to_host_signal(val);
7787         timeout = 0;
7788         break;
7789     case FUTEX_CMP_REQUEUE:
7790     case FUTEX_CMP_REQUEUE_PI:
7791         val3 = tswap32(val3);
7792         /* fall through */
7793     case FUTEX_REQUEUE:
7794     case FUTEX_WAKE_OP:
7795         /*
7796          * For these, the 4th argument is not TIMEOUT, but VAL2.
7797          * But the prototype of do_safe_futex takes a pointer, so
7798          * insert casts to satisfy the compiler.  We do not need
7799          * to tswap VAL2 since it's not compared to guest memory.
7800          */
7801         pts = (struct timespec *)(uintptr_t)timeout;
7802         timeout = 0;
7803         haddr2 = g2h(cpu, uaddr2);
7804         break;
7805     default:
7806         return -TARGET_ENOSYS;
7807     }
7808     if (timeout) {
7809         pts = &ts;
7810         if (time64
7811             ? target_to_host_timespec64(pts, timeout)
7812             : target_to_host_timespec(pts, timeout)) {
7813             return -TARGET_EFAULT;
7814         }
7815     }
7816     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7817 }
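/*
 * A note on the byte swapping above (informal, inferred from the code):
 * for the FUTEX_WAIT-style operations the kernel compares *uaddr, which
 * still holds a guest-endian value, against VAL, so VAL is tswap32()ed
 * to match.  VAL3 for the FUTEX_CMP_REQUEUE operations gets the same
 * treatment, while plain wake counts and VAL2 are never compared to
 * guest memory and are passed through unchanged.
 */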
7818 #endif
7819 
7820 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7821 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7822                                      abi_long handle, abi_long mount_id,
7823                                      abi_long flags)
7824 {
7825     struct file_handle *target_fh;
7826     struct file_handle *fh;
7827     int mid = 0;
7828     abi_long ret;
7829     char *name;
7830     unsigned int size, total_size;
7831 
7832     if (get_user_s32(size, handle)) {
7833         return -TARGET_EFAULT;
7834     }
7835 
7836     name = lock_user_string(pathname);
7837     if (!name) {
7838         return -TARGET_EFAULT;
7839     }
7840 
7841     total_size = sizeof(struct file_handle) + size;
7842     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7843     if (!target_fh) {
7844         unlock_user(name, pathname, 0);
7845         return -TARGET_EFAULT;
7846     }
7847 
7848     fh = g_malloc0(total_size);
7849     fh->handle_bytes = size;
7850 
7851     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7852     unlock_user(name, pathname, 0);
7853 
7854     /* man name_to_handle_at(2):
7855      * Other than the use of the handle_bytes field, the caller should treat
7856      * the file_handle structure as an opaque data type
7857      */
7858 
7859     memcpy(target_fh, fh, total_size);
7860     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7861     target_fh->handle_type = tswap32(fh->handle_type);
7862     g_free(fh);
7863     unlock_user(target_fh, handle, total_size);
7864 
7865     if (put_user_s32(mid, mount_id)) {
7866         return -TARGET_EFAULT;
7867     }
7868 
7869     return ret;
7870 
7871 }
7872 #endif
7873 
7874 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7875 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7876                                      abi_long flags)
7877 {
7878     struct file_handle *target_fh;
7879     struct file_handle *fh;
7880     unsigned int size, total_size;
7881     abi_long ret;
7882 
7883     if (get_user_s32(size, handle)) {
7884         return -TARGET_EFAULT;
7885     }
7886 
7887     total_size = sizeof(struct file_handle) + size;
7888     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7889     if (!target_fh) {
7890         return -TARGET_EFAULT;
7891     }
7892 
7893     fh = g_memdup(target_fh, total_size);
7894     fh->handle_bytes = size;
7895     fh->handle_type = tswap32(target_fh->handle_type);
7896 
7897     ret = get_errno(open_by_handle_at(mount_fd, fh,
7898                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7899 
7900     g_free(fh);
7901 
7902     unlock_user(target_fh, handle, total_size);
7903 
7904     return ret;
7905 }
7906 #endif
7907 
7908 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7909 
7910 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7911 {
7912     int host_flags;
7913     target_sigset_t *target_mask;
7914     sigset_t host_mask;
7915     abi_long ret;
7916 
7917     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7918         return -TARGET_EINVAL;
7919     }
7920     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7921         return -TARGET_EFAULT;
7922     }
7923 
7924     target_to_host_sigset(&host_mask, target_mask);
7925 
7926     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7927 
7928     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7929     if (ret >= 0) {
7930         fd_trans_register(ret, &target_signalfd_trans);
7931     }
7932 
7933     unlock_user_struct(target_mask, mask, 0);
7934 
7935     return ret;
7936 }
7937 #endif
7938 
7939 /* Map host to target signal numbers for the wait family of syscalls.
7940    Assume all other status bits are the same.  */
7941 int host_to_target_waitstatus(int status)
7942 {
7943     if (WIFSIGNALED(status)) {
7944         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7945     }
7946     if (WIFSTOPPED(status)) {
7947         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7948                | (status & 0xff);
7949     }
7950     return status;
7951 }
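/*
 * For illustration: the raw wait status packs WTERMSIG() into its low
 * 7 bits and WSTOPSIG() (or the exit code) into bits 8-15, so for a
 * signalled child only the low signal number needs remapping, while the
 * core-dump flag (0x80) and everything above it pass through unchanged.
 */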
7952 
7953 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7954 {
7955     CPUState *cpu = env_cpu(cpu_env);
7956     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7957     int i;
7958 
7959     for (i = 0; i < bprm->argc; i++) {
7960         size_t len = strlen(bprm->argv[i]) + 1;
7961 
7962         if (write(fd, bprm->argv[i], len) != len) {
7963             return -1;
7964         }
7965     }
7966 
7967     return 0;
7968 }
7969 
7970 struct open_self_maps_data {
7971     TaskState *ts;
7972     IntervalTreeRoot *host_maps;
7973     int fd;
7974     bool smaps;
7975 };
7976 
7977 /*
7978  * Subroutine to output one line of /proc/self/maps,
7979  * or one region of /proc/self/smaps.
7980  */
7981 
7982 #ifdef TARGET_HPPA
7983 # define test_stack(S, E, L)  (E == L)
7984 #else
7985 # define test_stack(S, E, L)  (S == L)
7986 #endif
7987 
7988 static void open_self_maps_4(const struct open_self_maps_data *d,
7989                              const MapInfo *mi, abi_ptr start,
7990                              abi_ptr end, unsigned flags)
7991 {
7992     const struct image_info *info = d->ts->info;
7993     const char *path = mi->path;
7994     uint64_t offset;
7995     int fd = d->fd;
7996     int count;
7997 
7998     if (test_stack(start, end, info->stack_limit)) {
7999         path = "[stack]";
8000     } else if (start == info->brk) {
8001         path = "[heap]";
8002     } else if (start == info->vdso) {
8003         path = "[vdso]";
8004 #ifdef TARGET_X86_64
8005     } else if (start == TARGET_VSYSCALL_PAGE) {
8006         path = "[vsyscall]";
8007 #endif
8008     }
8009 
8010     /* Except null device (MAP_ANON), adjust offset for this fragment. */
8011     offset = mi->offset;
8012     if (mi->dev) {
8013         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8014         offset += hstart - mi->itree.start;
8015     }
8016 
8017     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8018                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8019                     start, end,
8020                     (flags & PAGE_READ) ? 'r' : '-',
8021                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8022                     (flags & PAGE_EXEC) ? 'x' : '-',
8023                     mi->is_priv ? 'p' : 's',
8024                     offset, major(mi->dev), minor(mi->dev),
8025                     (uint64_t)mi->inode);
8026     if (path) {
8027         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8028     } else {
8029         dprintf(fd, "\n");
8030     }
8031 
8032     if (d->smaps) {
8033         unsigned long size = end - start;
8034         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8035         unsigned long size_kb = size >> 10;
8036 
8037         dprintf(fd, "Size:                  %lu kB\n"
8038                 "KernelPageSize:        %lu kB\n"
8039                 "MMUPageSize:           %lu kB\n"
8040                 "Rss:                   0 kB\n"
8041                 "Pss:                   0 kB\n"
8042                 "Pss_Dirty:             0 kB\n"
8043                 "Shared_Clean:          0 kB\n"
8044                 "Shared_Dirty:          0 kB\n"
8045                 "Private_Clean:         0 kB\n"
8046                 "Private_Dirty:         0 kB\n"
8047                 "Referenced:            0 kB\n"
8048                 "Anonymous:             %lu kB\n"
8049                 "LazyFree:              0 kB\n"
8050                 "AnonHugePages:         0 kB\n"
8051                 "ShmemPmdMapped:        0 kB\n"
8052                 "FilePmdMapped:         0 kB\n"
8053                 "Shared_Hugetlb:        0 kB\n"
8054                 "Private_Hugetlb:       0 kB\n"
8055                 "Swap:                  0 kB\n"
8056                 "SwapPss:               0 kB\n"
8057                 "Locked:                0 kB\n"
8058                 "THPeligible:    0\n"
8059                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8060                 size_kb, page_size_kb, page_size_kb,
8061                 (flags & PAGE_ANON ? size_kb : 0),
8062                 (flags & PAGE_READ) ? " rd" : "",
8063                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8064                 (flags & PAGE_EXEC) ? " ex" : "",
8065                 mi->is_priv ? "" : " sh",
8066                 (flags & PAGE_READ) ? " mr" : "",
8067                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8068                 (flags & PAGE_EXEC) ? " me" : "",
8069                 mi->is_priv ? "" : " ms");
8070     }
8071 }
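/*
 * The first dprintf() above produces the usual /proc/<pid>/maps layout,
 * roughly (hypothetical values):
 *
 *   00400000-0040b000 r-xp 00000000 08:01 131090    /path/to/binary
 *
 * i.e. start-end, permissions, offset, device major:minor, inode, and
 * finally the pathname, padded so that it starts at column 74 when one
 * is known.
 */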
8072 
8073 /*
8074  * Callback for walk_memory_regions, when read_self_maps() fails.
8075  * Proceed without the benefit of host /proc/self/maps cross-check.
8076  */
8077 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8078                             target_ulong guest_end, unsigned long flags)
8079 {
8080     static const MapInfo mi = { .is_priv = true };
8081 
8082     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8083     return 0;
8084 }
8085 
8086 /*
8087  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8088  */
8089 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8090                             target_ulong guest_end, unsigned long flags)
8091 {
8092     const struct open_self_maps_data *d = opaque;
8093     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8094     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8095 
8096 #ifdef TARGET_X86_64
8097     /*
8098      * Because of the extremely high position of the vsyscall page within
8099      * the guest virtual address space, it is not backed by host memory at
8100      * all, so the loop below would fail.  This is the only such instance
8101      * of a guest mapping without host backing memory.
8102      */
8103     if (guest_start == TARGET_VSYSCALL_PAGE) {
8104         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8105     }
8106 #endif
8107 
8108     while (1) {
8109         IntervalTreeNode *n =
8110             interval_tree_iter_first(d->host_maps, host_start, host_start);
8111         MapInfo *mi = container_of(n, MapInfo, itree);
8112         uintptr_t this_hlast = MIN(host_last, n->last);
8113         target_ulong this_gend = h2g(this_hlast) + 1;
8114 
8115         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8116 
8117         if (this_hlast == host_last) {
8118             return 0;
8119         }
8120         host_start = this_hlast + 1;
8121         guest_start = h2g(host_start);
8122     }
8123 }
8124 
8125 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8126 {
8127     struct open_self_maps_data d = {
8128         .ts = env_cpu(env)->opaque,
8129         .host_maps = read_self_maps(),
8130         .fd = fd,
8131         .smaps = smaps
8132     };
8133 
8134     if (d.host_maps) {
8135         walk_memory_regions(&d, open_self_maps_2);
8136         free_self_maps(d.host_maps);
8137     } else {
8138         walk_memory_regions(&d, open_self_maps_3);
8139     }
8140     return 0;
8141 }
8142 
8143 static int open_self_maps(CPUArchState *cpu_env, int fd)
8144 {
8145     return open_self_maps_1(cpu_env, fd, false);
8146 }
8147 
8148 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8149 {
8150     return open_self_maps_1(cpu_env, fd, true);
8151 }
8152 
8153 static int open_self_stat(CPUArchState *cpu_env, int fd)
8154 {
8155     CPUState *cpu = env_cpu(cpu_env);
8156     TaskState *ts = get_task_state(cpu);
8157     g_autoptr(GString) buf = g_string_new(NULL);
8158     int i;
8159 
8160     for (i = 0; i < 44; i++) {
8161         if (i == 0) {
8162             /* pid */
8163             g_string_printf(buf, FMT_pid " ", getpid());
8164         } else if (i == 1) {
8165             /* app name */
8166             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8167             bin = bin ? bin + 1 : ts->bprm->argv[0];
8168             g_string_printf(buf, "(%.15s) ", bin);
8169         } else if (i == 2) {
8170             /* task state */
8171             g_string_assign(buf, "R "); /* we are running right now */
8172         } else if (i == 3) {
8173             /* ppid */
8174             g_string_printf(buf, FMT_pid " ", getppid());
8175         } else if (i == 21) {
8176             /* starttime */
8177             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8178         } else if (i == 27) {
8179             /* stack bottom */
8180             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8181         } else {
8182             /* for the rest, there is MasterCard */
8183             /* for everything else, just report 0 */
8184         }
8185 
8186         if (write(fd, buf->str, buf->len) != buf->len) {
8187             return -1;
8188         }
8189     }
8190 
8191     return 0;
8192 }
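/*
 * In proc(5) numbering, the loop above fills field 1 (pid), 2 (comm),
 * 3 (state), 4 (ppid), 22 (starttime) and 28 (startstack); each of the
 * remaining fields up to 44 is reported as 0.
 */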
8193 
8194 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8195 {
8196     CPUState *cpu = env_cpu(cpu_env);
8197     TaskState *ts = get_task_state(cpu);
8198     abi_ulong auxv = ts->info->saved_auxv;
8199     abi_ulong len = ts->info->auxv_len;
8200     char *ptr;
8201 
8202     /*
8203      * The auxiliary vector is stored on the target process stack.
8204      * Read the whole auxv vector and copy it to the file.
8205      */
8206     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8207     if (ptr != NULL) {
8208         while (len > 0) {
8209             ssize_t r;
8210             r = write(fd, ptr, len);
8211             if (r <= 0) {
8212                 break;
8213             }
8214             len -= r;
8215             ptr += r;
8216         }
8217         lseek(fd, 0, SEEK_SET);
8218         unlock_user(ptr, auxv, len);
8219     }
8220 
8221     return 0;
8222 }
8223 
8224 static int is_proc_myself(const char *filename, const char *entry)
8225 {
8226     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8227         filename += strlen("/proc/");
8228         if (!strncmp(filename, "self/", strlen("self/"))) {
8229             filename += strlen("self/");
8230         } else if (*filename >= '1' && *filename <= '9') {
8231             char myself[80];
8232             snprintf(myself, sizeof(myself), "%d/", getpid());
8233             if (!strncmp(filename, myself, strlen(myself))) {
8234                 filename += strlen(myself);
8235             } else {
8236                 return 0;
8237             }
8238         } else {
8239             return 0;
8240         }
8241         if (!strcmp(filename, entry)) {
8242             return 1;
8243         }
8244     }
8245     return 0;
8246 }
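/*
 * For illustration: is_proc_myself("/proc/self/maps", "maps") matches,
 * as does the same path spelled with our own PID in place of "self";
 * paths that name another process, or a different /proc entry, do not.
 */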
8247 
8248 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8249                       const char *fmt, int code)
8250 {
8251     if (logfile) {
8252         CPUState *cs = env_cpu(env);
8253 
8254         fprintf(logfile, fmt, code);
8255         fprintf(logfile, "Failing executable: %s\n", exec_path);
8256         cpu_dump_state(cs, logfile, 0);
8257         open_self_maps(env, fileno(logfile));
8258     }
8259 }
8260 
8261 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8262 {
8263     /* dump to console */
8264     excp_dump_file(stderr, env, fmt, code);
8265 
8266     /* dump to log file */
8267     if (qemu_log_separate()) {
8268         FILE *logfile = qemu_log_trylock();
8269 
8270         excp_dump_file(logfile, env, fmt, code);
8271         qemu_log_unlock(logfile);
8272     }
8273 }
8274 
8275 #include "target_proc.h"
8276 
8277 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8278     defined(HAVE_ARCH_PROC_CPUINFO) || \
8279     defined(HAVE_ARCH_PROC_HARDWARE)
8280 static int is_proc(const char *filename, const char *entry)
8281 {
8282     return strcmp(filename, entry) == 0;
8283 }
8284 #endif
8285 
8286 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8287 static int open_net_route(CPUArchState *cpu_env, int fd)
8288 {
8289     FILE *fp;
8290     char *line = NULL;
8291     size_t len = 0;
8292     ssize_t read;
8293 
8294     fp = fopen("/proc/net/route", "r");
8295     if (fp == NULL) {
8296         return -1;
8297     }
8298 
8299     /* read header */
8300 
8301     read = getline(&line, &len, fp);
8302     dprintf(fd, "%s", line);
8303 
8304     /* read routes */
8305 
8306     while ((read = getline(&line, &len, fp)) != -1) {
8307         char iface[16];
8308         uint32_t dest, gw, mask;
8309         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8310         int fields;
8311 
8312         fields = sscanf(line,
8313                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8314                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8315                         &mask, &mtu, &window, &irtt);
8316         if (fields != 11) {
8317             continue;
8318         }
8319         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8320                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8321                 metric, tswap32(mask), mtu, window, irtt);
8322     }
8323 
8324     free(line);
8325     fclose(fp);
8326 
8327     return 0;
8328 }
8329 #endif
8330 
8331 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
8332                     int flags, mode_t mode, bool safe)
8333 {
8334     g_autofree char *proc_name = NULL;
8335     const char *pathname;
8336     struct fake_open {
8337         const char *filename;
8338         int (*fill)(CPUArchState *cpu_env, int fd);
8339         int (*cmp)(const char *s1, const char *s2);
8340     };
8341     const struct fake_open *fake_open;
8342     static const struct fake_open fakes[] = {
8343         { "maps", open_self_maps, is_proc_myself },
8344         { "smaps", open_self_smaps, is_proc_myself },
8345         { "stat", open_self_stat, is_proc_myself },
8346         { "auxv", open_self_auxv, is_proc_myself },
8347         { "cmdline", open_self_cmdline, is_proc_myself },
8348 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8349         { "/proc/net/route", open_net_route, is_proc },
8350 #endif
8351 #if defined(HAVE_ARCH_PROC_CPUINFO)
8352         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8353 #endif
8354 #if defined(HAVE_ARCH_PROC_HARDWARE)
8355         { "/proc/hardware", open_hardware, is_proc },
8356 #endif
8357         { NULL, NULL, NULL }
8358     };
8359 
8360     /* if this is a file from /proc/ filesystem, expand full name */
8361     /* if this is a file from the /proc/ filesystem, expand to its full name */
8362     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8363         pathname = proc_name;
8364     } else {
8365         pathname = fname;
8366     }
8367 
8368     if (is_proc_myself(pathname, "exe")) {
8369         if (safe) {
8370             return safe_openat(dirfd, exec_path, flags, mode);
8371         } else {
8372             return openat(dirfd, exec_path, flags, mode);
8373         }
8374     }
8375 
8376     for (fake_open = fakes; fake_open->filename; fake_open++) {
8377         if (fake_open->cmp(pathname, fake_open->filename)) {
8378             break;
8379         }
8380     }
8381 
8382     if (fake_open->filename) {
8383         const char *tmpdir;
8384         char filename[PATH_MAX];
8385         int fd, r;
8386 
8387         fd = memfd_create("qemu-open", 0);
8388         if (fd < 0) {
8389             if (errno != ENOSYS) {
8390                 return fd;
8391             }
8392             /* create temporary file to map stat to */
8393             /* fall back to a temporary file to hold the generated contents */
8394             if (!tmpdir)
8395                 tmpdir = "/tmp";
8396             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8397             fd = mkstemp(filename);
8398             if (fd < 0) {
8399                 return fd;
8400             }
8401             unlink(filename);
8402         }
8403 
8404         if ((r = fake_open->fill(cpu_env, fd))) {
8405             int e = errno;
8406             close(fd);
8407             errno = e;
8408             return r;
8409         }
8410         lseek(fd, 0, SEEK_SET);
8411 
8412         return fd;
8413     }
8414 
8415     if (safe) {
8416         return safe_openat(dirfd, path(pathname), flags, mode);
8417     } else {
8418         return openat(dirfd, path(pathname), flags, mode);
8419     }
8420 }
8421 
8422 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8423 {
8424     ssize_t ret;
8425 
8426     if (!pathname || !buf) {
8427         errno = EFAULT;
8428         return -1;
8429     }
8430 
8431     if (!bufsiz) {
8432         /* Short circuit this for the magic exe check. */
8433         errno = EINVAL;
8434         return -1;
8435     }
8436 
8437     if (is_proc_myself((const char *)pathname, "exe")) {
8438         /*
8439          * Don't worry about sign mismatch as earlier mapping
8440          * logic would have thrown a bad address error.
8441          */
8442         ret = MIN(strlen(exec_path), bufsiz);
8443         /* We cannot NUL terminate the string. */
8444         memcpy(buf, exec_path, ret);
8445     } else {
8446         ret = readlink(path(pathname), buf, bufsiz);
8447     }
8448 
8449     return ret;
8450 }
8451 
8452 static int do_execv(CPUArchState *cpu_env, int dirfd,
8453                     abi_long pathname, abi_long guest_argp,
8454                     abi_long guest_envp, int flags, bool is_execveat)
8455 {
8456     int ret;
8457     char **argp, **envp;
8458     int argc, envc;
8459     abi_ulong gp;
8460     abi_ulong addr;
8461     char **q;
8462     void *p;
8463 
8464     argc = 0;
8465 
8466     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8467         if (get_user_ual(addr, gp)) {
8468             return -TARGET_EFAULT;
8469         }
8470         if (!addr) {
8471             break;
8472         }
8473         argc++;
8474     }
8475     envc = 0;
8476     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8477         if (get_user_ual(addr, gp)) {
8478             return -TARGET_EFAULT;
8479         }
8480         if (!addr) {
8481             break;
8482         }
8483         envc++;
8484     }
8485 
8486     argp = g_new0(char *, argc + 1);
8487     envp = g_new0(char *, envc + 1);
8488 
8489     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8490         if (get_user_ual(addr, gp)) {
8491             goto execve_efault;
8492         }
8493         if (!addr) {
8494             break;
8495         }
8496         *q = lock_user_string(addr);
8497         if (!*q) {
8498             goto execve_efault;
8499         }
8500     }
8501     *q = NULL;
8502 
8503     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8504         if (get_user_ual(addr, gp)) {
8505             goto execve_efault;
8506         }
8507         if (!addr) {
8508             break;
8509         }
8510         *q = lock_user_string(addr);
8511         if (!*q) {
8512             goto execve_efault;
8513         }
8514     }
8515     *q = NULL;
8516 
8517     /*
8518      * Although execve() is not an interruptible syscall it is
8519      * a special case where we must use the safe_syscall wrapper:
8520      * if we allow a signal to happen before we make the host
8521      * syscall then we will 'lose' it, because at the point of
8522      * execve the process leaves QEMU's control. So we use the
8523      * safe syscall wrapper to ensure that we either take the
8524      * signal as a guest signal, or else it does not happen
8525      * before the execve completes and makes it the other
8526      * program's problem.
8527      */
8528     p = lock_user_string(pathname);
8529     if (!p) {
8530         goto execve_efault;
8531     }
8532 
8533     const char *exe = p;
8534     if (is_proc_myself(p, "exe")) {
8535         exe = exec_path;
8536     }
8537     ret = is_execveat
8538         ? safe_execveat(dirfd, exe, argp, envp, flags)
8539         : safe_execve(exe, argp, envp);
8540     ret = get_errno(ret);
8541 
8542     unlock_user(p, pathname, 0);
8543 
8544     goto execve_end;
8545 
8546 execve_efault:
8547     ret = -TARGET_EFAULT;
8548 
8549 execve_end:
8550     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8551         if (get_user_ual(addr, gp) || !addr) {
8552             break;
8553         }
8554         unlock_user(*q, addr, 0);
8555     }
8556     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8557         if (get_user_ual(addr, gp) || !addr) {
8558             break;
8559         }
8560         unlock_user(*q, addr, 0);
8561     }
8562 
8563     g_free(argp);
8564     g_free(envp);
8565     return ret;
8566 }
8567 
8568 #define TIMER_MAGIC 0x0caf0000
8569 #define TIMER_MAGIC_MASK 0xffff0000
8570 
8571 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8572 static target_timer_t get_timer_id(abi_long arg)
8573 {
8574     target_timer_t timerid = arg;
8575 
8576     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8577         return -TARGET_EINVAL;
8578     }
8579 
8580     timerid &= 0xffff;
8581 
8582     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8583         return -TARGET_EINVAL;
8584     }
8585 
8586     return timerid;
8587 }
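/*
 * Sketch of the inverse mapping (assumed from the decode above): guest
 * visible timer IDs are presumably handed out as (TIMER_MAGIC | index),
 * e.g. index 3 becomes 0x0caf0003, and get_timer_id() rejects any value
 * whose upper 16 bits are not exactly TIMER_MAGIC.
 */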
8588 
8589 static int target_to_host_cpu_mask(unsigned long *host_mask,
8590                                    size_t host_size,
8591                                    abi_ulong target_addr,
8592                                    size_t target_size)
8593 {
8594     unsigned target_bits = sizeof(abi_ulong) * 8;
8595     unsigned host_bits = sizeof(*host_mask) * 8;
8596     abi_ulong *target_mask;
8597     unsigned i, j;
8598 
8599     assert(host_size >= target_size);
8600 
8601     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8602     if (!target_mask) {
8603         return -TARGET_EFAULT;
8604     }
8605     memset(host_mask, 0, host_size);
8606 
8607     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8608         unsigned bit = i * target_bits;
8609         abi_ulong val;
8610 
8611         __get_user(val, &target_mask[i]);
8612         for (j = 0; j < target_bits; j++, bit++) {
8613             if (val & (1UL << j)) {
8614                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8615             }
8616         }
8617     }
8618 
8619     unlock_user(target_mask, target_addr, 0);
8620     return 0;
8621 }
8622 
8623 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8624                                    size_t host_size,
8625                                    abi_ulong target_addr,
8626                                    size_t target_size)
8627 {
8628     unsigned target_bits = sizeof(abi_ulong) * 8;
8629     unsigned host_bits = sizeof(*host_mask) * 8;
8630     abi_ulong *target_mask;
8631     unsigned i, j;
8632 
8633     assert(host_size >= target_size);
8634 
8635     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8636     if (!target_mask) {
8637         return -TARGET_EFAULT;
8638     }
8639 
8640     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8641         unsigned bit = i * target_bits;
8642         abi_ulong val = 0;
8643 
8644         for (j = 0; j < target_bits; j++, bit++) {
8645             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8646                 val |= 1UL << j;
8647             }
8648         }
8649         __put_user(val, &target_mask[i]);
8650     }
8651 
8652     unlock_user(target_mask, target_addr, target_size);
8653     return 0;
8654 }
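/*
 * Example of the remapping done by the two helpers above (hypothetical
 * sizes): with a 32-bit abi_ulong on a 64-bit host, guest word i bit j
 * corresponds to CPU number i * 32 + j, which lands in host word
 * (i * 32 + j) / 64; the helpers simply walk that mapping in opposite
 * directions, one bit at a time.
 */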
8655 
8656 #ifdef TARGET_NR_getdents
8657 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8658 {
8659     g_autofree void *hdirp = NULL;
8660     void *tdirp;
8661     int hlen, hoff, toff;
8662     int hreclen, treclen;
8663     off64_t prev_diroff = 0;
8664 
8665     hdirp = g_try_malloc(count);
8666     if (!hdirp) {
8667         return -TARGET_ENOMEM;
8668     }
8669 
8670 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8671     hlen = sys_getdents(dirfd, hdirp, count);
8672 #else
8673     hlen = sys_getdents64(dirfd, hdirp, count);
8674 #endif
8675 
8676     hlen = get_errno(hlen);
8677     if (is_error(hlen)) {
8678         return hlen;
8679     }
8680 
8681     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8682     if (!tdirp) {
8683         return -TARGET_EFAULT;
8684     }
8685 
8686     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8687 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8688         struct linux_dirent *hde = hdirp + hoff;
8689 #else
8690         struct linux_dirent64 *hde = hdirp + hoff;
8691 #endif
8692         struct target_dirent *tde = tdirp + toff;
8693         int namelen;
8694         uint8_t type;
8695 
8696         namelen = strlen(hde->d_name);
8697         hreclen = hde->d_reclen;
8698         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8699         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8700 
8701         if (toff + treclen > count) {
8702             /*
8703              * If the host struct is smaller than the target struct, or
8704              * requires less alignment and thus packs into less space,
8705              * then the host can return more entries than we can pass
8706              * on to the guest.
8707              */
8708             if (toff == 0) {
8709                 toff = -TARGET_EINVAL; /* result buffer is too small */
8710                 break;
8711             }
8712             /*
8713              * Return what we have, resetting the file pointer to the
8714              * location of the first record not returned.
8715              */
8716             lseek64(dirfd, prev_diroff, SEEK_SET);
8717             break;
8718         }
8719 
8720         prev_diroff = hde->d_off;
8721         tde->d_ino = tswapal(hde->d_ino);
8722         tde->d_off = tswapal(hde->d_off);
8723         tde->d_reclen = tswap16(treclen);
8724         memcpy(tde->d_name, hde->d_name, namelen + 1);
8725 
8726         /*
8727          * The getdents type is in what was formerly a padding byte at the
8728          * end of the structure.
8729          */
8730 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8731         type = *((uint8_t *)hde + hreclen - 1);
8732 #else
8733         type = hde->d_type;
8734 #endif
8735         *((uint8_t *)tde + treclen - 1) = type;
8736     }
8737 
8738     unlock_user(tdirp, arg2, toff);
8739     return toff;
8740 }
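/*
 * Layout note for the legacy getdents record written above: the guest
 * sees d_ino, d_off, d_reclen and the NUL-terminated name, with the
 * d_type byte tucked into the very last byte of the (aligned) record;
 * that is why treclen reserves namelen + 2 bytes past d_name before
 * rounding up.
 */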
8741 #endif /* TARGET_NR_getdents */
8742 
8743 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8744 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8745 {
8746     g_autofree void *hdirp = NULL;
8747     void *tdirp;
8748     int hlen, hoff, toff;
8749     int hreclen, treclen;
8750     off64_t prev_diroff = 0;
8751 
8752     hdirp = g_try_malloc(count);
8753     if (!hdirp) {
8754         return -TARGET_ENOMEM;
8755     }
8756 
8757     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8758     if (is_error(hlen)) {
8759         return hlen;
8760     }
8761 
8762     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8763     if (!tdirp) {
8764         return -TARGET_EFAULT;
8765     }
8766 
8767     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8768         struct linux_dirent64 *hde = hdirp + hoff;
8769         struct target_dirent64 *tde = tdirp + toff;
8770         int namelen;
8771 
8772         namelen = strlen(hde->d_name) + 1;
8773         hreclen = hde->d_reclen;
8774         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8775         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8776 
8777         if (toff + treclen > count) {
8778             /*
8779              * If the host struct is smaller than the target struct, or
8780              * requires less alignment and thus packs into less space,
8781              * then the host can return more entries than we can pass
8782              * on to the guest.
8783              */
8784             if (toff == 0) {
8785                 toff = -TARGET_EINVAL; /* result buffer is too small */
8786                 break;
8787             }
8788             /*
8789              * Return what we have, resetting the file pointer to the
8790              * location of the first record not returned.
8791              */
8792             lseek64(dirfd, prev_diroff, SEEK_SET);
8793             break;
8794         }
8795 
8796         prev_diroff = hde->d_off;
8797         tde->d_ino = tswap64(hde->d_ino);
8798         tde->d_off = tswap64(hde->d_off);
8799         tde->d_reclen = tswap16(treclen);
8800         tde->d_type = hde->d_type;
8801         memcpy(tde->d_name, hde->d_name, namelen);
8802     }
8803 
8804     unlock_user(tdirp, arg2, toff);
8805     return toff;
8806 }
8807 #endif /* TARGET_NR_getdents64 */
8808 
8809 #if defined(TARGET_NR_riscv_hwprobe)
8810 
8811 #define RISCV_HWPROBE_KEY_MVENDORID     0
8812 #define RISCV_HWPROBE_KEY_MARCHID       1
8813 #define RISCV_HWPROBE_KEY_MIMPID        2
8814 
8815 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8816 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8817 
8818 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8819 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8820 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8821 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8822 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8823 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8824 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8825 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8826 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8827 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8828 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8829 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8830 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8831 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8832 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8833 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8834 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8835 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8836 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8837 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8838 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8839 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8840 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8841 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8842 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8843 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8844 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8845 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8846 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8847 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8848 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8849 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8850 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1 << 31)
8851 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8852 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8853 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8854 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8855 
8856 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8857 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8858 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8859 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8860 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8861 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8862 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8863 
8864 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8865 
8866 struct riscv_hwprobe {
8867     abi_llong  key;
8868     abi_ullong value;
8869 };
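/*
 * Sketch of a guest-side probe (hypothetical usage; see do_riscv_hwprobe
 * below for how the arguments are interpreted here):
 *
 *     struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *     syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
 *
 * On return, pair.value carries the RISCV_HWPROBE_IMA_* and
 * RISCV_HWPROBE_EXT_* bits defined above, or pair.key is set to -1 for
 * keys the emulator does not recognise.
 */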
8870 
8871 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8872                                     struct riscv_hwprobe *pair,
8873                                     size_t pair_count)
8874 {
8875     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8876 
8877     for (; pair_count > 0; pair_count--, pair++) {
8878         abi_llong key;
8879         abi_ullong value;
8880         __put_user(0, &pair->value);
8881         __get_user(key, &pair->key);
8882         switch (key) {
8883         case RISCV_HWPROBE_KEY_MVENDORID:
8884             __put_user(cfg->mvendorid, &pair->value);
8885             break;
8886         case RISCV_HWPROBE_KEY_MARCHID:
8887             __put_user(cfg->marchid, &pair->value);
8888             break;
8889         case RISCV_HWPROBE_KEY_MIMPID:
8890             __put_user(cfg->mimpid, &pair->value);
8891             break;
8892         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
8893             value = riscv_has_ext(env, RVI) &&
8894                     riscv_has_ext(env, RVM) &&
8895                     riscv_has_ext(env, RVA) ?
8896                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
8897             __put_user(value, &pair->value);
8898             break;
8899         case RISCV_HWPROBE_KEY_IMA_EXT_0:
8900             value = riscv_has_ext(env, RVF) &&
8901                     riscv_has_ext(env, RVD) ?
8902                     RISCV_HWPROBE_IMA_FD : 0;
8903             value |= riscv_has_ext(env, RVC) ?
8904                      RISCV_HWPROBE_IMA_C : 0;
8905             value |= riscv_has_ext(env, RVV) ?
8906                      RISCV_HWPROBE_IMA_V : 0;
8907             value |= cfg->ext_zba ?
8908                      RISCV_HWPROBE_EXT_ZBA : 0;
8909             value |= cfg->ext_zbb ?
8910                      RISCV_HWPROBE_EXT_ZBB : 0;
8911             value |= cfg->ext_zbs ?
8912                      RISCV_HWPROBE_EXT_ZBS : 0;
8913             value |= cfg->ext_zicboz ?
8914                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
8915             value |= cfg->ext_zbc ?
8916                      RISCV_HWPROBE_EXT_ZBC : 0;
8917             value |= cfg->ext_zbkb ?
8918                      RISCV_HWPROBE_EXT_ZBKB : 0;
8919             value |= cfg->ext_zbkc ?
8920                      RISCV_HWPROBE_EXT_ZBKC : 0;
8921             value |= cfg->ext_zbkx ?
8922                      RISCV_HWPROBE_EXT_ZBKX : 0;
8923             value |= cfg->ext_zknd ?
8924                      RISCV_HWPROBE_EXT_ZKND : 0;
8925             value |= cfg->ext_zkne ?
8926                      RISCV_HWPROBE_EXT_ZKNE : 0;
8927             value |= cfg->ext_zknh ?
8928                      RISCV_HWPROBE_EXT_ZKNH : 0;
8929             value |= cfg->ext_zksed ?
8930                      RISCV_HWPROBE_EXT_ZKSED : 0;
8931             value |= cfg->ext_zksh ?
8932                      RISCV_HWPROBE_EXT_ZKSH : 0;
8933             value |= cfg->ext_zkt ?
8934                      RISCV_HWPROBE_EXT_ZKT : 0;
8935             value |= cfg->ext_zvbb ?
8936                      RISCV_HWPROBE_EXT_ZVBB : 0;
8937             value |= cfg->ext_zvbc ?
8938                      RISCV_HWPROBE_EXT_ZVBC : 0;
8939             value |= cfg->ext_zvkb ?
8940                      RISCV_HWPROBE_EXT_ZVKB : 0;
8941             value |= cfg->ext_zvkg ?
8942                      RISCV_HWPROBE_EXT_ZVKG : 0;
8943             value |= cfg->ext_zvkned ?
8944                      RISCV_HWPROBE_EXT_ZVKNED : 0;
8945             value |= cfg->ext_zvknha ?
8946                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
8947             value |= cfg->ext_zvknhb ?
8948                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
8949             value |= cfg->ext_zvksed ?
8950                      RISCV_HWPROBE_EXT_ZVKSED : 0;
8951             value |= cfg->ext_zvksh ?
8952                      RISCV_HWPROBE_EXT_ZVKSH : 0;
8953             value |= cfg->ext_zvkt ?
8954                      RISCV_HWPROBE_EXT_ZVKT : 0;
8955             value |= cfg->ext_zfh ?
8956                      RISCV_HWPROBE_EXT_ZFH : 0;
8957             value |= cfg->ext_zfhmin ?
8958                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
8959             value |= cfg->ext_zihintntl ?
8960                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
8961             value |= cfg->ext_zvfh ?
8962                      RISCV_HWPROBE_EXT_ZVFH : 0;
8963             value |= cfg->ext_zvfhmin ?
8964                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
8965             value |= cfg->ext_zfa ?
8966                      RISCV_HWPROBE_EXT_ZFA : 0;
8967             value |= cfg->ext_ztso ?
8968                      RISCV_HWPROBE_EXT_ZTSO : 0;
8969             value |= cfg->ext_zacas ?
8970                      RISCV_HWPROBE_EXT_ZACAS : 0;
8971             value |= cfg->ext_zicond ?
8972                      RISCV_HWPROBE_EXT_ZICOND : 0;
8973             __put_user(value, &pair->value);
8974             break;
8975         case RISCV_HWPROBE_KEY_CPUPERF_0:
8976             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
8977             break;
8978         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
8979             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
8980             __put_user(value, &pair->value);
8981             break;
8982         default:
8983             __put_user(-1, &pair->key);
8984             break;
8985         }
8986     }
8987 }
8988 
8989 static int cpu_set_valid(abi_long arg3, abi_long arg4)
8990 {
8991     int ret, i, tmp;
8992     size_t host_mask_size, target_mask_size;
8993     unsigned long *host_mask;
8994 
8995     /*
8996      * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
8997      * arg3 contains the CPU count.
8998      */
8999     tmp = (8 * sizeof(abi_ulong));
9000     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9001     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9002                      ~(sizeof(*host_mask) - 1);
9003 
9004     host_mask = alloca(host_mask_size);
9005 
9006     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9007                                   arg4, target_mask_size);
9008     if (ret != 0) {
9009         return ret;
9010     }
9011 
9012     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9013         if (host_mask[i] != 0) {
9014             return 0;
9015         }
9016     }
9017     return -TARGET_EINVAL;
9018 }
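/*
 * Worked example for the sizing above (hypothetical numbers): with a
 * 32-bit abi_ulong, arg3 == 100 CPUs gives target_mask_size ==
 * ((100 + 31) / 32) * 4 == 16 bytes, which is then rounded up to a
 * multiple of the host word size before being handed to
 * target_to_host_cpu_mask().
 */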
9019 
9020 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9021                                  abi_long arg2, abi_long arg3,
9022                                  abi_long arg4, abi_long arg5)
9023 {
9024     int ret;
9025     struct riscv_hwprobe *host_pairs;
9026 
9027     /* flags must be 0 */
9028     if (arg5 != 0) {
9029         return -TARGET_EINVAL;
9030     }
9031 
9032     /* check cpu_set */
9033     if (arg3 != 0) {
9034         ret = cpu_set_valid(arg3, arg4);
9035         if (ret != 0) {
9036             return ret;
9037         }
9038     } else if (arg4 != 0) {
9039         return -TARGET_EINVAL;
9040     }
9041 
9042     /* no pairs */
9043     if (arg2 == 0) {
9044         return 0;
9045     }
9046 
9047     host_pairs = lock_user(VERIFY_WRITE, arg1,
9048                            sizeof(*host_pairs) * (size_t)arg2, 0);
9049     if (host_pairs == NULL) {
9050         return -TARGET_EFAULT;
9051     }
9052     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9053     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9054     return 0;
9055 }
9056 #endif /* TARGET_NR_riscv_hwprobe */
9057 
9058 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9059 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9060 #endif
9061 
9062 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9063 #define __NR_sys_open_tree __NR_open_tree
9064 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9065           unsigned int, __flags)
9066 #endif
9067 
9068 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9069 #define __NR_sys_move_mount __NR_move_mount
9070 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9071            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9072 #endif
9073 
9074 /* This is an internal helper for do_syscall so that it is easier
9075  * to have a single return point, at which actions such as logging
9076  * of syscall results can be performed.
9077  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9078  */
9079 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9080                             abi_long arg2, abi_long arg3, abi_long arg4,
9081                             abi_long arg5, abi_long arg6, abi_long arg7,
9082                             abi_long arg8)
9083 {
9084     CPUState *cpu = env_cpu(cpu_env);
9085     abi_long ret;
9086 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9087     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9088     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9089     || defined(TARGET_NR_statx)
9090     struct stat st;
9091 #endif
9092 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9093     || defined(TARGET_NR_fstatfs)
9094     struct statfs stfs;
9095 #endif
9096     void *p;
9097 
9098     switch(num) {
9099     case TARGET_NR_exit:
9100         /* In old applications this may be used to implement _exit(2).
9101            However, in threaded applications it is used for thread termination,
9102            and _exit_group is used for application termination.
9103            Do thread termination if we have more than one thread.  */
9104 
9105         if (block_signals()) {
9106             return -QEMU_ERESTARTSYS;
9107         }
9108 
9109         pthread_mutex_lock(&clone_lock);
9110 
9111         if (CPU_NEXT(first_cpu)) {
9112             TaskState *ts = get_task_state(cpu);
9113 
9114             if (ts->child_tidptr) {
9115                 put_user_u32(0, ts->child_tidptr);
9116                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9117                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9118             }
9119 
9120             object_unparent(OBJECT(cpu));
9121             object_unref(OBJECT(cpu));
9122             /*
9123              * At this point the CPU should be unrealized and removed
9124              * from cpu lists. We can clean-up the rest of the thread
9125              * data without the lock held.
9126              */
9127 
9128             pthread_mutex_unlock(&clone_lock);
9129 
9130             thread_cpu = NULL;
9131             g_free(ts);
9132             rcu_unregister_thread();
9133             pthread_exit(NULL);
9134         }
9135 
9136         pthread_mutex_unlock(&clone_lock);
9137         preexit_cleanup(cpu_env, arg1);
9138         _exit(arg1);
9139         return 0; /* avoid warning */
9140     case TARGET_NR_read:
9141         if (arg2 == 0 && arg3 == 0) {
9142             return get_errno(safe_read(arg1, 0, 0));
9143         } else {
9144             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9145                 return -TARGET_EFAULT;
9146             ret = get_errno(safe_read(arg1, p, arg3));
9147             if (ret >= 0 &&
9148                 fd_trans_host_to_target_data(arg1)) {
9149                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9150             }
9151             unlock_user(p, arg2, ret);
9152         }
9153         return ret;
9154     case TARGET_NR_write:
9155         if (arg2 == 0 && arg3 == 0) {
9156             return get_errno(safe_write(arg1, 0, 0));
9157         }
9158         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9159             return -TARGET_EFAULT;
9160         if (fd_trans_target_to_host_data(arg1)) {
9161             void *copy = g_malloc(arg3);
9162             memcpy(copy, p, arg3);
9163             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9164             if (ret >= 0) {
9165                 ret = get_errno(safe_write(arg1, copy, ret));
9166             }
9167             g_free(copy);
9168         } else {
9169             ret = get_errno(safe_write(arg1, p, arg3));
9170         }
9171         unlock_user(p, arg2, 0);
9172         return ret;
9173 
9174 #ifdef TARGET_NR_open
9175     case TARGET_NR_open:
9176         if (!(p = lock_user_string(arg1)))
9177             return -TARGET_EFAULT;
9178         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9179                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9180                                   arg3, true));
9181         fd_trans_unregister(ret);
9182         unlock_user(p, arg1, 0);
9183         return ret;
9184 #endif
9185     case TARGET_NR_openat:
9186         if (!(p = lock_user_string(arg2)))
9187             return -TARGET_EFAULT;
9188         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9189                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9190                                   arg4, true));
9191         fd_trans_unregister(ret);
9192         unlock_user(p, arg2, 0);
9193         return ret;
9194 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9195     case TARGET_NR_name_to_handle_at:
9196         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9197         return ret;
9198 #endif
9199 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9200     case TARGET_NR_open_by_handle_at:
9201         ret = do_open_by_handle_at(arg1, arg2, arg3);
9202         fd_trans_unregister(ret);
9203         return ret;
9204 #endif
9205 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9206     case TARGET_NR_pidfd_open:
9207         return get_errno(pidfd_open(arg1, arg2));
9208 #endif
9209 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9210     case TARGET_NR_pidfd_send_signal:
9211         {
9212             siginfo_t uinfo, *puinfo;
9213 
9214             if (arg3) {
9215                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9216                 if (!p) {
9217                     return -TARGET_EFAULT;
9218                 }
9219                 target_to_host_siginfo(&uinfo, p);
9220                 unlock_user(p, arg3, 0);
9221                 puinfo = &uinfo;
9222             } else {
9223                 puinfo = NULL;
9224             }
9225             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9226                                               puinfo, arg4));
9227         }
9228         return ret;
9229 #endif
9230 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9231     case TARGET_NR_pidfd_getfd:
9232         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9233 #endif
9234     case TARGET_NR_close:
9235         fd_trans_unregister(arg1);
9236         return get_errno(close(arg1));
9237 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9238     case TARGET_NR_close_range:
9239         ret = get_errno(sys_close_range(arg1, arg2, arg3));
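             /*
              * If the descriptors were really closed (not just marked
              * close-on-exec), drop any fd translators registered for them.
              */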
9240         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9241             abi_long fd, maxfd;
9242             maxfd = MIN(arg2, target_fd_max);
9243             for (fd = arg1; fd < maxfd; fd++) {
9244                 fd_trans_unregister(fd);
9245             }
9246         }
9247         return ret;
9248 #endif
9249 
9250     case TARGET_NR_brk:
9251         return do_brk(arg1);
9252 #ifdef TARGET_NR_fork
9253     case TARGET_NR_fork:
9254         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9255 #endif
9256 #ifdef TARGET_NR_waitpid
9257     case TARGET_NR_waitpid:
9258         {
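                 /*
                  * Only copy the status word back when a child was actually
                  * reaped (non-zero return, e.g. not WNOHANG with nothing to
                  * report).
                  */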
9259             int status;
9260             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9261             if (!is_error(ret) && arg2 && ret
9262                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9263                 return -TARGET_EFAULT;
9264         }
9265         return ret;
9266 #endif
9267 #ifdef TARGET_NR_waitid
9268     case TARGET_NR_waitid:
9269         {
9270             struct rusage ru;
9271             siginfo_t info;
9272 
9273             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9274                                         arg4, (arg5 ? &ru : NULL)));
9275             if (!is_error(ret)) {
9276                 if (arg3) {
9277                     p = lock_user(VERIFY_WRITE, arg3,
9278                                   sizeof(target_siginfo_t), 0);
9279                     if (!p) {
9280                         return -TARGET_EFAULT;
9281                     }
9282                     host_to_target_siginfo(p, &info);
9283                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9284                 }
9285                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9286                     return -TARGET_EFAULT;
9287                 }
9288             }
9289         }
9290         return ret;
9291 #endif
9292 #ifdef TARGET_NR_creat /* not on alpha */
9293     case TARGET_NR_creat:
9294         if (!(p = lock_user_string(arg1)))
9295             return -TARGET_EFAULT;
9296         ret = get_errno(creat(p, arg2));
9297         fd_trans_unregister(ret);
9298         unlock_user(p, arg1, 0);
9299         return ret;
9300 #endif
9301 #ifdef TARGET_NR_link
9302     case TARGET_NR_link:
9303         {
9304             void * p2;
9305             p = lock_user_string(arg1);
9306             p2 = lock_user_string(arg2);
9307             if (!p || !p2)
9308                 ret = -TARGET_EFAULT;
9309             else
9310                 ret = get_errno(link(p, p2));
9311             unlock_user(p2, arg2, 0);
9312             unlock_user(p, arg1, 0);
9313         }
9314         return ret;
9315 #endif
9316 #if defined(TARGET_NR_linkat)
9317     case TARGET_NR_linkat:
9318         {
9319             void * p2 = NULL;
9320             if (!arg2 || !arg4)
9321                 return -TARGET_EFAULT;
9322             p  = lock_user_string(arg2);
9323             p2 = lock_user_string(arg4);
9324             if (!p || !p2)
9325                 ret = -TARGET_EFAULT;
9326             else
9327                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9328             unlock_user(p, arg2, 0);
9329             unlock_user(p2, arg4, 0);
9330         }
9331         return ret;
9332 #endif
9333 #ifdef TARGET_NR_unlink
9334     case TARGET_NR_unlink:
9335         if (!(p = lock_user_string(arg1)))
9336             return -TARGET_EFAULT;
9337         ret = get_errno(unlink(p));
9338         unlock_user(p, arg1, 0);
9339         return ret;
9340 #endif
9341 #if defined(TARGET_NR_unlinkat)
9342     case TARGET_NR_unlinkat:
9343         if (!(p = lock_user_string(arg2)))
9344             return -TARGET_EFAULT;
9345         ret = get_errno(unlinkat(arg1, p, arg3));
9346         unlock_user(p, arg2, 0);
9347         return ret;
9348 #endif
9349     case TARGET_NR_execveat:
9350         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9351     case TARGET_NR_execve:
9352         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9353     case TARGET_NR_chdir:
9354         if (!(p = lock_user_string(arg1)))
9355             return -TARGET_EFAULT;
9356         ret = get_errno(chdir(p));
9357         unlock_user(p, arg1, 0);
9358         return ret;
9359 #ifdef TARGET_NR_time
9360     case TARGET_NR_time:
9361         {
9362             time_t host_time;
9363             ret = get_errno(time(&host_time));
9364             if (!is_error(ret)
9365                 && arg1
9366                 && put_user_sal(host_time, arg1))
9367                 return -TARGET_EFAULT;
9368         }
9369         return ret;
9370 #endif
9371 #ifdef TARGET_NR_mknod
9372     case TARGET_NR_mknod:
9373         if (!(p = lock_user_string(arg1)))
9374             return -TARGET_EFAULT;
9375         ret = get_errno(mknod(p, arg2, arg3));
9376         unlock_user(p, arg1, 0);
9377         return ret;
9378 #endif
9379 #if defined(TARGET_NR_mknodat)
9380     case TARGET_NR_mknodat:
9381         if (!(p = lock_user_string(arg2)))
9382             return -TARGET_EFAULT;
9383         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9384         unlock_user(p, arg2, 0);
9385         return ret;
9386 #endif
9387 #ifdef TARGET_NR_chmod
9388     case TARGET_NR_chmod:
9389         if (!(p = lock_user_string(arg1)))
9390             return -TARGET_EFAULT;
9391         ret = get_errno(chmod(p, arg2));
9392         unlock_user(p, arg1, 0);
9393         return ret;
9394 #endif
9395 #ifdef TARGET_NR_lseek
9396     case TARGET_NR_lseek:
9397         return get_errno(lseek(arg1, arg2, arg3));
9398 #endif
9399 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9400     /* Alpha specific */
9401     case TARGET_NR_getxpid:
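             /*
              * getxpid returns the current PID as the syscall result and the
              * parent PID in register a4.
              */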
9402         cpu_env->ir[IR_A4] = getppid();
9403         return get_errno(getpid());
9404 #endif
9405 #ifdef TARGET_NR_getpid
9406     case TARGET_NR_getpid:
9407         return get_errno(getpid());
9408 #endif
9409     case TARGET_NR_mount:
9410         {
9411             /* the data argument (arg5) also needs handling; see the FIXME below */
9412             void *p2, *p3;
9413 
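                 /*
                  * The source (arg1) and filesystem type (arg3) may
                  * legitimately be NULL; only the target path (arg2) is
                  * required.
                  */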
9414             if (arg1) {
9415                 p = lock_user_string(arg1);
9416                 if (!p) {
9417                     return -TARGET_EFAULT;
9418                 }
9419             } else {
9420                 p = NULL;
9421             }
9422 
9423             p2 = lock_user_string(arg2);
9424             if (!p2) {
9425                 if (arg1) {
9426                     unlock_user(p, arg1, 0);
9427                 }
9428                 return -TARGET_EFAULT;
9429             }
9430 
9431             if (arg3) {
9432                 p3 = lock_user_string(arg3);
9433                 if (!p3) {
9434                     if (arg1) {
9435                         unlock_user(p, arg1, 0);
9436                     }
9437                     unlock_user(p2, arg2, 0);
9438                     return -TARGET_EFAULT;
9439                 }
9440             } else {
9441                 p3 = NULL;
9442             }
9443 
9444             /* FIXME - arg5 should be locked, but it isn't clear how to
9445              * do that since it's not guaranteed to be a NULL-terminated
9446              * string.
9447              */
9448             if (!arg5) {
9449                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9450             } else {
9451                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9452             }
9453             ret = get_errno(ret);
9454 
9455             if (arg1) {
9456                 unlock_user(p, arg1, 0);
9457             }
9458             unlock_user(p2, arg2, 0);
9459             if (arg3) {
9460                 unlock_user(p3, arg3, 0);
9461             }
9462         }
9463         return ret;
9464 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9465 #if defined(TARGET_NR_umount)
9466     case TARGET_NR_umount:
9467 #endif
9468 #if defined(TARGET_NR_oldumount)
9469     case TARGET_NR_oldumount:
9470 #endif
9471         if (!(p = lock_user_string(arg1)))
9472             return -TARGET_EFAULT;
9473         ret = get_errno(umount(p));
9474         unlock_user(p, arg1, 0);
9475         return ret;
9476 #endif
9477 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9478     case TARGET_NR_move_mount:
9479         {
9480             void *p2, *p4;
9481 
9482             if (!arg2 || !arg4) {
9483                 return -TARGET_EFAULT;
9484             }
9485 
9486             p2 = lock_user_string(arg2);
9487             if (!p2) {
9488                 return -TARGET_EFAULT;
9489             }
9490 
9491             p4 = lock_user_string(arg4);
9492             if (!p4) {
9493                 unlock_user(p2, arg2, 0);
9494                 return -TARGET_EFAULT;
9495             }
9496             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9497 
9498             unlock_user(p2, arg2, 0);
9499             unlock_user(p4, arg4, 0);
9500 
9501             return ret;
9502         }
9503 #endif
9504 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9505     case TARGET_NR_open_tree:
9506         {
9507             void *p2;
9508             int host_flags;
9509 
9510             if (!arg2) {
9511                 return -TARGET_EFAULT;
9512             }
9513 
9514             p2 = lock_user_string(arg2);
9515             if (!p2) {
9516                 return -TARGET_EFAULT;
9517             }
9518 
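                 /*
                  * OPEN_TREE_CLOEXEC shares the value of O_CLOEXEC, which may
                  * differ between host and target, so translate that bit by
                  * hand.
                  */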
9519             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9520             if (arg3 & TARGET_O_CLOEXEC) {
9521                 host_flags |= O_CLOEXEC;
9522             }
9523 
9524             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9525 
9526             unlock_user(p2, arg2, 0);
9527 
9528             return ret;
9529         }
9530 #endif
9531 #ifdef TARGET_NR_stime /* not on alpha */
9532     case TARGET_NR_stime:
9533         {
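                 /*
                  * Implement stime() in terms of clock_settime(CLOCK_REALTIME);
                  * the host libc may not provide a stime() wrapper at all.
                  */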
9534             struct timespec ts;
9535             ts.tv_nsec = 0;
9536             if (get_user_sal(ts.tv_sec, arg1)) {
9537                 return -TARGET_EFAULT;
9538             }
9539             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9540         }
9541 #endif
9542 #ifdef TARGET_NR_alarm /* not on alpha */
9543     case TARGET_NR_alarm:
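             /* alarm() cannot fail, so no errno conversion is needed. */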
9544         return alarm(arg1);
9545 #endif
9546 #ifdef TARGET_NR_pause /* not on alpha */
9547     case TARGET_NR_pause:
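             /*
              * pause() returns only once a signal has been delivered and then
              * always fails with EINTR; skip the suspend if block_signals()
              * reports that a guest signal is already pending.
              */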
9548         if (!block_signals()) {
9549             sigsuspend(&get_task_state(cpu)->signal_mask);
9550         }
9551         return -TARGET_EINTR;
9552 #endif
9553 #ifdef TARGET_NR_utime
9554     case TARGET_NR_utime:
9555         {
9556             struct utimbuf tbuf, *host_tbuf;
9557             struct target_utimbuf *target_tbuf;
9558             if (arg2) {
9559                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9560                     return -TARGET_EFAULT;
9561                 tbuf.actime = tswapal(target_tbuf->actime);
9562                 tbuf.modtime = tswapal(target_tbuf->modtime);
9563                 unlock_user_struct(target_tbuf, arg2, 0);
9564                 host_tbuf = &tbuf;
9565             } else {
9566                 host_tbuf = NULL;
9567             }
9568             if (!(p = lock_user_string(arg1)))
9569                 return -TARGET_EFAULT;
9570             ret = get_errno(utime(p, host_tbuf));
9571             unlock_user(p, arg1, 0);
9572         }
9573         return ret;
9574 #endif
9575 #ifdef TARGET_NR_utimes
9576     case TARGET_NR_utimes:
9577         {
9578             struct timeval *tvp, tv[2];
9579             if (arg2) {
9580                 if (copy_from_user_timeval(&tv[0], arg2)
9581                     || copy_from_user_timeval(&tv[1],
9582                                               arg2 + sizeof(struct target_timeval)))
9583                     return -TARGET_EFAULT;
9584                 tvp = tv;
9585             } else {
9586                 tvp = NULL;
9587             }
9588             if (!(p = lock_user_string(arg1)))
9589                 return -TARGET_EFAULT;
9590             ret = get_errno(utimes(p, tvp));
9591             unlock_user(p, arg1, 0);
9592         }
9593         return ret;
9594 #endif
9595 #if defined(TARGET_NR_futimesat)
9596     case TARGET_NR_futimesat:
9597         {
9598             struct timeval *tvp, tv[2];
9599             if (arg3) {
9600                 if (copy_from_user_timeval(&tv[0], arg3)
9601                     || copy_from_user_timeval(&tv[1],
9602                                               arg3 + sizeof(struct target_timeval)))
9603                     return -TARGET_EFAULT;
9604                 tvp = tv;
9605             } else {
9606                 tvp = NULL;
9607             }
9608             if (!(p = lock_user_string(arg2))) {
9609                 return -TARGET_EFAULT;
9610             }
9611             ret = get_errno(futimesat(arg1, path(p), tvp));
9612             unlock_user(p, arg2, 0);
9613         }
9614         return ret;
9615 #endif
9616 #ifdef TARGET_NR_access
9617     case TARGET_NR_access:
9618         if (!(p = lock_user_string(arg1))) {
9619             return -TARGET_EFAULT;
9620         }
9621         ret = get_errno(access(path(p), arg2));
9622         unlock_user(p, arg1, 0);
9623         return ret;
9624 #endif
9625 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9626     case TARGET_NR_faccessat:
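             /* The original faccessat syscall has no flags argument. */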
9627         if (!(p = lock_user_string(arg2))) {
9628             return -TARGET_EFAULT;
9629         }
9630         ret = get_errno(faccessat(arg1, p, arg3, 0));
9631         unlock_user(p, arg2, 0);
9632         return ret;
9633 #endif
9634 #if defined(TARGET_NR_faccessat2)
9635     case TARGET_NR_faccessat2:
9636         if (!(p = lock_user_string(arg2))) {
9637             return -TARGET_EFAULT;
9638         }
9639         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9640         unlock_user(p, arg2, 0);
9641         return ret;
9642 #endif
9643 #ifdef TARGET_NR_nice /* not on alpha */
9644     case TARGET_NR_nice:
9645         return get_errno(nice(arg1));
9646 #endif
9647     case TARGET_NR_sync:
9648         sync();
9649         return 0;
9650 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9651     case TARGET_NR_syncfs:
9652         return get_errno(syncfs(arg1));
9653 #endif
9654     case TARGET_NR_kill:
9655         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9656 #ifdef TARGET_NR_rename
9657     case TARGET_NR_rename:
9658         {
9659             void *p2;
9660             p = lock_user_string(arg1);
9661             p2 = lock_user_string(arg2);
9662             if (!p || !p2)
9663                 ret = -TARGET_EFAULT;
9664             else
9665                 ret = get_errno(rename(p, p2));
9666             unlock_user(p2, arg2, 0);
9667             unlock_user(p, arg1, 0);
9668         }
9669         return ret;
9670 #endif
9671 #if defined(TARGET_NR_renameat)
9672     case TARGET_NR_renameat:
9673         {
9674             void *p2;
9675             p  = lock_user_string(arg2);
9676             p2 = lock_user_string(arg4);
9677             if (!p || !p2)
9678                 ret = -TARGET_EFAULT;
9679             else
9680                 ret = get_errno(renameat(arg1, p, arg3, p2));
9681             unlock_user(p2, arg4, 0);
9682             unlock_user(p, arg2, 0);
9683         }
9684         return ret;
9685 #endif
9686 #if defined(TARGET_NR_renameat2)
9687     case TARGET_NR_renameat2:
9688         {
9689             void *p2;
9690             p  = lock_user_string(arg2);
9691             p2 = lock_user_string(arg4);
9692             if (!p || !p2) {
9693                 ret = -TARGET_EFAULT;
9694             } else {
9695                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9696             }
9697             unlock_user(p2, arg4, 0);
9698             unlock_user(p, arg2, 0);
9699         }
9700         return ret;
9701 #endif
9702 #ifdef TARGET_NR_mkdir
9703     case TARGET_NR_mkdir:
9704         if (!(p = lock_user_string(arg1)))
9705             return -TARGET_EFAULT;
9706         ret = get_errno(mkdir(p, arg2));
9707         unlock_user(p, arg1, 0);
9708         return ret;
9709 #endif
9710 #if defined(TARGET_NR_mkdirat)
9711     case TARGET_NR_mkdirat:
9712         if (!(p = lock_user_string(arg2)))
9713             return -TARGET_EFAULT;
9714         ret = get_errno(mkdirat(arg1, p, arg3));
9715         unlock_user(p, arg2, 0);
9716         return ret;
9717 #endif
9718 #ifdef TARGET_NR_rmdir
9719     case TARGET_NR_rmdir:
9720         if (!(p = lock_user_string(arg1)))
9721             return -TARGET_EFAULT;
9722         ret = get_errno(rmdir(p));
9723         unlock_user(p, arg1, 0);
9724         return ret;
9725 #endif
9726     case TARGET_NR_dup:
9727         ret = get_errno(dup(arg1));
9728         if (ret >= 0) {
9729             fd_trans_dup(arg1, ret);
9730         }
9731         return ret;
9732 #ifdef TARGET_NR_pipe
9733     case TARGET_NR_pipe:
9734         return do_pipe(cpu_env, arg1, 0, 0);
9735 #endif
9736 #ifdef TARGET_NR_pipe2
9737     case TARGET_NR_pipe2:
9738         return do_pipe(cpu_env, arg1,
9739                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9740 #endif
9741     case TARGET_NR_times:
9742         {
9743             struct target_tms *tmsp;
9744             struct tms tms;
9745             ret = get_errno(times(&tms));
9746             if (arg1) {
9747                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9748                 if (!tmsp)
9749                     return -TARGET_EFAULT;
9750                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9751                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9752                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9753                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
                     unlock_user(tmsp, arg1, sizeof(struct target_tms));
9754             }
9755             if (!is_error(ret))
9756                 ret = host_to_target_clock_t(ret);
9757         }
9758         return ret;
9759     case TARGET_NR_acct:
9760         if (arg1 == 0) {
9761             ret = get_errno(acct(NULL));
9762         } else {
9763             if (!(p = lock_user_string(arg1))) {
9764                 return -TARGET_EFAULT;
9765             }
9766             ret = get_errno(acct(path(p)));
9767             unlock_user(p, arg1, 0);
9768         }
9769         return ret;
9770 #ifdef TARGET_NR_umount2
9771     case TARGET_NR_umount2:
9772         if (!(p = lock_user_string(arg1)))
9773             return -TARGET_EFAULT;
9774         ret = get_errno(umount2(p, arg2));
9775         unlock_user(p, arg1, 0);
9776         return ret;
9777 #endif
9778     case TARGET_NR_ioctl:
9779         return do_ioctl(arg1, arg2, arg3);
9780 #ifdef TARGET_NR_fcntl
9781     case TARGET_NR_fcntl:
9782         return do_fcntl(arg1, arg2, arg3);
9783 #endif
9784     case TARGET_NR_setpgid:
9785         return get_errno(setpgid(arg1, arg2));
9786     case TARGET_NR_umask:
9787         return get_errno(umask(arg1));
9788     case TARGET_NR_chroot:
9789         if (!(p = lock_user_string(arg1)))
9790             return -TARGET_EFAULT;
9791         ret = get_errno(chroot(p));
9792         unlock_user(p, arg1, 0);
9793         return ret;
9794 #ifdef TARGET_NR_dup2
9795     case TARGET_NR_dup2:
9796         ret = get_errno(dup2(arg1, arg2));
9797         if (ret >= 0) {
9798             fd_trans_dup(arg1, arg2);
9799         }
9800         return ret;
9801 #endif
9802 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9803     case TARGET_NR_dup3:
9804     {
9805         int host_flags;
9806 
9807         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9808             return -TARGET_EINVAL;
9809         }
9810         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9811         ret = get_errno(dup3(arg1, arg2, host_flags));
9812         if (ret >= 0) {
9813             fd_trans_dup(arg1, arg2);
9814         }
9815         return ret;
9816     }
9817 #endif
9818 #ifdef TARGET_NR_getppid /* not on alpha */
9819     case TARGET_NR_getppid:
9820         return get_errno(getppid());
9821 #endif
9822 #ifdef TARGET_NR_getpgrp
9823     case TARGET_NR_getpgrp:
9824         return get_errno(getpgrp());
9825 #endif
9826     case TARGET_NR_setsid:
9827         return get_errno(setsid());
9828 #ifdef TARGET_NR_sigaction
9829     case TARGET_NR_sigaction:
9830         {
9831 #if defined(TARGET_MIPS)
9832             struct target_sigaction act, oact, *pact, *old_act;
9833 
9834             if (arg2) {
9835                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9836                     return -TARGET_EFAULT;
9837                 act._sa_handler = old_act->_sa_handler;
9838                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9839                 act.sa_flags = old_act->sa_flags;
9840                 unlock_user_struct(old_act, arg2, 0);
9841                 pact = &act;
9842             } else {
9843                 pact = NULL;
9844             }
9845 
9846             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9847 
9848             if (!is_error(ret) && arg3) {
9849                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9850                     return -TARGET_EFAULT;
9851                 old_act->_sa_handler = oact._sa_handler;
9852                 old_act->sa_flags = oact.sa_flags;
9853                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9854                 old_act->sa_mask.sig[1] = 0;
9855                 old_act->sa_mask.sig[2] = 0;
9856                 old_act->sa_mask.sig[3] = 0;
9857                 unlock_user_struct(old_act, arg3, 1);
9858             }
9859 #else
9860             struct target_old_sigaction *old_act;
9861             struct target_sigaction act, oact, *pact;
9862             if (arg2) {
9863                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9864                     return -TARGET_EFAULT;
9865                 act._sa_handler = old_act->_sa_handler;
9866                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9867                 act.sa_flags = old_act->sa_flags;
9868 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9869                 act.sa_restorer = old_act->sa_restorer;
9870 #endif
9871                 unlock_user_struct(old_act, arg2, 0);
9872                 pact = &act;
9873             } else {
9874                 pact = NULL;
9875             }
9876             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9877             if (!is_error(ret) && arg3) {
9878                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9879                     return -TARGET_EFAULT;
9880                 old_act->_sa_handler = oact._sa_handler;
9881                 old_act->sa_mask = oact.sa_mask.sig[0];
9882                 old_act->sa_flags = oact.sa_flags;
9883 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9884                 old_act->sa_restorer = oact.sa_restorer;
9885 #endif
9886                 unlock_user_struct(old_act, arg3, 1);
9887             }
9888 #endif
9889         }
9890         return ret;
9891 #endif
9892     case TARGET_NR_rt_sigaction:
9893         {
9894             /*
9895              * For Alpha and SPARC this is a 5 argument syscall, with
9896              * a 'restorer' parameter which must be copied into the
9897              * sa_restorer field of the sigaction struct.
9898              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9899              * and arg5 is the sigsetsize.
9900              */
9901 #if defined(TARGET_ALPHA)
9902             target_ulong sigsetsize = arg4;
9903             target_ulong restorer = arg5;
9904 #elif defined(TARGET_SPARC)
9905             target_ulong restorer = arg4;
9906             target_ulong sigsetsize = arg5;
9907 #else
9908             target_ulong sigsetsize = arg4;
9909             target_ulong restorer = 0;
9910 #endif
9911             struct target_sigaction *act = NULL;
9912             struct target_sigaction *oact = NULL;
9913 
9914             if (sigsetsize != sizeof(target_sigset_t)) {
9915                 return -TARGET_EINVAL;
9916             }
9917             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9918                 return -TARGET_EFAULT;
9919             }
9920             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9921                 ret = -TARGET_EFAULT;
9922             } else {
9923                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9924                 if (oact) {
9925                     unlock_user_struct(oact, arg3, 1);
9926                 }
9927             }
9928             if (act) {
9929                 unlock_user_struct(act, arg2, 0);
9930             }
9931         }
9932         return ret;
9933 #ifdef TARGET_NR_sgetmask /* not on alpha */
9934     case TARGET_NR_sgetmask:
9935         {
9936             sigset_t cur_set;
9937             abi_ulong target_set;
9938             ret = do_sigprocmask(0, NULL, &cur_set);
9939             if (!ret) {
9940                 host_to_target_old_sigset(&target_set, &cur_set);
9941                 ret = target_set;
9942             }
9943         }
9944         return ret;
9945 #endif
9946 #ifdef TARGET_NR_ssetmask /* not on alpha */
9947     case TARGET_NR_ssetmask:
9948         {
9949             sigset_t set, oset;
9950             abi_ulong target_set = arg1;
9951             target_to_host_old_sigset(&set, &target_set);
9952             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9953             if (!ret) {
9954                 host_to_target_old_sigset(&target_set, &oset);
9955                 ret = target_set;
9956             }
9957         }
9958         return ret;
9959 #endif
9960 #ifdef TARGET_NR_sigprocmask
9961     case TARGET_NR_sigprocmask:
9962         {
9963 #if defined(TARGET_ALPHA)
9964             sigset_t set, oldset;
9965             abi_ulong mask;
9966             int how;
9967 
9968             switch (arg1) {
9969             case TARGET_SIG_BLOCK:
9970                 how = SIG_BLOCK;
9971                 break;
9972             case TARGET_SIG_UNBLOCK:
9973                 how = SIG_UNBLOCK;
9974                 break;
9975             case TARGET_SIG_SETMASK:
9976                 how = SIG_SETMASK;
9977                 break;
9978             default:
9979                 return -TARGET_EINVAL;
9980             }
9981             mask = arg2;
9982             target_to_host_old_sigset(&set, &mask);
9983 
9984             ret = do_sigprocmask(how, &set, &oldset);
9985             if (!is_error(ret)) {
9986                 host_to_target_old_sigset(&mask, &oldset);
9987                 ret = mask;
9988                 cpu_env->ir[IR_V0] = 0; /* force no error */
9989             }
9990 #else
9991             sigset_t set, oldset, *set_ptr;
9992             int how;
9993 
9994             if (arg2) {
9995                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9996                 if (!p) {
9997                     return -TARGET_EFAULT;
9998                 }
9999                 target_to_host_old_sigset(&set, p);
10000                 unlock_user(p, arg2, 0);
10001                 set_ptr = &set;
10002                 switch (arg1) {
10003                 case TARGET_SIG_BLOCK:
10004                     how = SIG_BLOCK;
10005                     break;
10006                 case TARGET_SIG_UNBLOCK:
10007                     how = SIG_UNBLOCK;
10008                     break;
10009                 case TARGET_SIG_SETMASK:
10010                     how = SIG_SETMASK;
10011                     break;
10012                 default:
10013                     return -TARGET_EINVAL;
10014                 }
10015             } else {
10016                 how = 0;
10017                 set_ptr = NULL;
10018             }
10019             ret = do_sigprocmask(how, set_ptr, &oldset);
10020             if (!is_error(ret) && arg3) {
10021                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10022                     return -TARGET_EFAULT;
10023                 host_to_target_old_sigset(p, &oldset);
10024                 unlock_user(p, arg3, sizeof(target_sigset_t));
10025             }
10026 #endif
10027         }
10028         return ret;
10029 #endif
10030     case TARGET_NR_rt_sigprocmask:
10031         {
10032             int how = arg1;
10033             sigset_t set, oldset, *set_ptr;
10034 
10035             if (arg4 != sizeof(target_sigset_t)) {
10036                 return -TARGET_EINVAL;
10037             }
10038 
10039             if (arg2) {
10040                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10041                 if (!p) {
10042                     return -TARGET_EFAULT;
10043                 }
10044                 target_to_host_sigset(&set, p);
10045                 unlock_user(p, arg2, 0);
10046                 set_ptr = &set;
10047                 switch(how) {
10048                 case TARGET_SIG_BLOCK:
10049                     how = SIG_BLOCK;
10050                     break;
10051                 case TARGET_SIG_UNBLOCK:
10052                     how = SIG_UNBLOCK;
10053                     break;
10054                 case TARGET_SIG_SETMASK:
10055                     how = SIG_SETMASK;
10056                     break;
10057                 default:
10058                     return -TARGET_EINVAL;
10059                 }
10060             } else {
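                      /* "how" is ignored when no new signal mask is supplied. */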
10061                 how = 0;
10062                 set_ptr = NULL;
10063             }
10064             ret = do_sigprocmask(how, set_ptr, &oldset);
10065             if (!is_error(ret) && arg3) {
10066                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10067                     return -TARGET_EFAULT;
10068                 host_to_target_sigset(p, &oldset);
10069                 unlock_user(p, arg3, sizeof(target_sigset_t));
10070             }
10071         }
10072         return ret;
10073 #ifdef TARGET_NR_sigpending
10074     case TARGET_NR_sigpending:
10075         {
10076             sigset_t set;
10077             ret = get_errno(sigpending(&set));
10078             if (!is_error(ret)) {
10079                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10080                     return -TARGET_EFAULT;
10081                 host_to_target_old_sigset(p, &set);
10082                 unlock_user(p, arg1, sizeof(target_sigset_t));
10083             }
10084         }
10085         return ret;
10086 #endif
10087     case TARGET_NR_rt_sigpending:
10088         {
10089             sigset_t set;
10090 
10091             /* Yes, this check is >, not != like most. We follow the kernel's
10092              * logic and it does it like this because it implements
10093              * NR_sigpending through the same code path, and in that case
10094              * the old_sigset_t is smaller in size.
10095              */
10096             if (arg2 > sizeof(target_sigset_t)) {
10097                 return -TARGET_EINVAL;
10098             }
10099 
10100             ret = get_errno(sigpending(&set));
10101             if (!is_error(ret)) {
10102                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10103                     return -TARGET_EFAULT;
10104                 host_to_target_sigset(p, &set);
10105                 unlock_user(p, arg1, sizeof(target_sigset_t));
10106             }
10107         }
10108         return ret;
10109 #ifdef TARGET_NR_sigsuspend
10110     case TARGET_NR_sigsuspend:
10111         {
10112             sigset_t *set;
10113 
10114 #if defined(TARGET_ALPHA)
10115             TaskState *ts = get_task_state(cpu);
10116             /* target_to_host_old_sigset will bswap back */
10117             abi_ulong mask = tswapal(arg1);
10118             set = &ts->sigsuspend_mask;
10119             target_to_host_old_sigset(set, &mask);
10120 #else
10121             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10122             if (ret != 0) {
10123                 return ret;
10124             }
10125 #endif
10126             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10127             finish_sigsuspend_mask(ret);
10128         }
10129         return ret;
10130 #endif
10131     case TARGET_NR_rt_sigsuspend:
10132         {
10133             sigset_t *set;
10134 
10135             ret = process_sigsuspend_mask(&set, arg1, arg2);
10136             if (ret != 0) {
10137                 return ret;
10138             }
10139             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10140             finish_sigsuspend_mask(ret);
10141         }
10142         return ret;
10143 #ifdef TARGET_NR_rt_sigtimedwait
10144     case TARGET_NR_rt_sigtimedwait:
10145         {
10146             sigset_t set;
10147             struct timespec uts, *puts;
10148             siginfo_t uinfo;
10149 
10150             if (arg4 != sizeof(target_sigset_t)) {
10151                 return -TARGET_EINVAL;
10152             }
10153 
10154             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10155                 return -TARGET_EFAULT;
10156             target_to_host_sigset(&set, p);
10157             unlock_user(p, arg1, 0);
10158             if (arg3) {
10159                 puts = &uts;
10160                 if (target_to_host_timespec(puts, arg3)) {
10161                     return -TARGET_EFAULT;
10162                 }
10163             } else {
10164                 puts = NULL;
10165             }
10166             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10167                                                  SIGSET_T_SIZE));
10168             if (!is_error(ret)) {
10169                 if (arg2) {
10170                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10171                                   0);
10172                     if (!p) {
10173                         return -TARGET_EFAULT;
10174                     }
10175                     host_to_target_siginfo(p, &uinfo);
10176                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10177                 }
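                      /*
                       * The result is a host signal number; convert it back to
                       * the target's numbering.
                       */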
10178                 ret = host_to_target_signal(ret);
10179             }
10180         }
10181         return ret;
10182 #endif
10183 #ifdef TARGET_NR_rt_sigtimedwait_time64
10184     case TARGET_NR_rt_sigtimedwait_time64:
10185         {
10186             sigset_t set;
10187             struct timespec uts, *puts;
10188             siginfo_t uinfo;
10189 
10190             if (arg4 != sizeof(target_sigset_t)) {
10191                 return -TARGET_EINVAL;
10192             }
10193 
10194             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10195             if (!p) {
10196                 return -TARGET_EFAULT;
10197             }
10198             target_to_host_sigset(&set, p);
10199             unlock_user(p, arg1, 0);
10200             if (arg3) {
10201                 puts = &uts;
10202                 if (target_to_host_timespec64(puts, arg3)) {
10203                     return -TARGET_EFAULT;
10204                 }
10205             } else {
10206                 puts = NULL;
10207             }
10208             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10209                                                  SIGSET_T_SIZE));
10210             if (!is_error(ret)) {
10211                 if (arg2) {
10212                     p = lock_user(VERIFY_WRITE, arg2,
10213                                   sizeof(target_siginfo_t), 0);
10214                     if (!p) {
10215                         return -TARGET_EFAULT;
10216                     }
10217                     host_to_target_siginfo(p, &uinfo);
10218                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10219                 }
10220                 ret = host_to_target_signal(ret);
10221             }
10222         }
10223         return ret;
10224 #endif
10225     case TARGET_NR_rt_sigqueueinfo:
10226         {
10227             siginfo_t uinfo;
10228 
10229             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10230             if (!p) {
10231                 return -TARGET_EFAULT;
10232             }
10233             target_to_host_siginfo(&uinfo, p);
10234             unlock_user(p, arg3, 0);
10235             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10236         }
10237         return ret;
10238     case TARGET_NR_rt_tgsigqueueinfo:
10239         {
10240             siginfo_t uinfo;
10241 
10242             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10243             if (!p) {
10244                 return -TARGET_EFAULT;
10245             }
10246             target_to_host_siginfo(&uinfo, p);
10247             unlock_user(p, arg4, 0);
10248             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10249         }
10250         return ret;
10251 #ifdef TARGET_NR_sigreturn
10252     case TARGET_NR_sigreturn:
10253         if (block_signals()) {
10254             return -QEMU_ERESTARTSYS;
10255         }
10256         return do_sigreturn(cpu_env);
10257 #endif
10258     case TARGET_NR_rt_sigreturn:
10259         if (block_signals()) {
10260             return -QEMU_ERESTARTSYS;
10261         }
10262         return do_rt_sigreturn(cpu_env);
10263     case TARGET_NR_sethostname:
10264         if (!(p = lock_user_string(arg1)))
10265             return -TARGET_EFAULT;
10266         ret = get_errno(sethostname(p, arg2));
10267         unlock_user(p, arg1, 0);
10268         return ret;
10269 #ifdef TARGET_NR_setrlimit
10270     case TARGET_NR_setrlimit:
10271         {
10272             int resource = target_to_host_resource(arg1);
10273             struct target_rlimit *target_rlim;
10274             struct rlimit rlim;
10275             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10276                 return -TARGET_EFAULT;
10277             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10278             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10279             unlock_user_struct(target_rlim, arg2, 0);
10280             /*
10281              * If we just passed through resource limit settings for memory then
10282              * they would also apply to QEMU's own allocations, and QEMU will
10283              * crash or hang or die if its allocations fail. Ideally we would
10284              * track the guest allocations in QEMU and apply the limits ourselves.
10285              * For now, just tell the guest the call succeeded but don't actually
10286              * limit anything.
10287              */
10288             if (resource != RLIMIT_AS &&
10289                 resource != RLIMIT_DATA &&
10290                 resource != RLIMIT_STACK) {
10291                 return get_errno(setrlimit(resource, &rlim));
10292             } else {
10293                 return 0;
10294             }
10295         }
10296 #endif
10297 #ifdef TARGET_NR_getrlimit
10298     case TARGET_NR_getrlimit:
10299         {
10300             int resource = target_to_host_resource(arg1);
10301             struct target_rlimit *target_rlim;
10302             struct rlimit rlim;
10303 
10304             ret = get_errno(getrlimit(resource, &rlim));
10305             if (!is_error(ret)) {
10306                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10307                     return -TARGET_EFAULT;
10308                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10309                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10310                 unlock_user_struct(target_rlim, arg2, 1);
10311             }
10312         }
10313         return ret;
10314 #endif
10315     case TARGET_NR_getrusage:
10316         {
10317             struct rusage rusage;
10318             ret = get_errno(getrusage(arg1, &rusage));
10319             if (!is_error(ret)) {
10320                 ret = host_to_target_rusage(arg2, &rusage);
10321             }
10322         }
10323         return ret;
10324 #if defined(TARGET_NR_gettimeofday)
10325     case TARGET_NR_gettimeofday:
10326         {
10327             struct timeval tv;
10328             struct timezone tz;
10329 
10330             ret = get_errno(gettimeofday(&tv, &tz));
10331             if (!is_error(ret)) {
10332                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10333                     return -TARGET_EFAULT;
10334                 }
10335                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10336                     return -TARGET_EFAULT;
10337                 }
10338             }
10339         }
10340         return ret;
10341 #endif
10342 #if defined(TARGET_NR_settimeofday)
10343     case TARGET_NR_settimeofday:
10344         {
10345             struct timeval tv, *ptv = NULL;
10346             struct timezone tz, *ptz = NULL;
10347 
10348             if (arg1) {
10349                 if (copy_from_user_timeval(&tv, arg1)) {
10350                     return -TARGET_EFAULT;
10351                 }
10352                 ptv = &tv;
10353             }
10354 
10355             if (arg2) {
10356                 if (copy_from_user_timezone(&tz, arg2)) {
10357                     return -TARGET_EFAULT;
10358                 }
10359                 ptz = &tz;
10360             }
10361 
10362             return get_errno(settimeofday(ptv, ptz));
10363         }
10364 #endif
10365 #if defined(TARGET_NR_select)
10366     case TARGET_NR_select:
10367 #if defined(TARGET_WANT_NI_OLD_SELECT)
10368         /* Some architectures used to implement old_select here,
10369          * but now return -TARGET_ENOSYS for it.
10370          */
10371         ret = -TARGET_ENOSYS;
10372 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10373         ret = do_old_select(arg1);
10374 #else
10375         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10376 #endif
10377         return ret;
10378 #endif
10379 #ifdef TARGET_NR_pselect6
10380     case TARGET_NR_pselect6:
10381         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10382 #endif
10383 #ifdef TARGET_NR_pselect6_time64
10384     case TARGET_NR_pselect6_time64:
10385         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10386 #endif
10387 #ifdef TARGET_NR_symlink
10388     case TARGET_NR_symlink:
10389         {
10390             void *p2;
10391             p = lock_user_string(arg1);
10392             p2 = lock_user_string(arg2);
10393             if (!p || !p2)
10394                 ret = -TARGET_EFAULT;
10395             else
10396                 ret = get_errno(symlink(p, p2));
10397             unlock_user(p2, arg2, 0);
10398             unlock_user(p, arg1, 0);
10399         }
10400         return ret;
10401 #endif
10402 #if defined(TARGET_NR_symlinkat)
10403     case TARGET_NR_symlinkat:
10404         {
10405             void *p2;
10406             p  = lock_user_string(arg1);
10407             p2 = lock_user_string(arg3);
10408             if (!p || !p2)
10409                 ret = -TARGET_EFAULT;
10410             else
10411                 ret = get_errno(symlinkat(p, arg2, p2));
10412             unlock_user(p2, arg3, 0);
10413             unlock_user(p, arg1, 0);
10414         }
10415         return ret;
10416 #endif
10417 #ifdef TARGET_NR_readlink
10418     case TARGET_NR_readlink:
10419         {
10420             void *p2;
10421             p = lock_user_string(arg1);
10422             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10423             if (!p || !p2) {
                      ret = -TARGET_EFAULT;
                  } else {
                      ret = get_errno(do_guest_readlink(p, p2, arg3));
                  }
10424             unlock_user(p2, arg2, ret);
10425             unlock_user(p, arg1, 0);
10426         }
10427         return ret;
10428 #endif
10429 #if defined(TARGET_NR_readlinkat)
10430     case TARGET_NR_readlinkat:
10431         {
10432             void *p2;
10433             p  = lock_user_string(arg2);
10434             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10435             if (!p || !p2) {
10436                 ret = -TARGET_EFAULT;
10437             } else if (!arg4) {
10438                 /* Short circuit this for the magic exe check. */
10439                 ret = -TARGET_EINVAL;
10440             } else if (is_proc_myself((const char *)p, "exe")) {
10441                 /*
10442                  * Don't worry about sign mismatch as earlier mapping
10443                  * logic would have thrown a bad address error.
10444                  */
10445                 ret = MIN(strlen(exec_path), arg4);
10446                 /* We cannot NUL terminate the string. */
10447                 memcpy(p2, exec_path, ret);
10448             } else {
10449                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10450             }
10451             unlock_user(p2, arg3, ret);
10452             unlock_user(p, arg2, 0);
10453         }
10454         return ret;
10455 #endif
10456 #ifdef TARGET_NR_swapon
10457     case TARGET_NR_swapon:
10458         if (!(p = lock_user_string(arg1)))
10459             return -TARGET_EFAULT;
10460         ret = get_errno(swapon(p, arg2));
10461         unlock_user(p, arg1, 0);
10462         return ret;
10463 #endif
10464     case TARGET_NR_reboot:
10465         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10466            /* arg4 is only used by RESTART2; it must be ignored in all other cases */
10467            p = lock_user_string(arg4);
10468            if (!p) {
10469                return -TARGET_EFAULT;
10470            }
10471            ret = get_errno(reboot(arg1, arg2, arg3, p));
10472            unlock_user(p, arg4, 0);
10473         } else {
10474            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10475         }
10476         return ret;
10477 #ifdef TARGET_NR_mmap
10478     case TARGET_NR_mmap:
10479 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10480     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10481     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10482     || defined(TARGET_S390X)
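              /*
               * On these targets the old mmap syscall takes a single pointer
               * to a block of six arguments in guest memory rather than
               * passing them in registers.
               */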
10483         {
10484             abi_ulong *v;
10485             abi_ulong v1, v2, v3, v4, v5, v6;
10486             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10487                 return -TARGET_EFAULT;
10488             v1 = tswapal(v[0]);
10489             v2 = tswapal(v[1]);
10490             v3 = tswapal(v[2]);
10491             v4 = tswapal(v[3]);
10492             v5 = tswapal(v[4]);
10493             v6 = tswapal(v[5]);
10494             unlock_user(v, arg1, 0);
10495             return do_mmap(v1, v2, v3, v4, v5, v6);
10496         }
10497 #else
10498         /* mmap pointers are always untagged */
10499         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10500 #endif
10501 #endif
10502 #ifdef TARGET_NR_mmap2
10503     case TARGET_NR_mmap2:
10504 #ifndef MMAP_SHIFT
10505 #define MMAP_SHIFT 12
10506 #endif
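              /*
               * The last argument is the file offset in units of
               * 1 << MMAP_SHIFT (normally 4 KiB); convert it to a byte offset.
               */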
10507         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10508                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10509 #endif
10510     case TARGET_NR_munmap:
10511         arg1 = cpu_untagged_addr(cpu, arg1);
10512         return get_errno(target_munmap(arg1, arg2));
10513     case TARGET_NR_mprotect:
10514         arg1 = cpu_untagged_addr(cpu, arg1);
10515         {
10516             TaskState *ts = get_task_state(cpu);
10517             /* Special hack to detect libc making the stack executable.  */
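                  /*
                   * PROT_GROWSDOWN asks for the change to be extended down to
                   * the bottom of the stack mapping; emulate that by widening
                   * the range to the guest stack limit and clearing the flag.
                   */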
10518             if ((arg3 & PROT_GROWSDOWN)
10519                 && arg1 >= ts->info->stack_limit
10520                 && arg1 <= ts->info->start_stack) {
10521                 arg3 &= ~PROT_GROWSDOWN;
10522                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10523                 arg1 = ts->info->stack_limit;
10524             }
10525         }
10526         return get_errno(target_mprotect(arg1, arg2, arg3));
10527 #ifdef TARGET_NR_mremap
10528     case TARGET_NR_mremap:
10529         arg1 = cpu_untagged_addr(cpu, arg1);
10530         /* mremap new_addr (arg5) is always untagged */
10531         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10532 #endif
10533         /* ??? msync/mlock/munlock are broken for softmmu.  */
10534 #ifdef TARGET_NR_msync
10535     case TARGET_NR_msync:
10536         return get_errno(msync(g2h(cpu, arg1), arg2,
10537                                target_to_host_msync_arg(arg3)));
10538 #endif
10539 #ifdef TARGET_NR_mlock
10540     case TARGET_NR_mlock:
10541         return get_errno(mlock(g2h(cpu, arg1), arg2));
10542 #endif
10543 #ifdef TARGET_NR_munlock
10544     case TARGET_NR_munlock:
10545         return get_errno(munlock(g2h(cpu, arg1), arg2));
10546 #endif
10547 #ifdef TARGET_NR_mlockall
10548     case TARGET_NR_mlockall:
10549         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10550 #endif
10551 #ifdef TARGET_NR_munlockall
10552     case TARGET_NR_munlockall:
10553         return get_errno(munlockall());
10554 #endif
10555 #ifdef TARGET_NR_truncate
10556     case TARGET_NR_truncate:
10557         if (!(p = lock_user_string(arg1)))
10558             return -TARGET_EFAULT;
10559         ret = get_errno(truncate(p, arg2));
10560         unlock_user(p, arg1, 0);
10561         return ret;
10562 #endif
10563 #ifdef TARGET_NR_ftruncate
10564     case TARGET_NR_ftruncate:
10565         return get_errno(ftruncate(arg1, arg2));
10566 #endif
10567     case TARGET_NR_fchmod:
10568         return get_errno(fchmod(arg1, arg2));
10569 #if defined(TARGET_NR_fchmodat)
10570     case TARGET_NR_fchmodat:
10571         if (!(p = lock_user_string(arg2)))
10572             return -TARGET_EFAULT;
10573         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10574         unlock_user(p, arg2, 0);
10575         return ret;
10576 #endif
10577     case TARGET_NR_getpriority:
10578         /* Note that negative values are valid for getpriority, so we must
10579            differentiate based on errno settings.  */
10580         errno = 0;
10581         ret = getpriority(arg1, arg2);
10582         if (ret == -1 && errno != 0) {
10583             return -host_to_target_errno(errno);
10584         }
10585 #ifdef TARGET_ALPHA
10586         /* Return value is the unbiased priority.  Signal no error.  */
10587         cpu_env->ir[IR_V0] = 0;
10588 #else
10589         /* Return value is a biased priority to avoid negative numbers.  */
10590         ret = 20 - ret;
10591 #endif
10592         return ret;
10593     case TARGET_NR_setpriority:
10594         return get_errno(setpriority(arg1, arg2, arg3));
10595 #ifdef TARGET_NR_statfs
10596     case TARGET_NR_statfs:
10597         if (!(p = lock_user_string(arg1))) {
10598             return -TARGET_EFAULT;
10599         }
10600         ret = get_errno(statfs(path(p), &stfs));
10601         unlock_user(p, arg1, 0);
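          /* TARGET_NR_fstatfs joins here to share the conversion into the
           * target statfs layout. */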
10602     convert_statfs:
10603         if (!is_error(ret)) {
10604             struct target_statfs *target_stfs;
10605 
10606             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10607                 return -TARGET_EFAULT;
10608             __put_user(stfs.f_type, &target_stfs->f_type);
10609             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10610             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10611             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10612             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10613             __put_user(stfs.f_files, &target_stfs->f_files);
10614             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10615             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10616             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10617             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10618             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10619 #ifdef _STATFS_F_FLAGS
10620             __put_user(stfs.f_flags, &target_stfs->f_flags);
10621 #else
10622             __put_user(0, &target_stfs->f_flags);
10623 #endif
10624             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10625             unlock_user_struct(target_stfs, arg2, 1);
10626         }
10627         return ret;
10628 #endif
10629 #ifdef TARGET_NR_fstatfs
10630     case TARGET_NR_fstatfs:
10631         ret = get_errno(fstatfs(arg1, &stfs));
10632         goto convert_statfs;
10633 #endif
10634 #ifdef TARGET_NR_statfs64
10635     case TARGET_NR_statfs64:
10636         if (!(p = lock_user_string(arg1))) {
10637             return -TARGET_EFAULT;
10638         }
10639         ret = get_errno(statfs(path(p), &stfs));
10640         unlock_user(p, arg1, 0);
10641     convert_statfs64:
10642         if (!is_error(ret)) {
10643             struct target_statfs64 *target_stfs;
10644 
10645             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10646                 return -TARGET_EFAULT;
10647             __put_user(stfs.f_type, &target_stfs->f_type);
10648             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10649             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10650             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10651             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10652             __put_user(stfs.f_files, &target_stfs->f_files);
10653             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10654             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10655             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10656             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10657             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10658 #ifdef _STATFS_F_FLAGS
10659             __put_user(stfs.f_flags, &target_stfs->f_flags);
10660 #else
10661             __put_user(0, &target_stfs->f_flags);
10662 #endif
10663             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10664             unlock_user_struct(target_stfs, arg3, 1);
10665         }
10666         return ret;
10667     case TARGET_NR_fstatfs64:
10668         ret = get_errno(fstatfs(arg1, &stfs));
10669         goto convert_statfs64;
10670 #endif
10671 #ifdef TARGET_NR_socketcall
10672     case TARGET_NR_socketcall:
10673         return do_socketcall(arg1, arg2);
10674 #endif
10675 #ifdef TARGET_NR_accept
10676     case TARGET_NR_accept:
10677         return do_accept4(arg1, arg2, arg3, 0);
10678 #endif
10679 #ifdef TARGET_NR_accept4
10680     case TARGET_NR_accept4:
10681         return do_accept4(arg1, arg2, arg3, arg4);
10682 #endif
10683 #ifdef TARGET_NR_bind
10684     case TARGET_NR_bind:
10685         return do_bind(arg1, arg2, arg3);
10686 #endif
10687 #ifdef TARGET_NR_connect
10688     case TARGET_NR_connect:
10689         return do_connect(arg1, arg2, arg3);
10690 #endif
10691 #ifdef TARGET_NR_getpeername
10692     case TARGET_NR_getpeername:
10693         return do_getpeername(arg1, arg2, arg3);
10694 #endif
10695 #ifdef TARGET_NR_getsockname
10696     case TARGET_NR_getsockname:
10697         return do_getsockname(arg1, arg2, arg3);
10698 #endif
10699 #ifdef TARGET_NR_getsockopt
10700     case TARGET_NR_getsockopt:
10701         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10702 #endif
10703 #ifdef TARGET_NR_listen
10704     case TARGET_NR_listen:
10705         return get_errno(listen(arg1, arg2));
10706 #endif
10707 #ifdef TARGET_NR_recv
10708     case TARGET_NR_recv:
10709         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10710 #endif
10711 #ifdef TARGET_NR_recvfrom
10712     case TARGET_NR_recvfrom:
10713         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10714 #endif
10715 #ifdef TARGET_NR_recvmsg
10716     case TARGET_NR_recvmsg:
10717         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10718 #endif
10719 #ifdef TARGET_NR_send
10720     case TARGET_NR_send:
10721         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10722 #endif
10723 #ifdef TARGET_NR_sendmsg
10724     case TARGET_NR_sendmsg:
10725         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10726 #endif
10727 #ifdef TARGET_NR_sendmmsg
10728     case TARGET_NR_sendmmsg:
10729         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10730 #endif
10731 #ifdef TARGET_NR_recvmmsg
10732     case TARGET_NR_recvmmsg:
10733         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10734 #endif
10735 #ifdef TARGET_NR_sendto
10736     case TARGET_NR_sendto:
10737         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10738 #endif
10739 #ifdef TARGET_NR_shutdown
10740     case TARGET_NR_shutdown:
10741         return get_errno(shutdown(arg1, arg2));
10742 #endif
10743 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10744     case TARGET_NR_getrandom:
10745         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10746         if (!p) {
10747             return -TARGET_EFAULT;
10748         }
10749         ret = get_errno(getrandom(p, arg2, arg3));
10750         unlock_user(p, arg1, ret);
10751         return ret;
10752 #endif
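    /*
     * For getrandom above, the guest buffer (arg1), length (arg2) and
     * flags (arg3) pass straight through to the host syscall; an
     * illustrative guest-side sketch:
     *
     *     uint8_t seed[16];
     *     getrandom(seed, sizeof(seed), GRND_NONBLOCK);
     */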
10753 #ifdef TARGET_NR_socket
10754     case TARGET_NR_socket:
10755         return do_socket(arg1, arg2, arg3);
10756 #endif
10757 #ifdef TARGET_NR_socketpair
10758     case TARGET_NR_socketpair:
10759         return do_socketpair(arg1, arg2, arg3, arg4);
10760 #endif
10761 #ifdef TARGET_NR_setsockopt
10762     case TARGET_NR_setsockopt:
10763         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10764 #endif
10765 #if defined(TARGET_NR_syslog)
10766     case TARGET_NR_syslog:
10767         {
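            /*
             * syslog(2) argument layout: arg1 is the SYSLOG_ACTION_* type,
             * arg2 the guest buffer and arg3 its length; only the READ*
             * actions dereference the buffer.
             */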
10768             int len = arg3;
10769 
10770             switch (arg1) {
10771             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10772             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10773             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10774             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10775             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10776             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10777             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10778             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10779                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10780             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10781             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10782             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10783                 {
10784                     if (len < 0) {
10785                         return -TARGET_EINVAL;
10786                     }
10787                     if (len == 0) {
10788                         return 0;
10789                     }
10790                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10791                     if (!p) {
10792                         return -TARGET_EFAULT;
10793                     }
10794                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10795                     unlock_user(p, arg2, arg3);
10796                 }
10797                 return ret;
10798             default:
10799                 return -TARGET_EINVAL;
10800             }
10801         }
10802         break;
10803 #endif
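    /*
     * setitimer/getitimer below treat the guest struct itimerval as two
     * consecutive target_timevals, it_interval first and it_value
     * immediately after it, hence the 'arg + sizeof(struct target_timeval)'
     * offsets.  Illustrative guest-side sketch:
     *
     *     struct itimerval v = { { 1, 0 }, { 1, 0 } };
     *     setitimer(ITIMER_REAL, &v, NULL);
     */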
10804     case TARGET_NR_setitimer:
10805         {
10806             struct itimerval value, ovalue, *pvalue;
10807 
10808             if (arg2) {
10809                 pvalue = &value;
10810                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10811                     || copy_from_user_timeval(&pvalue->it_value,
10812                                               arg2 + sizeof(struct target_timeval)))
10813                     return -TARGET_EFAULT;
10814             } else {
10815                 pvalue = NULL;
10816             }
10817             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10818             if (!is_error(ret) && arg3) {
10819                 if (copy_to_user_timeval(arg3,
10820                                          &ovalue.it_interval)
10821                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10822                                             &ovalue.it_value))
10823                     return -TARGET_EFAULT;
10824             }
10825         }
10826         return ret;
10827     case TARGET_NR_getitimer:
10828         {
10829             struct itimerval value;
10830 
10831             ret = get_errno(getitimer(arg1, &value));
10832             if (!is_error(ret) && arg2) {
10833                 if (copy_to_user_timeval(arg2,
10834                                          &value.it_interval)
10835                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10836                                             &value.it_value))
10837                     return -TARGET_EFAULT;
10838             }
10839         }
10840         return ret;
10841 #ifdef TARGET_NR_stat
10842     case TARGET_NR_stat:
10843         if (!(p = lock_user_string(arg1))) {
10844             return -TARGET_EFAULT;
10845         }
10846         ret = get_errno(stat(path(p), &st));
10847         unlock_user(p, arg1, 0);
10848         goto do_stat;
10849 #endif
10850 #ifdef TARGET_NR_lstat
10851     case TARGET_NR_lstat:
10852         if (!(p = lock_user_string(arg1))) {
10853             return -TARGET_EFAULT;
10854         }
10855         ret = get_errno(lstat(path(p), &st));
10856         unlock_user(p, arg1, 0);
10857         goto do_stat;
10858 #endif
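    /*
     * TARGET_NR_stat and TARGET_NR_lstat above jump to the do_stat label
     * inside the fstat case below, so all three share the conversion of
     * the host struct stat into the guest's target_stat layout.
     */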
10859 #ifdef TARGET_NR_fstat
10860     case TARGET_NR_fstat:
10861         {
10862             ret = get_errno(fstat(arg1, &st));
10863 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10864         do_stat:
10865 #endif
10866             if (!is_error(ret)) {
10867                 struct target_stat *target_st;
10868 
10869                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10870                     return -TARGET_EFAULT;
10871                 memset(target_st, 0, sizeof(*target_st));
10872                 __put_user(st.st_dev, &target_st->st_dev);
10873                 __put_user(st.st_ino, &target_st->st_ino);
10874                 __put_user(st.st_mode, &target_st->st_mode);
10875                 __put_user(st.st_uid, &target_st->st_uid);
10876                 __put_user(st.st_gid, &target_st->st_gid);
10877                 __put_user(st.st_nlink, &target_st->st_nlink);
10878                 __put_user(st.st_rdev, &target_st->st_rdev);
10879                 __put_user(st.st_size, &target_st->st_size);
10880                 __put_user(st.st_blksize, &target_st->st_blksize);
10881                 __put_user(st.st_blocks, &target_st->st_blocks);
10882                 __put_user(st.st_atime, &target_st->target_st_atime);
10883                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10884                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10885 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10886                 __put_user(st.st_atim.tv_nsec,
10887                            &target_st->target_st_atime_nsec);
10888                 __put_user(st.st_mtim.tv_nsec,
10889                            &target_st->target_st_mtime_nsec);
10890                 __put_user(st.st_ctim.tv_nsec,
10891                            &target_st->target_st_ctime_nsec);
10892 #endif
10893                 unlock_user_struct(target_st, arg2, 1);
10894             }
10895         }
10896         return ret;
10897 #endif
10898     case TARGET_NR_vhangup:
10899         return get_errno(vhangup());
10900 #ifdef TARGET_NR_syscall
10901     case TARGET_NR_syscall:
10902         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10903                           arg6, arg7, arg8, 0);
10904 #endif
10905 #if defined(TARGET_NR_wait4)
10906     case TARGET_NR_wait4:
10907         {
10908             int status;
10909             abi_long status_ptr = arg2;
10910             struct rusage rusage, *rusage_ptr;
10911             abi_ulong target_rusage = arg4;
10912             abi_long rusage_err;
10913             if (target_rusage)
10914                 rusage_ptr = &rusage;
10915             else
10916                 rusage_ptr = NULL;
10917             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10918             if (!is_error(ret)) {
10919                 if (status_ptr && ret) {
10920                     status = host_to_target_waitstatus(status);
10921                     if (put_user_s32(status, status_ptr))
10922                         return -TARGET_EFAULT;
10923                 }
10924                 if (target_rusage) {
10925                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10926                     if (rusage_err) {
10927                         ret = rusage_err;
10928                     }
10929                 }
10930             }
10931         }
10932         return ret;
10933 #endif
10934 #ifdef TARGET_NR_swapoff
10935     case TARGET_NR_swapoff:
10936         if (!(p = lock_user_string(arg1)))
10937             return -TARGET_EFAULT;
10938         ret = get_errno(swapoff(p));
10939         unlock_user(p, arg1, 0);
10940         return ret;
10941 #endif
10942     case TARGET_NR_sysinfo:
10943         {
10944             struct target_sysinfo *target_value;
10945             struct sysinfo value;
10946             ret = get_errno(sysinfo(&value));
10947             if (!is_error(ret) && arg1)
10948             {
10949                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10950                     return -TARGET_EFAULT;
10951                 __put_user(value.uptime, &target_value->uptime);
10952                 __put_user(value.loads[0], &target_value->loads[0]);
10953                 __put_user(value.loads[1], &target_value->loads[1]);
10954                 __put_user(value.loads[2], &target_value->loads[2]);
10955                 __put_user(value.totalram, &target_value->totalram);
10956                 __put_user(value.freeram, &target_value->freeram);
10957                 __put_user(value.sharedram, &target_value->sharedram);
10958                 __put_user(value.bufferram, &target_value->bufferram);
10959                 __put_user(value.totalswap, &target_value->totalswap);
10960                 __put_user(value.freeswap, &target_value->freeswap);
10961                 __put_user(value.procs, &target_value->procs);
10962                 __put_user(value.totalhigh, &target_value->totalhigh);
10963                 __put_user(value.freehigh, &target_value->freehigh);
10964                 __put_user(value.mem_unit, &target_value->mem_unit);
10965                 unlock_user_struct(target_value, arg1, 1);
10966             }
10967         }
10968         return ret;
10969 #ifdef TARGET_NR_ipc
10970     case TARGET_NR_ipc:
10971         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10972 #endif
10973 #ifdef TARGET_NR_semget
10974     case TARGET_NR_semget:
10975         return get_errno(semget(arg1, arg2, arg3));
10976 #endif
10977 #ifdef TARGET_NR_semop
10978     case TARGET_NR_semop:
10979         return do_semtimedop(arg1, arg2, arg3, 0, false);
10980 #endif
10981 #ifdef TARGET_NR_semtimedop
10982     case TARGET_NR_semtimedop:
10983         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10984 #endif
10985 #ifdef TARGET_NR_semtimedop_time64
10986     case TARGET_NR_semtimedop_time64:
10987         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10988 #endif
10989 #ifdef TARGET_NR_semctl
10990     case TARGET_NR_semctl:
10991         return do_semctl(arg1, arg2, arg3, arg4);
10992 #endif
10993 #ifdef TARGET_NR_msgctl
10994     case TARGET_NR_msgctl:
10995         return do_msgctl(arg1, arg2, arg3);
10996 #endif
10997 #ifdef TARGET_NR_msgget
10998     case TARGET_NR_msgget:
10999         return get_errno(msgget(arg1, arg2));
11000 #endif
11001 #ifdef TARGET_NR_msgrcv
11002     case TARGET_NR_msgrcv:
11003         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11004 #endif
11005 #ifdef TARGET_NR_msgsnd
11006     case TARGET_NR_msgsnd:
11007         return do_msgsnd(arg1, arg2, arg3, arg4);
11008 #endif
11009 #ifdef TARGET_NR_shmget
11010     case TARGET_NR_shmget:
11011         return get_errno(shmget(arg1, arg2, arg3));
11012 #endif
11013 #ifdef TARGET_NR_shmctl
11014     case TARGET_NR_shmctl:
11015         return do_shmctl(arg1, arg2, arg3);
11016 #endif
11017 #ifdef TARGET_NR_shmat
11018     case TARGET_NR_shmat:
11019         return target_shmat(cpu_env, arg1, arg2, arg3);
11020 #endif
11021 #ifdef TARGET_NR_shmdt
11022     case TARGET_NR_shmdt:
11023         return target_shmdt(arg1);
11024 #endif
11025     case TARGET_NR_fsync:
11026         return get_errno(fsync(arg1));
11027     case TARGET_NR_clone:
11028         /* Linux manages to have three different orderings for its
11029          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11030          * match the kernel's CONFIG_CLONE_* settings.
11031          * Microblaze is further special in that it uses a sixth
11032          * implicit argument to clone for the TLS pointer.
11033          */
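        /*
         * Rough sketch of the guest-side argument orders handled below,
         * assuming the usual kernel CONFIG_CLONE_* layouts:
         *   default:    clone(flags, newsp, parent_tidptr, child_tidptr, tls)
         *   BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
         *   BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
         *   Microblaze: clone(flags, newsp, stack_size, parent_tidptr,
         *                     child_tidptr, tls)
         */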
11034 #if defined(TARGET_MICROBLAZE)
11035         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11036 #elif defined(TARGET_CLONE_BACKWARDS)
11037         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11038 #elif defined(TARGET_CLONE_BACKWARDS2)
11039         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11040 #else
11041         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11042 #endif
11043         return ret;
11044 #ifdef __NR_exit_group
11045         /* new thread calls */
11046     case TARGET_NR_exit_group:
11047         preexit_cleanup(cpu_env, arg1);
11048         return get_errno(exit_group(arg1));
11049 #endif
11050     case TARGET_NR_setdomainname:
11051         if (!(p = lock_user_string(arg1)))
11052             return -TARGET_EFAULT;
11053         ret = get_errno(setdomainname(p, arg2));
11054         unlock_user(p, arg1, 0);
11055         return ret;
11056     case TARGET_NR_uname:
11057         /* no need to transcode because we use the linux syscall */
11058         {
11059             struct new_utsname * buf;
11060 
11061             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11062                 return -TARGET_EFAULT;
11063             ret = get_errno(sys_uname(buf));
11064             if (!is_error(ret)) {
11065                 /* Overwrite the native machine name with whatever is being
11066                    emulated. */
11067                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11068                           sizeof(buf->machine));
11069                 /* Allow the user to override the reported release.  */
11070                 if (qemu_uname_release && *qemu_uname_release) {
11071                     g_strlcpy(buf->release, qemu_uname_release,
11072                               sizeof(buf->release));
11073                 }
11074             }
11075             unlock_user_struct(buf, arg1, 1);
11076         }
11077         return ret;
11078 #ifdef TARGET_I386
11079     case TARGET_NR_modify_ldt:
11080         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11081 #if !defined(TARGET_X86_64)
11082     case TARGET_NR_vm86:
11083         return do_vm86(cpu_env, arg1, arg2);
11084 #endif
11085 #endif
11086 #if defined(TARGET_NR_adjtimex)
11087     case TARGET_NR_adjtimex:
11088         {
11089             struct timex host_buf;
11090 
11091             if (target_to_host_timex(&host_buf, arg1) != 0) {
11092                 return -TARGET_EFAULT;
11093             }
11094             ret = get_errno(adjtimex(&host_buf));
11095             if (!is_error(ret)) {
11096                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11097                     return -TARGET_EFAULT;
11098                 }
11099             }
11100         }
11101         return ret;
11102 #endif
11103 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11104     case TARGET_NR_clock_adjtime:
11105         {
11106             struct timex htx;
11107 
11108             if (target_to_host_timex(&htx, arg2) != 0) {
11109                 return -TARGET_EFAULT;
11110             }
11111             ret = get_errno(clock_adjtime(arg1, &htx));
11112             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11113                 return -TARGET_EFAULT;
11114             }
11115         }
11116         return ret;
11117 #endif
11118 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11119     case TARGET_NR_clock_adjtime64:
11120         {
11121             struct timex htx;
11122 
11123             if (target_to_host_timex64(&htx, arg2) != 0) {
11124                 return -TARGET_EFAULT;
11125             }
11126             ret = get_errno(clock_adjtime(arg1, &htx));
11127             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11128                 return -TARGET_EFAULT;
11129             }
11130         }
11131         return ret;
11132 #endif
11133     case TARGET_NR_getpgid:
11134         return get_errno(getpgid(arg1));
11135     case TARGET_NR_fchdir:
11136         return get_errno(fchdir(arg1));
11137     case TARGET_NR_personality:
11138         return get_errno(personality(arg1));
11139 #ifdef TARGET_NR__llseek /* Not on alpha */
11140     case TARGET_NR__llseek:
11141         {
11142             int64_t res;
11143 #if !defined(__NR_llseek)
11144             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11145             if (res == -1) {
11146                 ret = get_errno(res);
11147             } else {
11148                 ret = 0;
11149             }
11150 #else
11151             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11152 #endif
11153             if ((ret == 0) && put_user_s64(res, arg4)) {
11154                 return -TARGET_EFAULT;
11155             }
11156         }
11157         return ret;
11158 #endif
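    /*
     * For _llseek above, the guest splits the 64-bit offset into high
     * (arg2) and low (arg3) halves and passes a pointer (arg4) for the
     * resulting offset; an illustrative raw-syscall sketch:
     *
     *     loff_t res;
     *     syscall(__NR__llseek, fd, 0, 4096, &res, SEEK_SET);
     */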
11159 #ifdef TARGET_NR_getdents
11160     case TARGET_NR_getdents:
11161         return do_getdents(arg1, arg2, arg3);
11162 #endif /* TARGET_NR_getdents */
11163 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11164     case TARGET_NR_getdents64:
11165         return do_getdents64(arg1, arg2, arg3);
11166 #endif /* TARGET_NR_getdents64 */
11167 #if defined(TARGET_NR__newselect)
11168     case TARGET_NR__newselect:
11169         return do_select(arg1, arg2, arg3, arg4, arg5);
11170 #endif
11171 #ifdef TARGET_NR_poll
11172     case TARGET_NR_poll:
11173         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11174 #endif
11175 #ifdef TARGET_NR_ppoll
11176     case TARGET_NR_ppoll:
11177         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11178 #endif
11179 #ifdef TARGET_NR_ppoll_time64
11180     case TARGET_NR_ppoll_time64:
11181         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11182 #endif
11183     case TARGET_NR_flock:
11184         /* NOTE: the flock constant seems to be the same for every
11185            Linux platform */
11186         return get_errno(safe_flock(arg1, arg2));
11187     case TARGET_NR_readv:
11188         {
11189             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11190             if (vec != NULL) {
11191                 ret = get_errno(safe_readv(arg1, vec, arg3));
11192                 unlock_iovec(vec, arg2, arg3, 1);
11193             } else {
11194                 ret = -host_to_target_errno(errno);
11195             }
11196         }
11197         return ret;
11198     case TARGET_NR_writev:
11199         {
11200             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11201             if (vec != NULL) {
11202                 ret = get_errno(safe_writev(arg1, vec, arg3));
11203                 unlock_iovec(vec, arg2, arg3, 0);
11204             } else {
11205                 ret = -host_to_target_errno(errno);
11206             }
11207         }
11208         return ret;
11209 #if defined(TARGET_NR_preadv)
11210     case TARGET_NR_preadv:
11211         {
11212             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11213             if (vec != NULL) {
11214                 unsigned long low, high;
11215 
11216                 target_to_host_low_high(arg4, arg5, &low, &high);
11217                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11218                 unlock_iovec(vec, arg2, arg3, 1);
11219             } else {
11220                 ret = -host_to_target_errno(errno);
11221             }
11222         }
11223         return ret;
11224 #endif
11225 #if defined(TARGET_NR_pwritev)
11226     case TARGET_NR_pwritev:
11227         {
11228             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11229             if (vec != NULL) {
11230                 unsigned long low, high;
11231 
11232                 target_to_host_low_high(arg4, arg5, &low, &high);
11233                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11234                 unlock_iovec(vec, arg2, arg3, 0);
11235             } else {
11236                 ret = -host_to_target_errno(errno);
11237             }
11238         }
11239         return ret;
11240 #endif
11241     case TARGET_NR_getsid:
11242         return get_errno(getsid(arg1));
11243 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11244     case TARGET_NR_fdatasync:
11245         return get_errno(fdatasync(arg1));
11246 #endif
11247     case TARGET_NR_sched_getaffinity:
11248         {
11249             unsigned int mask_size;
11250             unsigned long *mask;
11251 
11252             /*
11253              * sched_getaffinity needs multiples of ulong, so need to take
11254              * care of mismatches between target ulong and host ulong sizes.
11255              */
11256             if (arg2 & (sizeof(abi_ulong) - 1)) {
11257                 return -TARGET_EINVAL;
11258             }
11259             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11260 
11261             mask = alloca(mask_size);
11262             memset(mask, 0, mask_size);
11263             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11264 
11265             if (!is_error(ret)) {
11266                 if (ret > arg2) {
11267                     /* More data returned than the caller's buffer will fit.
11268                      * This only happens if sizeof(abi_long) < sizeof(long)
11269                      * and the caller passed us a buffer holding an odd number
11270                      * of abi_longs. If the host kernel is actually using the
11271                      * extra 4 bytes then fail EINVAL; otherwise we can just
11272                      * ignore them and only copy the interesting part.
11273                      */
11274                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11275                     if (numcpus > arg2 * 8) {
11276                         return -TARGET_EINVAL;
11277                     }
11278                     ret = arg2;
11279                 }
11280 
11281                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11282                     return -TARGET_EFAULT;
11283                 }
11284             }
11285         }
11286         return ret;
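    /*
     * Worked example of the rounding used by the two affinity cases: a
     * 32-bit guest passing arg2 == 4 on a 64-bit host gets mask_size
     * rounded up to a whole host ulong, i.e. (4 + 7) & ~7 == 8, so the
     * host syscall always operates on complete unsigned longs.
     */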
11287     case TARGET_NR_sched_setaffinity:
11288         {
11289             unsigned int mask_size;
11290             unsigned long *mask;
11291 
11292             /*
11293              * sched_setaffinity needs multiples of ulong, so need to take
11294              * care of mismatches between target ulong and host ulong sizes.
11295              */
11296             if (arg2 & (sizeof(abi_ulong) - 1)) {
11297                 return -TARGET_EINVAL;
11298             }
11299             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11300             mask = alloca(mask_size);
11301 
11302             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11303             if (ret) {
11304                 return ret;
11305             }
11306 
11307             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11308         }
11309     case TARGET_NR_getcpu:
11310         {
11311             unsigned cpuid, node;
11312             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11313                                        arg2 ? &node : NULL,
11314                                        NULL));
11315             if (is_error(ret)) {
11316                 return ret;
11317             }
11318             if (arg1 && put_user_u32(cpuid, arg1)) {
11319                 return -TARGET_EFAULT;
11320             }
11321             if (arg2 && put_user_u32(node, arg2)) {
11322                 return -TARGET_EFAULT;
11323             }
11324         }
11325         return ret;
11326     case TARGET_NR_sched_setparam:
11327         {
11328             struct target_sched_param *target_schp;
11329             struct sched_param schp;
11330 
11331             if (arg2 == 0) {
11332                 return -TARGET_EINVAL;
11333             }
11334             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11335                 return -TARGET_EFAULT;
11336             }
11337             schp.sched_priority = tswap32(target_schp->sched_priority);
11338             unlock_user_struct(target_schp, arg2, 0);
11339             return get_errno(sys_sched_setparam(arg1, &schp));
11340         }
11341     case TARGET_NR_sched_getparam:
11342         {
11343             struct target_sched_param *target_schp;
11344             struct sched_param schp;
11345 
11346             if (arg2 == 0) {
11347                 return -TARGET_EINVAL;
11348             }
11349             ret = get_errno(sys_sched_getparam(arg1, &schp));
11350             if (!is_error(ret)) {
11351                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11352                     return -TARGET_EFAULT;
11353                 }
11354                 target_schp->sched_priority = tswap32(schp.sched_priority);
11355                 unlock_user_struct(target_schp, arg2, 1);
11356             }
11357         }
11358         return ret;
11359     case TARGET_NR_sched_setscheduler:
11360         {
11361             struct target_sched_param *target_schp;
11362             struct sched_param schp;
11363             if (arg3 == 0) {
11364                 return -TARGET_EINVAL;
11365             }
11366             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11367                 return -TARGET_EFAULT;
11368             }
11369             schp.sched_priority = tswap32(target_schp->sched_priority);
11370             unlock_user_struct(target_schp, arg3, 0);
11371             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11372         }
11373     case TARGET_NR_sched_getscheduler:
11374         return get_errno(sys_sched_getscheduler(arg1));
11375     case TARGET_NR_sched_getattr:
11376         {
11377             struct target_sched_attr *target_scha;
11378             struct sched_attr scha;
11379             if (arg2 == 0) {
11380                 return -TARGET_EINVAL;
11381             }
11382             if (arg3 > sizeof(scha)) {
11383                 arg3 = sizeof(scha);
11384             }
11385             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11386             if (!is_error(ret)) {
11387                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11388                 if (!target_scha) {
11389                     return -TARGET_EFAULT;
11390                 }
11391                 target_scha->size = tswap32(scha.size);
11392                 target_scha->sched_policy = tswap32(scha.sched_policy);
11393                 target_scha->sched_flags = tswap64(scha.sched_flags);
11394                 target_scha->sched_nice = tswap32(scha.sched_nice);
11395                 target_scha->sched_priority = tswap32(scha.sched_priority);
11396                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11397                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11398                 target_scha->sched_period = tswap64(scha.sched_period);
11399                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11400                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11401                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11402                 }
11403                 unlock_user(target_scha, arg2, arg3);
11404             }
11405             return ret;
11406         }
11407     case TARGET_NR_sched_setattr:
11408         {
11409             struct target_sched_attr *target_scha;
11410             struct sched_attr scha;
11411             uint32_t size;
11412             int zeroed;
11413             if (arg2 == 0) {
11414                 return -TARGET_EINVAL;
11415             }
11416             if (get_user_u32(size, arg2)) {
11417                 return -TARGET_EFAULT;
11418             }
11419             if (!size) {
11420                 size = offsetof(struct target_sched_attr, sched_util_min);
11421             }
11422             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11423                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11424                     return -TARGET_EFAULT;
11425                 }
11426                 return -TARGET_E2BIG;
11427             }
11428 
11429             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11430             if (zeroed < 0) {
11431                 return zeroed;
11432             } else if (zeroed == 0) {
11433                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11434                     return -TARGET_EFAULT;
11435                 }
11436                 return -TARGET_E2BIG;
11437             }
11438             if (size > sizeof(struct target_sched_attr)) {
11439                 size = sizeof(struct target_sched_attr);
11440             }
11441 
11442             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11443             if (!target_scha) {
11444                 return -TARGET_EFAULT;
11445             }
11446             scha.size = size;
11447             scha.sched_policy = tswap32(target_scha->sched_policy);
11448             scha.sched_flags = tswap64(target_scha->sched_flags);
11449             scha.sched_nice = tswap32(target_scha->sched_nice);
11450             scha.sched_priority = tswap32(target_scha->sched_priority);
11451             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11452             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11453             scha.sched_period = tswap64(target_scha->sched_period);
11454             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11455                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11456                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11457             }
11458             unlock_user(target_scha, arg2, 0);
11459             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11460         }
11461     case TARGET_NR_sched_yield:
11462         return get_errno(sched_yield());
11463     case TARGET_NR_sched_get_priority_max:
11464         return get_errno(sched_get_priority_max(arg1));
11465     case TARGET_NR_sched_get_priority_min:
11466         return get_errno(sched_get_priority_min(arg1));
11467 #ifdef TARGET_NR_sched_rr_get_interval
11468     case TARGET_NR_sched_rr_get_interval:
11469         {
11470             struct timespec ts;
11471             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11472             if (!is_error(ret)) {
11473                 ret = host_to_target_timespec(arg2, &ts);
11474             }
11475         }
11476         return ret;
11477 #endif
11478 #ifdef TARGET_NR_sched_rr_get_interval_time64
11479     case TARGET_NR_sched_rr_get_interval_time64:
11480         {
11481             struct timespec ts;
11482             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11483             if (!is_error(ret)) {
11484                 ret = host_to_target_timespec64(arg2, &ts);
11485             }
11486         }
11487         return ret;
11488 #endif
11489 #if defined(TARGET_NR_nanosleep)
11490     case TARGET_NR_nanosleep:
11491         {
11492             struct timespec req, rem;
11493             target_to_host_timespec(&req, arg1);
11494             ret = get_errno(safe_nanosleep(&req, &rem));
11495             if (is_error(ret) && arg2) {
11496                 host_to_target_timespec(arg2, &rem);
11497             }
11498         }
11499         return ret;
11500 #endif
11501     case TARGET_NR_prctl:
11502         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11504 #ifdef TARGET_NR_arch_prctl
11505     case TARGET_NR_arch_prctl:
11506         return do_arch_prctl(cpu_env, arg1, arg2);
11507 #endif
11508 #ifdef TARGET_NR_pread64
11509     case TARGET_NR_pread64:
11510         if (regpairs_aligned(cpu_env, num)) {
11511             arg4 = arg5;
11512             arg5 = arg6;
11513         }
11514         if (arg2 == 0 && arg3 == 0) {
11515             /* Special-case NULL buffer and zero length, which should succeed */
11516             p = 0;
11517         } else {
11518             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11519             if (!p) {
11520                 return -TARGET_EFAULT;
11521             }
11522         }
11523         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11524         unlock_user(p, arg2, ret);
11525         return ret;
11526     case TARGET_NR_pwrite64:
11527         if (regpairs_aligned(cpu_env, num)) {
11528             arg4 = arg5;
11529             arg5 = arg6;
11530         }
11531         if (arg2 == 0 && arg3 == 0) {
11532             /* Special-case NULL buffer and zero length, which should succeed */
11533             p = 0;
11534         } else {
11535             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11536             if (!p) {
11537                 return -TARGET_EFAULT;
11538             }
11539         }
11540         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11541         unlock_user(p, arg2, 0);
11542         return ret;
11543 #endif
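    /*
     * In the pread64/pwrite64 cases above, 32-bit guests pass the 64-bit
     * file offset as a register pair; regpairs_aligned() shifts the pair
     * from arg4/arg5 to arg5/arg6 on ABIs that align 64-bit pairs to even
     * registers, and target_offset64() reassembles the halves in the
     * target's endian order.
     */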
11544     case TARGET_NR_getcwd:
11545         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11546             return -TARGET_EFAULT;
11547         ret = get_errno(sys_getcwd1(p, arg2));
11548         unlock_user(p, arg1, ret);
11549         return ret;
11550     case TARGET_NR_capget:
11551     case TARGET_NR_capset:
11552     {
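        /*
         * capget and capset share this code; 'num' tells them apart below.
         * Illustrative guest-side sketch: with a v3 header the kernel
         * expects two data structs:
         *
         *     struct __user_cap_header_struct h = {
         *         _LINUX_CAPABILITY_VERSION_3, 0
         *     };
         *     struct __user_cap_data_struct d[2];
         *     capget(&h, d);
         */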
11553         struct target_user_cap_header *target_header;
11554         struct target_user_cap_data *target_data = NULL;
11555         struct __user_cap_header_struct header;
11556         struct __user_cap_data_struct data[2];
11557         struct __user_cap_data_struct *dataptr = NULL;
11558         int i, target_datalen;
11559         int data_items = 1;
11560 
11561         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11562             return -TARGET_EFAULT;
11563         }
11564         header.version = tswap32(target_header->version);
11565         header.pid = tswap32(target_header->pid);
11566 
11567         if (header.version != _LINUX_CAPABILITY_VERSION) {
11568             /* Version 2 and up takes pointer to two user_data structs */
11569             data_items = 2;
11570         }
11571 
11572         target_datalen = sizeof(*target_data) * data_items;
11573 
11574         if (arg2) {
11575             if (num == TARGET_NR_capget) {
11576                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11577             } else {
11578                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11579             }
11580             if (!target_data) {
11581                 unlock_user_struct(target_header, arg1, 0);
11582                 return -TARGET_EFAULT;
11583             }
11584 
11585             if (num == TARGET_NR_capset) {
11586                 for (i = 0; i < data_items; i++) {
11587                     data[i].effective = tswap32(target_data[i].effective);
11588                     data[i].permitted = tswap32(target_data[i].permitted);
11589                     data[i].inheritable = tswap32(target_data[i].inheritable);
11590                 }
11591             }
11592 
11593             dataptr = data;
11594         }
11595 
11596         if (num == TARGET_NR_capget) {
11597             ret = get_errno(capget(&header, dataptr));
11598         } else {
11599             ret = get_errno(capset(&header, dataptr));
11600         }
11601 
11602         /* The kernel always updates version for both capget and capset */
11603         target_header->version = tswap32(header.version);
11604         unlock_user_struct(target_header, arg1, 1);
11605 
11606         if (arg2) {
11607             if (num == TARGET_NR_capget) {
11608                 for (i = 0; i < data_items; i++) {
11609                     target_data[i].effective = tswap32(data[i].effective);
11610                     target_data[i].permitted = tswap32(data[i].permitted);
11611                     target_data[i].inheritable = tswap32(data[i].inheritable);
11612                 }
11613                 unlock_user(target_data, arg2, target_datalen);
11614             } else {
11615                 unlock_user(target_data, arg2, 0);
11616             }
11617         }
11618         return ret;
11619     }
11620     case TARGET_NR_sigaltstack:
11621         return do_sigaltstack(arg1, arg2, cpu_env);
11622 
11623 #ifdef CONFIG_SENDFILE
11624 #ifdef TARGET_NR_sendfile
11625     case TARGET_NR_sendfile:
11626     {
11627         off_t *offp = NULL;
11628         off_t off;
11629         if (arg3) {
11630             ret = get_user_sal(off, arg3);
11631             if (is_error(ret)) {
11632                 return ret;
11633             }
11634             offp = &off;
11635         }
11636         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11637         if (!is_error(ret) && arg3) {
11638             abi_long ret2 = put_user_sal(off, arg3);
11639             if (is_error(ret2)) {
11640                 ret = ret2;
11641             }
11642         }
11643         return ret;
11644     }
11645 #endif
11646 #ifdef TARGET_NR_sendfile64
11647     case TARGET_NR_sendfile64:
11648     {
11649         off_t *offp = NULL;
11650         off_t off;
11651         if (arg3) {
11652             ret = get_user_s64(off, arg3);
11653             if (is_error(ret)) {
11654                 return ret;
11655             }
11656             offp = &off;
11657         }
11658         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11659         if (!is_error(ret) && arg3) {
11660             abi_long ret2 = put_user_s64(off, arg3);
11661             if (is_error(ret2)) {
11662                 ret = ret2;
11663             }
11664         }
11665         return ret;
11666     }
11667 #endif
11668 #endif
11669 #ifdef TARGET_NR_vfork
11670     case TARGET_NR_vfork:
11671         return get_errno(do_fork(cpu_env,
11672                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11673                          0, 0, 0, 0));
11674 #endif
11675 #ifdef TARGET_NR_ugetrlimit
11676     case TARGET_NR_ugetrlimit:
11677     {
11678         struct rlimit rlim;
11679         int resource = target_to_host_resource(arg1);
11680         ret = get_errno(getrlimit(resource, &rlim));
11681         if (!is_error(ret)) {
11682             struct target_rlimit *target_rlim;
11683             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11684                 return -TARGET_EFAULT;
11685             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11686             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11687             unlock_user_struct(target_rlim, arg2, 1);
11688         }
11689         return ret;
11690     }
11691 #endif
11692 #ifdef TARGET_NR_truncate64
11693     case TARGET_NR_truncate64:
11694         if (!(p = lock_user_string(arg1)))
11695             return -TARGET_EFAULT;
11696         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11697         unlock_user(p, arg1, 0);
11698         return ret;
11699 #endif
11700 #ifdef TARGET_NR_ftruncate64
11701     case TARGET_NR_ftruncate64:
11702         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11703 #endif
11704 #ifdef TARGET_NR_stat64
11705     case TARGET_NR_stat64:
11706         if (!(p = lock_user_string(arg1))) {
11707             return -TARGET_EFAULT;
11708         }
11709         ret = get_errno(stat(path(p), &st));
11710         unlock_user(p, arg1, 0);
11711         if (!is_error(ret))
11712             ret = host_to_target_stat64(cpu_env, arg2, &st);
11713         return ret;
11714 #endif
11715 #ifdef TARGET_NR_lstat64
11716     case TARGET_NR_lstat64:
11717         if (!(p = lock_user_string(arg1))) {
11718             return -TARGET_EFAULT;
11719         }
11720         ret = get_errno(lstat(path(p), &st));
11721         unlock_user(p, arg1, 0);
11722         if (!is_error(ret))
11723             ret = host_to_target_stat64(cpu_env, arg2, &st);
11724         return ret;
11725 #endif
11726 #ifdef TARGET_NR_fstat64
11727     case TARGET_NR_fstat64:
11728         ret = get_errno(fstat(arg1, &st));
11729         if (!is_error(ret))
11730             ret = host_to_target_stat64(cpu_env, arg2, &st);
11731         return ret;
11732 #endif
11733 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11734 #ifdef TARGET_NR_fstatat64
11735     case TARGET_NR_fstatat64:
11736 #endif
11737 #ifdef TARGET_NR_newfstatat
11738     case TARGET_NR_newfstatat:
11739 #endif
11740         if (!(p = lock_user_string(arg2))) {
11741             return -TARGET_EFAULT;
11742         }
11743         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11744         unlock_user(p, arg2, 0);
11745         if (!is_error(ret))
11746             ret = host_to_target_stat64(cpu_env, arg3, &st);
11747         return ret;
11748 #endif
11749 #if defined(TARGET_NR_statx)
11750     case TARGET_NR_statx:
11751         {
11752             struct target_statx *target_stx;
11753             int dirfd = arg1;
11754             int flags = arg3;
11755 
11756             p = lock_user_string(arg2);
11757             if (p == NULL) {
11758                 return -TARGET_EFAULT;
11759             }
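            /*
             * Prefer the host statx syscall when it is available; if it is
             * missing, or the host kernel reports ENOSYS, fall back to
             * fstatat() and fill in the target_statx fields by hand below.
             */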
11760 #if defined(__NR_statx)
11761             {
11762                 /*
11763                  * It is assumed that struct statx is architecture independent.
11764                  */
11765                 struct target_statx host_stx;
11766                 int mask = arg4;
11767 
11768                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11769                 if (!is_error(ret)) {
11770                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11771                         unlock_user(p, arg2, 0);
11772                         return -TARGET_EFAULT;
11773                     }
11774                 }
11775 
11776                 if (ret != -TARGET_ENOSYS) {
11777                     unlock_user(p, arg2, 0);
11778                     return ret;
11779                 }
11780             }
11781 #endif
11782             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11783             unlock_user(p, arg2, 0);
11784 
11785             if (!is_error(ret)) {
11786                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11787                     return -TARGET_EFAULT;
11788                 }
11789                 memset(target_stx, 0, sizeof(*target_stx));
11790                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11791                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11792                 __put_user(st.st_ino, &target_stx->stx_ino);
11793                 __put_user(st.st_mode, &target_stx->stx_mode);
11794                 __put_user(st.st_uid, &target_stx->stx_uid);
11795                 __put_user(st.st_gid, &target_stx->stx_gid);
11796                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11797                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11798                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11799                 __put_user(st.st_size, &target_stx->stx_size);
11800                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11801                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11802                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11803                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11804                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11805                 unlock_user_struct(target_stx, arg5, 1);
11806             }
11807         }
11808         return ret;
11809 #endif
11810 #ifdef TARGET_NR_lchown
11811     case TARGET_NR_lchown:
11812         if (!(p = lock_user_string(arg1)))
11813             return -TARGET_EFAULT;
11814         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11815         unlock_user(p, arg1, 0);
11816         return ret;
11817 #endif
11818 #ifdef TARGET_NR_getuid
11819     case TARGET_NR_getuid:
11820         return get_errno(high2lowuid(getuid()));
11821 #endif
11822 #ifdef TARGET_NR_getgid
11823     case TARGET_NR_getgid:
11824         return get_errno(high2lowgid(getgid()));
11825 #endif
11826 #ifdef TARGET_NR_geteuid
11827     case TARGET_NR_geteuid:
11828         return get_errno(high2lowuid(geteuid()));
11829 #endif
11830 #ifdef TARGET_NR_getegid
11831     case TARGET_NR_getegid:
11832         return get_errno(high2lowgid(getegid()));
11833 #endif
11834     case TARGET_NR_setreuid:
11835         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11836     case TARGET_NR_setregid:
11837         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11838     case TARGET_NR_getgroups:
11839         { /* the same code as for TARGET_NR_getgroups32 */
11840             int gidsetsize = arg1;
11841             target_id *target_grouplist;
11842             g_autofree gid_t *grouplist = NULL;
11843             int i;
11844 
11845             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11846                 return -TARGET_EINVAL;
11847             }
11848             if (gidsetsize > 0) {
11849                 grouplist = g_try_new(gid_t, gidsetsize);
11850                 if (!grouplist) {
11851                     return -TARGET_ENOMEM;
11852                 }
11853             }
11854             ret = get_errno(getgroups(gidsetsize, grouplist));
11855             if (!is_error(ret) && gidsetsize > 0) {
11856                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11857                                              gidsetsize * sizeof(target_id), 0);
11858                 if (!target_grouplist) {
11859                     return -TARGET_EFAULT;
11860                 }
11861                 for (i = 0; i < ret; i++) {
11862                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11863                 }
11864                 unlock_user(target_grouplist, arg2,
11865                             gidsetsize * sizeof(target_id));
11866             }
11867             return ret;
11868         }
11869     case TARGET_NR_setgroups:
11870         { /* the same code as for TARGET_NR_setgroups32 */
11871             int gidsetsize = arg1;
11872             target_id *target_grouplist;
11873             g_autofree gid_t *grouplist = NULL;
11874             int i;
11875 
11876             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11877                 return -TARGET_EINVAL;
11878             }
11879             if (gidsetsize > 0) {
11880                 grouplist = g_try_new(gid_t, gidsetsize);
11881                 if (!grouplist) {
11882                     return -TARGET_ENOMEM;
11883                 }
11884                 target_grouplist = lock_user(VERIFY_READ, arg2,
11885                                              gidsetsize * sizeof(target_id), 1);
11886                 if (!target_grouplist) {
11887                     return -TARGET_EFAULT;
11888                 }
11889                 for (i = 0; i < gidsetsize; i++) {
11890                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11891                 }
11892                 unlock_user(target_grouplist, arg2,
11893                             gidsetsize * sizeof(target_id));
11894             }
11895             return get_errno(setgroups(gidsetsize, grouplist));
11896         }
11897     case TARGET_NR_fchown:
11898         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11899 #if defined(TARGET_NR_fchownat)
11900     case TARGET_NR_fchownat:
11901         if (!(p = lock_user_string(arg2)))
11902             return -TARGET_EFAULT;
11903         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11904                                  low2highgid(arg4), arg5));
11905         unlock_user(p, arg2, 0);
11906         return ret;
11907 #endif
11908 #ifdef TARGET_NR_setresuid
11909     case TARGET_NR_setresuid:
11910         return get_errno(sys_setresuid(low2highuid(arg1),
11911                                        low2highuid(arg2),
11912                                        low2highuid(arg3)));
11913 #endif
11914 #ifdef TARGET_NR_getresuid
11915     case TARGET_NR_getresuid:
11916         {
11917             uid_t ruid, euid, suid;
11918             ret = get_errno(getresuid(&ruid, &euid, &suid));
11919             if (!is_error(ret)) {
11920                 if (put_user_id(high2lowuid(ruid), arg1)
11921                     || put_user_id(high2lowuid(euid), arg2)
11922                     || put_user_id(high2lowuid(suid), arg3))
11923                     return -TARGET_EFAULT;
11924             }
11925         }
11926         return ret;
11927 #endif
11928 #ifdef TARGET_NR_setresgid
11929     case TARGET_NR_setresgid:
11930         return get_errno(sys_setresgid(low2highgid(arg1),
11931                                        low2highgid(arg2),
11932                                        low2highgid(arg3)));
11933 #endif
11934 #ifdef TARGET_NR_getresgid
11935     case TARGET_NR_getresgid:
11936         {
11937             gid_t rgid, egid, sgid;
11938             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11939             if (!is_error(ret)) {
11940                 if (put_user_id(high2lowgid(rgid), arg1)
11941                     || put_user_id(high2lowgid(egid), arg2)
11942                     || put_user_id(high2lowgid(sgid), arg3))
11943                     return -TARGET_EFAULT;
11944             }
11945         }
11946         return ret;
11947 #endif
11948 #ifdef TARGET_NR_chown
11949     case TARGET_NR_chown:
11950         if (!(p = lock_user_string(arg1)))
11951             return -TARGET_EFAULT;
11952         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11953         unlock_user(p, arg1, 0);
11954         return ret;
11955 #endif
11956     case TARGET_NR_setuid:
11957         return get_errno(sys_setuid(low2highuid(arg1)));
11958     case TARGET_NR_setgid:
11959         return get_errno(sys_setgid(low2highgid(arg1)));
11960     case TARGET_NR_setfsuid:
11961         return get_errno(setfsuid(arg1));
11962     case TARGET_NR_setfsgid:
11963         return get_errno(setfsgid(arg1));
11964 
11965 #ifdef TARGET_NR_lchown32
11966     case TARGET_NR_lchown32:
11967         if (!(p = lock_user_string(arg1)))
11968             return -TARGET_EFAULT;
11969         ret = get_errno(lchown(p, arg2, arg3));
11970         unlock_user(p, arg1, 0);
11971         return ret;
11972 #endif
11973 #ifdef TARGET_NR_getuid32
11974     case TARGET_NR_getuid32:
11975         return get_errno(getuid());
11976 #endif
11977 
11978 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11979    /* Alpha specific */
11980     case TARGET_NR_getxuid:
11981         {
11982             uid_t euid;
11983             euid = geteuid();
11984             cpu_env->ir[IR_A4] = euid;
11985         }
11986         return get_errno(getuid());
11987 #endif
11988 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11989    /* Alpha specific */
11990     case TARGET_NR_getxgid:
11991         {
11992             gid_t egid;
11993             egid = getegid();
11994             cpu_env->ir[IR_A4] = egid;
11995         }
11996         return get_errno(getgid());
11997 #endif
11998 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11999     /* Alpha specific */
12000     case TARGET_NR_osf_getsysinfo:
12001         ret = -TARGET_EOPNOTSUPP;
12002         switch (arg1) {
12003           case TARGET_GSI_IEEE_FP_CONTROL:
12004             {
12005                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12006                 uint64_t swcr = cpu_env->swcr;
12007 
12008                 swcr &= ~SWCR_STATUS_MASK;
12009                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12010 
12011                 if (put_user_u64(swcr, arg2))
12012                     return -TARGET_EFAULT;
12013                 ret = 0;
12014             }
12015             break;
12016 
12017           /* case GSI_IEEE_STATE_AT_SIGNAL:
12018              -- Not implemented in linux kernel.
12019              case GSI_UACPROC:
12020              -- Retrieves current unaligned access state; not much used.
12021              case GSI_PROC_TYPE:
12022              -- Retrieves implver information; surely not used.
12023              case GSI_GET_HWRPB:
12024              -- Grabs a copy of the HWRPB; surely not used.
12025           */
12026         }
12027         return ret;
12028 #endif
12029 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12030     /* Alpha specific */
12031     case TARGET_NR_osf_setsysinfo:
12032         ret = -TARGET_EOPNOTSUPP;
12033         switch (arg1) {
12034           case TARGET_SSI_IEEE_FP_CONTROL:
12035             {
12036                 uint64_t swcr, fpcr;
12037 
12038                 if (get_user_u64 (swcr, arg2)) {
12039                     return -TARGET_EFAULT;
12040                 }
12041 
12042                 /*
12043                  * The kernel calls swcr_update_status to update the
12044                  * status bits from the fpcr at every point that it
12045                  * could be queried.  Therefore, we store the status
12046                  * bits only in FPCR.
12047                  */
12048                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12049 
12050                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12051                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12052                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12053                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12054                 ret = 0;
12055             }
12056             break;
12057 
12058           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12059             {
12060                 uint64_t exc, fpcr, fex;
12061 
12062                 if (get_user_u64(exc, arg2)) {
12063                     return -TARGET_EFAULT;
12064                 }
12065                 exc &= SWCR_STATUS_MASK;
12066                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12067 
12068                 /* Old exceptions are not signaled.  */
12069                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12070                 fex = exc & ~fex;
12071                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12072                 fex &= (cpu_env)->swcr;
12073 
12074                 /* Update the hardware fpcr.  */
12075                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12076                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12077 
12078                 if (fex) {
12079                     int si_code = TARGET_FPE_FLTUNK;
12080                     target_siginfo_t info;
12081 
12082                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12083                         si_code = TARGET_FPE_FLTUND;
12084                     }
12085                     if (fex & SWCR_TRAP_ENABLE_INE) {
12086                         si_code = TARGET_FPE_FLTRES;
12087                     }
12088                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12089                         si_code = TARGET_FPE_FLTUND;
12090                     }
12091                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12092                         si_code = TARGET_FPE_FLTOVF;
12093                     }
12094                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12095                         si_code = TARGET_FPE_FLTDIV;
12096                     }
12097                     if (fex & SWCR_TRAP_ENABLE_INV) {
12098                         si_code = TARGET_FPE_FLTINV;
12099                     }
12100 
12101                     info.si_signo = SIGFPE;
12102                     info.si_errno = 0;
12103                     info.si_code = si_code;
12104                     info._sifields._sigfault._addr = (cpu_env)->pc;
12105                     queue_signal(cpu_env, info.si_signo,
12106                                  QEMU_SI_FAULT, &info);
12107                 }
12108                 ret = 0;
12109             }
12110             break;
12111 
12112           /* case SSI_NVPAIRS:
12113              -- Used with SSIN_UACPROC to enable unaligned accesses.
12114              case SSI_IEEE_STATE_AT_SIGNAL:
12115              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12116              -- Not implemented in linux kernel
12117           */
12118         }
12119         return ret;
12120 #endif
12121 #ifdef TARGET_NR_osf_sigprocmask
12122     /* Alpha specific.  */
12123     case TARGET_NR_osf_sigprocmask:
12124         {
12125             abi_ulong mask;
12126             int how;
12127             sigset_t set, oldset;
12128 
12129             switch (arg1) {
12130             case TARGET_SIG_BLOCK:
12131                 how = SIG_BLOCK;
12132                 break;
12133             case TARGET_SIG_UNBLOCK:
12134                 how = SIG_UNBLOCK;
12135                 break;
12136             case TARGET_SIG_SETMASK:
12137                 how = SIG_SETMASK;
12138                 break;
12139             default:
12140                 return -TARGET_EINVAL;
12141             }
12142             mask = arg2;
12143             target_to_host_old_sigset(&set, &mask);
12144             ret = do_sigprocmask(how, &set, &oldset);
12145             if (!ret) {
12146                 host_to_target_old_sigset(&mask, &oldset);
12147                 ret = mask;
12148             }
12149         }
12150         return ret;
12151 #endif
12152 
12153 #ifdef TARGET_NR_getgid32
12154     case TARGET_NR_getgid32:
12155         return get_errno(getgid());
12156 #endif
12157 #ifdef TARGET_NR_geteuid32
12158     case TARGET_NR_geteuid32:
12159         return get_errno(geteuid());
12160 #endif
12161 #ifdef TARGET_NR_getegid32
12162     case TARGET_NR_getegid32:
12163         return get_errno(getegid());
12164 #endif
12165 #ifdef TARGET_NR_setreuid32
12166     case TARGET_NR_setreuid32:
12167         return get_errno(setreuid(arg1, arg2));
12168 #endif
12169 #ifdef TARGET_NR_setregid32
12170     case TARGET_NR_setregid32:
12171         return get_errno(setregid(arg1, arg2));
12172 #endif
12173 #ifdef TARGET_NR_getgroups32
12174     case TARGET_NR_getgroups32:
12175         { /* the same code as for TARGET_NR_getgroups */
12176             int gidsetsize = arg1;
12177             uint32_t *target_grouplist;
12178             g_autofree gid_t *grouplist = NULL;
12179             int i;
12180 
12181             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12182                 return -TARGET_EINVAL;
12183             }
12184             if (gidsetsize > 0) {
12185                 grouplist = g_try_new(gid_t, gidsetsize);
12186                 if (!grouplist) {
12187                     return -TARGET_ENOMEM;
12188                 }
12189             }
12190             ret = get_errno(getgroups(gidsetsize, grouplist));
12191             if (!is_error(ret) && gidsetsize > 0) {
12192                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12193                                              gidsetsize * 4, 0);
12194                 if (!target_grouplist) {
12195                     return -TARGET_EFAULT;
12196                 }
12197                 for (i = 0; i < ret; i++) {
12198                     target_grouplist[i] = tswap32(grouplist[i]);
12199                 }
12200                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12201             }
12202             return ret;
12203         }
12204 #endif
12205 #ifdef TARGET_NR_setgroups32
12206     case TARGET_NR_setgroups32:
12207         { /* the same code as for TARGET_NR_setgroups */
12208             int gidsetsize = arg1;
12209             uint32_t *target_grouplist;
12210             g_autofree gid_t *grouplist = NULL;
12211             int i;
12212 
12213             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12214                 return -TARGET_EINVAL;
12215             }
12216             if (gidsetsize > 0) {
12217                 grouplist = g_try_new(gid_t, gidsetsize);
12218                 if (!grouplist) {
12219                     return -TARGET_ENOMEM;
12220                 }
12221                 target_grouplist = lock_user(VERIFY_READ, arg2,
12222                                              gidsetsize * 4, 1);
12223                 if (!target_grouplist) {
12224                     return -TARGET_EFAULT;
12225                 }
12226                 for (i = 0; i < gidsetsize; i++) {
12227                     grouplist[i] = tswap32(target_grouplist[i]);
12228                 }
12229                 unlock_user(target_grouplist, arg2, 0);
12230             }
12231             return get_errno(setgroups(gidsetsize, grouplist));
12232         }
12233 #endif
12234 #ifdef TARGET_NR_fchown32
12235     case TARGET_NR_fchown32:
12236         return get_errno(fchown(arg1, arg2, arg3));
12237 #endif
12238 #ifdef TARGET_NR_setresuid32
12239     case TARGET_NR_setresuid32:
12240         return get_errno(sys_setresuid(arg1, arg2, arg3));
12241 #endif
12242 #ifdef TARGET_NR_getresuid32
12243     case TARGET_NR_getresuid32:
12244         {
12245             uid_t ruid, euid, suid;
12246             ret = get_errno(getresuid(&ruid, &euid, &suid));
12247             if (!is_error(ret)) {
12248                 if (put_user_u32(ruid, arg1)
12249                     || put_user_u32(euid, arg2)
12250                     || put_user_u32(suid, arg3))
12251                     return -TARGET_EFAULT;
12252             }
12253         }
12254         return ret;
12255 #endif
12256 #ifdef TARGET_NR_setresgid32
12257     case TARGET_NR_setresgid32:
12258         return get_errno(sys_setresgid(arg1, arg2, arg3));
12259 #endif
12260 #ifdef TARGET_NR_getresgid32
12261     case TARGET_NR_getresgid32:
12262         {
12263             gid_t rgid, egid, sgid;
12264             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12265             if (!is_error(ret)) {
12266                 if (put_user_u32(rgid, arg1)
12267                     || put_user_u32(egid, arg2)
12268                     || put_user_u32(sgid, arg3))
12269                     return -TARGET_EFAULT;
12270             }
12271         }
12272         return ret;
12273 #endif
12274 #ifdef TARGET_NR_chown32
12275     case TARGET_NR_chown32:
12276         if (!(p = lock_user_string(arg1)))
12277             return -TARGET_EFAULT;
12278         ret = get_errno(chown(p, arg2, arg3));
12279         unlock_user(p, arg1, 0);
12280         return ret;
12281 #endif
12282 #ifdef TARGET_NR_setuid32
12283     case TARGET_NR_setuid32:
12284         return get_errno(sys_setuid(arg1));
12285 #endif
12286 #ifdef TARGET_NR_setgid32
12287     case TARGET_NR_setgid32:
12288         return get_errno(sys_setgid(arg1));
12289 #endif
12290 #ifdef TARGET_NR_setfsuid32
12291     case TARGET_NR_setfsuid32:
12292         return get_errno(setfsuid(arg1));
12293 #endif
12294 #ifdef TARGET_NR_setfsgid32
12295     case TARGET_NR_setfsgid32:
12296         return get_errno(setfsgid(arg1));
12297 #endif
12298 #ifdef TARGET_NR_mincore
12299     case TARGET_NR_mincore:
12300         {
12301             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12302             if (!a) {
12303                 return -TARGET_ENOMEM;
12304             }
12305             p = lock_user_string(arg3);
12306             if (!p) {
12307                 ret = -TARGET_EFAULT;
12308             } else {
12309                 ret = get_errno(mincore(a, arg2, p));
12310                 unlock_user(p, arg3, ret);
12311             }
12312             unlock_user(a, arg1, 0);
12313         }
12314         return ret;
12315 #endif
12316 #ifdef TARGET_NR_arm_fadvise64_64
12317     case TARGET_NR_arm_fadvise64_64:
12318         /* arm_fadvise64_64 looks like fadvise64_64 but
12319          * with different argument order: fd, advice, offset, len
12320          * rather than the usual fd, offset, len, advice.
12321          * Note that offset and len are both 64-bit so appear as
12322          * pairs of 32-bit registers.
12323          */
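        /*
         * For illustration, assuming the usual 32-bit helper: a 64-bit
         * offset passed in the register pair (arg3, arg4) is reassembled
         * by target_offset64(), which chooses which half is the high word
         * according to the guest's endianness.
         */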
12324         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12325                             target_offset64(arg5, arg6), arg2);
12326         return -host_to_target_errno(ret);
12327 #endif
12328 
12329 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12330 
12331 #ifdef TARGET_NR_fadvise64_64
12332     case TARGET_NR_fadvise64_64:
12333 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12334         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12335         ret = arg2;
12336         arg2 = arg3;
12337         arg3 = arg4;
12338         arg4 = arg5;
12339         arg5 = arg6;
12340         arg6 = ret;
12341 #else
12342         /* 6 args: fd, offset (high, low), len (high, low), advice */
12343         if (regpairs_aligned(cpu_env, num)) {
12344             /* offset is in (3,4), len in (5,6) and advice in 7 */
12345             arg2 = arg3;
12346             arg3 = arg4;
12347             arg4 = arg5;
12348             arg5 = arg6;
12349             arg6 = arg7;
12350         }
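        /*
         * regpairs_aligned() is true for ABIs that pass 64-bit values in
         * aligned (even/odd) register pairs; those ABIs insert an unused
         * padding register before the first 64-bit argument, which is why
         * every argument shifts up by one above.
         */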
12351 #endif
12352         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12353                             target_offset64(arg4, arg5), arg6);
12354         return -host_to_target_errno(ret);
12355 #endif
12356 
12357 #ifdef TARGET_NR_fadvise64
12358     case TARGET_NR_fadvise64:
12359         /* 5 args: fd, offset (high, low), len, advice */
12360         if (regpairs_aligned(cpu_env, num)) {
12361             /* offset is in (3,4), len in 5 and advice in 6 */
12362             arg2 = arg3;
12363             arg3 = arg4;
12364             arg4 = arg5;
12365             arg5 = arg6;
12366         }
12367         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12368         return -host_to_target_errno(ret);
12369 #endif
12370 
12371 #else /* not a 32-bit ABI */
12372 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12373 #ifdef TARGET_NR_fadvise64_64
12374     case TARGET_NR_fadvise64_64:
12375 #endif
12376 #ifdef TARGET_NR_fadvise64
12377     case TARGET_NR_fadvise64:
12378 #endif
12379 #ifdef TARGET_S390X
12380         switch (arg4) {
12381         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12382         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12383         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12384         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12385         default: break;
12386         }
12387 #endif
12388         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12389 #endif
12390 #endif /* end of 64-bit ABI fadvise handling */
12391 
12392 #ifdef TARGET_NR_madvise
12393     case TARGET_NR_madvise:
12394         return target_madvise(arg1, arg2, arg3);
12395 #endif
12396 #ifdef TARGET_NR_fcntl64
12397     case TARGET_NR_fcntl64:
12398     {
12399         int cmd;
12400         struct flock64 fl;
12401         from_flock64_fn *copyfrom = copy_from_user_flock64;
12402         to_flock64_fn *copyto = copy_to_user_flock64;
12403 
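        /*
         * The legacy ARM OABI aligns 64-bit members to 4 bytes, so its
         * struct flock64 lacks the padding word that EABI inserts before
         * l_start; hence the separate OABI copy helpers below.
         */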
12404 #ifdef TARGET_ARM
12405         if (!cpu_env->eabi) {
12406             copyfrom = copy_from_user_oabi_flock64;
12407             copyto = copy_to_user_oabi_flock64;
12408         }
12409 #endif
12410 
12411         cmd = target_to_host_fcntl_cmd(arg2);
12412         if (cmd == -TARGET_EINVAL) {
12413             return cmd;
12414         }
12415 
12416         switch (arg2) {
12417         case TARGET_F_GETLK64:
12418             ret = copyfrom(&fl, arg3);
12419             if (ret) {
12420                 break;
12421             }
12422             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12423             if (ret == 0) {
12424                 ret = copyto(arg3, &fl);
12425             }
12426             break;
12427 
12428         case TARGET_F_SETLK64:
12429         case TARGET_F_SETLKW64:
12430             ret = copyfrom(&fl, arg3);
12431             if (ret) {
12432                 break;
12433             }
12434             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12435             break;
12436         default:
12437             ret = do_fcntl(arg1, arg2, arg3);
12438             break;
12439         }
12440         return ret;
12441     }
12442 #endif
12443 #ifdef TARGET_NR_cacheflush
12444     case TARGET_NR_cacheflush:
12445         /* self-modifying code is handled automatically, so nothing needed */
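        /*
         * TCG already watches writes to pages containing translated code
         * and invalidates the affected translation blocks, so an explicit
         * flush would be redundant here.
         */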
12446         return 0;
12447 #endif
12448 #ifdef TARGET_NR_getpagesize
12449     case TARGET_NR_getpagesize:
12450         return TARGET_PAGE_SIZE;
12451 #endif
12452     case TARGET_NR_gettid:
12453         return get_errno(sys_gettid());
12454 #ifdef TARGET_NR_readahead
12455     case TARGET_NR_readahead:
12456 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12457         if (regpairs_aligned(cpu_env, num)) {
12458             arg2 = arg3;
12459             arg3 = arg4;
12460             arg4 = arg5;
12461         }
12462         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12463 #else
12464         ret = get_errno(readahead(arg1, arg2, arg3));
12465 #endif
12466         return ret;
12467 #endif
12468 #ifdef CONFIG_ATTR
12469 #ifdef TARGET_NR_setxattr
12470     case TARGET_NR_listxattr:
12471     case TARGET_NR_llistxattr:
12472     {
12473         void *b = 0;
12474         if (arg2) {
12475             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12476             if (!b) {
12477                 return -TARGET_EFAULT;
12478             }
12479         }
12480         p = lock_user_string(arg1);
12481         if (p) {
12482             if (num == TARGET_NR_listxattr) {
12483                 ret = get_errno(listxattr(p, b, arg3));
12484             } else {
12485                 ret = get_errno(llistxattr(p, b, arg3));
12486             }
12487         } else {
12488             ret = -TARGET_EFAULT;
12489         }
12490         unlock_user(p, arg1, 0);
12491         unlock_user(b, arg2, arg3);
12492         return ret;
12493     }
12494     case TARGET_NR_flistxattr:
12495     {
12496         void *b = 0;
12497         if (arg2) {
12498             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12499             if (!b) {
12500                 return -TARGET_EFAULT;
12501             }
12502         }
12503         ret = get_errno(flistxattr(arg1, b, arg3));
12504         unlock_user(b, arg2, arg3);
12505         return ret;
12506     }
12507     case TARGET_NR_setxattr:
12508     case TARGET_NR_lsetxattr:
12509         {
12510             void *n, *v = 0;
12511             if (arg3) {
12512                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12513                 if (!v) {
12514                     return -TARGET_EFAULT;
12515                 }
12516             }
12517             p = lock_user_string(arg1);
12518             n = lock_user_string(arg2);
12519             if (p && n) {
12520                 if (num == TARGET_NR_setxattr) {
12521                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12522                 } else {
12523                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12524                 }
12525             } else {
12526                 ret = -TARGET_EFAULT;
12527             }
12528             unlock_user(p, arg1, 0);
12529             unlock_user(n, arg2, 0);
12530             unlock_user(v, arg3, 0);
12531         }
12532         return ret;
12533     case TARGET_NR_fsetxattr:
12534         {
12535             void *n, *v = 0;
12536             if (arg3) {
12537                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12538                 if (!v) {
12539                     return -TARGET_EFAULT;
12540                 }
12541             }
12542             n = lock_user_string(arg2);
12543             if (n) {
12544                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12545             } else {
12546                 ret = -TARGET_EFAULT;
12547             }
12548             unlock_user(n, arg2, 0);
12549             unlock_user(v, arg3, 0);
12550         }
12551         return ret;
12552     case TARGET_NR_getxattr:
12553     case TARGET_NR_lgetxattr:
12554         {
12555             void *n, *v = 0;
12556             if (arg3) {
12557                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12558                 if (!v) {
12559                     return -TARGET_EFAULT;
12560                 }
12561             }
12562             p = lock_user_string(arg1);
12563             n = lock_user_string(arg2);
12564             if (p && n) {
12565                 if (num == TARGET_NR_getxattr) {
12566                     ret = get_errno(getxattr(p, n, v, arg4));
12567                 } else {
12568                     ret = get_errno(lgetxattr(p, n, v, arg4));
12569                 }
12570             } else {
12571                 ret = -TARGET_EFAULT;
12572             }
12573             unlock_user(p, arg1, 0);
12574             unlock_user(n, arg2, 0);
12575             unlock_user(v, arg3, arg4);
12576         }
12577         return ret;
12578     case TARGET_NR_fgetxattr:
12579         {
12580             void *n, *v = 0;
12581             if (arg3) {
12582                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12583                 if (!v) {
12584                     return -TARGET_EFAULT;
12585                 }
12586             }
12587             n = lock_user_string(arg2);
12588             if (n) {
12589                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12590             } else {
12591                 ret = -TARGET_EFAULT;
12592             }
12593             unlock_user(n, arg2, 0);
12594             unlock_user(v, arg3, arg4);
12595         }
12596         return ret;
12597     case TARGET_NR_removexattr:
12598     case TARGET_NR_lremovexattr:
12599         {
12600             void *n;
12601             p = lock_user_string(arg1);
12602             n = lock_user_string(arg2);
12603             if (p && n) {
12604                 if (num == TARGET_NR_removexattr) {
12605                     ret = get_errno(removexattr(p, n));
12606                 } else {
12607                     ret = get_errno(lremovexattr(p, n));
12608                 }
12609             } else {
12610                 ret = -TARGET_EFAULT;
12611             }
12612             unlock_user(p, arg1, 0);
12613             unlock_user(n, arg2, 0);
12614         }
12615         return ret;
12616     case TARGET_NR_fremovexattr:
12617         {
12618             void *n;
12619             n = lock_user_string(arg2);
12620             if (n) {
12621                 ret = get_errno(fremovexattr(arg1, n));
12622             } else {
12623                 ret = -TARGET_EFAULT;
12624             }
12625             unlock_user(n, arg2, 0);
12626         }
12627         return ret;
12628 #endif
12629 #endif /* CONFIG_ATTR */
12630 #ifdef TARGET_NR_set_thread_area
12631     case TARGET_NR_set_thread_area:
12632 #if defined(TARGET_MIPS)
12633       cpu_env->active_tc.CP0_UserLocal = arg1;
12634       return 0;
12635 #elif defined(TARGET_CRIS)
12636       if (arg1 & 0xff)
12637           ret = -TARGET_EINVAL;
12638       else {
12639           cpu_env->pregs[PR_PID] = arg1;
12640           ret = 0;
12641       }
12642       return ret;
12643 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12644       return do_set_thread_area(cpu_env, arg1);
12645 #elif defined(TARGET_M68K)
12646       {
12647           TaskState *ts = get_task_state(cpu);
12648           ts->tp_value = arg1;
12649           return 0;
12650       }
12651 #else
12652       return -TARGET_ENOSYS;
12653 #endif
12654 #endif
12655 #ifdef TARGET_NR_get_thread_area
12656     case TARGET_NR_get_thread_area:
12657 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12658         return do_get_thread_area(cpu_env, arg1);
12659 #elif defined(TARGET_M68K)
12660         {
12661             TaskState *ts = get_task_state(cpu);
12662             return ts->tp_value;
12663         }
12664 #else
12665         return -TARGET_ENOSYS;
12666 #endif
12667 #endif
12668 #ifdef TARGET_NR_getdomainname
12669     case TARGET_NR_getdomainname:
12670         return -TARGET_ENOSYS;
12671 #endif
12672 
12673 #ifdef TARGET_NR_clock_settime
12674     case TARGET_NR_clock_settime:
12675     {
12676         struct timespec ts;
12677 
12678         ret = target_to_host_timespec(&ts, arg2);
12679         if (!is_error(ret)) {
12680             ret = get_errno(clock_settime(arg1, &ts));
12681         }
12682         return ret;
12683     }
12684 #endif
12685 #ifdef TARGET_NR_clock_settime64
12686     case TARGET_NR_clock_settime64:
12687     {
12688         struct timespec ts;
12689 
12690         ret = target_to_host_timespec64(&ts, arg2);
12691         if (!is_error(ret)) {
12692             ret = get_errno(clock_settime(arg1, &ts));
12693         }
12694         return ret;
12695     }
12696 #endif
12697 #ifdef TARGET_NR_clock_gettime
12698     case TARGET_NR_clock_gettime:
12699     {
12700         struct timespec ts;
12701         ret = get_errno(clock_gettime(arg1, &ts));
12702         if (!is_error(ret)) {
12703             ret = host_to_target_timespec(arg2, &ts);
12704         }
12705         return ret;
12706     }
12707 #endif
12708 #ifdef TARGET_NR_clock_gettime64
12709     case TARGET_NR_clock_gettime64:
12710     {
12711         struct timespec ts;
12712         ret = get_errno(clock_gettime(arg1, &ts));
12713         if (!is_error(ret)) {
12714             ret = host_to_target_timespec64(arg2, &ts);
12715         }
12716         return ret;
12717     }
12718 #endif
12719 #ifdef TARGET_NR_clock_getres
12720     case TARGET_NR_clock_getres:
12721     {
12722         struct timespec ts;
12723         ret = get_errno(clock_getres(arg1, &ts));
12724         if (!is_error(ret)) {
12725             host_to_target_timespec(arg2, &ts);
12726         }
12727         return ret;
12728     }
12729 #endif
12730 #ifdef TARGET_NR_clock_getres_time64
12731     case TARGET_NR_clock_getres_time64:
12732     {
12733         struct timespec ts;
12734         ret = get_errno(clock_getres(arg1, &ts));
12735         if (!is_error(ret)) {
12736             host_to_target_timespec64(arg2, &ts);
12737         }
12738         return ret;
12739     }
12740 #endif
12741 #ifdef TARGET_NR_clock_nanosleep
12742     case TARGET_NR_clock_nanosleep:
12743     {
12744         struct timespec ts;
12745         if (target_to_host_timespec(&ts, arg3)) {
12746             return -TARGET_EFAULT;
12747         }
12748         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12749                                              &ts, arg4 ? &ts : NULL));
12750         /*
12751          * If the call is interrupted by a signal handler, it fails with
12752          * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is not
12753          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12754          */
12755         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12756             host_to_target_timespec(arg4, &ts)) {
12757               return -TARGET_EFAULT;
12758         }
12759 
12760         return ret;
12761     }
12762 #endif
12763 #ifdef TARGET_NR_clock_nanosleep_time64
12764     case TARGET_NR_clock_nanosleep_time64:
12765     {
12766         struct timespec ts;
12767 
12768         if (target_to_host_timespec64(&ts, arg3)) {
12769             return -TARGET_EFAULT;
12770         }
12771 
12772         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12773                                              &ts, arg4 ? &ts : NULL));
12774 
12775         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12776             host_to_target_timespec64(arg4, &ts)) {
12777             return -TARGET_EFAULT;
12778         }
12779         return ret;
12780     }
12781 #endif
12782 
12783 #if defined(TARGET_NR_set_tid_address)
12784     case TARGET_NR_set_tid_address:
12785     {
12786         TaskState *ts = get_task_state(cpu);
12787         ts->child_tidptr = arg1;
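        /*
         * The recorded pointer is consumed on thread exit, where the word
         * it addresses is cleared and futex-woken, mirroring the kernel's
         * CLONE_CHILD_CLEARTID behaviour.
         */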
12788         /* Do not call the host set_tid_address() syscall; just return our tid. */
12789         return get_errno(sys_gettid());
12790     }
12791 #endif
12792 
12793     case TARGET_NR_tkill:
12794         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12795 
12796     case TARGET_NR_tgkill:
12797         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12798                          target_to_host_signal(arg3)));
12799 
12800 #ifdef TARGET_NR_set_robust_list
12801     case TARGET_NR_set_robust_list:
12802     case TARGET_NR_get_robust_list:
12803         /* The ABI for supporting robust futexes has userspace pass
12804          * the kernel a pointer to a linked list which is updated by
12805          * userspace after the syscall; the list is walked by the kernel
12806          * when the thread exits. Since the linked list in QEMU guest
12807          * memory isn't a valid linked list for the host and we have
12808          * no way to reliably intercept the thread-death event, we can't
12809          * support these. Silently return ENOSYS so that guest userspace
12810          * falls back to a non-robust futex implementation (which should
12811          * be OK except in the corner case of the guest crashing while
12812          * holding a mutex that is shared with another process via
12813          * shared memory).
12814          */
12815         return -TARGET_ENOSYS;
12816 #endif
12817 
12818 #if defined(TARGET_NR_utimensat)
12819     case TARGET_NR_utimensat:
12820         {
12821             struct timespec *tsp, ts[2];
12822             if (!arg3) {
12823                 tsp = NULL;
12824             } else {
12825                 if (target_to_host_timespec(ts, arg3)) {
12826                     return -TARGET_EFAULT;
12827                 }
12828                 if (target_to_host_timespec(ts + 1, arg3 +
12829                                             sizeof(struct target_timespec))) {
12830                     return -TARGET_EFAULT;
12831                 }
12832                 tsp = ts;
12833             }
12834             if (!arg2) {
12835                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12836             } else {
12837                 if (!(p = lock_user_string(arg2))) {
12838                     return -TARGET_EFAULT;
12839                 }
12840                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12841                 unlock_user(p, arg2, 0);
12842             }
12843         }
12844         return ret;
12845 #endif
12846 #ifdef TARGET_NR_utimensat_time64
12847     case TARGET_NR_utimensat_time64:
12848         {
12849             struct timespec *tsp, ts[2];
12850             if (!arg3) {
12851                 tsp = NULL;
12852             } else {
12853                 if (target_to_host_timespec64(ts, arg3)) {
12854                     return -TARGET_EFAULT;
12855                 }
12856                 if (target_to_host_timespec64(ts + 1, arg3 +
12857                                      sizeof(struct target__kernel_timespec))) {
12858                     return -TARGET_EFAULT;
12859                 }
12860                 tsp = ts;
12861             }
12862             if (!arg2) {
12863                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12864             } else {
12865                 p = lock_user_string(arg2);
12866                 if (!p) {
12867                     return -TARGET_EFAULT;
12868                 }
12869                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12870                 unlock_user(p, arg2, 0);
12871             }
12872         }
12873         return ret;
12874 #endif
12875 #ifdef TARGET_NR_futex
12876     case TARGET_NR_futex:
12877         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12878 #endif
12879 #ifdef TARGET_NR_futex_time64
12880     case TARGET_NR_futex_time64:
12881         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12882 #endif
12883 #ifdef CONFIG_INOTIFY
12884 #if defined(TARGET_NR_inotify_init)
12885     case TARGET_NR_inotify_init:
12886         ret = get_errno(inotify_init());
12887         if (ret >= 0) {
12888             fd_trans_register(ret, &target_inotify_trans);
12889         }
12890         return ret;
12891 #endif
12892 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12893     case TARGET_NR_inotify_init1:
12894         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12895                                           fcntl_flags_tbl)));
12896         if (ret >= 0) {
12897             fd_trans_register(ret, &target_inotify_trans);
12898         }
12899         return ret;
12900 #endif
12901 #if defined(TARGET_NR_inotify_add_watch)
12902     case TARGET_NR_inotify_add_watch:
12903         p = lock_user_string(arg2);
12904         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12905         unlock_user(p, arg2, 0);
12906         return ret;
12907 #endif
12908 #if defined(TARGET_NR_inotify_rm_watch)
12909     case TARGET_NR_inotify_rm_watch:
12910         return get_errno(inotify_rm_watch(arg1, arg2));
12911 #endif
12912 #endif
12913 
12914 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12915     case TARGET_NR_mq_open:
12916         {
12917             struct mq_attr posix_mq_attr;
12918             struct mq_attr *pposix_mq_attr;
12919             int host_flags;
12920 
12921             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12922             pposix_mq_attr = NULL;
12923             if (arg4) {
12924                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12925                     return -TARGET_EFAULT;
12926                 }
12927                 pposix_mq_attr = &posix_mq_attr;
12928             }
12929             p = lock_user_string(arg1 - 1);
12930             if (!p) {
12931                 return -TARGET_EFAULT;
12932             }
12933             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12934             unlock_user (p, arg1, 0);
12935         }
12936         return ret;
12937 
12938     case TARGET_NR_mq_unlink:
12939         p = lock_user_string(arg1 - 1);
12940         if (!p) {
12941             return -TARGET_EFAULT;
12942         }
12943         ret = get_errno(mq_unlink(p));
12944         unlock_user (p, arg1, 0);
12945         return ret;
12946 
12947 #ifdef TARGET_NR_mq_timedsend
12948     case TARGET_NR_mq_timedsend:
12949         {
12950             struct timespec ts;
12951 
12952             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12953             if (arg5 != 0) {
12954                 if (target_to_host_timespec(&ts, arg5)) {
12955                     return -TARGET_EFAULT;
12956                 }
12957                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12958                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12959                     return -TARGET_EFAULT;
12960                 }
12961             } else {
12962                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12963             }
12964             unlock_user (p, arg2, arg3);
12965         }
12966         return ret;
12967 #endif
12968 #ifdef TARGET_NR_mq_timedsend_time64
12969     case TARGET_NR_mq_timedsend_time64:
12970         {
12971             struct timespec ts;
12972 
12973             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12974             if (arg5 != 0) {
12975                 if (target_to_host_timespec64(&ts, arg5)) {
12976                     return -TARGET_EFAULT;
12977                 }
12978                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12979                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12980                     return -TARGET_EFAULT;
12981                 }
12982             } else {
12983                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12984             }
12985             unlock_user(p, arg2, arg3);
12986         }
12987         return ret;
12988 #endif
12989 
12990 #ifdef TARGET_NR_mq_timedreceive
12991     case TARGET_NR_mq_timedreceive:
12992         {
12993             struct timespec ts;
12994             unsigned int prio;
12995 
12996             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12997             if (arg5 != 0) {
12998                 if (target_to_host_timespec(&ts, arg5)) {
12999                     return -TARGET_EFAULT;
13000                 }
13001                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13002                                                      &prio, &ts));
13003                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13004                     return -TARGET_EFAULT;
13005                 }
13006             } else {
13007                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13008                                                      &prio, NULL));
13009             }
13010             unlock_user (p, arg2, arg3);
13011             if (arg4 != 0)
13012                 put_user_u32(prio, arg4);
13013         }
13014         return ret;
13015 #endif
13016 #ifdef TARGET_NR_mq_timedreceive_time64
13017     case TARGET_NR_mq_timedreceive_time64:
13018         {
13019             struct timespec ts;
13020             unsigned int prio;
13021 
13022             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13023             if (arg5 != 0) {
13024                 if (target_to_host_timespec64(&ts, arg5)) {
13025                     return -TARGET_EFAULT;
13026                 }
13027                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13028                                                      &prio, &ts));
13029                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13030                     return -TARGET_EFAULT;
13031                 }
13032             } else {
13033                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13034                                                      &prio, NULL));
13035             }
13036             unlock_user(p, arg2, arg3);
13037             if (arg4 != 0) {
13038                 put_user_u32(prio, arg4);
13039             }
13040         }
13041         return ret;
13042 #endif
13043 
13044     /* Not implemented for now... */
13045 /*     case TARGET_NR_mq_notify: */
13046 /*         break; */
13047 
13048     case TARGET_NR_mq_getsetattr:
13049         {
13050             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13051             ret = 0;
13052             if (arg2 != 0) {
13053                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13054                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13055                                            &posix_mq_attr_out));
13056             } else if (arg3 != 0) {
13057                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13058             }
13059             if (ret == 0 && arg3 != 0) {
13060                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13061             }
13062         }
13063         return ret;
13064 #endif
13065 
13066 #ifdef CONFIG_SPLICE
13067 #ifdef TARGET_NR_tee
13068     case TARGET_NR_tee:
13069         {
13070             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13071         }
13072         return ret;
13073 #endif
13074 #ifdef TARGET_NR_splice
13075     case TARGET_NR_splice:
13076         {
13077             loff_t loff_in, loff_out;
13078             loff_t *ploff_in = NULL, *ploff_out = NULL;
13079             if (arg2) {
13080                 if (get_user_u64(loff_in, arg2)) {
13081                     return -TARGET_EFAULT;
13082                 }
13083                 ploff_in = &loff_in;
13084             }
13085             if (arg4) {
13086                 if (get_user_u64(loff_out, arg4)) {
13087                     return -TARGET_EFAULT;
13088                 }
13089                 ploff_out = &loff_out;
13090             }
13091             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13092             if (arg2) {
13093                 if (put_user_u64(loff_in, arg2)) {
13094                     return -TARGET_EFAULT;
13095                 }
13096             }
13097             if (arg4) {
13098                 if (put_user_u64(loff_out, arg4)) {
13099                     return -TARGET_EFAULT;
13100                 }
13101             }
13102         }
13103         return ret;
13104 #endif
13105 #ifdef TARGET_NR_vmsplice
13106     case TARGET_NR_vmsplice:
13107         {
13108             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13109             if (vec != NULL) {
13110                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13111                 unlock_iovec(vec, arg2, arg3, 0);
13112             } else {
13113                 ret = -host_to_target_errno(errno);
13114             }
13115         }
13116         return ret;
13117 #endif
13118 #endif /* CONFIG_SPLICE */
13119 #ifdef CONFIG_EVENTFD
13120 #if defined(TARGET_NR_eventfd)
13121     case TARGET_NR_eventfd:
13122         ret = get_errno(eventfd(arg1, 0));
13123         if (ret >= 0) {
13124             fd_trans_register(ret, &target_eventfd_trans);
13125         }
13126         return ret;
13127 #endif
13128 #if defined(TARGET_NR_eventfd2)
13129     case TARGET_NR_eventfd2:
13130     {
13131         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13132         if (arg2 & TARGET_O_NONBLOCK) {
13133             host_flags |= O_NONBLOCK;
13134         }
13135         if (arg2 & TARGET_O_CLOEXEC) {
13136             host_flags |= O_CLOEXEC;
13137         }
13138         ret = get_errno(eventfd(arg1, host_flags));
13139         if (ret >= 0) {
13140             fd_trans_register(ret, &target_eventfd_trans);
13141         }
13142         return ret;
13143     }
13144 #endif
13145 #endif /* CONFIG_EVENTFD  */
13146 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13147     case TARGET_NR_fallocate:
13148 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13149         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13150                                   target_offset64(arg5, arg6)));
13151 #else
13152         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13153 #endif
13154         return ret;
13155 #endif
13156 #if defined(CONFIG_SYNC_FILE_RANGE)
13157 #if defined(TARGET_NR_sync_file_range)
13158     case TARGET_NR_sync_file_range:
13159 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13160 #if defined(TARGET_MIPS)
13161         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13162                                         target_offset64(arg5, arg6), arg7));
13163 #else
13164         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13165                                         target_offset64(arg4, arg5), arg6));
13166 #endif /* !TARGET_MIPS */
13167 #else
13168         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13169 #endif
13170         return ret;
13171 #endif
13172 #if defined(TARGET_NR_sync_file_range2) || \
13173     defined(TARGET_NR_arm_sync_file_range)
13174 #if defined(TARGET_NR_sync_file_range2)
13175     case TARGET_NR_sync_file_range2:
13176 #endif
13177 #if defined(TARGET_NR_arm_sync_file_range)
13178     case TARGET_NR_arm_sync_file_range:
13179 #endif
13180         /* This is like sync_file_range but the arguments are reordered */
13181 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13182         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13183                                         target_offset64(arg5, arg6), arg2));
13184 #else
13185         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13186 #endif
13187         return ret;
13188 #endif
13189 #endif
13190 #if defined(TARGET_NR_signalfd4)
13191     case TARGET_NR_signalfd4:
13192         return do_signalfd4(arg1, arg2, arg4);
13193 #endif
13194 #if defined(TARGET_NR_signalfd)
13195     case TARGET_NR_signalfd:
13196         return do_signalfd4(arg1, arg2, 0);
13197 #endif
13198 #if defined(CONFIG_EPOLL)
13199 #if defined(TARGET_NR_epoll_create)
13200     case TARGET_NR_epoll_create:
13201         return get_errno(epoll_create(arg1));
13202 #endif
13203 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13204     case TARGET_NR_epoll_create1:
13205         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13206 #endif
13207 #if defined(TARGET_NR_epoll_ctl)
13208     case TARGET_NR_epoll_ctl:
13209     {
13210         struct epoll_event ep;
13211         struct epoll_event *epp = 0;
13212         if (arg4) {
13213             if (arg2 != EPOLL_CTL_DEL) {
13214                 struct target_epoll_event *target_ep;
13215                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13216                     return -TARGET_EFAULT;
13217                 }
13218                 ep.events = tswap32(target_ep->events);
13219                 /*
13220                  * The epoll_data_t union is just opaque data to the kernel,
13221                  * so we transfer all 64 bits across and need not worry what
13222                  * actual data type it is.
13223                  */
13224                 ep.data.u64 = tswap64(target_ep->data.u64);
13225                 unlock_user_struct(target_ep, arg4, 0);
13226             }
13227             /*
13228              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13229              * non-null event pointer even though the argument is ignored,
13230              * so pass one whenever the guest supplied it.
13231              */
13232             epp = &ep;
13233         }
13234         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13235     }
13236 #endif
13237 
13238 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13239 #if defined(TARGET_NR_epoll_wait)
13240     case TARGET_NR_epoll_wait:
13241 #endif
13242 #if defined(TARGET_NR_epoll_pwait)
13243     case TARGET_NR_epoll_pwait:
13244 #endif
13245     {
13246         struct target_epoll_event *target_ep;
13247         struct epoll_event *ep;
13248         int epfd = arg1;
13249         int maxevents = arg3;
13250         int timeout = arg4;
13251 
13252         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13253             return -TARGET_EINVAL;
13254         }
13255 
13256         target_ep = lock_user(VERIFY_WRITE, arg2,
13257                               maxevents * sizeof(struct target_epoll_event), 1);
13258         if (!target_ep) {
13259             return -TARGET_EFAULT;
13260         }
13261 
13262         ep = g_try_new(struct epoll_event, maxevents);
13263         if (!ep) {
13264             unlock_user(target_ep, arg2, 0);
13265             return -TARGET_ENOMEM;
13266         }
13267 
13268         switch (num) {
13269 #if defined(TARGET_NR_epoll_pwait)
13270         case TARGET_NR_epoll_pwait:
13271         {
13272             sigset_t *set = NULL;
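            /*
             * arg5/arg6 carry the guest's temporary signal mask; it is
             * converted and tracked by process_sigsuspend_mask() so that
             * the host epoll_pwait() can apply it atomically, and
             * finish_sigsuspend_mask() completes the bookkeeping once the
             * result is known.
             */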
13273 
13274             if (arg5) {
13275                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13276                 if (ret != 0) {
13277                     break;
13278                 }
13279             }
13280 
13281             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13282                                              set, SIGSET_T_SIZE));
13283 
13284             if (set) {
13285                 finish_sigsuspend_mask(ret);
13286             }
13287             break;
13288         }
13289 #endif
13290 #if defined(TARGET_NR_epoll_wait)
13291         case TARGET_NR_epoll_wait:
13292             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13293                                              NULL, 0));
13294             break;
13295 #endif
13296         default:
13297             ret = -TARGET_ENOSYS;
13298         }
13299         if (!is_error(ret)) {
13300             int i;
13301             for (i = 0; i < ret; i++) {
13302                 target_ep[i].events = tswap32(ep[i].events);
13303                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13304             }
13305             unlock_user(target_ep, arg2,
13306                         ret * sizeof(struct target_epoll_event));
13307         } else {
13308             unlock_user(target_ep, arg2, 0);
13309         }
13310         g_free(ep);
13311         return ret;
13312     }
13313 #endif
13314 #endif
13315 #ifdef TARGET_NR_prlimit64
13316     case TARGET_NR_prlimit64:
13317     {
13318         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13319         struct target_rlimit64 *target_rnew, *target_rold;
13320         struct host_rlimit64 rnew, rold, *rnewp = 0;
13321         int resource = target_to_host_resource(arg2);
13322 
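        /*
         * As in the plain setrlimit path, new limits for RLIMIT_AS,
         * RLIMIT_DATA and RLIMIT_STACK are not forwarded to the host,
         * since they would also constrain QEMU's own allocations; for
         * those resources the call degenerates to a read of the old limit.
         */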
13323         if (arg3 && (resource != RLIMIT_AS &&
13324                      resource != RLIMIT_DATA &&
13325                      resource != RLIMIT_STACK)) {
13326             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13327                 return -TARGET_EFAULT;
13328             }
13329             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13330             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13331             unlock_user_struct(target_rnew, arg3, 0);
13332             rnewp = &rnew;
13333         }
13334 
13335         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13336         if (!is_error(ret) && arg4) {
13337             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13338                 return -TARGET_EFAULT;
13339             }
13340             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13341             __put_user(rold.rlim_max, &target_rold->rlim_max);
13342             unlock_user_struct(target_rold, arg4, 1);
13343         }
13344         return ret;
13345     }
13346 #endif
13347 #ifdef TARGET_NR_gethostname
13348     case TARGET_NR_gethostname:
13349     {
13350         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13351         if (name) {
13352             ret = get_errno(gethostname(name, arg2));
13353             unlock_user(name, arg1, arg2);
13354         } else {
13355             ret = -TARGET_EFAULT;
13356         }
13357         return ret;
13358     }
13359 #endif
13360 #ifdef TARGET_NR_atomic_cmpxchg_32
13361     case TARGET_NR_atomic_cmpxchg_32:
13362     {
13363         /* should use start_exclusive from main.c */
13364         abi_ulong mem_value;
13365         if (get_user_u32(mem_value, arg6)) {
13366             target_siginfo_t info;
13367             info.si_signo = SIGSEGV;
13368             info.si_errno = 0;
13369             info.si_code = TARGET_SEGV_MAPERR;
13370             info._sifields._sigfault._addr = arg6;
13371             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13372             ret = 0xdeadbeef;
13373 
13374         }
13375         if (mem_value == arg2)
13376             put_user_u32(arg1, arg6);
13377         return mem_value;
13378     }
13379 #endif
13380 #ifdef TARGET_NR_atomic_barrier
13381     case TARGET_NR_atomic_barrier:
13382         /* Like the kernel implementation and the QEMU ARM barrier,
13383            this is treated as a no-op. */
13384         return 0;
13385 #endif
13386 
13387 #ifdef TARGET_NR_timer_create
13388     case TARGET_NR_timer_create:
13389     {
13390         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13391 
13392         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13393 
13394         int clkid = arg1;
13395         int timer_index = next_free_host_timer();
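        /*
         * Guest-visible timer ids are not raw host timer_t values: the
         * slot index into g_posix_timers is ORed with TIMER_MAGIC (see the
         * put_user() below), and get_timer_id() strips the magic again in
         * the other timer_* cases.
         */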
13396 
13397         if (timer_index < 0) {
13398             ret = -TARGET_EAGAIN;
13399         } else {
13400             timer_t *phtimer = g_posix_timers + timer_index;
13401 
13402             if (arg2) {
13403                 phost_sevp = &host_sevp;
13404                 ret = target_to_host_sigevent(phost_sevp, arg2);
13405                 if (ret != 0) {
13406                     free_host_timer_slot(timer_index);
13407                     return ret;
13408                 }
13409             }
13410 
13411             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13412             if (ret) {
13413                 free_host_timer_slot(timer_index);
13414             } else {
13415                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13416                     timer_delete(*phtimer);
13417                     free_host_timer_slot(timer_index);
13418                     return -TARGET_EFAULT;
13419                 }
13420             }
13421         }
13422         return ret;
13423     }
13424 #endif
13425 
13426 #ifdef TARGET_NR_timer_settime
13427     case TARGET_NR_timer_settime:
13428     {
13429         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13430          * struct itimerspec * old_value */
13431         target_timer_t timerid = get_timer_id(arg1);
13432 
13433         if (timerid < 0) {
13434             ret = timerid;
13435         } else if (arg3 == 0) {
13436             ret = -TARGET_EINVAL;
13437         } else {
13438             timer_t htimer = g_posix_timers[timerid];
13439             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13440 
13441             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13442                 return -TARGET_EFAULT;
13443             }
13444             ret = get_errno(
13445                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13446             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13447                 return -TARGET_EFAULT;
13448             }
13449         }
13450         return ret;
13451     }
13452 #endif
13453 
13454 #ifdef TARGET_NR_timer_settime64
13455     case TARGET_NR_timer_settime64:
13456     {
13457         target_timer_t timerid = get_timer_id(arg1);
13458 
13459         if (timerid < 0) {
13460             ret = timerid;
13461         } else if (arg3 == 0) {
13462             ret = -TARGET_EINVAL;
13463         } else {
13464             timer_t htimer = g_posix_timers[timerid];
13465             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13466 
13467             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13468                 return -TARGET_EFAULT;
13469             }
13470             ret = get_errno(
13471                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13472             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13473                 return -TARGET_EFAULT;
13474             }
13475         }
13476         return ret;
13477     }
13478 #endif
13479 
13480 #ifdef TARGET_NR_timer_gettime
13481     case TARGET_NR_timer_gettime:
13482     {
13483         /* args: timer_t timerid, struct itimerspec *curr_value */
13484         target_timer_t timerid = get_timer_id(arg1);
13485 
13486         if (timerid < 0) {
13487             ret = timerid;
13488         } else if (!arg2) {
13489             ret = -TARGET_EFAULT;
13490         } else {
13491             timer_t htimer = g_posix_timers[timerid];
13492             struct itimerspec hspec;
13493             ret = get_errno(timer_gettime(htimer, &hspec));
13494 
13495             if (host_to_target_itimerspec(arg2, &hspec)) {
13496                 ret = -TARGET_EFAULT;
13497             }
13498         }
13499         return ret;
13500     }
13501 #endif
13502 
13503 #ifdef TARGET_NR_timer_gettime64
13504     case TARGET_NR_timer_gettime64:
13505     {
13506         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13507         target_timer_t timerid = get_timer_id(arg1);
13508 
13509         if (timerid < 0) {
13510             ret = timerid;
13511         } else if (!arg2) {
13512             ret = -TARGET_EFAULT;
13513         } else {
13514             timer_t htimer = g_posix_timers[timerid];
13515             struct itimerspec hspec;
13516             ret = get_errno(timer_gettime(htimer, &hspec));
13517 
13518             if (host_to_target_itimerspec64(arg2, &hspec)) {
13519                 ret = -TARGET_EFAULT;
13520             }
13521         }
13522         return ret;
13523     }
13524 #endif
13525 
13526 #ifdef TARGET_NR_timer_getoverrun
13527     case TARGET_NR_timer_getoverrun:
13528     {
13529         /* args: timer_t timerid */
13530         target_timer_t timerid = get_timer_id(arg1);
13531 
13532         if (timerid < 0) {
13533             ret = timerid;
13534         } else {
13535             timer_t htimer = g_posix_timers[timerid];
13536             ret = get_errno(timer_getoverrun(htimer));
13537         }
13538         return ret;
13539     }
13540 #endif
13541 
13542 #ifdef TARGET_NR_timer_delete
13543     case TARGET_NR_timer_delete:
13544     {
13545         /* args: timer_t timerid */
13546         target_timer_t timerid = get_timer_id(arg1);
13547 
13548         if (timerid < 0) {
13549             ret = timerid;
13550         } else {
13551             timer_t htimer = g_posix_timers[timerid];
13552             ret = get_errno(timer_delete(htimer));
13553             free_host_timer_slot(timerid);
13554         }
13555         return ret;
13556     }
13557 #endif
13558 
13559 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13560     case TARGET_NR_timerfd_create:
13561         ret = get_errno(timerfd_create(arg1,
13562                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13563         if (ret >= 0) {
13564             fd_trans_register(ret, &target_timerfd_trans);
13565         }
13566         return ret;
13567 #endif
13568 
13569 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13570     case TARGET_NR_timerfd_gettime:
13571         {
13572             struct itimerspec its_curr;
13573 
13574             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13575 
13576             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13577                 return -TARGET_EFAULT;
13578             }
13579         }
13580         return ret;
13581 #endif
13582 
13583 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13584     case TARGET_NR_timerfd_gettime64:
13585         {
13586             struct itimerspec its_curr;
13587 
13588             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13589 
13590             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13591                 return -TARGET_EFAULT;
13592             }
13593         }
13594         return ret;
13595 #endif
13596 
13597 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13598     case TARGET_NR_timerfd_settime:
13599         {
13600             struct itimerspec its_new, its_old, *p_new;
13601 
13602             if (arg3) {
13603                 if (target_to_host_itimerspec(&its_new, arg3)) {
13604                     return -TARGET_EFAULT;
13605                 }
13606                 p_new = &its_new;
13607             } else {
13608                 p_new = NULL;
13609             }
13610 
13611             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13612 
13613             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13614                 return -TARGET_EFAULT;
13615             }
13616         }
13617         return ret;
13618 #endif
13619 
13620 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13621     case TARGET_NR_timerfd_settime64:
13622         {
13623             struct itimerspec its_new, its_old, *p_new;
13624 
13625             if (arg3) {
13626                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13627                     return -TARGET_EFAULT;
13628                 }
13629                 p_new = &its_new;
13630             } else {
13631                 p_new = NULL;
13632             }
13633 
13634             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13635 
13636             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13637                 return -TARGET_EFAULT;
13638             }
13639         }
13640         return ret;
13641 #endif
13642 
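    /*
     * ioprio_get/set, setns, unshare and kcmp take only integer
     * arguments, so they are forwarded to the host unchanged.
     */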
13643 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13644     case TARGET_NR_ioprio_get:
13645         return get_errno(ioprio_get(arg1, arg2));
13646 #endif
13647 
13648 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13649     case TARGET_NR_ioprio_set:
13650         return get_errno(ioprio_set(arg1, arg2, arg3));
13651 #endif
13652 
13653 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13654     case TARGET_NR_setns:
13655         return get_errno(setns(arg1, arg2));
13656 #endif
13657 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13658     case TARGET_NR_unshare:
13659         return get_errno(unshare(arg1));
13660 #endif
13661 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13662     case TARGET_NR_kcmp:
13663         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13664 #endif
13665 #ifdef TARGET_NR_swapcontext
13666     case TARGET_NR_swapcontext:
13667         /* PowerPC specific.  */
13668         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13669 #endif
13670 #ifdef TARGET_NR_memfd_create
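    /*
     * The name is a guest string; fd_trans_unregister() drops any
     * translator left over from an earlier descriptor that happened to
     * have the same number.
     */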
13671     case TARGET_NR_memfd_create:
13672         p = lock_user_string(arg1);
13673         if (!p) {
13674             return -TARGET_EFAULT;
13675         }
13676         ret = get_errno(memfd_create(p, arg2));
13677         fd_trans_unregister(ret);
13678         unlock_user(p, arg1, 0);
13679         return ret;
13680 #endif
13681 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13682     case TARGET_NR_membarrier:
13683         return get_errno(membarrier(arg1, arg2));
13684 #endif
13685 
13686 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13687     case TARGET_NR_copy_file_range:
13688         {
13689             loff_t inoff, outoff;
13690             loff_t *pinoff = NULL, *poutoff = NULL;
13691 
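            /*
             * The input/output offsets are optional 64-bit values in
             * guest memory: copy them in when present and write the
             * updated offsets back only if the host call made progress.
             */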
13692             if (arg2) {
13693                 if (get_user_u64(inoff, arg2)) {
13694                     return -TARGET_EFAULT;
13695                 }
13696                 pinoff = &inoff;
13697             }
13698             if (arg4) {
13699                 if (get_user_u64(outoff, arg4)) {
13700                     return -TARGET_EFAULT;
13701                 }
13702                 poutoff = &outoff;
13703             }
13704             /* Do not sign-extend the count parameter. */
13705             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13706                                                  (abi_ulong)arg5, arg6));
13707             if (!is_error(ret) && ret > 0) {
13708                 if (arg2) {
13709                     if (put_user_u64(inoff, arg2)) {
13710                         return -TARGET_EFAULT;
13711                     }
13712                 }
13713                 if (arg4) {
13714                     if (put_user_u64(outoff, arg4)) {
13715                         return -TARGET_EFAULT;
13716                     }
13717                 }
13718             }
13719         }
13720         return ret;
13721 #endif
13722 
13723 #if defined(TARGET_NR_pivot_root)
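    /*
     * Both arguments are guest path strings; if either cannot be
     * locked in guest memory the call fails with -TARGET_EFAULT.
     */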
13724     case TARGET_NR_pivot_root:
13725         {
13726             void *p2;
13727             p = lock_user_string(arg1); /* new_root */
13728             p2 = lock_user_string(arg2); /* put_old */
13729             if (!p || !p2) {
13730                 ret = -TARGET_EFAULT;
13731             } else {
13732                 ret = get_errno(pivot_root(p, p2));
13733             }
13734             unlock_user(p2, arg2, 0);
13735             unlock_user(p, arg1, 0);
13736         }
13737         return ret;
13738 #endif
13739 
13740 #if defined(TARGET_NR_riscv_hwprobe)
13741     case TARGET_NR_riscv_hwprobe:
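        /* RISC-V specific.  */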
13742         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13743 #endif
13744 
13745     default:
13746         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13747         return -TARGET_ENOSYS;
13748     }
13749     return ret;
13750 }
13751 
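/*
 * Entry point for system calls coming from the per-architecture
 * cpu_loop() code: log the call when -strace is enabled, notify the
 * syscall tracing hooks, and hand off to do_syscall1() for the actual
 * work.
 */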
13752 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13753                     abi_long arg2, abi_long arg3, abi_long arg4,
13754                     abi_long arg5, abi_long arg6, abi_long arg7,
13755                     abi_long arg8)
13756 {
13757     CPUState *cpu = env_cpu(cpu_env);
13758     abi_long ret;
13759 
13760 #ifdef DEBUG_ERESTARTSYS
13761     /* Debug-only code for exercising the syscall-restart code paths
13762      * in the per-architecture cpu main loops: restart every syscall
13763      * the guest makes once before letting it through.
13764      */
13765     {
13766         static bool flag;
13767         flag = !flag;
13768         if (flag) {
13769             return -QEMU_ERESTARTSYS;
13770         }
13771     }
13772 #endif
13773 
13774     record_syscall_start(cpu, num, arg1,
13775                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13776 
13777     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13778         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13779     }
13780 
13781     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13782                       arg5, arg6, arg7, arg8);
13783 
13784     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13785         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13786                           arg3, arg4, arg5, arg6);
13787     }
13788 
13789     record_syscall_return(cpu, num, ret);
13790     return ret;
13791 }
13792